From 0429c19e1f734693791ab88d2d2de0f2d231afbe Mon Sep 17 00:00:00 2001
From: Alexander Aghili <alexander.w.aghili@gmail.com>
Date: Mon, 10 Mar 2025 16:39:56 -0700
Subject: [PATCH] update for asgn4

---
 .gitignore                         |   2 +
 README.md                          | 162 ++++++----
 __main__.py                        | 171 ++++++++++
 lamport-example.png                | Bin 0 -> 28246 bytes
 pkg/.gitignore                     | 175 +++++++++++
 pkg/README.md                      |   3 +
 pkg/cse138_asgn3_tests/__main__.py |   1 +
 pkg/cse138_asgn3_tests/tests       |   1 +
 pkg/cse138_asgn3_tests/utils       |   1 +
 pkg/pyproject.toml                 |  12 +
 pkg/uv.lock                        |  77 +++++
 tests/basic/basic.py               |  42 +++
 tests/hello.py                     |  44 +++
 tests/helper.py                    | 117 +++++++
 utils/containers.py                | 487 +++++++++++++++++++++++++++++
 utils/kvs_api.py                   | 112 +++++++
 utils/testcase.py                  |  24 ++
 utils/util.py                      |  45 +++
 18 files changed, 1418 insertions(+), 58 deletions(-)
 create mode 100644 .gitignore
 create mode 100755 __main__.py
 create mode 100644 lamport-example.png
 create mode 100644 pkg/.gitignore
 create mode 100644 pkg/README.md
 create mode 120000 pkg/cse138_asgn3_tests/__main__.py
 create mode 120000 pkg/cse138_asgn3_tests/tests
 create mode 120000 pkg/cse138_asgn3_tests/utils
 create mode 100644 pkg/pyproject.toml
 create mode 100644 pkg/uv.lock
 create mode 100644 tests/basic/basic.py
 create mode 100644 tests/hello.py
 create mode 100644 tests/helper.py
 create mode 100644 utils/containers.py
 create mode 100644 utils/kvs_api.py
 create mode 100644 utils/testcase.py
 create mode 100644 utils/util.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..372c13e
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+__pycache__/
+
diff --git a/README.md b/README.md
index f38588f..40f0f6e 100644
--- a/README.md
+++ b/README.md
@@ -1,93 +1,139 @@
-# cse138-assignment-4-test-suite
-
+# Distributed Systems Assignment 4 Community Test Suite
 
+## Description
+This is a test suite for Assignment 4. It is designed to make it easy both to run the existing tests and to add new ones. Community contributions are encouraged.
 
-## Getting started
+## How to use
 
-To make it easy for you to get started with GitLab, here's a list of recommended next steps.
+### Downloading
+To use this program, it is recommended to either clone the repository or add it via the package manager. In the root directory of your project (the one containing your Dockerfile), run:
+```bash
+git clone git@git.ucsc.edu:awaghili/cse138-assignment-4-test-suite.git
+```
 
-Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
+#### UV Package Manager Users
 
-## Add your files
+Alternatively, if you use the `uv` package manager, you can use `uvx` to run the tests without manually downloading or installing anything.
+`uv` is generally fast enough that re-downloading the package and its dependencies from git every time you run the command is not a problem.
 
-- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
-- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
+Instructions for installing `uv` [here](https://docs.astral.sh/uv/getting-started/installation/).
 
+```bash
+uvx --force-reinstall --from git+https://git.ucsc.edu/awaghili/cse138-assignment-4-test-suite#subdirectory=pkg cse138-asgn4-tests
+# You can also pass arguments as normal:
+uvx --force-reinstall --from git+https://git.ucsc.edu/awaghili/cse138-assignment-4-test-suite#subdirectory=pkg cse138-asgn4-tests --no-build
+# Or when the repository is cloned locally:
+uvx --force-reinstall --from /path/to/repo/cse138-assignment-4-test-suite/pkg cse138-asgn4-tests
 ```
-cd existing_repo
-git remote add origin https://git.ucsc.edu/awaghili/cse138-assignment-4-test-suite.git
-git branch -M main
-git push -uf origin main
+
+### Running Tests
+Running all tests is simple. Simply run (just like assignment 2):
+```bash
+python3 -m cse138-assignment-4-test-suite
 ```
 
-## Integrate with your tools
+The tests are segmented into categories:
+- basic
+- availability
+- eventual consistency
+- causal consistency
 
-- [ ] [Set up project integrations](https://git.ucsc.edu/awaghili/cse138-assignment-4-test-suite/-/settings/integrations)
+To run a single category of tests, pass the category name as the filter (the positional argument described under Command Line Arguments below).
 
-## Collaborate with your team
+To run an individual test, pass the test's name (or any substring of it) as the filter; see the examples below.
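+For example (the filter is a plain substring match on test names, and `hello_cluster` is one of the bundled tests):
+```bash
+# run only the tests whose names contain "basic" (i.e. the basic category)
+python3 -m cse138-assignment-4-test-suite basic
+# run a single test by its exact name
+python3 -m cse138-assignment-4-test-suite hello_cluster
+```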
 
-- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
-- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
-- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
-- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
-- [ ] [Set auto-merge](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html)
+### Debugging Tests
+Each test run is saved in a 
+```bash
+./test_results/YYYY_MM_DD_HH:MM:SS
+```
+directory. The directory has the following structure:
+- summary.log
+- basic/
+- availability/
+- eventual_consistency/
+- causal_consistency/
 
-## Test and Deploy
+The summary contains the overall result of the tests. Each category's directory contains a directory for each test in that category.
+Each test's directory holds a result.txt, a replay.txt (what messages were sent when, to where, from whom, etc.), and logs from each server.
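+For example, a run's output might look roughly like this (illustrative only; the exact file names come from the test harness):
+```
+test_results/2025_03_10_16:39:56/
+├── summary.log
+├── basic/
+│   └── basic_kv_1/
+│       ├── result.txt
+│       ├── replay.txt
+│       └── <server logs>
+└── availability/
+    └── ...
+```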
 
-Use the built-in continuous integration in GitLab.
+### Command Line Arguments
 
-- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/)
-- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
-- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
-- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
-- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
+By default, the image will be built on each invocation of the test script. For people with slow builds, this can be skipped
+with the `--no-build` flag.
 
-***
+The script also stops after the first failure by default. To run all tests instead, use the `--run-all` flag (**currently broken** due to issues with the cleanup code).
 
-# Editing this README
+To filter tests by name, add the name or a substring of the name as the positional argument.
 
-When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thanks to [makeareadme.com](https://www.makeareadme.com/) for this template.
+The main script accepts the following command line arguments:
 
-## Suggestions for a good README
+```
+usage: python3 -m cse138-assignment-4-test-suite [-h] [--no-build] [--run-all] [filter]
+
+positional arguments:
+  filter      filter tests by name (optional)
+
+options:
+  -h, --help  show this help message and exit
+  --no-build  skip building the container image
+  --run-all   run all tests instead of stopping at first failure. Note: this is currently broken due to issues with cleanup code
+  --num-threads <NUM_THREADS>
+              number of threads to run tests in
+  --port-offset <PORT_OFFSET>
+              port offset for each test (default: 1000)
+```
 
-Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
+#### Running tests in parallel
 
-## Name
-Choose a self-explaining name for your project.
+The number of threads to run tests in can be set with the `--num-threads` flag (default: 1). Setting `--num-threads` greater than 1 causes all
+tests to be run, and there is no way to cancel ongoing tests after the first failure.
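+A hypothetical invocation (assuming the image was already built on a previous run, hence `--no-build`):
+```bash
+# run every test across 4 worker threads, spacing each test's external ports 2000 apart
+python3 -m cse138-assignment-4-test-suite --no-build --num-threads 4 --port-offset 2000
+```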
 
-## Description
-Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
+Note that using too many threads can cause too many Docker networks to be created, which will result in tests failing with
+the following error message:
 
-## Badges
-On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
+```
+Error response from daemon: all predefined address pools have been fully subnetted
+```
 
-## Visuals
-Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
+I believe this is because Docker's default options give each network a large subnet, so only a limited number of networks are available
+(I think 32 by default). If this happens, you can change the Docker config (one possible change is sketched below) or just lower the number of threads until the error goes away.
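+One possible change (unverified; treat it as a sketch) is to hand out smaller per-network subnets via `default-address-pools` in `/etc/docker/daemon.json`, then restart the Docker daemon:
+```json
+{
+  "default-address-pools": [
+    { "base": "10.200.0.0/16", "size": 24 }
+  ]
+}
+```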
 
-## Installation
-Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
+## Community Contributions 
+I will not be able to do all of the extensive testing on my own, so I encourage all teams to use this as a core testing resource, both by running the suite and by contributing to it. To keep testing correct and orderly, contributions will go through a pull-request review process, and a few components are required so that each test is easy to follow and use. The test requirements are listed below. While this is more tedious than cranking out a bunch of scuffed tests, it makes it clear what each test is doing and what it is for. (If you really don't want to do the steps, open a PR with just your code; if someone else adds the missing context, it may still be approved.)
 
-## Usage
-Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
+### Test Requirements
 
-## Support
-Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
+#### Basics
+Tests must have the following name format: `category_type_#`, e.g. `availability_advanced_2`.
 
-## Roadmap
-If you have ideas for releases in the future, it is a good idea to list them in the README.
+#### Test Description
+Each test requires a short description that describes the overall purpose of the test. Include in this description any special behaviors, clients, servers, and partitions. Keep it brief: three to four sentences should make it clear what the test does.
 
-## Contributing
-State if you are open to contributions and what your requirements are for accepting them.
+#### Explicit Violation 
+For non-basic tests, describe what explicit violation you are looking for in the test (a.k.a. the finite witness). Note: for the liveness property of eventual consistency, the time limit is 10 seconds, after which point a finite witness of a violation can be shown. If applicable, point out where you expect this violation to occur and how it violates the property described. Then describe the expected behavior.
 
-For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
+#### Lamport Diagram 
+Include a basic Lamport diagram that illustrates the test. It does not have to mirror the test exactly (e.g., if you have 100 clients, you don't need to show all 100), just a diagram that gets the point across. This, again, can be very basic. Ensure that the happens-before relation is accurately modeled. When showing partitions, outline the details of the partition above the partition symbol in the diagram. An example is shown below:
+![Example Lamport Diagram](./lamport-example.png)
+Note: you can use a hand drawing, Google Drawings, or whatever floats your boat as long as it's readable. I used LucidChart here.
 
-You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
+#### Code Comments and Assertions
+Assertions are critical for showing what behavior the system expects. Ensure that the assertions check the described properties rather than behavior specific to your own implementation. Code comments are not required, but should be used to help guide the reader through the test. A hypothetical sketch of a contributed test is shown below.
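+This sketch is modeled on `tests/basic/basic.py`; the `put`/`get` calls on `KVSMultiClient` and the `return True` convention are assumptions made for illustration, so check `tests/helper.py` and `utils/testcase.py` for the real API before copying it.
+```python
+from ...utils.containers import ClusterConductor
+from ...utils.testcase import TestCase
+from ...utils.util import Logger
+from ..helper import KVSMultiClient, KVSTestFixture
+
+def availability_advanced_2(conductor: ClusterConductor, dir, log: Logger):
+    # spin up a 2-node cluster and a client, as in tests/basic/basic.py
+    with KVSTestFixture(conductor, dir, log, node_count=2) as fx:
+        c = KVSMultiClient(fx.clients, "client", log)
+        # assumed client API: write x through node 0, then read it back through node 1
+        r = c.put(0, "x", "1")
+        assert r.ok, "PUT on an available node should succeed (a property, not an implementation detail)"
+        r = c.get(1, "x")
+        assert r.ok, "GET on the other node should succeed"
+    return True  # assumed to mean "pass"; the actual convention is defined by utils/testcase.py
+
+# register the test so the runner can pick it up
+AVAILABILITY_TESTS = [TestCase("availability_advanced_2", availability_advanced_2)]
+```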
 
-## Authors and acknowledgment
-Show your appreciation to those who have contributed to the project.
+### Pull Request
+#### Tests
+In order to make the collaborative environment work, a pull request system will be used. First, create a test (or several) and add the appropriate documents and adjustments for each test directory:
+- README.md (with the Test Description, Explicit Violation, and Lamport Diagram as an image)
+- test_name.py (with the test itself)
+- the test added to its category
+Prefix the PR title with "ADD TEST(s)", followed by a descriptive title. Example:
+[Example Merge Request](https://git.ucsc.edu/awaghili/cse138-assignment-3-test-suite/-/merge_requests/1)
 
-## License
-For open source projects, say how it is licensed.
+#### Other changes
+If you want to suggest a change other than a test, please create a PR (or an issue beforehand if you aren't sure it'll be an improvement) that describes what was wrong (or could be improved), why your suggested fix will work better, and any effect it will have on existing tests.
 
-## Project status
-If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
diff --git a/__main__.py b/__main__.py
new file mode 100755
index 0000000..e73bccf
--- /dev/null
+++ b/__main__.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python3
+
+import os 
+import sys
+import json
+import subprocess
+import time
+import datetime
+import requests
+from typing import List, Dict, Any, Optional
+import argparse
+from multiprocessing.pool import ThreadPool
+
+from .utils.containers import ContainerBuilder, ClusterConductor, CONTAINER_ENGINE
+from .utils.testcase import TestCase
+from .utils.util import global_logger, log, Logger
+import logging
+
+# TODO: for parallel test runs, use generated group id
+CONTAINER_IMAGE_ID = "kvstore-hw3-test"
+TEST_GROUP_ID = "hw3"
+
+class TestRunner:
+    def __init__(self, project_dir: str, debug_output_dir: str):
+        self.project_dir = project_dir
+        self.debug_output_dir = debug_output_dir
+        # builder to build container image
+        self.builder = ContainerBuilder(
+            project_dir=project_dir, image_id=CONTAINER_IMAGE_ID
+        )
+        # network manager to mess with container networking
+        self.conductor = ClusterConductor(
+            group_id=TEST_GROUP_ID,
+            base_image=CONTAINER_IMAGE_ID,
+            external_port_base=9000,
+            log=global_logger()
+        )
+
+    def prepare_environment(self, build: bool = True) -> None:
+        log("\n-- prepare_environment --")
+        # build the container image
+        if build:
+            self.builder.build_image(log=global_logger())
+        else:
+            log("Skipping build")
+
+        # aggressively clean up anything kvs-related
+        # NOTE: this disallows parallel run processes, so turn it off for that
+        self.conductor.cleanup_hanging(group_only=False)
+
+    def cleanup_environment(self) -> None:
+        log("\n-- cleanup_environment --")
+        # destroy the cluster
+        self.conductor.destroy_cluster()
+        # aggressively clean up anything kvs-related
+        self.conductor.cleanup_hanging(group_only=True)
+
+
+timestamp = datetime.datetime.now().strftime("test_results/%Y_%m_%d_%H:%M:%S")
+DEBUG_OUTPUT_DIR = os.path.join(os.getcwd(), timestamp)
+os.makedirs(DEBUG_OUTPUT_DIR, exist_ok=True)
+log(f"Debug output will be saved in: {DEBUG_OUTPUT_DIR}")
+
+
+def create_test_dir(base_dir: str, test_set: str, test_name: str) -> str:
+    test_set_dir = os.path.join(base_dir, test_set)
+    os.makedirs(test_set_dir, exist_ok=True)
+    test_dir = os.path.join(test_set_dir, test_name)
+    os.makedirs(test_dir, exist_ok=True)
+    return test_dir
+
+
+"""
+TEST SET: this lists the test cases to run
+add more tests by appending to this list
+"""
+from .tests.hello import hello_cluster
+from .tests.basic.basic import BASIC_TESTS
+
+TEST_SET = []
+TEST_SET.append(TestCase("hello_cluster", hello_cluster))
+TEST_SET.extend(BASIC_TESTS)
+
+# set to True to stop at the first failing test
+FAIL_FAST = True
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--no-build",
+        action="store_false",
+        dest="build",
+        help="skip building the container image",
+    )
+    parser.add_argument(
+        "--run-all",
+        action="store_true",
+        help="run all tests instead of stopping at first failure. Note: this is currently broken due to issues with cleanup code",
+    )
+    parser.add_argument("--num-threads", type=int, default=1, help="number of threads to run tests in")
+    parser.add_argument("--port-offset", type=int, default=1000, help="port offset for each test")
+    parser.add_argument("filter", nargs="?", help="filter tests by name")
+    args = parser.parse_args()
+
+    project_dir = os.getcwd()
+    runner = TestRunner(project_dir=project_dir, debug_output_dir=DEBUG_OUTPUT_DIR)
+    runner.prepare_environment(build=args.build)
+
+    if args.filter is not None:
+        test_filter = args.filter
+        log(f"filtering tests by: {test_filter}")
+        global TEST_SET
+        TEST_SET = [t for t in TEST_SET if test_filter in t.name]
+
+    if args.run_all:
+        global FAIL_FAST
+        FAIL_FAST = False
+
+    log("\n== RUNNING TESTS ==")
+    run_tests = []
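+    # Run one test case with its own ClusterConductor (using a per-test port range) and
+    # a per-test log file, record the result, and return its score.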
+    def run_test(test: TestCase, gid: str, port_offset: int):
+        log(f"\n== TEST: [{test.name}] ==\n")
+        test_set_name = test.name.lower().split('_')[0]
+        test_dir = create_test_dir(DEBUG_OUTPUT_DIR, test_set_name, test.name)
+        log_file_path = os.path.join(test_dir, f"{test.name}.log")
+
+        with open(log_file_path, "w") as log_file:
+            log_file.write(f"Logs for test {test.name}\n")
+
+            logger = Logger(files=(log_file, sys.stderr))
+            conductor = ClusterConductor(
+                group_id=gid,
+                base_image=CONTAINER_IMAGE_ID,
+                external_port_base=9000 + port_offset,
+                log=logger
+            )
+            score, reason = test.execute(conductor, test_dir, log=logger)
+
+            # Save logs or any other output to test_dir
+            run_tests.append(test)
+            logger("\n")
+            if score:
+                logger(f"✓ PASSED {test.name}")
+            else:
+                logger(f"✗ FAILED {test.name}: {reason}")
+            return score
+
+    if args.num_threads == 1:
+        print("Running tests sequentially")
+        for test in TEST_SET:
+            if not run_test(test, gid="0", port_offset=0):
+                if not args.run_all:
+                    print("--run-all not set, stopping at first failure")
+                    break
+    else:
+        print(f"Running tests in a threadpool ({args.num_threads} threads)")
+        pool = ThreadPool(processes=args.num_threads)
+        pool.map(lambda a: run_test(a[1], gid=f"{a[0]}", port_offset=a[0]*args.port_offset), enumerate(TEST_SET))
+
+    summary_log = os.path.join(DEBUG_OUTPUT_DIR, "summary.log")
+    with open(summary_log, "w") as log_file:
+        logger = Logger(files=(log_file, sys.stderr))
+        logger("\n== TEST SUMMARY ==\n")
+        for test in run_tests:
+            logger(f"  - {test.name}: {'✓' if test.score else '✗'}\n")
+
+    runner.cleanup_environment()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lamport-example.png b/lamport-example.png
new file mode 100644
index 0000000000000000000000000000000000000000..44ab81b64ac03684fcc78edcd85dba5c433601b5
GIT binary patch
literal 28246
zcmeGEc{tQv_y>-+s2&MVRLWXUN)cLwF_t_frHm9~>{KHAlAW32kxG^lim^nDWh~jZ
zAxVmC3E9^%wy_PyFvIsftLgLk`}e!9@Avw2UC;G+zt1`Mx$oDx&pG$@p69n9n%sK@
z_io#^jr;nwE4R08+wHn-+b)~Gb^>pbYCl4O{~f(|?Y8!|Z9XTrZF~IRwry*`tH%@D
zwz;3(wr$E{+cu@Q+qMZfBg=J_fri9eH}y0&H#cYJ=Erdpb@dHp71d#{-@*f*o8Gfk
zQrC)qpX%u5la-UdPA0dtcig;fys|><>mLOEMMg$mxpD=darp3ILqo%&qN4Tn^{J_;
z)YR0UKYuPQEjc<me);m{$&)8CGBWNyPj22eih38XZ+!1X*qedDVLju!1;u3}W7xpp
zP#<`Z@8hSVW7yDF(Z%H$FaN;g)J#O+i{O{9?3_LOLr>=d(`>(cL-WeEXS+nM0S}zc
z*9_gZZId}p{cpQhs+tGza?hjd+8TQ%cL?qH>-6*VRMBnQ?0v6aQP=b8!S_GBJYrOs
zZK-j^e*Zx|`xaGPUsW}6bF<j)-x!Wt4`AYMVU;a0n5@DxZ;vZU_BVXz1nB+0=-*=)
z#Mhcas2RFj>U{1nlGx&da;TP>3E@aNUNdpcSq-y+?QlkAHNtc82G0gxSv(RyEW$aP
z;i+<X*3!A@6Y?LR*I6aLzV;?Osb4iRwgOy@dANQs<t2lFQwa`hIJQT`H%oAXG&qxY
z-#Zpp;!tgiW|Ep-DnA=kL4Z3IPG8W(z(3Z_<$w;3@8G<u8PN7>Xkd0vka~aE#>UQ+
zk>=5%Dts3&-^(y6ZA`xzy0Oz)DXS!BDFiZmWKA9D53pBS(vdzZGnb86*;vf68+Cog
zU}UqTLbyX8!zJP97}J$t-d92~D}T;pG6maKU$ea{m<Tn$A6^@CjVZp9LRW~VxA#zm
zu&GQ{tGlP{Ij3C=&Nh5PFg5A{(*%fsh=1F1#X}(tCIeO=^AkS47^&qKD^D3SAW#IN
zhiCNcjfAG$iqfebZEfjebmZhJKYG7QkdSVVt25%|-1$j{3NRr2dUY1*d1l4>FY{Hn
zSzAm%MNm})1AQmCK@nl$Vd?%KI*(0Hn@`}jF?oW_g_VuU4|Z2Zf)7k`GT%DRg{?b|
z8}_+dPCkuh5}EU;YVr6o`kw+p1@io%_Z0^7)g-h`_<D><=FP_A45TSd)*Wgq=Ivc?
zqcxe#RX9v(%dH5wbJOQYYAiW=Ryv%j3Bc;aS3Q|;Ut|K0AWY8+`jSet45HUBF=x>&
zC@_ND80YC(Vd{YsJf7H=n}2FbY`tgMkqJ(z{g9R`F9IdQ<OLhDVsepEVkY8pBsquC
zt8efj{7T*erXY^|&AK(xoC%bm*^>SZ*)_SDC)cbQta0ZU_g-xD)X43HZ!R#W&W*v0
zsDlxn8-?%9dikHx)C<skgn~e}aDWLJ+x_HQ2n-xF%YaUB4E!I_fr2Lvd+epKSUBZd
zgu_!Pxb8u*#T7$Y=6t)QXa9b%82>ps?1}AB#$FlppH|?DFDkzwD8_)VMng#9N3>N}
zfq9`ya4BOf(Q|revD|DTShX;eu12jxpKFhOQhyV$!UDHMCK?9{z1_;g5Ju2s6}Gx|
z{;Ik!Yejt}4BWy;OdN;zF=zW!d+wTnthk^;pc~=eWS0wy3d{!&u)Fj7I+K>VwpK5u
z2sypgv3`@1KLyp{Oa%H!WnYd}OpNB>1@`kYs4nbMGF({us_X2+?1fw}Eq0pC?~j7r
zV;&QOp;>lQi&EV2<a@{V($`ddr}dtV$~k@|2E#Vav(t-D?FQHX6J^TO=Us;M%fU@2
za_wi(-VJwY{!9pG!1AeI=52wg-X@dd-2)K0{x$Ps-vpRbNHiwqV7cBGQ~vpfxnhYG
zfe}@_ms!)c5<UKQg&)ozTnTv=$i#aw&p;mT<>%uQds8gIJX6dk{(%|ZykPF(<X@Y%
z5{kSwvz9AR?s-be1Gu=2o>WcCzdi-sUW8HecCwJ0J>@fqPcCEg&Jq?cwa&Sw;=kHQ
z{!OUAxANXF-s#_JPBW4G&E;5aJInJ@n?GWk5EWk!4H63WdT|u-3#&!>NA2*7`_&ZA
z5o&uvQ7TyBU6iVl8NwP~39DY><P_}K5H<0O!B(kx!tEclv^oU@BZ+IsH^pmnCCkBP
zX*xwx^$YK0!)+yC*+|rP!df8jGU2#-iV<gX=A?hPr=Rm@;?Tzqim2ZyO_);JyzX9N
z|5|GQNiU<#%C1J&laW>!<?NI4W>LEFVuqfxVz#(z6b=PR+~mV<&~$&7PrcQ()+Dol
zf~F;?VN;|(+3PJFr<Q9wgNsN~Z<?#=@a!f|Av&)&5#r%0HwlTUcL)Z=Sc}{ZY%k%Q
zKgNo(zWC0mH?nJ_v}$VOn_wyC_viO(oMBwXi>uO`p0)Qk!ZS#-(@P&cumC*_N5S03
zx|+h61#<74%fkDZ@3ttMRWxNYn4<BD+B`^@%l}@hIQ@>OoQ|ru!d2!FeasfcK3tfN
zA;u0F;%`VV`hWhu2J0dC+$C*7RCT7H5RZ>_hP{!`li>lQi)smrm+RZHd-5Ipj4E8|
zf-Q!>Hyp0Ny|NjJZOLi*IUJ0kO+`SAspKTTQ)uiOF%YYBY9?D}FE{#X=b004hmh-k
z-dp(VUX=sR`qP6D-Og-ek8Ebh6mQmT5CL(z@{DRk{I+y+khsk)Cs<U&cf?g`54W^N
zr!O6QR2|GKRzjxIjcOaJ`?X?b#^MWBUn!lYTpq6YswPx<5>~M{Wb`x{TE(?V=%0_E
zlsIC4Z3g@9>Da8lS>(FN?W?@`Np2lVCoWrKi>=JvJfVD=5~sM18VV#eMkc9e10WX9
zH4VmEubC-Ae9jLxecULhu?ukLUBx(oDp$+x-*=CA8TU7UhB)pJf2rNC&;_;E?cx~K
z$X}KU=HQ+>0VvXPb%>N&j{MyLE>6K;*Uze*S}G>_4w5!8Q7xaE5@saa09J{6I^7nW
z7k-W|YIfe83vPIL>2rgj<L|i;e=kYY{^yd`<>dV<=S9Q0G%ZE<TK~188L-=(kNkU4
z_DRgLnh^ZFpDZF@3v2R={G_OBA5R7{q*#Jqqo1Qhui4<luUg4q4x*lHO=v^6yZ^9f
zsF;Dc&25RA=fiQ+sm(gKb2&RP<~I2)By<na-3t@)J!P$+$Hziio2EvF4tmXDbH&8i
z!65O{YQX}ZB9s4C%J6T0o-@<ux2-}GF}tr~<&K(S?a9u`u7a=9Tn9Y-Zm9lJin<(d
z=;DLr2hXjHrhlimbJ$g(DUj>8LEP62K;2JOKpEuSQErc}OX95ZDaMNqjfPXlt1=xk
z&q+1z+Fr1%-J=QD*G_UxzM&5qokKZDe8@ni+|UKJMB+OZl`@uj$9i%z<HC$VPIam2
zVe6T(m#|knVOYNwS202mvUA#DEFF3ES1xi=88dnJRc({4Hz}v=iy)_waFMH-os8kw
zMRiGu@>)q0I-dx^$|b|GZ&V!h8XNCf4%c2a(^n8Fa9#YGv7Eab#d#alb^?veZCJPq
zEX6}f>wN4x$?&X;>9Icg{g1u?OK;8%;a47dQS?1AQG*Tzk~4`0MKRKBLn+Ax*c@Nc
zszk6+v;w5b>0cB8_^bKJw?wQL=K1$j@sa??>VfCR>OwB_*-4|Q?9KZ!**EPq0(M{I
z6P{ZRUy0edatCt~=2`Y;QZ>?|CT{l!!}$lr(o%;iAAT9;8n{`~FX?q_G$KhOp2KtI
zU9`V&UgFfnnsT+g2XT!_jyVCB0j9LEV-=wAC>?+=7cXq3<HMMiK*3-R=_&Ej<(1|J
zNyR-PN0wtP`cvo3OJ*9Y4MHj$v0kPgy{T7tWmo(1-3jvnSH-=&YQ|HpEGkc%7*$-N
zYM|f1V*5gYUm-q^q6erEJgg<!pJcc!*5EExKV2JSqP6_2SUP-eXum_~wfSeoMB5ra
z$7<WP-?Eu0Q~T_CC);O)94R&_-InVaX8dT=Ad>XQRO=pp$3Ai664y%Ae>+n3c#}8!
z`kb)pze2Aq9G%KtB=$_d4+T`Kh|ll$sy9i${M7WP<0+2(Xv8O3wSpqJE3q=Lfyf7(
zBWoxLrF3{eR9u!k(RkW`2iSc#@=QQj#06XlujnsDaT{H!8b7ONsBm{Jl3CN#*|ztg
zqw+QFiCyjn(>miqr1mjyv-6{*SRw3m+BCGeza>P;!S}rGFP*&GptzWx&Rk(Wx%3zd
ztl4M7;pR*ZDp$*VKRh)Os|Lr_{l~cr*AxbOYt}Oy`G3p`2zIPR=7xv#CZKpj4wh%4
z678VP-zHUVLOLMMy;?t0-CB!)6GPkfM5)7x$vM*IxLRcuN~5#4>w4>l0pd{aNc}jm
zzUyvOWykrQq-wzg(Hlp4s!9hra;N6t7-c=j$2mD3Mg^|<{lBt`aeSJ1j;yE)IkOF2
zIRKLxQ5NoXZ*U#eB>Ar9EaJ86e0|2ieM5W*O8qXG)*Zdi!MxWgAST$PD-S){*Y!12
z`=XRY#jlZkeAyRWa7{LEvW%!uwkdE3-<GNg)hMruskgeV$HN)>YeV!5^oC1RWvx`b
zQvf9}X8?{c6E=;5J9N9}8iH_;+*!9wj{u>lJD6X^)NAQ_HgF$WyVu#gXgH=Gxu&Ku
zC?*<%l6WEE$dfBv&ADQm=x^pD7S0R4W9W`sy3K{+(K!p)1AzU(b7S7!#BHH(P!W>1
zvAk1rgjgAKhhbXp?#A{44$`w^W0c*_GKNoK<BWGWoiKBB%)M9kA3mm0<GJUxh}eHi
z3@_&Fe2AMs0)k1V*`uZppS(u(q5!wS#(6sH@43qOXDpj(=wt<QwhC|X@lhfsHm=xQ
z;|ycuAzVWA)zU{8&4~jH%TB#YXW|2&li%EaB_=<^6T6ha;(`okD5<6>(LE>E&vbP^
z^Nsq-EwGqClyywxy5F04CnoKkvRsR$-pATXk09d&_(J1q%9sy}A-)n^@54+?A0B2n
zE=hGk;Zo#$yLFi!`2KS&Td-VlEk=yn-L*|qj!9KOlxLIs-(LkR3`=-qN+xz?SSK<p
zLTQ<v>P)iaH5D*ZfCzI(m(Fo1MJ|{3LFP<4fze%mHFWK=rChSP&v>_+)X^uMqv|s|
zmbt<-%%yXT`%g4_UiZ)C8WUyO)J+;8;2;vua8P7Xbqd4WE&L{%Yd5Jl^r;3L#Pl|d
zOiDa3b%tn>e@mF=s$-6k(Z^%vDZ`SjJnsz{6ORk@lb?Je|9yWwg#R?db13}g*(6x)
zzmmBul>Ufe2^-z#GFQNC^Aut^N$P>5C*R<a2rIRbW7n#g2we5#&b<<wTje7#{?w3Z
zDN94*;yeAI!w?_us|-bfleD$7+N#k~jDb{Vu3(X&W35=@zu!jbFj!nsa9NU)S=+nS
zd{;I5xH;B8<D3)=`SEt8arXg&UDv?}Sbhdrb@+al+=@7J4KInUp7^<OSjfs)<KMfg
zUMufl=y*t*U!sHafBm21YW%`4O!VjLGQ^G@_(}fptKWZ0)=w>WqcK8ihsM7Q3C<6?
z2qj4P8z|sv@6FmLGhC(Ckw;a=UNQ5b@lA~+t?_LiSURcDML3U{N+tPVNK3(8Miqf!
z3|;FP7^oxTx%m!EO_KlXn*XP5;v*yapdMnS!7IyKDR=h~=#7jub0r<-Ty?W*Yqn#0
zjQ#h&4_QlQteae!`HTQhuA`m%bQ6ALVsw6_3Qzp0&4BS_Nux`@Y3$%E1>5hc_s@+B
zSD2nqzF^n<Y)_uu?+P<WCOF0Q1;Z^FgL@xcur|Y$_zK???oY@)c~KaMXD}lmHm>C|
zz4jKAf#&C-J`}<T&WvlI@W6fs_CTB_Z^F@eKy7*%y8cXU)3;MitS?G+vhv1yz$`=X
z*uDdvOB+iCgmk~{-q#pxUd2q;s74CHAg|tEXT1WVL$e@L)0HD{+!*hmD3W@8&D788
z5Op{#KN3}Tm$4bpIBPwJY7XHmDm5i26so1z{EKde#HsLY(r=-UKJp<CDu*kmlA|f`
zG7C|-B#wfso=a6*D__tYOcYCttPk&=zApEX@5}BTg%+^3I<5JPnQ6>wO-wFX0hOw@
zKv`Ofr+AudL`HVsm~4L#iPcP-llb^W>+iU0t&c4=1f*)Tuw=RVwR^dPI&o2i4T2DF
z_vXti#L{~9ykU4<I0u+VKuX>IAN8G251OJm66Mi*$Gt~A&nK!?zw&JKB82Bm+YTrW
zwrdbx65%SM?1bHqRfbuMd8_^Kgqn?4uVz9aD?+4Wg_W-3)e535X(Ve|-uF%U=6+*|
zD@YNGb$Iu5o#KABxf<7xj@swme@(m$zlC3SH_1kRU+fBM`0fYkOeC50VZ9UItRQwg
zi#jU*DrtM|3mIrYRIOQVcWz>g*lM><2DEuPQ%y4Jf*Oy)=Iw6xU*V~|H+6PAXY1Z^
zLE5}AYbSigBYz6$PM9KOMMWl7zcW8|)_4Uvd!pCPcHQ5IT_nJNkd_*sJXifb%?Q-4
z?{BZyTx+|>#%LosvO&U!9Vp;tHk*B|T#1IONFz4t*Dl=9&R2cBq9hvLC9tnjO&4Ty
zfK%`+dgAVh(rniR$?Ak?1gVm8fl&Lfx4B!{&4Rm$33*l&!EiEE3wy?(0z_K=WMt@3
zv~18j*ZvPQ;P;h)-TS1<VVqDY+$gcZ+XU_5mTw4>9qg6Dsq|rz0JS*DCr49@yq|lV
zI+h<cL=kTPwgiMld&%0=+$*&E5P0I5wz$owyyTE5iwsw@#UjNUI8&gT;i(U<*h++o
zLvHj8QR3sFm6vSDk&@*Jd;V6wS*;*2e8!;JwkWPnGvQE+%xdiT^i>^sF}?Zk`3W|{
zObN^6ahE4Yu?-{{v}B%@VM~+kcOi*iMLQ+h*cj{)jMQ>?BZ<TOz81$T`@8b$(M@Fi
zEjyotD>t^3bt;l8bNN;#64#;BE&=Fk9;wb;V!x0CYzvF=Mz1ip_72IPyHXoSr_9bq
z7&Mc0!X$KhWkkLSS@SKZ|8l6_A^ens`!IEToiX_+KTw=|I}>^RBkY9%TEz(*Nf0e`
z31EX|;h)J|Y0u6BQmDM1rt8>$CTEOF<`xtEy!Z7e*04zc1I-!B$Vve&bOFw8@_1GQ
z$xpJQ?R&Dz-E5`218kao12bRL%~#j(F4~b_dT{R=476X)32C7)c7|QqZhx<IGTVJC
z9T~G6$Q3P?VRG{p%n~o<ILg5W$RfPS{+<Hr7RU+9mx2B$FD-4)U-k$0W%tY$g&w{T
zlhtd!Od1K$!oCcV2&j^W;IHtpu_RS-vA`Vq8h*z74b=4d+C6`aF}!K%KJ>uDa0zxr
z3;&>>>tSX9c~}$|+0^ho*K-}_-!_%M+o4)F5XO0zpRweCOWe|6wQahAbS4MTNo+>a
z^mKo^aN!FE?0uzXLlu5SgmTvxyfQHO2R#sbdnPADQsB_UZS6cT?Ua&;9kL@mjyvDR
z0sfChAemED)3oX<M6QPC81-a|w+RqS?Qs0?{N(RrG-c-3KKmPUG5BxoyvsBSOmh#=
zb87q^h{wv=xWd|hhVt`<L3paQ6F!?Ats57mMVNPI>q9z~=7JGT;-cfZ!42v{^!^2|
z2`3RU8(_zwtBYOr5|VrH)R0+g(qzNC@oBQ^O$kEcB0|K(F3DAu5Oe;$Yb(tl<&Mok
zc7!j-`00P0iy7SfwjhPGs|vi3!fZCmLXz%?ZLXa|kQ=^xx_p(;<LRN^*h%UkQIT^N
z<d3fRjz?Ya2Lbd1sh2-@4;mTM;kq>1Z4dQ>j)f;~`ZYf=l=8Q(Mw|3SQ#y@FF&A^o
z%qqnt_c+k`1D1)`7V}{94(UmCdh*CY$EbhbWp7L@8brn#m>{ZisyLzo<*DX_O{*>T
zW{m0G9AsquXZ14s_;r+8Zpnb93uQ-CU@^0YUso=Y_*uUxoD;Ho3uMDZy+3zni@_R+
z=Zg)RzdOb7D(CW~iMYZXf*RC~nb4^wMz>ta&;=2MZs;6Z2Rrquj#`#m;_Lt%4$;Ii
z0>qjr?a|z*c9)dMja?w$o(_*knAbITUBxCzQ!i1$LIcoT`?9_@&_S(mB_8p0%nCQC
ziTwno<C1}lS-@ScD(tQ$A1{)W@P#h4+m}!02}T7TrgPzbVzPtJZW)M1oHq|6jE*_8
zpa#y^JmsU%T-WieLc9|C!DZHzFi`lkuY)ZV0=IGKvb-q7LNy+Ty)X6Q;M|7{Ll8;B
zuBc1o6tF}LSuh75r9Qy^@Qgc|D-p~R@gD<-N7N&i>>^3WT^bxTkyg`@xaH-a5+PPf
zxXpEA>{xKefSQNrE`jUEX{H{xpB|*la9|}1`EcM)qsCtzemkOe&e6n~*9Fa`MuEhL
znaJ*uwF$%FjfC?&=wqy5@RbMNnz+lBh%tX$&UN?_{=#~7{ZHMpSYR6l0S1%$PKl_<
z>?V?2(~4VuYS+o>1n-_@>rUS9M^L38K<nvC=X@I^8%bJjCQZhKPnNr?j#pDChNPW!
z!DGteuz<&G-ADy)(OkJ00ZD6k39h}}fYe@J=X;4uM?F21*=znbNMdwOWAvsuWN%af
znKJiN0(YXI>X1dHQtllgmX3|(sagZkVw|S=zyq=N;F6d+i5#_g^4WNJ)b!9r<MTKS
zIf~bq_<%fZM|jz!Ld&gCPjiN)1zd@=SAd9t!}I1jb51?*gX!VSj!(;^Y$eX%oBT^#
zSEdzbeeKr|4xJUZIV+qHKPYXB^Z2!r^(1~}9D@C%#wYhJ*u5MbA9BN1LJ;2+-|z$>
zZj&e2If=*aD&mh|b~?pvSC|=3C}=M>KeCd7DZ2xj59i>Pe_E6o54M%yU=4JKDG<91
zenI~3i33ON+`QImfy;vyT@YUKb*R>wNn;L#iySKAf6OGl>I=hcJnnJ1>Fb7a9TMXd
zoKZmQ<MVDl0kf!VT2g*BixiE9Gsom{Pd~yaHLDOq){>Z1XKL@d3!4`zcuMZspayJ#
zF+5qLitm~2Q@nP1us5OW8oewDc!UmRjTY)KCl*#kw@Cvpt}|bJI0CiQm7f)Gc*~sl
zn!*`q{lj3^BN#0NJR_OUil6g3i)zpB;%*)2%Me}#&J9h$Lm~MWuQDkLyTCA?rxbrR
zx7j$&>hX|)6+F&shdCwGctH3$lZuT6>`a4)9#j0J_m&3@>fd|3{>dcD%@fk$680~X
zsGRZ{TL}<z6<R;%?adm!1rBa!c9-S`y1#B?J@QPWJYXU?XVwIG%Yy<|qwqGR9?1Of
zxv+*Op5it`mN0af+h4q#qJ(~3&^&N1SMJN|vUW%MREm3{J!z+s+`Jrmc}lUXPqth^
zhf{Qv*^xRQy7I)^HO32Np6R8R9WE=lb2V?6<lSLG8r`MlO9p2AH<xGAg}nf<5`ycd
z6xZjG7B86eRnhyiM#(k40l`&Z$vp$Wy8dUGEns!;U4Zs}KIJh6{p5Wi8r>exUt9Du
z#yyim$Bj%WI#eH4_~!%+$LRK%+s+hx=1v4xOwt15)xSKz$i4rbwDI!P<EaEzBd=Bx
z9oCNT;fxzfV6ghP4;2YM<r<X%5$f60dX$_8w?*}4jsB-IAI_LD;xN?G9ogI5FDv<K
zNR~13=mm6hPT29tp)h7I!A>3gBPVy^433gYx<F6*@*MYu<hzf+b;~%Um;*s*NOsuq
zcNW78KJN#+yA8hTaJkQK!W_4f2Mqf@Ae$L~l(yF{Az)Kkqc#%PX#0BNYo3pPe}_vU
z{|DNpdSp$-`$P2UR!9VGm$znHo|b7yV+>C6tyPjnho#&C_W+?{>h3NFp_h~G7h<*s
z^jl_}*&5JWE146<U*y4@05J8pX19R7`&N{~p#9h~d+F}hRF5sAI7~z|!RoYqy1lk%
z{toyRw7!kO*QdN<U<KUf$Z%ae2YnCivqK$ar(M04Au`AQ0%4RJ<~F&&K0-FkfQ{se
ze7Sp>QkX{oZOiR^YIPg-rm2yV%oO%;^t*yBtj?m19LWF}EdD!6a^mH;koc6x)4q{x
z&l=o0>=V@C5}PZ+Vpu8^z)oBCP7;emYI`%=2&fS-KroYS0OkQZzhR>&4?!a~kdzXD
zQ^UHx!d#Dqn^MkbE*p;`gF1L&7k6P64_U|RG`e4J>BnQsixa2VQRALo=4Yo+WC0wh
z%frMFTZRr<z@$HBhq=Ma+`_N1rnPOG?5@fh?c<(v^z>tUZD^@m+~0TrIUZB);B<n;
zMdK4S0V{CaW*^>Z{VfZB?3eUBrRJ-6q%3^i%)TsgODfm<m{Z19?*b~YIs?PwRvavl
zqc8LxvJ&F%+5lydw{MY80k*kXb*0E5?uPn)mM)&j1&%=SbRODVxtyuuk1f56)8i3Y
zdXUNUJGKZ^v=<=69F@%R6U?n+0o&+z%s?))PmYMG%{~;fpRJ4OKF`wTu$+f1tY$->
zf=&Cfwx0WW5H6}eS!&%^LC$u4>bWJ5uR(8k2KR0p1-@B@VkwHZXF*Tmk$t<nC>svB
z2BrHSEs1T3HFuS=q4V8dH36UI>7g?5Df}MoP~2X-cVekn4Q2-Nf6pQmbbe}C)&S?R
zyUQ9Qw7}d-QZHr{dlq#a3~-cn?0fkw9ID&bH%N6;f&m*2q=&z@xU^OPDuFBjy*dvL
zD^_QzforB`g=(0G%RseS|MK&#?%))FOvjXBk2_YeXZIHQS8)T~Yl=M`R*1%a#kehr
z#I#C!_Q65yq-}fCOH{*(OFO<^VNGYNi-pEv!crWV$JeJkBNv})pqqDA=yR4{b>#Uw
z7M<HVN@5F$+43h@+(G$Ymh`;cTEu$&X{ZoCi^*x5sX^O&$t(m#YQAR6*OMDr+@cGi
zI>D-A>|>;&rn2QVmDu@L>eHT_Y~}oe5uV$aj`7v{Hu<f|FNSLA1Ven;*|#co$<j{w
zqt~JCY<XbQ)>p8j(W8^{ha3po4R)TU1swX><#6bj{R4%lKx2oSTS8q__=DKHpV{((
zolaXYwvsHDg$K3>Ce_wT4%oWkrr73;7w!xtuu*;F0IM^0kf~%J2CfyXE87x7UE-;J
zWyiSWCpO-`1iK3^vXRZNec=ZHWFHX1xIk|SLsx-m4)C90*aJ^G^R7^l)^cW=DSA&&
zxC3Lf0xB5S^(70K4<O-R;;YjYDXv3+pY@+;G_YzX-}Cn|O_pSy8*5OZ$aK!xb%rgn
z@KnF5BQ4h}EA)CLhXd0TcnIrme-Cy6UVh-1X57T;5tdPoMIeLfj<4d^0*VIdM#3jb
zUOwuwCmxW7$SJzYpc)noP#w>|4Dh1}Is2W#&#G|eqyq|oeBqr%IYZKUdGunod{=*E
z$JZ~$0U1oHZF%{lHhs4D#!uCxFfD-)OP8#s?Iz-0Ul?O;ykUK|Zf98*;Xqz#$4PG1
z<5%kA3D)Bb^-+cO_#1c}zs9noch*X(io_Z_yk?n`^&(I#R46P(<O$PYRqV`rp%Xj~
zjl0J(Q741-%Q~Kp+GuWB{X(7HMGGMZ6M34l<+)kbXhpEQ@43pJ^+r9%qG3fNH|E4O
zIhM;gE99%ylkujmS=u42_bZmmhfXZ6zxvpaEibHT7sO=Zw>-KHyC_6~*C&oZh16L6
z$MYPaK^KHq6gIb&^eD1y|IFI*$;|>s<goX`<v!cd*Gw|$`?3%UXUgAh$^gAt%xl$_
zV-7=|vqF7YM)O(pz|~<i@XDO|%Hkq=z{0qTTu-LL17Zz6R4W+oG>9STI8>{WHJDcW
zSgoKbkLTe+%G$gD6a4uL=tcLos7SzaW$U>wIBGgsN-=bl<+UB=k%ikKk9B2uqnPeZ
zzGeK%T34V*lUwHs*B3FS`0AE0<GA{@H}Z1Y%!%!Jq?{6#&$l^LUm~kBU(N`3rh9g_
z#G>#5&(v%*uh5LhRXdn++$S)TJ0P~y&brS2?@y|){C|+py@~(N;gED5jSBewXeD8W
zO9xS9(d#s8AjvuFL-!JQMp>%p|G4dMDT%@|FQy~VBG-f}^LD>KUshR1uD_L0!kzdo
zkmhOx-x_}~&XW9=vUu!&zK@C<KECe$+U~D(WHjT@zhngzjd{N#lh-cP{Ygb%5_cb3
zWsA2`USEBSaWJ_(FTWlO5tGl&fK&WdN%)!e68&aHu=UdI)qe`Q-R$N}PSlt(?lM3^
zTdeXX*{7Ubw?4Y4RGj<s;x67u?3~Wvi)yt5w3@#$wo>N>>V;WB%LmsmrjIUJn+a_$
z1;X)>%SG*x%WcJ7^{9Y<aX&h%*Gnm%jG#6;jizb!cPlRH{4tgJit8cnMT*MioY%q;
z*^qzM63q2f3`HKT!^rTBg<L$&HO*Tjdn4_7c?ZuAJ-)s_rn?LkAZ=pnF;#<RaIJCg
z+ok3bPSKNYopMft)4RVD!{HuJ;aX!V>*Vj9Kqxr73A))eV0jTJB*HL!?1ntas5_?u
z?1RVd>0Zbk%!)kg)Sb8r^IJ0>zSLcq_o!ytPsgTlkXY7N@~+qyrY*salIw*ZxdPvO
zAD9?D8*=ckZl10sj!qG~y7UmyvlB<=GLe1~llL|_9Ymq+*PXi<S2}QwwC<A~?Iz3J
z+{k)3ywo%jV?5hMGBUlRVoLemAzK0jTqSLg=HS-x|G}wz^s=AP%;)>4dwUa!CXR1{
zV(Rs=2XZyQhBJp8C_)9{eu!fK|G}bZsM~GDdyz++Jiaupx|RD3)YvV5Xl-sVH0$o1
z{U7u*!)0Ki%0f=F8|WPE#!l{4*dX$PF?^$NEvRHODkKX%2<b_EJ(RPh*zh_+-<ops
z_YvF2nWSJd(UYlz{bry+u$1*iks8D(?T^lu)?-;-P2Gg%25(b==KE{6P_C*C6*r}I
z|G?%Jh@=8)?D-}+<^uh=hDM?0f?F0E$?fPpodbKpn$oxb=>2<N!RK=MxeA%njh+b6
z=&rOVNz~GT|Kg(}-8;uB{QQlD{?Hgn`Pv&U3b+zuP_;p|kq8WYx}S5cSL0_lD8-O;
zwH=x+UOLT)tLQz|w|(lTVC~XmgoU<7U0)!YLvN&Ei5|0VQZ`kwICA&yhY4P$F>KbP
z6%6}e)okwbj|S9gb(Pq_BIfmV7pHMI|Jx7bb6*D$O}PffPyqvnHv6FuL;sYzZ=ZM2
zkFNLLB$?S@znm3>7FVyIK+eupnLf?6#nLQSXqI=hROYTf4jTQ~VP8`YQuo`3RhJ2`
zH#a>udJC2vkRjGZ(vlKRsr6)nd3B<PUb8)B=F`1K1>B#)J$zd7Z}1PXnw8asJeZBe
zz=e>I4{8rcmw$KKp_{;CsJB(BRfOIfCHjzs?)Kaueocp4e;{9e;k@bG(@NzxvpY?T
zYmJAAs$YYEqX>vrQ}T;kwOSHnW)=$7|JdoaK>n?9;AIyVw{lL*72kC;%J_|suF&wD
zX0eXqY*)z>1j?^**Pnj~DJ^gi<}I_)+}Jxax<2W<re9t>gY{mE7jao9H4zhaWc+59
zNz?0-F0uH({h`(KBeGKHaZ)U$yr^F|inYyw965yLF|z)wabA_}A}Tmk@BPIxmc|EL
zfiN|UWw<FSgR6~yHRIFq20K~iP6Y4tBSWx2%rA%>tK&s<dv7C`t;A8PO|)QP;`eFT
z`1AU9Nd<{3ST%J`ty;lsTO4_&5O465LXGf-?J4-VCZaRw6pZ*@wkZAElBCo6D%29_
z6xPE@GrpA5^_tt;HIr3DBvHP_Jk#SFYcF~@X|Y}9-|0xbkiEA-{RM8Ytezu0YnWp5
z6CN}}ocBT21Fs_p)iJ3<X@+_c4U-Ax!5VlDS~OhrhgQ@4;;R{N5j_Zztb#T}T*A{$
zS<xSoP4jPU6@r2O6d-&r5`EfPlWGDgrvudlUkhB#zxIlKRTZsvq)@s@PIrKrH-T6u
z!0ZCE8)U;Pvtt*BYyo+DdE}%UNeAQKlz3;EVv>{66+B5U@zjHmNOwpn2jT}SMgd6d
zYn4oa=v)gWS(OfmP!s`{{-moTg(p#ztK+QN=pF9^!1h`G^kph#zrfczwT0LIE0=qB
zt_E&$h<xM)LdkwKXMq=<g~Vltd#icJzkws%z3^DOuAM_&`4aUyhkUP^lSYSnML&s=
zB_^)@5&9RkSILvlMYfP-;={YWR&>kFaV}M2gtHrdrzjibLy40!4$&f}agN!@82jqU
z<C}vnk85rB^F8{ok$06Ap)ORRuKuWck?Ys2#yyLpX5j?YBRZWDmWvNav2*d2I-LmM
z7X3cGMkp~;&6A9+^s5wuhD0PC52l8}duTc*st4rc`~whB<&MoN)xwo|!s+nF!A&E?
z?czoPrf>9}m|q-zm8&{ta~Tek{x=}(k5q*6DTC%!Uw!Fro|~5VxVzgi%~1V?;-1rz
zZ+<QkO3cM6ZxKfaC0r4-k$>haydn&~k96-63n`Ez?HOpO$C$~_c4d%PaChgdNKd}!
zG<Y|)Z2AQ$q7NNY*ZfIin3gaRwU?PDt>&*?TDZPD+?c2(Y7j-ZH5oCP)9|ju<RuLt
zaeSU5Oa$dZL#O5{*6*N<CH$0f6~4j*Bs+@hu79+lxj6V1I@|URk4Dy=aMI}&kahlw
z0EDxIP4`&73)oFph>ixsc+5Lh2csPIvm28Au?1^n7cAFYI<m_&rT{bs=unv4nL}1~
zFR3n?SH#8IE=2_?(`|XFE{CKS)?lmIfEXl)&?53bL4O5z{3AZ`ZGUiw6guoOUO1FC
z<W}Dil1zeycb`5et8P!4SV0N#VJHSkO6bt+b@G?p;D0pst=34k;p_Qbn#ojuO7uXS
zrHS&u#eVl<tV`9(r|GqJ=w`uS8L~^m3KP#U1h6D{7|830MA7NnK7DAptFXI$#kvR^
zqk+hVlQ2N7wN{<qOeT`-*xqtIU&2f*22kgRHn6%5m-P4#RQIet@HVxcIYcU6&9hID
z05yST=czUms)$(SF@>n;PI>1a09bO0K*kBQmv-JH`H`pZ`M34hS%ymx$mJV5d9iAF
zYvj)#^KqsEsiC+oP29Rti^MGTka-Fm2jX<)dn1LzIRCOYQmRvHJQUTufy*vcAXHUd
zQ*uhIx6;Ih=4ZPu^4_mb)NlE+nGFKq_z+}y5DH|Rco@ptyod|0d}r<z-SxdT`SJkN
zRL|rF-pVK)NR$8EH5Xpth%r<X$Bky>DW@A#ho@S8RDztRZ<|!t#eq|*sYC1D<i$1e
zIYTj*#>AVHWs>W8GgXd93)Y14wS-XfuLX&`%D^7{T!8XS#4mgS1TR!XC-wk2mD-22
zbF{<moplf-{miAj-$mZi8v|XBOhK}B#@?cX#Rs(}yW4}-78<FeRL~>I_I<KOn!TK&
z>%c9K&w@hC_6~-aE9=M$SuYf}YJ5Z<kVLJDPkmqS1irs`p%4d!V<UmZSvX5Y`oCa~
zaMEl#a>b$VqoL8t`0_nnrAWi!ld;6Q*_N^4R6UTIL*ef>lFwlIkhWFzIHt5^^bK|N
zd9+z#hs)H$>!FW8;EDfJqnEsp0WSixNggM7P4hIY;&$$zUFeIv6wtKre`zy@b@~6_
zw3%7_O7mJwAlCx^!Z{%4dB`df!$_#y*LPDb70M~1yx7qu7QF_<uQ@%2jg8j(Pfsqd
zQEHM}<vark8-{ag!1pu?Pdlckb7-lTz|MYqy{5XMd_=FY>A@QTyZdPw;|#04)bMI+
zY}|nVrjK)x?nNOrRiKc@xc0Wm8*QqI5-POpxE_<5Ste*gobv_qMgSi>5udsn!9+CQ
z(^6`!Z`&(vW%lY{IJ|_&iao(6H!+^q8IoUTo_2{kUbDn|yAkvvhBB9ZR6TH(=Z2Lo
zEtW5=?V*1u!**OCsT&~(&@2hoHxZGr#Z$x1N6VxIk~(O_fQmVKx=CHq*ZlE=tJEqx
z{xaIc&gnBd3a5ddsTE$&fLf$ZE!lB;<|^H;5DPT<%Urqu)K)yz=X0iKn_x|{m$J;J
z?Mt^cy`&C!n%MKake+N?=>Qs#rjI>-Wg1e;{fhK~4eg7gr<jIh!H0V7=lS%g6@1`b
zhV*n7_=oJJUK9I*pgDc&d$wF((p`cPE~_~6!E@>?iJe)aCO(z)_2Y^YNHP_f+#z>?
zI>y(fk~TdAmz}s`Qnjr~z2gkEg0iP~q7zG9+S{Ob_j2#;J+E@zb3y@P(bs`Azbu`r
zN6q9Ng&ZLc^1zo*TWP`ayX)-<{sv8a)bcM@b}(hu3+RaI<rF-JUh6DsA3wi~N<8z2
zj;{wgj<fU2R-(}Ms(a$f46KAt`R`gKliWNd2w{-Mi6jrar{G@ywX^%5Mt$|sEl#87
z8J|1Ww<ny)d~h9H^W`v92~W?w^6M|6RKRKPhF+aT7Xm3RIXzmww&s+{;|C|BhCZG{
zFU6YDaFlq~7F{Rbu8QwI_naf{a~@kpu^v<pa4qpDK>Hr7{ti$GFym!?qX^Jfr0vGT
zmaw3kdD8o)OT(&Z7=~TItKhpo-r_6{wPa5*O7tKAwvX{G1z2AEP<qPv=V%{QbbN!%
zrRjAqpecd>QSesAv9NZyJwb%<HpzHjmL&#0LQl5F*6p%!_tI)rdD;;`ts@8?q-C?)
zUv2{c{Bb$J@+>;~H*h9b_mMEUKw8Gl<VnKH%-fZfcZX~LR~{0lxPvM!9@yJYJMup#
z+gHS#R%{A?WKYXRB0v12#%#rYN({-5Z$c<9mH6KT*W?&9Yj1jN$}Tm=Ope@*Yib)%
z<-Z4myae#I|J6)d#C?grHv^mp6|Prfz#H)&WmBnZfbl>1HBZ=3wgGCGX5s(CpFmA2
z0;Z4Q@V?+cCG*=00F)1%5}%(tV_huWATLRtSjC|i_Z0HaIAWKgv-DwAZx#Va+G&%C
z0tFr<5*D9lb4ztLE#AUF3RR+7SKSgb@tM`f47id>U6zK`UVc}=HIC3`hlPmWPKd%A
z09H4B*KfZ3Grw5@1EqdG!c(%eM<9umOyjl^tg=oVP9t=L0h#dlEM(k<>4~Y&YO+d{
zLhJg%l9n%rqY)*>lGgr5o_GX^P}|jnt|xx{o#xroYkhZLV|C*&G^Ft_Km<MP%i@z1
zb6}mBO(DZPUES5LMf@|jurEEx?IYDZ-aQ3w^nsW*XH<yqG6(n$2vd_2u_+{w1O%?B
z0QOagY>fDQh_}#`wDue(lPi+(UBT5hG~9?Xh%DrAyhlKPIx@ukxa0cru<ZT&pMWtv
zYi<*FBSZ$~28jSOpTqly^AH=xlg^xl40bBwIyQb-oGqOtI{DVMe@od+injnhVHK~a
zC_t1^!~`zq2+l+3m?<>K==Zo53~JBK__RI|s~5>Ymg%Qy5b4ozDy+L6`ShCi3wXVc
zx2Ttth^aF+2^dO}7*E<x6He-l?!tWI&>GtFc!Gt1l!QXLdHhhB<NB@<UV8hBnYWFL
zgH^1AE7%xRM&e8`;(d5IkO4Ys8ZrN%g$pB{2<e360S?k2f}-dd;6ukqxp5<^LMdLk
zRJpe}EQJQg%+X1eu7875X&54>`g6ilOp(Mn&y70<fIjO%L<!g<V7X&-{qxM{uQvd4
zh+QR!zGy%Q0EiEs9`F)n=Z52)pXu5j*0<E$S>Ki;v+k?na|2uxrU!ZO$QQF3^R;7H
zTC@8fdN0*CjzvevuEGw1fPQ2me`q^--)Qb99m(Rk@}9;@kNi}2c#1SF^_h1sDiW+x
zQ3-S8Aqrb@U82EVln&%_Fq*&ZB!1U5eDCX%HK-a+-myu;0V)NV>jILWwA5oBP^m6u
zs@J-b^~I5fW0vJQkoHQBw?IirTqwtkCGVJ{k8)e7q9zq<nhhW)E7C2>x}9~OT&54<
z{K`m4M(-6kxKx(@Nq}}bxSJF)lC!4;56VcEVv~0$&}enG{Q{DB)U31h<=hsHUClH_
zL?8bRWW?Sg-T`UmW@uf=UKnqoJAG}lM<V~TW8{nr+ME0VS;F#*uG2(K1ya6g8DcxA
z$6_^D1&m7$=%g(Oy0-sK0#E!9J<o}(4xKfae&VXDE=A`-J+q{z&>3b*<w9pVWN&j>
zdeTF>JZZW>V)-E>HG57Gn;hUl-zo*6#tuCy|K$K@<UPI8^vBDW;^<rcOun%LP+Jq`
zNl!CFL@=K<b}8RqTBXkg7sSxzMdf_Hd{x$vcd3-#kQmDV0(Ueh_@%3E%>o??^FnHF
zl?p@+#L4^H#4EZ)**y$85}Y!Uijp*x^|4F|A8GL(SzdQae@v$x8^>6cpOPkepDtgG
zq$}4SdFShMX}~h3+>qT#Js_vac|JT0u|cl^PRr7VZc!l-5rr;5<G==t1h#M+H}4Vc
z%K-f7$l7ujID^%X3lVLA7m@hf{m-m)`+p-;QlD}t96KkQ^RBuuwBn(bhi=Vc=i~G}
z?|Mg$*Bn(khaSn6H`vZ>)&rJV^nAIRCg+-jD!M&_F}s}O7mZ!eX1*nD57;Y!+gIAo
z3xFdbKOdpX70y|ns)(KkdY(E7^8;ce#$Mkhswz)Ty#6D9?I4NX<J0VuRN)0_6^XLE
zPEF&}qpO9pg#aBH$={#-0}}q)cViCFo+lg#e^0$Iu9S5E67Y)j3hHl8pZz{CK!M)=
z2XUsBrmfa!?CPz%-ufiH_BF6g#BBrCfy8-z%+JeLE7lzh_@rv0>x7^rXj~N_;mHNQ
zLLg#ZFD824=#Xs9Aq_$3%A*Mgu<iIx*C+vL1D8yL()6U!!58ZM_L~1I>pcn^K9&+A
zV#X`y9MI&ZhMt*hH3iBRzW4|1q0U)BWzt7ieThQ|>jlFC)dAo~zyghOL;`tiiGZIP
zyW+tOX%QCp{D5k~;&woW)thJgfpB{ADb@Rh#sUY^#ee_hR4X}X>(<`{Xb_{m@cmNr
zUfv`428CR`fckM8ShtHDuug7ohIFZ37-Tp(z)|_Wo=nX>*K=opEdApUj&L9ukBo2x
zj)ZUKgyZjqJ{BrdTWB?&SD;?s(w8)Lm2--!`__&?<K(EGx<q*#4?qMk#d;1OD(U&~
zff^*>>zsbVbHJ&8@>0O092lT*omNPC|0TfA!HV>x<D6-2Te);inohyWTiZf;PI#5>
zeyZ=MBv;Zy_t1R+UY;r6FSO$n9V@NP?6a+PDLhUqA=OUl(AY(l?`}Ymh#jmBb=fVe
z=^V`?KKm~CrH6ct(ofzj*DP*ekJAchzr-Jsk^HM-=o*T@aLoZ`-St%#N}EaU3N_BB
zlr$gFMu?bx;-RV@(oaolo8c)|k|jOP!r36I!`^-n1#I}M73q&TxIMVDJd1D&Yrxg`
z+StK}CbjAL@^DgwE}z_QRrEme75L!yY#O^zYAgqW<M8`-ic`>DsB^-V6bi2%)d5N+
z#AtM{rv|@V8|*)(9@u2rkKSvoIODVd>?jvnX{=Hfu-l2jFJC9A12x1HL}%y)Wm)($
zV+RvzK>^Zn8aSrsea{4Oa7)%s4RVLbj8`{i%LDcmP(ur#<L;xoom245Q=pOn4IDZ|
zTJjsz#e3w0df?|gAEBUv&)M>DT5o8d>isty+|b%7?qc^Q#1E&Ug*P&iv;imTf?t-G
zQ3e0s3H!VZPpW1FBK`&(K%h(*>m01!o6C!!MJDa3yJDHjeN9VVLzjjDWQ3K~uW?j>
z$VdV{+Hjt$>7$@P43w-zU5NQTduZ$p&ySRRJ>nW4>q$i^P~|0{3e=qrRAxVQ@0m8|
z^^j{Z?3Mi+K~skrJ4!^GdhPTR0H{9``xfUU0R<`x*$Wh1yTQB(=$Pkp8w8-bGixR}
zpiZ~=DUFf%TEXS!HA6xnWyiuE{Ihi7Uq2@_c8S}}cjcMnBnR)HMw-+efYlNuB=4Yo
zTt%&M8#)VIMPWoLV}SEWd*h4>0$we!y*e5S?D3X6xqGyz4y6*cuuA-qk=Hky&s4sE
zukom5Zh>F41P*=xHPD<+S|8!dmMX0(vXZ46Rw$~=34KJpGZmxTdjWErJ^e?hwTAGB
zt(G+46026bza(#$X1acvm4&ZqTL#dZ@%c94zOm4V7dt`wa)w$QCb-ab`Ko{z*?Rit
z8mqhdJ-F|;30ntBmb7Nq7JCm4cJ9QO4VJe{DYs%>?@4TNWHX=*oN$DYy=!&B&nWG=
zeyX&OfoQJ&>j6WcZ8L>D))2DTc9Dfrk_LcVW^)Mp4AI$Q=ZzM15dJlCECNHH@WmqW
zA?w)S>r9|_?#U?;GZi`$mP7mekCxitnNfUyLAW`b{nb9t27z^PmaIKpLK}CTwE_+?
zR-iCfjofjOHBoC7LB!PU;$aiwI#FedO@Gh2NQJ-}?`lNv^ugI(ThJLP7|k><`G6DJ
zwib+9*v_E-e}B^T{Qq$HKOATueEk1lUmzc;THCdZ77x0AUnkTqO)l82^sI1YXwuva
z+L;>Jyp%CEioq*XEuA;6PLC-zzbQiZP^bqi@{wc|LC$-F5dIS3@hq%LZE4NaPdBk?
z-|^ElOZ_0=W9vbbIvTuvVmFpG^ZUcQUzv9{R=QN7!U@eT1%x6s_I`%IcVC7B^#!?P
z+`4(i0c}0VhL&jlPFvZ6?=73E@)tti(CmErDL#n?+_h#8nUe^l(!(TIvycvj0n{D}
z-Q=g;BOmSnpW_i{>et0kZi4B^ncUS=l&SacT*N=nTuNG<*>x1~VHGbTu?Zlds+BwC
zo_OKrEZrlSpdP<Jfe(JY+($mResfs3SM61%PRcJLUVmtP-{YI%=FF<uaLWUP;f2v_
z*VrI~0KcIFRU62Ta$7`F1{SNDx>Uki@$wrVH}qGEPBy(fevDn{js{SMIfzsr9TTl`
zQu%OkeT`}`yO{RiJo)a$S5{cyW5w=0Y27<O9kmZg?~6!=34uIW`u+;e`W`;)OOhEy
zDDFx5Z3dF4HdURkJOktni$$SAPOEO5KqhO_ix^-_je?+A@)4&59}7lBZoZot84r&S
z{w#k8yh?~Ltopmg;6nQ~y3a{h21u&wgK0N8;48wxiX$!I%?^XI9}Sx8MLkhIe_*P2
zW{y<1UCdZD-x@nskF6r8trb)Fe?$?A@c#z=#V*}_{Ct#}G|o=xwDfz!EZ0T<*Y!Vp
zwp$f&SIb&OntO{lO+MjYo})%8X;=58JzAVXfv!HoxSAcpL`_`#jl1)MA0#m~7|1SB
z?Tv-wE7XD=yfXShltS~pPyeIOY5nHWT07G5i;@!NRYTyW5MS;-#Su0l-7tTPdd-y_
zWO!xe>%KBltjLFTo}NFtTsH-^govb>DtHMx?rIXUUc|*Go6)^iI<(hKK%`JmqE3a_
zXxXO5PuY(`nq=Rz=YVf+X}h9A3e|U}fFfxfi<r4b3~KT<Xl}eW^L78GC=eQAY`)nB
zZyD1B<^C+!1Rw_|d+6Bj2ZIgYg>a{xN1q{hfiM?ciLE_>(g-zz8W;A~Y;rPr=7xv=
zl0D9Q)4v5f7D`w*O>SoAtJ@SbM>^O*Nd0TzWKe^l+F+6`B?38k=;-fygVp~z|2x?Q
zkoT&vwa&NG!m!4<N^d_=HK+BRAy`*4-U>0`+w1zD<%+}6hV*bFecusoV8?X0VLsAG
zcW)Tk4BHZwph9tO%~Xi9_8U=2(PrB#&9cBQSt})bs~t%^Ac@1e*m1o{P>Cn%<58M-
zKq9pa0dXY_AkaH3SgC#HReP%HJBPB)nc<A32;{;s>wD?QFiX1SC}&eSIufv;IHe`W
z?GkCh@xPBu#ja_J4$C<CgretBI-XEtyR>!T@jF6Ipk!r!H=)RGbqr=EO{iHF1qpGR
z&veezE9gIVYMNoYMls7>1%3pGQL(5F_QAjrXHUsB^$%4dI>G%bWm|12;|9A9WR2f-
zF3Ylw-m(y{zSI&(!W!%%>oqwz6}E_-WZ7O}wuLVGpE{;89|OM`$1F$D{Me$u-Itm&
zp|k!v*8JeD^w^oq$L0D3v>!O2S-~Hi3tYDM)Xj3W{@9x8a-Y4)LFbQ~e-N0u=Boc`
zfYoVd2uXiC*6QJ!CNsA~HP0=?FjKp1Jof;Zfi1(0{5Ahf(?SG`HDft9c1nQ((!5zm
zOiBzc-e7@H^L>st%LQ<X7<)TD`jKJmDE*^!>IRV@l}cJTvAR#bHu|HxsdH<st?*H%
z@L~B6Vp2f~vnOFY>BTylx)E7BRyr{pyo?3Wb$t`uq1YPF!O!qB{!*L;C`9O-J;B(L
z7R}MAAkl7)@F(>B#~;xEz!Q06{w;&WCxwJO!9Dl+?03-Tka+AN1|7Y$$`SX4t`06|
ze&@^8^UK%B17EFC?+^0i4}e5Sv_%$!1YFS{7P?cH?-65;c2yi8)+gV99L9*Alwtr$
zFYQ()SA<f@PYuQaIgArqkbl|oB9G$rv%BeBH@9gZ0E%okK+0qvz(fk$B^Q>p1E{j6
zCLe$e`kT~Hb{+T;Iw~2~O*i9(?OU2#+XHte0U#wY$L6(1T_`78D^{XO`i9YIAl58k
z6A&tfz%TtZ_pAQ)rY+jt4U^KmL8f?;zS24X73-+?D0Fq?!kKNXw#Wa$E!YrLQs*Bv
z9M*pzI(2~>V56OV7DP#0nt7$wxTZHEC~JrCPZsk<4M$8jFQ7{k5s!mAR_SqS;1J9+
z1{f7~TpLtIe+9FySJO=7fJJE&!qQNX5z_^35f7B>Y>7a#vsaz|_5s$pIHQGqBYRuw
z;{ex-uUjv#VwXGx!d88|E?uSjB^C4cJQc}BE*}OfcAq~_Q>5QPVijt0vuRywgWX_c
z{Q^Dw4;<h?>Oq3o4vsM}mJH7NRk;OHYQwwfczs_SnpP6eyo_heuwz;nw~5O=&h}e4
z8-dEK=8z>cmzsG<ndTt-Ch^O_FLzPBcjmH&Hjvax(9K8Q(Xv=Tm0FFXUUPUzqc3#o
zr4|cVAV%*4*J<v!*mXIWb!P*a0XNt4<rt*eI|t{l@rlD*dZC5A4{2#85PdwGGvZ-e
z>zUzt9<y-}<-rK^Q}wE-f#l-Wk`ZsQ)ho~SJhF#VgEhORltgVt{MoGzrL+#}tPb^3
zxc}~e_p|2zKy6=OyD<vBWQbI=#@_~M0L^SB%AJ}F5q9&**x9!J2umOInt6~tE<ouZ
z^M)mGLGZwb+|+~-?ItCOpgfdWkY2-*L+^kcSPORzI};2%vy>5d4hNpuOwPB?wy1-w
z#mf-B0Kv<#^x<gK9WLPOOP0<?G-LqeL)nH42iyTSu}v@h;tsf;o~xl>^iP0iw(-}4
zfM@oN{_GAIC^TY;bNwao%-YNm4WJzmyf{6<NTUbh1D;u00JMuj@E_R5gJghb_VKC^
zyl^I-<1+KU4ZH&mo{z9NOuuN6UjgyuB{;t_PTYqFP#(4}Z;}wqW*6J~ABF){jFZd=
zSS$Wu&b2x^_74GYR&W>8QH+z)VdPgcoP2WPZ~9q_azOTw;^sKw8FPDm%7`BX>XG)-
zkHIj0pjqP?2ZnLDD}K(17x^qc!MH2yk3jJD`z33N8FLGd{DNqda`ZCJ?S<!ez#9Ac
z?YZf5)yqQgXEe0)4l)dgLB*oT=SYiD24@>Cwh}%8p?B#<$A7I0cfdP1<%1cQ#?@dQ
z{4??2`xuwXDG`;T<8Ou*7!!Y<9Lf;+$PpgG=&joY!B3p<y25}cz5J1_=7pN$3?Z*{
zM%@7eA!CLV66G?;o;C>6h78+Ax&vP1Ok)VK|Cqlmik~x$VVdm!$JZ8hfisP<82gWV
z+oJApioT#voT54R3)01>{1@Z4jO@&ampKU&Vm!tGyFKS5jEnIY_Z4WBuwm?yR}#QF
zk8<`fZY-r^s~I9JQ|Zywp*!HMTl-BC%7bN4v|rq>gJ&5DsF39No~`@dKtrkMGBf;x
zlnHUQr4~xi?S2t~kzc?sR5B(U+GKl(aPw#D2y^jedpfn~&GwYO=*jl<`)d~)*A@b5
zY<vcqD}uIo5hm9Fn#vN;f>%0mi*25jUnA^|2nRNqc<g%=+35Vf$j)jy>N^`y>JFjo
z19^UaW1~7`p{d43N8IHX`$)-pFk9n?M@aTY+etRW_tZzMutDc6&dJ`Wd0(-}ZKZMG
zZx+zb_t)clj&RF*%yDg5o!qPIKtXehW{amjt!Ra2=d`|5gWyf9#ny24>xo-2zF0zI
zMm))^b>4RRv=|G)BttN;zZAX<FGnBWGVs(1@;dk>Xw-$u(!o`~t<DF5EkLoX`D(?%
z(kgYky(o_jr;o1kGRIjKAdgXza?u^#s+U_PrHyIs=BE4&l-Cg;iqBM@Pxxq&_|I(A
zp6rb2<;lsGWg{hA+G7N&MXV8cBUvUbzT(KNYm{slOGkwZa~1leE7<2V-;8NEGSDBH
zw2QuR{-3eGmFB58ML6+R`Sy+J#y<-0{=fLU5HXg$CGX2+{ppeW@1(PCeku&lE}(->
zpkv#kOh~ac8V;l306vstG+#gtXBiEL(QqKHl9-W@pBicT`hMo`txK2BNZ1Z(hZ!DR
z{m?HwcmK98BEoOPz_qTXTE({AMdxnS<-XKZyP*rNb%T8hwg8tYqNwv!+wu9ttOtdr
zXIZR)6X?LE+9b|*Hk;HcVtkDc`av3YDNWY>YdGb?FZ*l=hE!%VAB2|otp*-9s0yjh
zeKzdhTl6b;|GMN0J{!DYYF>UjWtscCRJJ*-Db@+xD7?s`zs>ZV>+SWgG+cz=2!LJM
zsaCOfZPB{hb)oKxvl7}TJh;!59l@y|x75e>VDQh!S+6)xS>N>u$YS;dZgXeXO*yH4
zLb@e?`fSE|+yBn`bF1iv)8_@PIzL$|n?J1GF8Lt$_WG0Sb=!4ssPnkmIo?y4bNh~e
z=$>H9slAyS{pEXVn)W#UDD!P#lfIlZ-%bLo?d=o>UbAIK?P?7^ao!8R|Dg8vdJV&$
zEKg%Q@)u@?E<381XaSU%djCP}?e*%GKUuDZPEXDH{q;xK&DH7C4P<_@Xp1k5+xE9=
z(q#XD*wll?dK3SJKHYE>XbgMUC(ci74`V7179VZ5TpD7Pv#|WaBIYN;Pk0Y)|FoFJ
zQvCbwONrcjo5f@m<}2h2?3>!Rrcm!4|I)p`es7ECeFIjwQ~Tf9gn%2X)92f%K-@d6
z-dG~$()oiY)hqVZHO~rXKB>Oq-Le2n;5Igi@9$ndfAB<D=Z|zy@j(}$FV_AKHOctB
z^hdd~J-a%{l5;?jEBU{dvTV@GI19IV?f;7hR4>&>i*9-5*v&V=e}Xy3^J&W;@A#9<
zzq%=HzMTfh>VmvVtG?6r-wMy!)fRxgc1L#i%HO+FBY<uJMoV$Ga<+{?iM;un%TT+{
z_OFTE`q!vwM-oVhMZo^2j&_cfKa=|_J{_t1)EW?3WNiR8S+D+0An-CN!6O~b6QG7I
zk5S4k07eVcCf0D7ckg>@e*t|548EP&LU-<Wz<8EITXq2B(*kVNyDs*ockgF)|8AP>
z|H0?)g4UX*Pv?La_bgviFEyv8N%?ED*eA|Y(uJKSL*_=V1<C*HWsAM@=7t2A0v)Yd
z;u=wsl30>zm0Xkxq!^40j7)V6jC766Lkx_p3@ofn47CjmtPBjiFYz~^Xvob^$xN%n
qt--2)JLn=e4Y&;@nYpROC5gEOxb@`Dn)Vo|hr!d;&t;ucLK6Ts1EVbf

literal 0
HcmV?d00001

diff --git a/pkg/.gitignore b/pkg/.gitignore
new file mode 100644
index 0000000..c286749
--- /dev/null
+++ b/pkg/.gitignore
@@ -0,0 +1,175 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+.python-version
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+#   For a library or package, you might want to ignore these files since the code is
+#   intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# UV
+#   Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+#   This is especially recommended for binary packages to ensure reproducibility, and is more
+#   commonly ignored for libraries.
+#uv.lock
+
+# poetry
+#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+#   This is especially recommended for binary packages to ensure reproducibility, and is more
+#   commonly ignored for libraries.
+#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+#   in version control.
+#   https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+#  and can be added to the global gitignore or merged into this file.  For a more nuclear
+#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
diff --git a/pkg/README.md b/pkg/README.md
new file mode 100644
index 0000000..a6e9499
--- /dev/null
+++ b/pkg/README.md
@@ -0,0 +1,3 @@
+# Tests package
+
+These files are used to build a python package from the test scripts.
diff --git a/pkg/cse138_asgn3_tests/__main__.py b/pkg/cse138_asgn3_tests/__main__.py
new file mode 120000
index 0000000..74b2cfc
--- /dev/null
+++ b/pkg/cse138_asgn3_tests/__main__.py
@@ -0,0 +1 @@
+../../__main__.py
\ No newline at end of file
diff --git a/pkg/cse138_asgn3_tests/tests b/pkg/cse138_asgn3_tests/tests
new file mode 120000
index 0000000..eddac70
--- /dev/null
+++ b/pkg/cse138_asgn3_tests/tests
@@ -0,0 +1 @@
+../../tests/
\ No newline at end of file
diff --git a/pkg/cse138_asgn3_tests/utils b/pkg/cse138_asgn3_tests/utils
new file mode 120000
index 0000000..7d6b64a
--- /dev/null
+++ b/pkg/cse138_asgn3_tests/utils
@@ -0,0 +1 @@
+../../utils/
\ No newline at end of file
diff --git a/pkg/pyproject.toml b/pkg/pyproject.toml
new file mode 100644
index 0000000..3c86903
--- /dev/null
+++ b/pkg/pyproject.toml
@@ -0,0 +1,12 @@
+[project]
+name = "cse138-asgn3-tests"
+version = "0.1.0"
+description = "Community test suite for CSE 138 Assignment 4"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "requests>=2.32.3",
+]
+
+[project.scripts]
+cse138-asgn3-tests = "cse138_asgn3_tests.__main__:main"
diff --git a/pkg/uv.lock b/pkg/uv.lock
new file mode 100644
index 0000000..453e587
--- /dev/null
+++ b/pkg/uv.lock
@@ -0,0 +1,77 @@
+version = 1
+requires-python = ">=3.13"
+
+[[package]]
+name = "certifi"
+version = "2025.1.31"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 },
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 },
+    { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 },
+    { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 },
+    { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 },
+    { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 },
+    { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 },
+    { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 },
+    { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 },
+    { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 },
+    { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 },
+    { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 },
+    { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 },
+    { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 },
+    { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 },
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
+]
+
+[[package]]
+name = "pkg"
+version = "0.1.0"
+source = { virtual = "." }
+dependencies = [
+    { name = "requests" },
+]
+
+[package.metadata]
+requires-dist = [{ name = "requests", specifier = ">=2.32.3" }]
+
+[[package]]
+name = "requests"
+version = "2.32.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "certifi" },
+    { name = "charset-normalizer" },
+    { name = "idna" },
+    { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 },
+]
+
+[[package]]
+name = "urllib3"
+version = "2.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 },
+]
diff --git a/tests/basic/basic.py b/tests/basic/basic.py
new file mode 100644
index 0000000..f4d7f07
--- /dev/null
+++ b/tests/basic/basic.py
@@ -0,0 +1,42 @@
+from time import sleep
+
+from ...utils.containers import ClusterConductor
+from ...utils.testcase import TestCase
+from ...utils.util import Logger
+from ..helper import KVSMultiClient, KVSTestFixture
+from ...utils.kvs_api import DEFAULT_TIMEOUT
+
+def basic_kv_1(conductor: ClusterConductor, dir, log: Logger):
+    with KVSTestFixture(conductor, dir, log, node_count=2) as fx:
+        c = KVSMultiClient(fx.clients, "client", log)
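+        # view format: each shard name maps to a list of node views
+        # ({"address": "<ip>:<port>", "id": <index>}); here each node gets its own shard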
+        view = {'shard1': [conductor.get_nodes()[0].get_view()], 'shard2': [conductor.get_nodes()[1].get_view()]}
+        fx.broadcast_view(view)
+
+        r = c.put(0, "x", "1")
+        assert r.ok, f"expected ok for new key, got {r.status_code}"
+
+        r = c.put(1, "y", "2")
+        assert r.ok, f"expected ok for new key, got {r.status_code}"
+
+        r = c.get(0, "x")
+        assert r.ok, f"expected ok for get, got {r.status_code}"
+        assert r.json()["value"] == "1", f"wrong value returned: {r.json()}"
+
+        r = c.get(1, "x")
+        assert r.ok, f"expected ok for get, got {r.status_code}"
+        assert r.json()["value"] == "1", f"wrong value returned: {r.json()}"
+
+        r = c.get(0, "y")
+        assert r.ok, f"expected ok for get, got {r.status_code}"
+        assert r.json()["value"] == "2", f"wrong value returned: {r.json()}"
+
+        r = c.get(1, "y")
+        assert r.ok, f"expected ok for get, got {r.status_code}"
+        assert r.json()["value"] == "2", f"wrong value returned: {r.json()}"
+
+        return True, 0
+
+
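+# tests are registered by wrapping each test function in a TestCase;
+# additional cases can be appended to this list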
+BASIC_TESTS = [
+        TestCase("basic_kv_1", basic_kv_1)
+]
diff --git a/tests/hello.py b/tests/hello.py
new file mode 100644
index 0000000..f623fee
--- /dev/null
+++ b/tests/hello.py
@@ -0,0 +1,44 @@
+from typing import List, Dict, Any, Optional
+import requests
+
+from ..utils.containers import ClusterConductor
+from ..utils.util import Logger
+from ..utils.kvs_api import KVSClient
+
+
+def hello_cluster(conductor: ClusterConductor, dir, log: Logger):
+    # create a cluster
+    log("\n> SPAWN CLUSTER")
+    conductor.spawn_cluster(node_count=2)
+
+    # by default, all nodes are in the same partition, on the base network
+    # let's create two partitions, one with node 0 and one with node 1
+    log("\n> CREATE PARTITIONS")
+    conductor.create_partition(node_ids=[0], partition_id="p0")
+    conductor.create_partition(node_ids=[1], partition_id="p1")
+
+    # describe cluster
+    log("\n> DESCRIBE CLUSTER")
+    conductor.describe_cluster()
+
+    # talk to node 0 in the cluster
+    log("\n> TALK TO NODE 0")
+    n0_ep = conductor.node_external_endpoint(0)
+    n0_client = KVSClient(n0_ep)
+    n0_client.ping().raise_for_status()
+    log(f"  - node 0 is up at {n0_ep}")
+
+    # talk to node 1 in the cluster
+    log("\n> TALK TO NODE 1")
+    n1_ep = conductor.node_external_endpoint(1)
+    n1_client = KVSClient(n1_ep)
+    n1_client.ping().raise_for_status()
+    log(f"  - node 1 is up at {n1_ep}")
+
+    conductor.dump_all_container_logs(dir)
+    # clean up
+    log("\n> DESTROY CLUSTER")
+    conductor.destroy_cluster()
+
+    # return score/reason
+    return True, "ok"
diff --git a/tests/helper.py b/tests/helper.py
new file mode 100644
index 0000000..18b31db
--- /dev/null
+++ b/tests/helper.py
@@ -0,0 +1,117 @@
+from typing import List, Dict, Any
+from ..utils.kvs_api import DEFAULT_TIMEOUT
+
+from ..utils.containers import ClusterConductor
+from ..utils.kvs_api import KVSClient
+from ..utils.util import Logger
+
+class KVSTestFixture:
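+    """Spawns a cluster of node_count nodes, creates a KVSClient for each, and
+    tears the cluster down (dumping container logs) on exit; intended to be
+    used as a context manager."""
+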
+    def __init__(self, conductor: ClusterConductor, dir, log: Logger, node_count: int):
+        self.conductor = conductor
+        self.dir = dir
+        self.node_count = node_count
+        self.clients = []
+        self.log = log
+
+    def spawn_cluster(self):
+        self.log("\n> SPAWN CLUSTER")
+        self.conductor.spawn_cluster(node_count=self.node_count)
+
+        for i in range(self.node_count):
+            ep = self.conductor.node_external_endpoint(i)
+            self.clients.append(KVSClient(ep))
+
+            r = self.clients[i].ping()
+            assert r.status_code == 200, \
+                f"expected 200 for ping, got {r.status_code}"
+            self.log(f"  - node {i} is up: {r.text}")
+
+    def broadcast_view(self, view: Dict[str, List[Dict[str, Any]]]):
+        self.log(f"\n> SEND VIEW: {view}")
+        for i, client in enumerate(self.clients):
+            r = client.send_view(view)
+            assert (
+                r.status_code == 200
+            ), f"expected 200 to ack view, got {r.status_code}"
+            self.log(f"view sent to node {i}: {r.status_code} {r.text}")
+
+    def send_view(self, node_id: int, view: Dict[str, List[Dict[str, Any]]]):
+        r = self.clients[node_id].send_view(view)
+        assert r.status_code == 200, \
+            f"expected 200 to ack view, got {r.status_code}"
+        self.log(f"view sent to node {node_id}: {r.status_code} {r.text}")
+
+    def destroy_cluster(self):
+        self.conductor.dump_all_container_logs(self.dir)
+        self.log("\n> DESTROY CLUSTER")
+        self.conductor.destroy_cluster()
+
+    def __enter__(self):
+        self.spawn_cluster()
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.destroy_cluster()
+
+
+class KVSMultiClient:
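+    """Wraps the per-node clients and threads the causal-metadata from each
+    response into the next request, acting as a single logical client."""
+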
+    def __init__(self, clients: List[KVSClient], name: str, log: Logger):
+        self.clients = clients
+        self.metadata = None
+        self.name = name
+        self.req = 0
+        self.log = log
+
+        # internal model of kvs
+        self._kvs_model = {}
+
+    def reset_model(self):
+        self._kvs_model = {}
+        self.metadata = None
+
+    def put(self, node_id: int, key: str, value: str, timeout: float = DEFAULT_TIMEOUT):
+        self.log(f" {self.name} req_id:{self.req} > {
+                 node_id} > kvs.put {key} <- {value}")
+
+        r = self.clients[node_id].put(
+            key, value, self.metadata, timeout=timeout)
+
+        # update model if successful
+        if r.status_code // 100 == 2:
+            self._kvs_model[key] = value
+            self.log(f" {self.name} req_id:{self.req} {r.json()}")
+            self.metadata = r.json()["causal-metadata"]
+
+        self.req += 1
+        return r
+
+    def get(self, node_id: int, key: str, timeout: float = DEFAULT_TIMEOUT):
+        self.log(f" {self.name} req_id:{self.req} > {node_id}> kvs.get {
+                 key} request \"causal-metadata\": {self.metadata}")
+        r = self.clients[node_id].get(key, self.metadata, timeout=timeout)
+
+        if r.status_code // 100 == 2:
+            self.log(f" {self.name} req_id:{self.req} > {
+                     node_id}> kvs.get {key} -> {r.json()}")
+            self.metadata = r.json()["causal-metadata"]
+        else:
+            self.log(f" {self.name} req_id:{self.req} > {
+                     node_id}> kvs.get {key} -> HTTP ERROR {r.status_code}")
+
+        self.req += 1
+        return r
+
+    def get_all(self, node_id: int, timeout: float = DEFAULT_TIMEOUT):
+        self.log(f" {self.name} req_id:{self.req} > {
+                 node_id}> kvs.get_all request \"causal-metadata\": {self.metadata}")
+        r = self.clients[node_id].get_all(self.metadata, timeout=timeout)
+        if r.status_code // 100 == 2:
+            self.log(f" {self.name} req_id:{self.req} > {
+                     node_id}> kvs.get_all -> {r.json()}")
+            self.metadata = r.json()["causal-metadata"]
+        else:
+            self.log(f" {self.name} req_id:{self.req} > {
+                     node_id}> kvs.get_all -> HTTP ERROR {r.status_code}")
+
+        self.req += 1
+        return r
diff --git a/utils/containers.py b/utils/containers.py
new file mode 100644
index 0000000..abba8fd
--- /dev/null
+++ b/utils/containers.py
@@ -0,0 +1,487 @@
+from typing import List
+from dataclasses import dataclass
+import os
+import json
+import subprocess
+import time
+import re
+
+import requests
+
+from .util import run_cmd_bg, Logger
+
+CONTAINER_ENGINE = os.getenv("ENGINE", "docker")
+
+
+class ContainerBuilder:
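+    """Builds the container image from project_dir and then inspects the image
+    to confirm the build actually produced it."""
+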
+    def __init__(self, project_dir: str, image_id: str):
+        self.project_dir = project_dir
+        self.image_id = image_id
+
+    def build_image(self, log: Logger) -> None:
+        # ensure we are able to build the container image
+        log(f"building container image {self.image_id}...")
+
+        cmd = [CONTAINER_ENGINE, "build", "-t",
+               self.image_id, self.project_dir]
+        run_cmd_bg(cmd, verbose=True,
+                   error_prefix="failed to build container image")
+
+        # ensure the image exists
+        log(f"inspecting container image {self.image_id}...")
+        cmd = [CONTAINER_ENGINE, "image", "inspect", self.image_id]
+        run_cmd_bg(cmd, verbose=True,
+                   error_prefix="failed to inspect container image")
+
+
+@dataclass
+class ClusterNode:
+    name: str  # container name
+    index: int  # container global id/index
+    ip: str  # container ip on current/primary network
+    port: int  # container http service port
+    external_port: (
+        int  # host's mapped external port forwarded to container's service port
+    )
+    networks: List[str]  # networks the container is attached to
+
+    def get_view(self) -> dict:
+        return {"address": f"{self.ip}:{self.port}", "id": self.index}
+
+    def internal_endpoint(self) -> str:
+        return f"http://{self.ip}:{self.port}"
+
+    def external_endpoint(self) -> str:
+        return f"http://localhost:{self.external_port}"
+
+
+class ClusterConductor:
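+    """Manages the node containers and the container networks used to model
+    partitions; uses docker by default, or podman when ENGINE=podman is set."""
+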
+    def __init__(self, group_id: str, base_image: str, log: Logger, external_port_base: int = 8081):
+        self.group_id = group_id
+        self.base_image = base_image
+        self.base_port = external_port_base
+        self.nodes: List[ClusterNode] = []
+
+        # naming patterns
+        self.group_ctr_prefix = f"kvs_{group_id}_node"
+        self.group_net_prefix = f"kvs_{group_id}_net"
+
+        # base network
+        self.base_net_name = f"{self.group_net_prefix}_base"
+
+        self.log = log
+
+    def _list_containers(self) -> List[str]:
+        # get list of all container names
+        try:
+            output = subprocess.check_output(
+                [CONTAINER_ENGINE, "ps", "-a", "--format", "{{.Names}}"]
+            )
+            return output.decode().strip().split("\n")
+        except subprocess.CalledProcessError as e:
+            self.log("failed to list containers")
+            self.log(e.stderr.decode())
+            raise
+
+    def _list_networks(self) -> List[str]:
+        # get list of all network names
+        try:
+            output = subprocess.check_output(
+                [CONTAINER_ENGINE, "network", "ls", "--format", "{{.Name}}"]
+            )
+            return output.decode().strip().split("\n")
+        except subprocess.CalledProcessError as e:
+            self.log("failed to list networks")
+            self.log(e.stderr.decode())
+            raise
+
+    def _make_remove_cmd(self, name: str) -> List[str]:
+        if CONTAINER_ENGINE == "podman":
+            return [CONTAINER_ENGINE, "rm", "-f", "-t", "0", name]
+        else:
+            return [CONTAINER_ENGINE, "rm", "-f", name]
+
+    def dump_all_container_logs(self, dir):
+        self.log("dumping logs of kvs containers")
+        container_pattern = "^kvs_.*"
+        container_regex = re.compile(container_pattern)
+
+        containers = self._list_containers()
+        for container in containers:
+            if container and container_regex.match(container):
+                self._dump_container_logs(dir, container)
+
+    def _dump_container_logs(self, dir, name: str) -> None:
+        log_file = os.path.join(dir, f"{name}.log")
+        self.log(f"Dumping logs for container {name} to file {log_file}")
+
+        # Construct the logs command. Docker and Podman both support the "logs" command.
+        logs_cmd = [CONTAINER_ENGINE, "logs", name]
+
+        try:
+            logs_output = subprocess.check_output(
+                logs_cmd, stderr=subprocess.STDOUT)
+            with open(log_file, "wb") as f:
+                f.write(logs_output)
+            self.log(f"Successfully wrote logs for container {
+                     name} to {log_file}")
+        except subprocess.CalledProcessError as e:
+            self.log(f"Error dumping logs for container {
+                     name}: {e.output.decode().strip()}")
+        except Exception as e:
+            self.log(
+                f"Unexpected error dumping logs for container {name}: {e}")
+
+    def _remove_container(self, name: str) -> None:
+        # remove a single container
+        self.log(f"removing container {name}")
+        run_cmd_bg(
+            self._make_remove_cmd(name),
+            verbose=True,
+            error_prefix=f"failed to remove container {name}",
+        )
+
+    def _remove_network(self, name: str) -> None:
+        # remove a single network
+        self.log(f"removing network {name}")
+        run_cmd_bg(
+            [CONTAINER_ENGINE, "network", "rm", name],
+            verbose=True,
+            error_prefix=f"failed to remove network {name}",
+            check=False,
+        )
+
+    def _create_network(self, name: str) -> None:
+        # create a single network
+        self.log(f"creating network {name}")
+        run_cmd_bg(
+            [CONTAINER_ENGINE, "network", "create", name],
+            verbose=True,
+            error_prefix=f"failed to create network {name}",
+        )
+
+    def _network_exists(self, name: str) -> bool:
+        return name in self._list_networks()
+
+    def cleanup_hanging(self, group_only: bool = True) -> None:
+        # if group_only, only clean up stuff for this group
+        # otherwise clean up anything kvs related
+        if group_only:
+            self.log(f"cleaning up group {self.group_id}")
+            container_pattern = f"^kvs_{self.group_id}_.*"
+            network_pattern = f"^kvs_{self.group_id}_net_.*"
+        else:
+            self.log("cleaning up all kvs containers and networks")
+            container_pattern = "^kvs_.*"
+            network_pattern = "^kvs_net_.*"
+
+        # compile regex patterns
+        container_regex = re.compile(container_pattern)
+        network_regex = re.compile(network_pattern)
+
+        # cleanup containers
+        self.log(f"  cleaning up {
+                 'group' if group_only else 'all'} containers")
+        containers = self._list_containers()
+        for container in containers:
+            if container and container_regex.match(container):
+                self._remove_container(container)
+
+        # cleanup networks
+        self.log(f"  cleaning up {'group' if group_only else 'all'} networks")
+        networks = self._list_networks()
+        for network in networks:
+            if network and network_regex.match(network):
+                self._remove_network(network)
+
+    # we can check if a node is online by GET /ping
+    def _is_online(self, node: ClusterNode) -> bool:
+        try:
+            r = requests.get(f"{node.external_endpoint()}/ping")
+            return r.status_code == 200
+        except requests.exceptions.RequestException as e:
+            self.log(f"node {node.name} is not online: {e}")
+            return False
+
+    def _node_name(self, index: int) -> str:
+        return f"kvs_{self.group_id}_node_{index}"
+
+    def node_external_endpoint(self, index: int) -> str:
+        return self.nodes[index].external_endpoint()
+
+    # create a cluster of nodes on the base network
+    def spawn_cluster(self, node_count: int) -> None:
+        self.log(f"spawning cluster of {node_count} nodes")
+
+        # delete base network if it exists
+        run_cmd_bg([CONTAINER_ENGINE, "network", "rm",
+                   self.base_net_name], check=False, log=self.log)
+
+        # create base network
+        run_cmd_bg(
+            [CONTAINER_ENGINE, "network", "create", self.base_net_name],
+            verbose=True,
+            error_prefix="failed to create base network",
+            log=self.log,
+        )
+
+        # spawn the nodes
+        for i in range(node_count):
+            node_name = self._node_name(i)
+            # map to sequential external port
+            external_port = self.base_port + i
+            port = 8081  # internal port
+
+            self.log(f"  starting container {
+                     node_name} (ext_port={external_port})")
+
+            # start container detached from networks
+            run_cmd_bg(
+                [
+                    CONTAINER_ENGINE,
+                    "run",
+                    "-d",
+                    "--name",
+                    node_name,
+                    "--env",
+                    f"NODE_IDENTIFIER={i}",
+                    "-p",
+                    f"{external_port}:{port}",
+                    self.base_image,
+                ],
+                verbose=True,
+                error_prefix=f"failed to start container {node_name}",
+                log=self.log,
+            )
+
+            # attach container to base network
+            self.log(f"    attaching container {node_name} to base network")
+            run_cmd_bg(
+                [CONTAINER_ENGINE, "network", "connect",
+                    self.base_net_name, node_name],
+                verbose=True,
+                error_prefix=f"failed to attach container {
+                    node_name} to base network",
+                log=self.log,
+            )
+
+            # inspect the container to get ip, etc.
+            self.log(f"    inspecting container {node_name}")
+            try:
+                inspect = subprocess.run(
+                    [CONTAINER_ENGINE, "inspect", node_name],
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    check=True,
+                )
+            except subprocess.CalledProcessError as e:
+                self.log(f"failed to inspect container {node_name}")
+                self.log(e.stderr.decode())
+                raise
+            info = json.loads(inspect.stdout)[0]
+
+            container_ip = info["NetworkSettings"]["Networks"][self.base_net_name][
+                "IPAddress"
+            ]
+
+            # store container metadata
+            node = ClusterNode(
+                name=node_name,
+                index=i,
+                ip=container_ip,
+                port=port,
+                external_port=external_port,
+                networks=[self.base_net_name],
+            )
+            self.nodes.append(node)
+
+            self.log(f"    container {
+                     node_name} spawned, base_net_ip={container_ip}")
+
+        # wait for the nodes to come online (sequentially)
+        self.log("waiting for nodes to come online...")
+        wait_online_start = time.time()
+        # wait_online_timeout = 10
+        for i in range(node_count):
+            node = self.nodes[i]
+            while not self._is_online(node):
+                if time.time() - wait_online_start > 10:
+                    raise RuntimeError(f"node {node.name} did not come online")
+                time.sleep(0.2)
+
+            self.log(f"  node {node.name} online")
+
+        self.log("all nodes online")
+
+    def destroy_cluster(self) -> None:
+        # clean up after this group
+        self.cleanup_hanging(group_only=True)
+
+        # clear nodes
+        self.nodes.clear()
+
+    def describe_cluster(self) -> None:
+        self.log(f"TOPOLOGY: group {self.group_id}")
+        self.log("nodes:")
+        for node in self.nodes:
+            self.log(
+                f"  {node.name}: {node.ip}:{
+                    node.port} <-> localhost:{node.external_port}"
+            )
+
+        # now log the partitions and the nodes they contain
+        partitions = {}
+        for node in self.nodes:
+            for network in node.networks:
+                if network not in partitions:
+                    partitions[network] = []
+                partitions[network].append(node.index)
+
+        self.log("partitions:")
+        for net, nodes in partitions.items():
+            part_name = net[len(self.group_net_prefix) + 1:]
+            self.log(f"  {part_name}: {nodes}")
+
+    def my_partition(self, node_ids: List[int], partition_id: str) -> None:
+
+        net_name = f"kvs_{self.group_id}_net_{partition_id}"
+
+        self.log(f"creating partition {partition_id} with nodes {node_ids}")
+
+        # create partition network if it doesn't exist
+        if not self._network_exists(net_name):
+            self.log(f"creating network {net_name}")
+            self._create_network(net_name)
+
+        # disconnect specified nodes from all other networks
+        self.log("  disconnecting nodes from other networks")
+        for i in node_ids:
+            node = self.nodes[i]
+            for network in node.networks:
+                if network != net_name:
+                    self.log(f"    disconnecting {
+                             node.name} from network {network}")
+                    run_cmd_bg(
+                        [CONTAINER_ENGINE, "network",
+                            "disconnect", network, node.name],
+                        verbose=True,
+                        error_prefix=f"failed to disconnect {
+                            node.name} from network {network}",
+                    )
+                    node.networks.remove(network)
+
+        # connect nodes to partition network, and update node ip
+        self.log(f"  connecting nodes to partition network {net_name}")
+        for i in node_ids:
+            node = self.nodes[i]
+
+            self.log(f"node.networks: {node.networks}")
+            if net_name in node.networks:
+                self.log("network alr exists!")
+                continue
+
+            self.log(f"    connecting {node.name} to network {net_name}")
+            run_cmd_bg(
+                [CONTAINER_ENGINE, "network", "connect", net_name, node.name],
+                verbose=True,
+                error_prefix=f"failed to connect {
+                    node.name} to network {net_name}",
+            )
+            node.networks.append(net_name)
+
+            # update node ip on the new network
+            inspect = subprocess.run(
+                [CONTAINER_ENGINE, "inspect", node.name],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                check=True,
+            )
+            info = json.loads(inspect.stdout)[0]
+            container_ip = info["NetworkSettings"]["Networks"][net_name]["IPAddress"]
+            self.log(f"    node {node.name} ip in network {
+                     net_name}: {container_ip}")
+
+            # update node ip
+            node.ip = container_ip
+
+    def create_partition(self, node_ids: List[int], partition_id: str) -> None:
+        net_name = f"kvs_{self.group_id}_net_{partition_id}"
+
+        self.log(f"creating partition {partition_id} with nodes {node_ids}")
+
+        # create partition network if it doesn't exist
+        if not self._network_exists(net_name):
+            self._create_network(net_name)
+
+        # disconnect specified nodes from all other networks
+        self.log("  disconnecting nodes from other networks")
+        for i in node_ids:
+            node = self.nodes[i]
+            for network in node.networks:
+                if network != net_name:
+                    self.log(f"    disconnecting {
+                             node.name} from network {network}")
+                    run_cmd_bg(
+                        [CONTAINER_ENGINE, "network",
+                            "disconnect", network, node.name],
+                        verbose=True,
+                        error_prefix=f"failed to disconnect {
+                            node.name} from network {network}",
+                    )
+                    node.networks.remove(network)
+
+        # connect nodes to partition network, and update node ip
+        self.log(f"  connecting nodes to partition network {net_name}")
+        for i in node_ids:
+            node = self.nodes[i]
+            self.log(f"    connecting {node.name} to network {net_name}")
+            run_cmd_bg(
+                [CONTAINER_ENGINE, "network", "connect", net_name, node.name],
+                verbose=True,
+                error_prefix=f"failed to connect {
+                    node.name} to network {net_name}",
+            )
+            node.networks.append(net_name)
+
+            # update node ip on the new network
+            inspect = subprocess.run(
+                [CONTAINER_ENGINE, "inspect", node.name],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                check=True,
+            )
+            info = json.loads(inspect.stdout)[0]
+            container_ip = info["NetworkSettings"]["Networks"][net_name]["IPAddress"]
+            self.log(f"    node {node.name} ip in network {
+                     net_name}: {container_ip}")
+
+            # update node ip
+            node.ip = container_ip
+
+    DeprecationWarning("View is in updated format")
+    def get_full_view(self):
+        view = []
+        for node in self.nodes:
+            view.append({"address": f"{node.ip}:{
+                        node.port}", "id": node.index})
+        return view
+
+    def get_nodes(self):
+        return self.nodes
+
+
+    def get_partition_view(self, partition_id: str):
+        net_name = f"kvs_{self.group_id}_net_{partition_id}"
+        view = []
+        for node in self.nodes:
+            if net_name in node.networks:
+                view.append({"address": f"{node.ip}:{ \
+                            node.port}", "id": node.index})
+        return view
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        # clean up automatically
+        self.destroy_cluster()
diff --git a/utils/kvs_api.py b/utils/kvs_api.py
new file mode 100644
index 0000000..6f6de12
--- /dev/null
+++ b/utils/kvs_api.py
@@ -0,0 +1,112 @@
+import requests
+from typing import Dict, Any, List, Optional
+
+"""
+Request Timeout status code.
+Technically not proper, as the server is the one that should return the 408 in traditional usage,
+but it is a good enough indicator of a timeout for our purposes.
+"""
+REQUEST_TIMEOUT_STATUS_CODE = 408
+DEFAULT_TIMEOUT = 10
+
+
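+# build the JSON request body the KVS endpoints expect:
+# {"causal-metadata": {...}} plus an optional "value" field (used by PUT)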
+def create_json(metadata, value=None):
+    result = {"causal-metadata": {}}
+    if metadata is not None:
+        result["causal-metadata"] = metadata
+    if value is not None:
+        result["value"] = value
+    return result
+
+# client for kvs api
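+#
+# typical usage, as a minimal sketch (assumes a node whose service port is
+# mapped to localhost:8081):
+#
+#   client = KVSClient("http://localhost:8081")
+#   r = client.put("x", "1", metadata=None)      # first write, no metadata yet
+#   meta = r.json()["causal-metadata"]
+#   r = client.get("x", metadata=meta)           # read that depends on the write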
+
+
+class KVSClient:
+    def __init__(self, base_url: str):
+        # set base url without trailing slash
+        self.base_url = base_url if not base_url.endswith(
+            "/") else base_url[:-1]
+
+    def ping(self, timeout: float = DEFAULT_TIMEOUT) -> requests.Response:
+        if timeout is not None:
+            try:
+                return requests.get(f"{self.base_url}/ping", timeout=timeout)
+            except requests.exceptions.Timeout:
+                r = requests.Response()
+                r.status_code = REQUEST_TIMEOUT_STATUS_CODE
+                return r
+        else:
+            return requests.get(f"{self.base_url}/ping")
+
+    def get(self, key: str, metadata: Optional[Dict[str, Any]], timeout: float = DEFAULT_TIMEOUT) -> requests.Response:
+        if not key:
+            raise ValueError("key cannot be empty")
+
+        if timeout is not None:
+            try:
+                return requests.get(f"{self.base_url}/data/{key}", json=create_json(metadata), timeout=timeout)
+            except requests.exceptions.Timeout:
+                r = requests.Response()
+                r.status_code = REQUEST_TIMEOUT_STATUS_CODE
+                return r
+        else:
+            return requests.get(f"{self.base_url}/data/{key}", json=create_json(metadata))
+
+    def put(self, key: str, value: str, metadata: Optional[Dict[str, Any]], timeout: float = DEFAULT_TIMEOUT) -> requests.Response:
+        if not key:
+            raise ValueError("key cannot be empty")
+
+        if timeout is not None:
+            try:
+                return requests.put(f"{self.base_url}/data/{key}", json=create_json(metadata, value), timeout=timeout)
+            except requests.exceptions.Timeout:
+                r = requests.Response()
+                r.status_code = REQUEST_TIMEOUT_STATUS_CODE
+                return r
+        else:
+            return requests.put(f"{self.base_url}/data/{key}", json=create_json(metadata, value))
+
+    def delete(self, key: str, timeout: float = DEFAULT_TIMEOUT) -> requests.Response:
+        if not key:
+            raise ValueError("key cannot be empty")
+
+        if timeout is not None:
+            try:
+                return requests.delete(f"{self.base_url}/data/{key}", timeout=timeout)
+            except requests.exceptions.Timeout:
+                r = requests.Response()
+                r.status_code = REQUEST_TIMEOUT_STATUS_CODE
+                return r
+        else:
+            return requests.delete(f"{self.base_url}/data/{key}")
+
+    def get_all(self, metadata: Optional[Dict[str, Any]], timeout: float = DEFAULT_TIMEOUT) -> requests.Response:
+        if timeout is not None:
+            try:
+                return requests.get(f"{self.base_url}/data", json=create_json(metadata), timeout=timeout)
+            except requests.exceptions.Timeout:
+                r = requests.Response()
+                r.status_code = REQUEST_TIMEOUT_STATUS_CODE
+                return r
+        else:
+            return requests.get(f"{self.base_url}/data")
+
+    def clear(self, timeout: float = DEFAULT_TIMEOUT) -> None:
+        response = self.get_all(None, timeout=timeout)
+        if response.status_code != 200:
+            raise RuntimeError(f"failed to get keys: {response.status_code}")
+
+        for key in response.json().keys():
+            delete_response = self.delete(key, timeout=timeout)
+            if delete_response.status_code != 200:
+                raise RuntimeError(
+                    f"failed to delete key {key}: {
+                        delete_response.status_code}"
+                )
+
+    def send_view(self, view: dict[str, List[Dict[str, Any]]], timeout: float = DEFAULT_TIMEOUT) -> requests.Response:
+        if not isinstance(view, dict):
+            raise ValueError("view must be a dict")
+
+        request_body = {"view": view}
+        return requests.put(f"{self.base_url}/view", json=request_body, timeout=timeout)
diff --git a/utils/testcase.py b/utils/testcase.py
new file mode 100644
index 0000000..746b582
--- /dev/null
+++ b/utils/testcase.py
@@ -0,0 +1,24 @@
+from typing import Callable
+
+
+class TestCase:
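+    """A single named test; execute() runs it, records the (score, reason) it
+    returns, and converts an uncaught exception into a failure."""
+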
+    def __init__(self, name: str, fn: Callable, weight: int = 1):
+        self.name = name
+        self.run = fn
+        self.weight = weight
+
+        self.score = None
+        self.reason = None
+
+    def execute(self, *args, **kwargs):
+        # expect to receive a pass/fail and a reason
+        try:
+            self.score, self.reason = self.run(*args, **kwargs)
+        except Exception as e:
+            self.score = False
+            self.reason = f"FAIL: {e}"
+
+        return self.score, self.reason
+
+    def __str__(self):
+        return f"{self.name}: {self.score} ({self.reason})"
diff --git a/utils/util.py b/utils/util.py
new file mode 100644
index 0000000..e3da95e
--- /dev/null
+++ b/utils/util.py
@@ -0,0 +1,45 @@
+from dataclasses import dataclass
+import sys
+import subprocess
+from typing import TextIO, Optional
+from collections.abc import Collection
+
+@dataclass
+class Logger:
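+    """Minimal logger that prints to each open file in files, optionally with
+    a prefix; instances are called like print()."""
+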
+    files: Collection[TextIO]
+    prefix: Optional[str] = None
+
+    def __call__(self, *args):
+        if self.prefix is not None:
+            prefix = (f"{self.prefix}: ",)
+        else:
+            prefix = ()
+        for file in self.files:
+            print(*prefix, *args, file=file)
+
+_GLOBAL_LOGGER = Logger(prefix=None, files=(sys.stderr,))
+
+def log(*args):
+    _GLOBAL_LOGGER(*args)
+
+def global_logger() -> Logger:
+    return _GLOBAL_LOGGER
+
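+# run a command to completion, capturing stdout/stderr as text; by default
+# (check=True) a non-zero exit raises a RuntimeError that includes the output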
+def run_cmd_bg(
+    cmd: list[str], log: Logger = _GLOBAL_LOGGER, verbose=False, error_prefix: str = "command failed", **kwargs
+) -> subprocess.CompletedProcess:
+    # default capture opts
+    kwargs.setdefault("stdout", subprocess.PIPE)
+    kwargs.setdefault("stderr", subprocess.PIPE)
+    kwargs.setdefault("text", True)
+    kwargs.setdefault("check", True)
+
+    if verbose:
+        log(f"$ {cmd[0]} {' '.join(cmd[1:])}")
+
+    try:
+        return subprocess.run(cmd, **kwargs)
+    except subprocess.CalledProcessError as e:
+        raise RuntimeError(
+            f"{error_prefix}: {e}:\nstdout: {e.stdout}\nstderr: {e.stderr}"
+        )
-- 
GitLab