From aa424898d22d5207b28d96eaa7033066b7a7cbc8 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 12:17:58 +0200 Subject: [PATCH 001/710] Generate yaml config from graph --- src/warnet/cli/network.py | 97 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index b81cbaec6..4976f6a14 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -2,6 +2,8 @@ import json from pathlib import Path from importlib.resources import files +import networkx as nx +import yaml import click from rich import print @@ -172,3 +174,98 @@ def logs(follow: bool): stream_output = True run_command(command, stream_output=stream_output) + + +@network.command() +@click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) +@click.option("--output", "-o", default="warnet-deployment.yaml", help="Output YAML file") +def generate_yaml(graph_file: Path, output: str): + """ + Generate a Kubernetes YAML file from a graph file for deploying warnet nodes. 
+ """ + # Read and parse the graph file + graph = read_graph_file(graph_file) + + # Generate the Kubernetes YAML + kubernetes_yaml = generate_kubernetes_yaml(graph) + + # Write the YAML to a file + with open(output, "w") as f: + yaml.dump_all(kubernetes_yaml, f) + + print(f"Kubernetes YAML file generated: {output}") + + +def read_graph_file(graph_file: Path) -> nx.Graph: + with open(graph_file) as f: + return nx.parse_graphml(f.read()) + + +def generate_kubernetes_yaml(graph: nx.Graph) -> list: + kubernetes_objects = [] + + # Add Namespace object + namespace = create_namespace() + kubernetes_objects.append(namespace) + + for node, data in graph.nodes(data=True): + # Create a deployment for each node + deployment = create_node_deployment(node, data) + kubernetes_objects.append(deployment) + + # Create a service for each node + service = create_node_service(node) + kubernetes_objects.append(service) + + return kubernetes_objects + + +def create_namespace() -> dict: + return {"apiVersion": "v1", "kind": "Namespace", "metadata": {"name": "warnet"}} + + +def create_node_deployment(node: int, data: dict) -> dict: + image = data.get("image", "bitcoindevproject/bitcoin:27.0") + version = data.get("version", "27.0") + bitcoin_config = data.get("bitcoin_config", "") + + return { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": f"warnet-node-{node}", + "namespace": "warnet", + "labels": {"app": "warnet", "node": str(node)}, + }, + "spec": { + "replicas": 1, + "selector": {"matchLabels": {"app": "warnet", "node": str(node)}}, + "template": { + "metadata": {"labels": {"app": "warnet", "node": str(node)}}, + "spec": { + "containers": [ + { + "name": "bitcoin", + "image": image, + "env": [ + {"name": "BITCOIN_VERSION", "value": version}, + {"name": "BITCOIN_CONFIG", "value": bitcoin_config}, + ], + } + ] + }, + }, + }, + } + + +def create_node_service(node: int) -> dict: + return { + "apiVersion": "v1", + "kind": "Service", + "metadata": {"name": 
f"warnet-node-{node}-service", "namespace": "warnet"}, + "spec": { + "selector": {"app": "warnet", "node": str(node)}, + "ports": [{"port": 8333, "targetPort": 8333}], + }, + } From e65221d19df07497abe6e28ba2a549f9cbde46d6 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 12:53:57 +0200 Subject: [PATCH 002/710] cli: start and stop networks --- src/warnet/cli/network.py | 192 +++++++++++++------------------------- 1 file changed, 64 insertions(+), 128 deletions(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 4976f6a14..094c74e21 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -1,46 +1,17 @@ -import base64 # noqa: I001 import json -from pathlib import Path +import tempfile from importlib.resources import files -import networkx as nx -import yaml +from pathlib import Path import click +import networkx as nx +import yaml from rich import print -from rich.console import Console -from rich.table import Table -from .rpc import rpc_call # noqa: I001 from .util import run_command - DEFAULT_GRAPH_FILE = files("graphs").joinpath("default.graphml") - - -def print_repr(wn: dict) -> None: - if not isinstance(wn, dict): - print("Error, cannot print_repr of non-dict") - return - console = Console() - - # Warnet table - warnet_table = Table(show_header=True, header_style="bold") - for header in wn["warnet_headers"]: - warnet_table.add_column(header) - for row in wn["warnet"]: - warnet_table.add_row(*[str(cell) for cell in row]) - - # Tank table - tank_table = Table(show_header=True, header_style="bold") - for header in wn["tank_headers"]: - tank_table.add_column(header) - for row in wn["tanks"]: - tank_table.add_row(*[str(cell) for cell in row]) - - console.print("Warnet:") - console.print(warnet_table) - console.print("\nTanks:") - console.print(tank_table) +WAR_MANIFESTS = files("manifests") @click.group(name="network") @@ -48,6 +19,19 @@ def network(): """Network commands""" +def set_kubectl_context(namespace: 
str): + """ + Set the default kubectl context to the specified namespace. + """ + command = f"kubectl config set-context --current --namespace={namespace}" + result = run_command(command, stream_output=True) + if result: + print(f"Kubectl context set to namespace: {namespace}") + else: + print(f"Failed to set kubectl context to namespace: {namespace}") + return result + + @network.command() @click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) @click.option("--force", default=False, is_flag=True, type=bool) @@ -56,29 +40,47 @@ def start(graph_file: Path, force: bool, network: str): """ Start a warnet with topology loaded from a into [network] """ - try: - encoded_graph_file = "" - with open(graph_file, "rb") as graph_file_buffer: - encoded_graph_file = base64.b64encode(graph_file_buffer.read()).decode("utf-8") - except Exception as e: - print(f"Error encoding graph file: {e}") - return - - result = rpc_call( - "network_from_file", - {"graph_file": encoded_graph_file, "force": force, "network": network}, - ) - assert isinstance(result, dict) - print_repr(result) + # Generate the Kubernetes YAML + graph = read_graph_file(graph_file) + kubernetes_yaml = generate_kubernetes_yaml(graph) + # Write the YAML to a temporary file + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: + yaml.dump_all(kubernetes_yaml, temp_file) + temp_file_path = temp_file.name -@network.command() -@click.option("--network", default="warnet", show_default=True) -def up(network: str): - """ - Bring up a previously-stopped warnet named [network] - """ - print(rpc_call("network_up", {"network": network})) + try: + # Deploy base configurations + base_configs = [ + "namespace.yaml", + "rbac-config.yaml", + ] + + for config in base_configs: + command = f"kubectl apply -f {WAR_MANIFESTS}/{config}" + result = run_command(command, stream_output=True) + if not result: + print(f"Failed to apply {config}") + return + + # Apply the YAML using 
kubectl + command = f"kubectl apply -f {temp_file_path}" + result = run_command(command, stream_output=True) + + if result: + print(f"Warnet '{network}' started successfully.") + + # Set kubectl context to the warnet namespace + context_result = set_kubectl_context(network) + if not context_result: + print( + "Warning: Failed to set kubectl context. You may need to manually switch to the warnet namespace." + ) + else: + print(f"Failed to start warnet '{network}'.") + finally: + # Clean up the temporary file + Path(temp_file_path).unlink() @network.command() @@ -87,80 +89,14 @@ def down(network: str): """ Bring down a running warnet named [network] """ + # Delete the namespace + command = f"kubectl delete namespace {network}" + result = run_command(command, stream_output=True) - running_scenarios = rpc_call("scenarios_list_running", {}) - assert isinstance(running_scenarios, list) - if running_scenarios: - for scenario in running_scenarios: - pid = scenario.get("pid") - if pid: - try: - params = {"pid": pid} - rpc_call("scenarios_stop", params) - except Exception as e: - print( - f"Exception when stopping scenario: {scenario} with PID {scenario.pid}: {e}" - ) - print("Continuing with shutdown...") - continue - print(rpc_call("network_down", {"network": network})) - - -@network.command() -@click.option("--network", default="warnet", show_default=True) -def info(network: str): - """ - Get info about a warnet named [network] - """ - result = rpc_call("network_info", {"network": network}) - assert isinstance(result, dict), "Result is not a dict" # Make mypy happy - print_repr(result) - - -@network.command() -@click.option("--network", default="warnet", show_default=True) -def status(network: str): - """ - Get status of a warnet named [network] - """ - result = rpc_call("network_status", {"network": network}) - assert isinstance(result, list), "Result is not a list" # Make mypy happy - for tank in result: - lightning_status = "" - circuitbreaker_status = "" - if 
"lightning_status" in tank: - lightning_status = f"\tLightning: {tank['lightning_status']}" - if "circuitbreaker_status" in tank: - circuitbreaker_status = f"\tCircuit Breaker: {tank['circuitbreaker_status']}" - print( - f"Tank: {tank['tank_index']} \tBitcoin: {tank['bitcoin_status']}{lightning_status}{circuitbreaker_status}" - ) - - -@network.command() -@click.option("--network", default="warnet", show_default=True) -def connected(network: str): - """ - Indicate whether the all of the edges in the gaph file are connected in [network] - """ - print(rpc_call("network_connected", {"network": network})) - - -@network.command() -@click.option("--network", default="warnet", show_default=True) -@click.option("--activity", type=str) -@click.option("--exclude", type=str, default="[]") -def export(network: str, activity: str, exclude: str): - """ - Export all [network] data for a "simln" service running in a container - on the network. Optionally add JSON string [activity] to simln config. - Optionally provide a list of tank indexes to [exclude]. - Returns True on success. 
- """ - exclude = json.loads(exclude) - print( - rpc_call("network_export", {"network": network, "activity": activity, "exclude": exclude}) - ) + if result: + print(f"Warnet '{network}' has been successfully brought down and the namespace deleted.") + else: + print(f"Failed to bring down warnet '{network}' or delete the namespace.") @network.command() From 7370c3cb4fc3ba5dc4d882c1756789f69da0f4fb Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 13:09:56 +0200 Subject: [PATCH 003/710] spin up nodes as pods not deployments --- src/warnet/cli/network.py | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 094c74e21..b64ff4109 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -166,31 +166,24 @@ def create_node_deployment(node: int, data: dict) -> dict: bitcoin_config = data.get("bitcoin_config", "") return { - "apiVersion": "apps/v1", - "kind": "Deployment", + "apiVersion": "v1", + "kind": "Pod", "metadata": { "name": f"warnet-node-{node}", "namespace": "warnet", "labels": {"app": "warnet", "node": str(node)}, }, "spec": { - "replicas": 1, - "selector": {"matchLabels": {"app": "warnet", "node": str(node)}}, - "template": { - "metadata": {"labels": {"app": "warnet", "node": str(node)}}, - "spec": { - "containers": [ - { - "name": "bitcoin", - "image": image, - "env": [ - {"name": "BITCOIN_VERSION", "value": version}, - {"name": "BITCOIN_CONFIG", "value": bitcoin_config}, - ], - } - ] - }, - }, + "containers": [ + { + "name": "bitcoin", + "image": image, + "env": [ + {"name": "BITCOIN_VERSION", "value": version}, + {"name": "BITCOIN_CONFIG", "value": bitcoin_config}, + ], + } + ] }, } From 30b8d58153d274c0a68ce68029c31939c36c1b35 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 13:25:47 +0200 Subject: [PATCH 004/710] add logging to network.start --- src/warnet/cli/main.py | 2 - src/warnet/cli/network.py | 124 
++++++++++++++++++++++++++++++++++++-- src/warnet/utils.py | 1 + 3 files changed, 119 insertions(+), 8 deletions(-) diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index 3aa7e8dce..aad43ba35 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -6,7 +6,6 @@ from rich import print as richprint from .bitcoin import bitcoin -from .cluster import cluster from .graph import graph from .image import image from .ln import ln @@ -22,7 +21,6 @@ def cli(): cli.add_command(bitcoin) -cli.add_command(cluster) cli.add_command(graph) cli.add_command(image) cli.add_command(ln) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index b64ff4109..986b9b3f8 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -1,5 +1,5 @@ -import json import tempfile +import xml.etree.ElementTree as ET from importlib.resources import files from pathlib import Path @@ -34,9 +34,9 @@ def set_kubectl_context(namespace: str): @network.command() @click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) -@click.option("--force", default=False, is_flag=True, type=bool) @click.option("--network", default="warnet", show_default=True) -def start(graph_file: Path, force: bool, network: str): +@click.option("--logging/--no-logging", default=False) +def start(graph_file: Path, logging: bool, network: str): """ Start a warnet with topology loaded from a into [network] """ @@ -76,6 +76,13 @@ def start(graph_file: Path, force: bool, network: str): print( "Warning: Failed to set kubectl context. You may need to manually switch to the warnet namespace." 
) + + if logging: + helm_result = setup_logging_helm() + if helm_result: + print("Helm charts installed successfully.") + else: + print("Failed to install Helm charts.") else: print(f"Failed to start warnet '{network}'.") finally: @@ -92,6 +99,9 @@ def down(network: str): # Delete the namespace command = f"kubectl delete namespace {network}" result = run_command(command, stream_output=True) + # TODO: Fix this + command = "kubectl delete namespace warnet-logging" + result = run_command(command, stream_output=True) if result: print(f"Warnet '{network}' has been successfully brought down and the namespace deleted.") @@ -145,6 +155,11 @@ def generate_kubernetes_yaml(graph: nx.Graph) -> list: kubernetes_objects.append(namespace) for node, data in graph.nodes(data=True): + # Create a ConfigMap for each node + config = generate_node_config(node, data) + config_map = create_config_map(node, config) + kubernetes_objects.append(config_map) + # Create a deployment for each node deployment = create_node_deployment(node, data) kubernetes_objects.append(deployment) @@ -163,7 +178,6 @@ def create_namespace() -> dict: def create_node_deployment(node: int, data: dict) -> dict: image = data.get("image", "bitcoindevproject/bitcoin:27.0") version = data.get("version", "27.0") - bitcoin_config = data.get("bitcoin_config", "") return { "apiVersion": "v1", @@ -180,10 +194,17 @@ def create_node_deployment(node: int, data: dict) -> dict: "image": image, "env": [ {"name": "BITCOIN_VERSION", "value": version}, - {"name": "BITCOIN_CONFIG", "value": bitcoin_config}, + ], + "volumeMounts": [ + { + "name": "config", + "mountPath": "/root/.bitcoin/bitcoin.conf", + "subPath": "bitcoin.conf", + } ], } - ] + ], + "volumes": [{"name": "config", "configMap": {"name": f"bitcoin-config-node-{node}"}}], }, } @@ -198,3 +219,94 @@ def create_node_service(node: int) -> dict: "ports": [{"port": 8333, "targetPort": 8333}], }, } + + +def setup_logging_helm(): + """ + Run the required Helm commands for setting 
up Grafana, Prometheus, and Loki. + """ + helm_commands = [ + "helm repo add grafana https://grafana.github.io/helm-charts", + "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", + "helm repo update", + f"helm upgrade --install --namespace warnet-logging --create-namespace --values {WAR_MANIFESTS}/loki_values.yaml loki grafana/loki --version 5.47.2", + "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", + "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", + f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {WAR_MANIFESTS}/grafana_values.yaml", + ] + + for command in helm_commands: + result = run_command(command, stream_output=True) + if not result: + print(f"Failed to run Helm command: {command}") + return False + return True + + +@network.command() +@click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path(exists=True)) +def connect(graph_file: Path): + """ + Connect nodes based on the edges defined in the graph file. 
+ """ + # Parse the GraphML file + tree = ET.parse(graph_file) + root = tree.getroot() + + # Find all edge elements + edges = root.findall(".//{http://graphml.graphdrawing.org/xmlns}edge") + + for edge in edges: + source = edge.get("source") + target = edge.get("target") + + # Construct the kubectl command + command = f"kubectl exec -it warnet-node-{source} -- bitcoin-cli -rpcuser=user -rpcpassword=password addnode warnet-node-{target}-service:8333 add" + + print(f"Connecting node {source} to node {target}") + result = run_command(command, stream_output=True) + + if result: + print(f"Successfully connected node {source} to node {target}") + else: + print(f"Failed to connect node {source} to node {target}") + + print("All connections attempted.") + + +def generate_node_config(node: int, data: dict) -> str: + base_config = """ +regtest=1 +checkmempool=0 +acceptnonstdtxn=1 +debuglogfile=0 +logips=1 +logtimemicros=1 +capturemessages=1 +fallbackfee=0.00001000 +listen=1 + +[regtest] +rpcuser=user +rpcpassword=password +rpcport=18443 +rpcallowip=0.0.0.0/0 +rpcbind=0.0.0.0 + +zmqpubrawblock=tcp://0.0.0.0:28332 +zmqpubrawtx=tcp://0.0.0.0:28333 +""" + node_specific_config = data.get("bitcoin_config", "") + return f"{base_config}\n{node_specific_config}" + + +def create_config_map(node: int, config: str) -> dict: + return { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": f"bitcoin-config-node-{node}", + "namespace": "warnet", + }, + "data": {"bitcoin.conf": config}, + } diff --git a/src/warnet/utils.py b/src/warnet/utils.py index 23dad566b..c9d43f616 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -435,6 +435,7 @@ def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_ve graph.nodes[node]["build_args"] = "" graph.nodes[node]["exporter"] = False graph.nodes[node]["collect_logs"] = False + graph.nodes[node]["resources"] = None convert_unsupported_attributes(graph) return graph From 
493c575bf9f880a8d1ddff03af1bd3e1501c34cb Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 17:30:06 +0200 Subject: [PATCH 005/710] graph: use comma separated config lines in graph --- resources/graphs/default.graphml | 24 ++++++++++++------------ src/warnet/cli/network.py | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/resources/graphs/default.graphml b/resources/graphs/default.graphml index ce84579df..ec591b000 100644 --- a/resources/graphs/default.graphml +++ b/resources/graphs/default.graphml @@ -18,60 +18,60 @@ 27.0 - -uacomment=w0 + uacomment=w0 true true 27.0 - -uacomment=w1 + uacomment=w1 true true bitcoindevproject/bitcoin:26.0 - -uacomment=w2 -debug=mempool + uacomment=w2,debug=mempool true true 27.0 - -uacomment=w3 + uacomment=w3 true 27.0 - -uacomment=w4 + uacomment=w4 true 27.0 - -uacomment=w5 + uacomment=w5 true 27.0 - -uacomment=w6 + uacomment=w6 27.0 - -uacomment=w7 + uacomment=w7 27.0 - -uacomment=w8 + uacomment=w8 27.0 - -uacomment=w9 + uacomment=w9 27.0 - -uacomment=w10 + uacomment=w10 27.0 - -uacomment=w11 + uacomment=w11 diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 986b9b3f8..102ce3692 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -297,7 +297,7 @@ def generate_node_config(node: int, data: dict) -> str: zmqpubrawtx=tcp://0.0.0.0:28333 """ node_specific_config = data.get("bitcoin_config", "") - return f"{base_config}\n{node_specific_config}" + return f"{base_config}\n{node_specific_config.replace(",", "\n")}" def create_config_map(node: int, config: str) -> dict: From 7e336b6339809bf5a92f4741186989a052424f86 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 17:32:15 +0200 Subject: [PATCH 006/710] tidy logging install messages --- src/warnet/cli/network.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 102ce3692..600d7c0b4 100644 --- a/src/warnet/cli/network.py +++ 
b/src/warnet/cli/network.py @@ -76,10 +76,17 @@ def start(graph_file: Path, logging: bool, network: str): print( "Warning: Failed to set kubectl context. You may need to manually switch to the warnet namespace." ) +<<<<<<< HEAD if logging: helm_result = setup_logging_helm() if helm_result: +======= + if not logging: + print("Skipping install of logging charts") + else: + if setup_logging_helm(): +>>>>>>> c109e2a (tidy logging install messages) print("Helm charts installed successfully.") else: print("Failed to install Helm charts.") From 5ac375697086b698fdf69d125fa3a747f686f69d Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 14:04:50 +0200 Subject: [PATCH 007/710] reorg --- {src/warnet => old}/server.py | 0 pyproject.toml | 1 - src/.DS_Store | Bin 0 -> 6148 bytes src/warnet/backend/__init__.py | 0 src/warnet/backend/kubernetes_backend.py | 885 ----------------------- src/warnet/cli/graph.py | 3 +- src/warnet/cli/main.py | 8 +- src/warnet/cli/network.py | 249 +++---- src/warnet/cli/rpc.py | 3 +- src/warnet/cli/util.py | 180 +++++ src/warnet/{ => cli}/utils.py | 1 + src/warnet/cln.py | 198 ----- src/warnet/lnchannel.py | 148 ---- src/warnet/lnd.py | 191 ----- src/warnet/lnnode.py | 99 --- src/warnet/services.py | 36 - src/warnet/status.py | 9 - src/warnet/tank.py | 198 ----- src/warnet/warnet.py | 283 -------- 19 files changed, 297 insertions(+), 2195 deletions(-) rename {src/warnet => old}/server.py (100%) create mode 100644 src/.DS_Store delete mode 100644 src/warnet/backend/__init__.py delete mode 100644 src/warnet/backend/kubernetes_backend.py rename src/warnet/{ => cli}/utils.py (99%) delete mode 100644 src/warnet/cln.py delete mode 100644 src/warnet/lnchannel.py delete mode 100644 src/warnet/lnd.py delete mode 100644 src/warnet/lnnode.py delete mode 100644 src/warnet/services.py delete mode 100644 src/warnet/status.py delete mode 100644 src/warnet/tank.py delete mode 100644 src/warnet/warnet.py diff --git a/src/warnet/server.py b/old/server.py 
similarity index 100% rename from src/warnet/server.py rename to old/server.py diff --git a/pyproject.toml b/pyproject.toml index ef349b802..5485098a8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,6 @@ classifiers = [ dynamic = ["dependencies"] [project.scripts] -warnet = "warnet.server:run_server" warcli = "warnet.cli.main:cli" [project.urls] diff --git a/src/.DS_Store b/src/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..de7b43e8b97cfee2d79f475b5fdeb8cabe270ba2 GIT binary patch literal 6148 zcmeHK%}T>S5Z-NTyQK&@sCW!`E!g58;w8lT0!H+pQWFw17_%i!&7l->)EDwmd>&_Z zH-}*GC}L+|_nV!c-OLBsAI2DW7vY#Oi!mmkA#zk21kH`EmI+4WYL1BIK{1~Ni3}Ph z`imy~_AV=!&oY*>`uBeXvm~D9qtPdCwL2T#p6H2zxc8o9;pKk5n7e*7(vM0O`L|ZaOENm=PK9J4$&9=skO6Qo*W$y?eS`8Ywh&# zV72Ot?Y;f8%jt9SlFBztBL~KnY#OZK9h6!{uU?iWGJOPll~ctM5(C5lF+dEgBLn6% z5QB9ro@yorh=Cs&!2Q97hUge9HL9%xI=nukzk`SZI=&?kZG(=%QX_akxK0JssoXp< zxK0PVZQ>k*rAD33xLO(JF)LS(7p_(ZyRE_*cQjH@3=ji#2HJXP;rV|7zf9vJzn(%P zVt^R None: - # assumes the warnet rpc server is always - # running inside a k8s cluster as a statefulset - config.load_incluster_config() - self.client = client.CoreV1Api() - self.dynamic_client = DynamicClient(client.ApiClient()) - self.namespace = "warnet" - self.logs_pod = logs_pod - self.network_name = network_name - self.log = logger - - def build(self) -> bool: - # TODO: just return true for now, this is so we can be running either docker or k8s as a backend - # on the same branch - return True - - def up(self, warnet) -> bool: - self.deploy_pods(warnet) - return True - - def down(self, warnet) -> bool: - """ - Bring an existing network down. - e.g. 
`k delete -f warnet-tanks.yaml` - """ - - for tank in warnet.tanks: - self.client.delete_namespaced_pod( - self.get_pod_name(tank.index, ServiceType.BITCOIN), self.namespace - ) - self.client.delete_namespaced_service( - self.get_service_name(tank.index, ServiceType.BITCOIN), self.namespace - ) - if tank.lnnode: - self.client.delete_namespaced_pod( - self.get_pod_name(tank.index, ServiceType.LIGHTNING), self.namespace - ) - self.client.delete_namespaced_service( - self.get_service_name(tank.index, ServiceType.LIGHTNING), self.namespace - ) - - self.remove_prometheus_service_monitors(warnet.tanks) - - for service_name in warnet.services: - try: - self.client.delete_namespaced_pod( - self.get_service_pod_name(SERVICES[service_name]["container_name_suffix"]), - self.namespace, - ) - self.client.delete_namespaced_service( - self.get_service_service_name(SERVICES[service_name]["container_name_suffix"]), - self.namespace, - ) - except Exception as e: - self.log.error(f"Could not delete service: {service_name}:\n{e}") - - return True - - def get_file(self, tank_index: int, service: ServiceType, file_path: str): - """ - Read a file from inside a container - """ - pod_name = self.get_pod_name(tank_index, service) - exec_command = ["sh", "-c", f'cat "{file_path}" | base64'] - - resp = stream( - self.client.connect_get_namespaced_pod_exec, - pod_name, - self.namespace, - command=exec_command, - stderr=True, - stdin=True, - stdout=True, - tty=False, - _preload_content=False, - container=BITCOIN_CONTAINER_NAME - if service == ServiceType.BITCOIN - else LN_CONTAINER_NAME, - ) - - base64_encoded_data = "" - while resp.is_open(): - resp.update(timeout=1) - if resp.peek_stdout(): - base64_encoded_data += resp.read_stdout() - if resp.peek_stderr(): - stderr_output = resp.read_stderr() - logger.error(f"STDERR: {stderr_output}") - raise Exception(f"Problem copying file from pod: {stderr_output}") - - decoded_bytes = base64.b64decode(base64_encoded_data) - return decoded_bytes - - def 
get_service_pod_name(self, suffix: str) -> str: - return f"{self.network_name}-{suffix}" - - def get_service_service_name(self, suffix: str) -> str: - return f"{self.network_name}-{suffix}-service" - - def get_pod_name(self, tank_index: int, type: ServiceType) -> str: - if type == ServiceType.LIGHTNING or type == ServiceType.CIRCUITBREAKER: - return f"{self.network_name}-{POD_PREFIX}-ln-{tank_index:06d}" - return f"{self.network_name}-{POD_PREFIX}-{tank_index:06d}" - - def get_service_name(self, tank_index: int, type: ServiceType) -> str: - return f"{self.get_pod_name(tank_index, type)}-service" - - def get_pod(self, pod_name: str) -> V1Pod | None: - try: - return cast( - V1Pod, self.client.read_namespaced_pod(name=pod_name, namespace=self.namespace) - ) - except ApiException as e: - if e.status == 404: - return None - - def get_service(self, service_name: str) -> V1Service | None: - try: - return cast( - V1Service, - self.client.read_namespaced_service(name=service_name, namespace=self.namespace), - ) - except ApiException as e: - if e.status == 404: - return None - - # We could enhance this by checking the pod status as well - # The following pod phases are available: Pending, Running, Succeeded, Failed, Unknown - # For example not able to pull image will be a phase of Pending, but the container status will be ErrImagePull - def get_status(self, tank_index: int, service: ServiceType) -> RunningStatus: - pod_name = self.get_pod_name(tank_index, service) - pod = self.get_pod(pod_name) - # Possible states: - # 1. pod not found? - # -> STOPPED - # 2. pod phase Succeeded? - # -> STOPPED - # 3. pod phase Failed? - # -> FAILED - # 4. pod phase Unknown? - # -> UNKNOWN - # Pod phase is now "Running" or "Pending" - # -> otherwise we need a bug fix, return UNKNOWN - # - # The pod is ready if all containers are ready. - # 5. Pod not ready? - # -> PENDING - # 6. Pod ready? 
- # -> RUNNING - # - # Note: we don't know anything about deleted pods so we can't return a status for them. - # TODO: we could use a kubernetes job to keep the result 🤔 - - if pod is None: - return RunningStatus.STOPPED - - assert pod.status, "Could not get pod status" - assert pod.status.phase, "Could not get pod status.phase" - if pod.status.phase == "Succeeded": - return RunningStatus.STOPPED - if pod.status.phase == "Failed": - return RunningStatus.FAILED - if pod.status.phase == "Unknown": - return RunningStatus.UNKNOWN - if pod.status.phase == "Pending": - return RunningStatus.PENDING - - assert pod.status.phase in ("Running", "Pending"), f"Unknown pod phase {pod.status.phase}" - - # a pod is ready if all containers are ready - ready = True - for container in pod.status.container_statuses: - if container.ready is not True: - ready = False - break - return RunningStatus.RUNNING if ready else RunningStatus.PENDING - - def exec_run(self, tank_index: int, service: ServiceType, cmd: str): - pod_name = self.get_pod_name(tank_index, service) - exec_cmd = ["/bin/sh", "-c", f"{cmd}"] - self.log.debug(f"Running {exec_cmd=:} on {tank_index=:}") - if service == ServiceType.BITCOIN: - container = BITCOIN_CONTAINER_NAME - if service == ServiceType.LIGHTNING: - container = LN_CONTAINER_NAME - if service == ServiceType.CIRCUITBREAKER: - container = LN_CB_CONTAINER_NAME - result = stream( - self.client.connect_get_namespaced_pod_exec, - pod_name, - self.namespace, - container=container, - command=exec_cmd, - stderr=True, - stdin=False, - stdout=True, - tty=False, - # Avoid preloading the content to keep JSON intact - _preload_content=False, - ) - # TODO: stream result is just a string, so there is no error code to check - # ideally, we use a method where we can check for an error code, otherwise we will - # need to check for errors in the string (meh) - # - # if result.exit_code != 0: - # raise Exception( - # f"Command failed with exit code {result.exit_code}: 
{result.output.decode('utf-8')}" - # ) - result.run_forever() - result = result.read_all() - return result - - def get_bitcoin_debug_log(self, tank_index: int): - pod_name = self.get_pod_name(tank_index, ServiceType.BITCOIN) - logs = self.client.read_namespaced_pod_log( - name=pod_name, - namespace=self.namespace, - container=BITCOIN_CONTAINER_NAME, - ) - return logs - - def ln_cli(self, tank: Tank, command: list[str]): - if tank.lnnode is None: - raise Exception("No LN node configured for tank") - cmd = tank.lnnode.generate_cli_command(command) - self.log.debug(f"Running lncli {cmd=:} on {tank.index=:}") - return self.exec_run(tank.index, ServiceType.LIGHTNING, cmd) - - def ln_pub_key(self, tank) -> str: - if tank.lnnode is None: - raise Exception("No LN node configured for tank") - self.log.debug(f"Getting pub key for tank {tank.index}") - return tank.lnnode.get_pub_key() - - def get_bitcoin_cli(self, tank: Tank, method: str, params=None): - if params: - cmd = f"bitcoin-cli -regtest -rpcuser={tank.rpc_user} -rpcport={tank.rpc_port} -rpcpassword={tank.rpc_password} {method} {' '.join(map(str, params))}" - else: - cmd = f"bitcoin-cli -regtest -rpcuser={tank.rpc_user} -rpcport={tank.rpc_port} -rpcpassword={tank.rpc_password} {method}" - self.log.debug(f"Running bitcoin-cli {cmd=:} on {tank.index=:}") - return self.exec_run(tank.index, ServiceType.BITCOIN, cmd) - - def get_messages( - self, - a_index: int, - b_index: int, - bitcoin_network: str = "regtest", - ): - b_pod = self.get_pod(self.get_pod_name(b_index, ServiceType.BITCOIN)) - b_service = self.get_service(self.get_service_name(b_index, ServiceType.BITCOIN)) - subdir = "/" if bitcoin_network == "main" else f"{bitcoin_network}/" - base_dir = f"/root/.bitcoin/{subdir}message_capture" - cmd = f"ls {base_dir}" - self.log.debug(f"Running {cmd=:} on {a_index=:}") - dirs = self.exec_run( - a_index, - ServiceType.BITCOIN, - cmd, - ) - dirs = dirs.splitlines() - self.log.debug(f"Got dirs: {dirs}") - messages = [] - - 
for dir_name in dirs: - if b_pod.status.pod_ip in dir_name or b_service.spec.cluster_ip in dir_name: - for file, outbound in [["msgs_recv.dat", False], ["msgs_sent.dat", True]]: - # Fetch the file contents from the container - file_path = f"{base_dir}/{dir_name}/{file}" - blob = self.get_file(a_index, ServiceType.BITCOIN, f"{file_path}") - # Parse the blob - json = parse_raw_messages(blob, outbound) - messages = messages + json - - messages.sort(key=lambda x: x["time"]) - return messages - - def logs_grep(self, pattern: str, network: str, k8s_timestamps=False, no_sort=False): - compiled_pattern = re.compile(pattern) - matching_logs = [] - pods = self.client.list_namespaced_pod(self.namespace) - relevant_pods = [pod for pod in pods.items if "warnet" in pod.metadata.name] - - for pod in relevant_pods: - try: - log_stream = self.client.read_namespaced_pod_log( - name=pod.metadata.name, - container=BITCOIN_CONTAINER_NAME, - namespace=self.namespace, - timestamps=k8s_timestamps, - _preload_content=False, - ) - for log_entry in log_stream: - log_entry_str = log_entry.decode("utf-8").strip() - if compiled_pattern.search(log_entry_str): - matching_logs.append((log_entry_str, pod.metadata.name)) - except ApiException as e: - print(f"Error fetching logs for pod {pod.metadata.name}: {e}") - - sorted_logs = matching_logs if no_sort else sorted(matching_logs, key=lambda x: x[0]) - # Prepend pod names - formatted_logs = [f"{pod_name}: {log}" for log, pod_name in sorted_logs] - - return "\n".join(formatted_logs) - - def generate_deployment_file(self, warnet): - """ - TODO: implement this - """ - pass - - def create_bitcoind_container(self, tank: Tank) -> client.V1Container: - self.log.debug(f"Creating bitcoind container for tank {tank.index}") - container_name = BITCOIN_CONTAINER_NAME - container_image = None - - # Prebuilt image - if tank.image: - container_image = tank.image - # On-demand built image - elif "/" and "#" in tank.version: - # We don't have docker installed on the 
RPC server, where this code will be run from, - # and it's currently unclear to me if having the RPC pod build images is a good idea. - # Don't support this for now in CI by disabling in the workflow. - - # This can be re-enabled by enabling in the workflow file and installing docker and - # docker-buildx on the rpc server image. - - # it's a git branch, building step is necessary - repo, branch = tank.version.split("#") - build_image( - repo, - branch, - LOCAL_REGISTRY, - branch, - tank.DEFAULT_BUILD_ARGS + tank.build_args, - arches="amd64", - ) - # Prebuilt major version - else: - container_image = f"{DOCKER_REGISTRY_CORE}:{tank.version}" - - peers = [ - self.get_service_name(dst_index, ServiceType.BITCOIN) for dst_index in tank.init_peers - ] - bitcoind_options = tank.get_bitcoin_conf(peers) - container_env = [client.V1EnvVar(name="BITCOIN_ARGS", value=bitcoind_options)] - - bitcoind_container = client.V1Container( - name=container_name, - image=container_image, - env=container_env, - liveness_probe=client.V1Probe( - failure_threshold=3, - initial_delay_seconds=5, - period_seconds=5, - timeout_seconds=1, - _exec=client.V1ExecAction(command=["pidof", "bitcoind"]), - ), - readiness_probe=client.V1Probe( - failure_threshold=1, - initial_delay_seconds=0, - period_seconds=1, - timeout_seconds=1, - tcp_socket=client.V1TCPSocketAction(port=tank.rpc_port), - ), - security_context=client.V1SecurityContext( - privileged=True, - capabilities=client.V1Capabilities(add=["NET_ADMIN", "NET_RAW"]), - ), - ) - self.log.debug( - f"Created bitcoind container for tank {tank.index} using {bitcoind_options=:}" - ) - return bitcoind_container - - def create_prometheus_container(self, tank) -> client.V1Container: - env = [ - client.V1EnvVar(name="BITCOIN_RPC_HOST", value="127.0.0.1"), - client.V1EnvVar(name="BITCOIN_RPC_PORT", value=str(tank.rpc_port)), - client.V1EnvVar(name="BITCOIN_RPC_USER", value=tank.rpc_user), - client.V1EnvVar(name="BITCOIN_RPC_PASSWORD", 
value=tank.rpc_password), - ] - if tank.metrics is not None: - env.append( - client.V1EnvVar(name="METRICS", value=tank.metrics), - ) - return client.V1Container( - name="prometheus", image="bitcoindevproject/bitcoin-exporter:latest", env=env - ) - - def check_logging_crds_installed(self): - logging_crd_name = "servicemonitors.monitoring.coreos.com" - api = client.ApiextensionsV1Api() - crds = api.list_custom_resource_definition() - return bool(any(crd.metadata.name == logging_crd_name for crd in crds.items)) - - def apply_prometheus_service_monitors(self, tanks): - for tank in tanks: - if not tank.exporter: - continue - - tank_name = self.get_pod_name(tank.index, ServiceType.BITCOIN) - - service_monitor = { - "apiVersion": "monitoring.coreos.com/v1", - "kind": "ServiceMonitor", - "metadata": { - "name": tank_name, - "namespace": MAIN_NAMESPACE, - "labels": { - "app.kubernetes.io/name": "bitcoind-metrics", - "release": "prometheus", - }, - }, - "spec": { - "endpoints": [{"port": "prometheus-metrics"}], - "selector": {"matchLabels": {"app": tank_name}}, - }, - } - # Create the custom resource using the dynamic client - sc_crd = self.dynamic_client.resources.get( - api_version="monitoring.coreos.com/v1", kind="ServiceMonitor" - ) - sc_crd.create(body=service_monitor, namespace=MAIN_NAMESPACE) - - # attempts to delete the service monitors whether they exist or not - def remove_prometheus_service_monitors(self, tanks): - for tank in tanks: - try: - self.dynamic_client.resources.get( - api_version="monitoring.coreos.com/v1", kind="ServiceMonitor" - ).delete( - name=f"warnet-tank-{tank.index:06d}", - namespace=MAIN_NAMESPACE, - ) - except (ResourceNotFoundError, NotFoundError): - continue - - def get_lnnode_hostname(self, index: int) -> str: - return f"{self.get_service_name(index, ServiceType.LIGHTNING)}.{self.namespace}" - - def create_ln_container(self, tank, bitcoind_service_name, volume_mounts) -> client.V1Container: - # These args are appended to the Dockerfile 
`ENTRYPOINT ["lnd"]` - bitcoind_rpc_host = f"{bitcoind_service_name}.{self.namespace}" - lightning_dns = self.get_lnnode_hostname(tank.index) - args = tank.lnnode.get_conf(lightning_dns, bitcoind_rpc_host) - self.log.debug(f"Creating lightning container for tank {tank.index} using {args=:}") - lightning_ready_probe = "" - if tank.lnnode.impl == "lnd": - lightning_ready_probe = "lncli --network=regtest getinfo" - elif tank.lnnode.impl == "cln": - lightning_ready_probe = "lightning-cli --network=regtest getinfo" - else: - raise Exception( - f"Lightning node implementation {tank.lnnode.impl} for tank {tank.index} not supported" - ) - lightning_container = client.V1Container( - name=LN_CONTAINER_NAME, - image=tank.lnnode.image, - args=args.split(" "), - env=[ - client.V1EnvVar(name="LN_IMPL", value=tank.lnnode.impl), - ], - readiness_probe=client.V1Probe( - failure_threshold=1, - success_threshold=3, - initial_delay_seconds=10, - period_seconds=2, - timeout_seconds=2, - _exec=client.V1ExecAction(command=["/bin/sh", "-c", lightning_ready_probe]), - ), - security_context=client.V1SecurityContext( - privileged=True, - capabilities=client.V1Capabilities(add=["NET_ADMIN", "NET_RAW"]), - ), - volume_mounts=volume_mounts, - ) - self.log.debug(f"Created lightning container for tank {tank.index}") - return lightning_container - - def create_circuitbreaker_container(self, tank, volume_mounts) -> client.V1Container: - self.log.debug(f"Creating circuitbreaker container for tank {tank.index}") - cb_container = client.V1Container( - name=LN_CB_CONTAINER_NAME, - image=tank.lnnode.cb, - args=[ - "--network=regtest", - f"--rpcserver=127.0.0.1:{tank.lnnode.rpc_port}", - f"--tlscertpath={LND_MOUNT_PATH}/tls.cert", - f"--macaroonpath={LND_MOUNT_PATH}/data/chain/bitcoin/regtest/admin.macaroon", - ], - security_context=client.V1SecurityContext( - privileged=True, - capabilities=client.V1Capabilities(add=["NET_ADMIN", "NET_RAW"]), - ), - volume_mounts=volume_mounts, - ) - 
self.log.debug(f"Created circuitbreaker container for tank {tank.index}") - return cb_container - - def create_pod_object( - self, - tank: Tank, - containers: list[client.V1Container], - volumes: list[client.V1Volume], - name: str, - ) -> client.V1Pod: - # Create and return a Pod object - # TODO: pass a custom namespace , e.g. different warnet sims can be deployed into diff namespaces - - return client.V1Pod( - api_version="v1", - kind="Pod", - metadata=client.V1ObjectMeta( - name=name, - namespace=self.namespace, - labels={ - "app": name, - "network": tank.warnet.network_name, - }, - ), - spec=client.V1PodSpec( - # Might need some more thinking on the pod restart policy, setting to Never for now - # This means if a node has a problem it dies - restart_policy="OnFailure", - containers=containers, - volumes=volumes, - ), - ) - - def get_tank_ipv4(self, index: int) -> str | None: - pod_name = self.get_pod_name(index, ServiceType.BITCOIN) - pod = self.get_pod(pod_name) - if pod: - return pod.status.pod_ip - else: - return None - - def get_tank_dns_addr(self, index: int) -> str | None: - service_name = self.get_service_name(index, ServiceType.BITCOIN) - try: - self.client.read_namespaced_service(name=service_name, namespace=self.namespace) - except ApiValueError as e: - self.log.info(ApiValueError(f"dns addr request for {service_name} raised {str(e)}")) - return None - return service_name - - def get_tank_ip_addr(self, index: int) -> str | None: - service_name = self.get_service_name(index, ServiceType.BITCOIN) - try: - endpoints = self.client.read_namespaced_endpoints( - name=service_name, namespace=self.namespace - ) - except ApiValueError as e: - self.log.info(f"ip addr request for {service_name} raised {str(e)}") - return None - - if len(endpoints.subsets) == 0: - raise Exception(f"{service_name}'s endpoint does not have an initial subset") - initial_subset = endpoints.subsets[0] - - if len(initial_subset.addresses) == 0: - raise Exception(f"{service_name}'s 
initial subset does not have an initial address") - initial_address = initial_subset.addresses[0] - - return str(initial_address.ip) - - def create_bitcoind_service(self, tank) -> client.V1Service: - service_name = self.get_service_name(tank.index, ServiceType.BITCOIN) - self.log.debug(f"Creating bitcoind service {service_name} for tank {tank.index}") - service = client.V1Service( - api_version="v1", - kind="Service", - metadata=client.V1ObjectMeta( - name=service_name, - labels={ - "app": self.get_pod_name(tank.index, ServiceType.BITCOIN), - "network": tank.warnet.network_name, - }, - ), - spec=client.V1ServiceSpec( - selector={"app": self.get_pod_name(tank.index, ServiceType.BITCOIN)}, - publish_not_ready_addresses=True, - ports=[ - client.V1ServicePort(port=18444, target_port=18444, name="p2p"), - client.V1ServicePort(port=tank.rpc_port, target_port=tank.rpc_port, name="rpc"), - client.V1ServicePort( - port=tank.zmqblockport, target_port=tank.zmqblockport, name="zmqblock" - ), - client.V1ServicePort( - port=tank.zmqtxport, target_port=tank.zmqtxport, name="zmqtx" - ), - client.V1ServicePort( - port=PROMETHEUS_METRICS_PORT, - target_port=PROMETHEUS_METRICS_PORT, - name="prometheus-metrics", - ), - ], - ), - ) - self.log.debug(f"Created bitcoind service {service_name} for tank {tank.index}") - return service - - def create_lightning_service(self, tank) -> client.V1Service: - service_name = self.get_service_name(tank.index, ServiceType.LIGHTNING) - self.log.debug(f"Creating lightning service {service_name} for tank {tank.index}") - service = client.V1Service( - api_version="v1", - kind="Service", - metadata=client.V1ObjectMeta( - name=service_name, - labels={ - "app": self.get_pod_name(tank.index, ServiceType.LIGHTNING), - "network": tank.warnet.network_name, - }, - ), - spec=client.V1ServiceSpec( - selector={"app": self.get_pod_name(tank.index, ServiceType.LIGHTNING)}, - cluster_ip="None", - ports=[ - client.V1ServicePort( - port=tank.lnnode.rpc_port, 
target_port=tank.lnnode.rpc_port, name="rpc" - ), - ], - publish_not_ready_addresses=True, - ), - ) - self.log.debug(f"Created lightning service {service_name} for tank {tank.index}") - return service - - def deploy_pods(self, warnet): - # TODO: this is pretty hack right now, ideally it should mirror - # a similar workflow to the docker backend: - # 1. read graph file, turn graph file into k8s resources, deploy the resources - tank_resource_files = [] - self.log.debug("Deploying pods") - for tank in warnet.tanks: - # Create and deploy bitcoind pod and service - bitcoind_container = self.create_bitcoind_container(tank) - bitcoind_pod = self.create_pod_object( - tank, [bitcoind_container], [], self.get_pod_name(tank.index, ServiceType.BITCOIN) - ) - - if tank.exporter and self.check_logging_crds_installed(): - prometheus_container = self.create_prometheus_container(tank) - bitcoind_pod.spec.containers.append(prometheus_container) - - bitcoind_service = self.create_bitcoind_service(tank) - self.client.create_namespaced_pod(namespace=self.namespace, body=bitcoind_pod) - # delete the service if it already exists, ignore 404 - try: - self.client.delete_namespaced_service( - name=bitcoind_service.metadata.name, namespace=self.namespace - ) - except ApiException as e: - if e.status != 404: - raise e - self.client.create_namespaced_service(namespace=self.namespace, body=bitcoind_service) - - # Create and deploy a lightning pod - if tank.lnnode: - conts = [] - vols = [] - volume_mounts = [] - if tank.lnnode.cb: - # Create a shared volume between containers in the pod - volume_name = f"ln-cb-data-{tank.index}" - vols.append( - client.V1Volume(name=volume_name, empty_dir=client.V1EmptyDirVolumeSource()) - ) - volume_mounts.append( - client.V1VolumeMount( - name=volume_name, - mount_path=LND_MOUNT_PATH, - ) - ) - # Add circuit breaker container - conts.append(self.create_circuitbreaker_container(tank, volume_mounts)) - # Add lightning container - conts.append( - 
self.create_ln_container(tank, bitcoind_service.metadata.name, volume_mounts) - ) - # Put it all together in a pod - lnd_pod = self.create_pod_object( - tank, conts, vols, self.get_pod_name(tank.index, ServiceType.LIGHTNING) - ) - self.client.create_namespaced_pod(namespace=self.namespace, body=lnd_pod) - # Create service for the pod - lightning_service = self.create_lightning_service(tank) - try: - self.client.delete_namespaced_service( - name=lightning_service.metadata.name, namespace=self.namespace - ) - except ApiException as e: - if e.status != 404: - raise e - self.client.create_namespaced_service( - namespace=self.namespace, body=lightning_service - ) - - # add metrics scraping for tanks configured to export metrics - if self.check_logging_crds_installed(): - self.apply_prometheus_service_monitors(warnet.tanks) - - for service_name in warnet.services: - try: - self.service_from_json(SERVICES[service_name]) - except Exception as e: - self.log.error(f"Error starting service: {service_name}\n{e}") - - self.log.debug("Containers and services created. 
Configuring IP addresses") - # now that the pods have had a second to create, - # get the ips and set them on the tanks - - # TODO: this is really hacky, should probably just update the generate_ipv4 function at some point - # by moving it into the base class - for tank in warnet.tanks: - pod_ip = None - while not pod_ip: - pod_name = self.get_pod_name(tank.index, ServiceType.BITCOIN) - pod = self.get_pod(pod_name) - if pod is None or pod.status is None or getattr(pod.status, "pod_ip", None) is None: - self.log.info("Waiting for pod response or pod IP...") - time.sleep(3) - continue - pod_ip = pod.status.pod_ip - - tank._ipv4 = pod_ip - self.log.debug(f"Tank {tank.index} created") - - with open(warnet.config_dir / "warnet-tanks.yaml", "w") as f: - for pod in tank_resource_files: - yaml.dump(pod.to_dict(), f) - f.write("---\n") # separator for multiple resources - self.log.info("Pod definitions saved to warnet-tanks.yaml") - - def wait_for_healthy_tanks(self, warnet, timeout=30): - """ - Wait for healthy status on all bitcoind nodes - """ - pass - - def service_from_json(self, obj): - env = [] - for pair in obj.get("environment", []): - name, value = pair.split("=") - env.append(client.V1EnvVar(name=name, value=value)) - volume_mounts = [] - volumes = [] - for vol in obj.get("config_files", []): - volume_name, mount_path = vol.split(":") - volume_name = volume_name.replace("/", "") - volume_mounts.append(client.V1VolumeMount(name=volume_name, mount_path=mount_path)) - volumes.append( - client.V1Volume(name=volume_name, empty_dir=client.V1EmptyDirVolumeSource()) - ) - - service_container = client.V1Container( - name=self.get_service_pod_name(obj["container_name_suffix"]), - image=obj["image"], - env=env, - security_context=client.V1SecurityContext( - privileged=True, - capabilities=client.V1Capabilities(add=["NET_ADMIN", "NET_RAW"]), - ), - volume_mounts=volume_mounts, - ) - sidecar_container = client.V1Container( - name="sidecar", - image="pinheadmz/sidecar:latest", 
- volume_mounts=volume_mounts, - ports=[client.V1ContainerPort(container_port=22)], - ) - service_pod = client.V1Pod( - api_version="v1", - kind="Pod", - metadata=client.V1ObjectMeta( - name=self.get_service_pod_name(obj["container_name_suffix"]), - namespace=self.namespace, - labels={ - "app": self.get_service_pod_name(obj["container_name_suffix"]), - "network": self.network_name, - }, - ), - spec=client.V1PodSpec( - restart_policy="OnFailure", - containers=[service_container, sidecar_container], - volumes=volumes, - ), - ) - - # Do not ever change this variable name. xoxo, --Zip - service_service = client.V1Service( - api_version="v1", - kind="Service", - metadata=client.V1ObjectMeta( - name=self.get_service_service_name(obj["container_name_suffix"]), - labels={ - "app": self.get_service_pod_name(obj["container_name_suffix"]), - "network": self.network_name, - }, - ), - spec=client.V1ServiceSpec( - selector={"app": self.get_service_pod_name(obj["container_name_suffix"])}, - publish_not_ready_addresses=True, - ports=[ - client.V1ServicePort(name="ssh", port=22, target_port=22), - ], - ), - ) - - self.client.create_namespaced_pod(namespace=self.namespace, body=service_pod) - self.client.create_namespaced_service(namespace=self.namespace, body=service_service) - - def write_service_config(self, source_path: str, service_name: str, destination_path: str): - obj = SERVICES[service_name] - container_name = "sidecar" - # Copy the archive from our local drive (Warnet RPC container/pod) - # to the destination service's sidecar container via ssh - self.log.info( - f"Copying local {source_path} to remote {destination_path} for {service_name}" - ) - subprocess.run( - [ - "scp", - "-o", - "StrictHostKeyChecking=accept-new", - source_path, - f"root@{self.get_service_service_name(obj['container_name_suffix'])}.{self.namespace}:/arbitrary_filename.tar", - ] - ) - self.log.info(f"Finished copying tarball for {service_name}, unpacking...") - # Unpack the archive - stream( - 
self.client.connect_get_namespaced_pod_exec, - self.get_service_pod_name(obj["container_name_suffix"]), - self.namespace, - container=container_name, - command=["/bin/sh", "-c", f"tar -xf /arbitrary_filename.tar -C {destination_path}"], - stderr=True, - stdin=False, - stdout=True, - tty=False, - _preload_content=False, - ) - self.log.info(f"Finished unpacking config data for {service_name} to {destination_path}") diff --git a/src/warnet/cli/graph.py b/src/warnet/cli/graph.py index 45128d603..1e75862e5 100644 --- a/src/warnet/cli/graph.py +++ b/src/warnet/cli/graph.py @@ -5,7 +5,8 @@ import click import networkx as nx from rich import print -from warnet.utils import DEFAULT_TAG, create_cycle_graph, validate_graph_schema + +from .util import DEFAULT_TAG, create_cycle_graph, validate_graph_schema @click.group(name="graph") diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index aad43ba35..de9595559 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -5,10 +5,10 @@ import click from rich import print as richprint -from .bitcoin import bitcoin +# from .bitcoin import bitcoin from .graph import graph from .image import image -from .ln import ln +# from .ln import ln from .network import network from .scenarios import scenarios @@ -20,10 +20,10 @@ def cli(): pass -cli.add_command(bitcoin) +# cli.add_command(bitcoin) cli.add_command(graph) cli.add_command(image) -cli.add_command(ln) +# cli.add_command(ln) cli.add_command(network) cli.add_command(scenarios) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 600d7c0b4..ff104e620 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -19,19 +19,7 @@ def network(): """Network commands""" -def set_kubectl_context(namespace: str): - """ - Set the default kubectl context to the specified namespace. 
- """ - command = f"kubectl config set-context --current --namespace={namespace}" - result = run_command(command, stream_output=True) - if result: - print(f"Kubectl context set to namespace: {namespace}") - else: - print(f"Failed to set kubectl context to namespace: {namespace}") - return result - - +# High-level network operations @network.command() @click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) @click.option("--network", default="warnet", show_default=True) @@ -40,60 +28,30 @@ def start(graph_file: Path, logging: bool, network: str): """ Start a warnet with topology loaded from a into [network] """ - # Generate the Kubernetes YAML graph = read_graph_file(graph_file) kubernetes_yaml = generate_kubernetes_yaml(graph) - # Write the YAML to a temporary file with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: yaml.dump_all(kubernetes_yaml, temp_file) temp_file_path = temp_file.name try: - # Deploy base configurations - base_configs = [ - "namespace.yaml", - "rbac-config.yaml", - ] - - for config in base_configs: - command = f"kubectl apply -f {WAR_MANIFESTS}/{config}" - result = run_command(command, stream_output=True) - if not result: - print(f"Failed to apply {config}") - return - - # Apply the YAML using kubectl - command = f"kubectl apply -f {temp_file_path}" - result = run_command(command, stream_output=True) - - if result: + if deploy_base_configurations() and apply_kubernetes_yaml(temp_file_path): print(f"Warnet '{network}' started successfully.") - - # Set kubectl context to the warnet namespace - context_result = set_kubectl_context(network) - if not context_result: + if not set_kubectl_context(network): print( "Warning: Failed to set kubectl context. You may need to manually switch to the warnet namespace." 
) -<<<<<<< HEAD - - if logging: - helm_result = setup_logging_helm() - if helm_result: -======= if not logging: print("Skipping install of logging charts") else: if setup_logging_helm(): ->>>>>>> c109e2a (tidy logging install messages) print("Helm charts installed successfully.") else: print("Failed to install Helm charts.") else: print(f"Failed to start warnet '{network}'.") finally: - # Clean up the temporary file Path(temp_file_path).unlink() @@ -103,17 +61,10 @@ def down(network: str): """ Bring down a running warnet named [network] """ - # Delete the namespace - command = f"kubectl delete namespace {network}" - result = run_command(command, stream_output=True) - # TODO: Fix this - command = "kubectl delete namespace warnet-logging" - result = run_command(command, stream_output=True) - - if result: - print(f"Warnet '{network}' has been successfully brought down and the namespace deleted.") + if delete_namespace(network) and delete_namespace("warnet-logging"): + print(f"Warnet '{network}' has been successfully brought down and the namespaces deleted.") else: - print(f"Failed to bring down warnet '{network}' or delete the namespace.") + print(f"Failed to bring down warnet '{network}' or delete the namespaces.") @network.command() @@ -121,12 +72,9 @@ def down(network: str): def logs(follow: bool): """Get Kubernetes logs from the RPC server""" command = "kubectl logs rpc-0" - stream_output = False if follow: command += " --follow" - stream_output = True - - run_command(command, stream_output=stream_output) + run_command(command, stream_output=follow) @network.command() @@ -136,44 +84,52 @@ def generate_yaml(graph_file: Path, output: str): """ Generate a Kubernetes YAML file from a graph file for deploying warnet nodes. 
""" - # Read and parse the graph file graph = read_graph_file(graph_file) - - # Generate the Kubernetes YAML kubernetes_yaml = generate_kubernetes_yaml(graph) - # Write the YAML to a file with open(output, "w") as f: yaml.dump_all(kubernetes_yaml, f) print(f"Kubernetes YAML file generated: {output}") -def read_graph_file(graph_file: Path) -> nx.Graph: - with open(graph_file) as f: - return nx.parse_graphml(f.read()) +@network.command() +@click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path(exists=True)) +def connect(graph_file: Path): + """ + Connect nodes based on the edges defined in the graph file. + """ + tree = ET.parse(graph_file) + root = tree.getroot() + edges = root.findall(".//{http://graphml.graphdrawing.org/xmlns}edge") + for edge in edges: + source = edge.get("source") + target = edge.get("target") + command = f"kubectl exec -it warnet-node-{source} -- bitcoin-cli -rpcuser=user -rpcpassword=password addnode warnet-node-{target}-service:8333 add" -def generate_kubernetes_yaml(graph: nx.Graph) -> list: - kubernetes_objects = [] + print(f"Connecting node {source} to node {target}") + if run_command(command, stream_output=True): + print(f"Successfully connected node {source} to node {target}") + else: + print(f"Failed to connect node {source} to node {target}") - # Add Namespace object - namespace = create_namespace() - kubernetes_objects.append(namespace) + print("All connections attempted.") - for node, data in graph.nodes(data=True): - # Create a ConfigMap for each node - config = generate_node_config(node, data) - config_map = create_config_map(node, config) - kubernetes_objects.append(config_map) - # Create a deployment for each node - deployment = create_node_deployment(node, data) - kubernetes_objects.append(deployment) +# Kubernetes object generation +def generate_kubernetes_yaml(graph: nx.Graph) -> list: + kubernetes_objects = [create_namespace()] - # Create a service for each node - service = 
create_node_service(node) - kubernetes_objects.append(service) + for node, data in graph.nodes(data=True): + config = generate_node_config(node, data) + kubernetes_objects.extend( + [ + create_config_map(node, config), + create_node_deployment(node, data), + create_node_service(node), + ] + ) return kubernetes_objects @@ -228,57 +184,22 @@ def create_node_service(node: int) -> dict: } -def setup_logging_helm(): - """ - Run the required Helm commands for setting up Grafana, Prometheus, and Loki. - """ - helm_commands = [ - "helm repo add grafana https://grafana.github.io/helm-charts", - "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", - "helm repo update", - f"helm upgrade --install --namespace warnet-logging --create-namespace --values {WAR_MANIFESTS}/loki_values.yaml loki grafana/loki --version 5.47.2", - "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", - "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", - f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {WAR_MANIFESTS}/grafana_values.yaml", - ] - - for command in helm_commands: - result = run_command(command, stream_output=True) - if not result: - print(f"Failed to run Helm command: {command}") - return False - return True - - -@network.command() -@click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path(exists=True)) -def connect(graph_file: Path): - """ - Connect nodes based on the edges defined in the graph file. 
- """ - # Parse the GraphML file - tree = ET.parse(graph_file) - root = tree.getroot() - - # Find all edge elements - edges = root.findall(".//{http://graphml.graphdrawing.org/xmlns}edge") - - for edge in edges: - source = edge.get("source") - target = edge.get("target") - - # Construct the kubectl command - command = f"kubectl exec -it warnet-node-{source} -- bitcoin-cli -rpcuser=user -rpcpassword=password addnode warnet-node-{target}-service:8333 add" +def create_config_map(node: int, config: str) -> dict: + return { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": f"bitcoin-config-node-{node}", + "namespace": "warnet", + }, + "data": {"bitcoin.conf": config}, + } - print(f"Connecting node {source} to node {target}") - result = run_command(command, stream_output=True) - if result: - print(f"Successfully connected node {source} to node {target}") - else: - print(f"Failed to connect node {source} to node {target}") - - print("All connections attempted.") +# Utility functions +def read_graph_file(graph_file: Path) -> nx.Graph: + with open(graph_file) as f: + return nx.parse_graphml(f.read()) def generate_node_config(node: int, data: dict) -> str: @@ -307,13 +228,59 @@ def generate_node_config(node: int, data: dict) -> str: return f"{base_config}\n{node_specific_config.replace(",", "\n")}" -def create_config_map(node: int, config: str) -> dict: - return { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": f"bitcoin-config-node-{node}", - "namespace": "warnet", - }, - "data": {"bitcoin.conf": config}, - } +def set_kubectl_context(namespace: str): + """ + Set the default kubectl context to the specified namespace. 
+ """ + command = f"kubectl config set-context --current --namespace={namespace}" + result = run_command(command, stream_output=True) + if result: + print(f"Kubectl context set to namespace: {namespace}") + else: + print(f"Failed to set kubectl context to namespace: {namespace}") + return result + + +def deploy_base_configurations(): + base_configs = [ + "namespace.yaml", + "rbac-config.yaml", + ] + + for config in base_configs: + command = f"kubectl apply -f {WAR_MANIFESTS}/{config}" + if not run_command(command, stream_output=True): + print(f"Failed to apply {config}") + return False + return True + + +def apply_kubernetes_yaml(yaml_file: str): + command = f"kubectl apply -f {yaml_file}" + return run_command(command, stream_output=True) + + +def delete_namespace(namespace: str): + command = f"kubectl delete namespace {namespace}" + return run_command(command, stream_output=True) + + +def setup_logging_helm(): + """ + Run the required Helm commands for setting up Grafana, Prometheus, and Loki. 
+ """ + helm_commands = [ + "helm repo add grafana https://grafana.github.io/helm-charts", + "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", + "helm repo update", + f"helm upgrade --install --namespace warnet-logging --create-namespace --values {WAR_MANIFESTS}/loki_values.yaml loki grafana/loki --version 5.47.2", + "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", + "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", + f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {WAR_MANIFESTS}/grafana_values.yaml", + ] + + for command in helm_commands: + if not run_command(command, stream_output=True): + print(f"Failed to run Helm command: {command}") + return False + return True diff --git a/src/warnet/cli/rpc.py b/src/warnet/cli/rpc.py index 9380ede0c..4ecd6bada 100644 --- a/src/warnet/cli/rpc.py +++ b/src/warnet/cli/rpc.py @@ -5,7 +5,8 @@ import requests from jsonrpcclient.requests import request from jsonrpcclient.responses import Error, Ok, parse -from warnet.server import WARNET_SERVER_PORT + +WARNET_SERVER_PORT = 9276 class JSONRPCException(Exception): diff --git a/src/warnet/cli/util.py b/src/warnet/cli/util.py index 80db73cd9..718ee6fb9 100644 --- a/src/warnet/cli/util.py +++ b/src/warnet/cli/util.py @@ -1,5 +1,22 @@ +import json +import logging import os +import random import subprocess +from importlib.resources import files +from pathlib import Path + +import networkx as nx +from jsonschema import validate + +logger = logging.getLogger("utils") + +SUPPORTED_TAGS = ["27.0", "26.0", "25.1", "24.2", "23.2", "22.2"] +DEFAULT_TAG = SUPPORTED_TAGS[0] +WEIGHTED_TAGS = [ + tag for index, tag in enumerate(reversed(SUPPORTED_TAGS)) for _ in range(index + 1) +] +SRC_DIR = files("warnet") def run_command(command, 
stream_output=False, env=None): @@ -40,3 +57,166 @@ def run_command(command, stream_output=False, env=None): return False print(result.stdout) return True + + +def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_version: bool): + try: + # Use nx.MultiDiGraph() so we get directed edges (source->target) + # and still allow parallel edges (L1 p2p connections + LN channels) + graph = nx.generators.cycle_graph(n, nx.MultiDiGraph()) + except TypeError as e: + msg = f"Failed to create graph: {e}" + logging.error(msg) + return msg + + # Graph is a simply cycle graph with all nodes connected in a loop, including both ends. + # Ensure each node has at least 8 outbound connections by making 7 more outbound connections + for src_node in graph.nodes(): + logging.debug(f"Creating additional connections for node {src_node}") + for _ in range(8): + # Choose a random node to connect to + # Make sure it's not the same node and they aren't already connected in either direction + potential_nodes = [ + dst_node + for dst_node in range(n) + if dst_node != src_node + and not graph.has_edge(dst_node, src_node) + and not graph.has_edge(src_node, dst_node) + ] + if potential_nodes: + chosen_node = random.choice(potential_nodes) + graph.add_edge(src_node, chosen_node) + logging.debug(f"Added edge: {src_node}:{chosen_node}") + logging.debug(f"Node {src_node} edges: {graph.edges(src_node)}") + + # parse and process conf file + conf_contents = "" + if bitcoin_conf is not None: + conf = Path(bitcoin_conf) + if conf.is_file(): + with open(conf) as f: + # parse INI style conf then dump using for_graph + conf_dict = parse_bitcoin_conf(f.read()) + conf_contents = dump_bitcoin_conf(conf_dict, for_graph=True) + + # populate our custom fields + for i, node in enumerate(graph.nodes()): + if random_version: + graph.nodes[node]["version"] = random.choice(WEIGHTED_TAGS) + else: + # One node demoing the image tag + if i == 1: + graph.nodes[node]["image"] = 
f"bitcoindevproject/bitcoin:{version}" + else: + graph.nodes[node]["version"] = version + graph.nodes[node]["bitcoin_config"] = conf_contents + graph.nodes[node]["tc_netem"] = "" + graph.nodes[node]["build_args"] = "" + graph.nodes[node]["exporter"] = False + graph.nodes[node]["collect_logs"] = False + graph.nodes[node]["resources"] = None + + convert_unsupported_attributes(graph) + return graph + + +def convert_unsupported_attributes(graph: nx.Graph): + # Sometimes networkx complains about invalid types when writing the graph + # (it just generated itself!). Try to convert them here just in case. + for _, node_data in graph.nodes(data=True): + for key, value in node_data.items(): + if isinstance(value, set): + node_data[key] = list(value) + elif isinstance(value, int | float | str): + continue + else: + node_data[key] = str(value) + + for _, _, edge_data in graph.edges(data=True): + for key, value in edge_data.items(): + if isinstance(value, set): + edge_data[key] = list(value) + elif isinstance(value, int | float | str): + continue + else: + edge_data[key] = str(value) + + +def load_schema(): + with open(SRC_DIR / "graph_schema.json") as schema_file: + return json.load(schema_file) + + +def validate_graph_schema(graph: nx.Graph): + """ + Validate a networkx.Graph against the node schema + """ + graph_schema = load_schema() + validate(instance=graph.graph, schema=graph_schema["graph"]) + for n in list(graph.nodes): + validate(instance=graph.nodes[n], schema=graph_schema["node"]) + for e in list(graph.edges): + validate(instance=graph.edges[e], schema=graph_schema["edge"]) + + +def parse_bitcoin_conf(file_content): + """ + Custom parser for INI-style bitcoin.conf + + Args: + - file_content (str): The content of the INI-style file. + + Returns: + - dict: A dictionary representation of the file content. + Key-value pairs are stored as tuples so one key may have + multiple values. Sections are represented as arrays of these tuples. 
+ """ + current_section = None + result = {current_section: []} + + for line in file_content.splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + + if line.startswith("[") and line.endswith("]"): + current_section = line[1:-1] + result[current_section] = [] + elif "=" in line: + key, value = line.split("=", 1) + result[current_section].append((key.strip(), value.strip())) + + return result + + +def dump_bitcoin_conf(conf_dict, for_graph=False): + """ + Converts a dictionary representation of bitcoin.conf content back to INI-style string. + + Args: + - conf_dict (dict): A dictionary representation of the file content. + + Returns: + - str: The INI-style string representation of the input dictionary. + """ + result = [] + + # Print global section at the top first + values = conf_dict[None] + for sub_key, sub_value in values: + result.append(f"{sub_key}={sub_value}") + + # Then print any named subsections + for section, values in conf_dict.items(): + if section is not None: + result.append(f"\n[{section}]") + else: + continue + for sub_key, sub_value in values: + result.append(f"{sub_key}={sub_value}") + + if for_graph: + return ",".join(result) + + # Terminate file with newline + return "\n".join(result) + "\n" diff --git a/src/warnet/utils.py b/src/warnet/cli/utils.py similarity index 99% rename from src/warnet/utils.py rename to src/warnet/cli/utils.py index c9d43f616..9b9662f4c 100644 --- a/src/warnet/utils.py +++ b/src/warnet/cli/utils.py @@ -21,6 +21,7 @@ logger = logging.getLogger("utils") + SUPPORTED_TAGS = ["27.0", "26.0", "25.1", "24.2", "23.2", "22.2"] DEFAULT_TAG = SUPPORTED_TAGS[0] WEIGHTED_TAGS = [ diff --git a/src/warnet/cln.py b/src/warnet/cln.py deleted file mode 100644 index 53ed5ffa1..000000000 --- a/src/warnet/cln.py +++ /dev/null @@ -1,198 +0,0 @@ -import io -import tarfile - -from warnet.backend.kubernetes_backend import KubernetesBackend -from warnet.services import ServiceType -from warnet.utils import 
exponential_backoff, generate_ipv4_addr, handle_json - -from .lnchannel import LNChannel, LNPolicy -from .lnnode import LNNode -from .status import RunningStatus - -CLN_CONFIG_BASE = " ".join( - [ - "--network=regtest", - "--database-upgrade=true", - "--bitcoin-retry-timeout=600", - "--bind-addr=0.0.0.0:9735", - "--developer", - "--dev-fast-gossip", - "--log-level=debug", - ] -) - - -class CLNNode(LNNode): - def __init__(self, warnet, tank, backend: KubernetesBackend, options): - self.warnet = warnet - self.tank = tank - self.backend = backend - self.image = options["ln_image"] - self.cb = options["cb_image"] - self.ln_config = options["ln_config"] - self.ipv4 = generate_ipv4_addr(self.warnet.subnet) - self.rpc_port = 10009 - self.impl = "cln" - - @property - def status(self) -> RunningStatus: - return super().status - - @property - def cb_status(self) -> RunningStatus: - return super().cb_status - - def get_conf(self, ln_container_name, tank_container_name) -> str: - conf = CLN_CONFIG_BASE - conf += f" --alias={self.tank.index}" - conf += f" --grpc-port={self.rpc_port}" - conf += f" --bitcoin-rpcuser={self.tank.rpc_user}" - conf += f" --bitcoin-rpcpassword={self.tank.rpc_password}" - conf += f" --bitcoin-rpcconnect={tank_container_name}" - conf += f" --bitcoin-rpcport={self.tank.rpc_port}" - conf += f" --announce-addr=dns:{ln_container_name}:9735" - return conf - - @exponential_backoff(max_retries=20, max_delay=300) - @handle_json - def lncli(self, cmd) -> dict: - cli = "lightning-cli" - cmd = f"{cli} --network=regtest {cmd}" - return self.backend.exec_run(self.tank.index, ServiceType.LIGHTNING, cmd) - - def getnewaddress(self): - return self.lncli("newaddr")["bech32"] - - def get_pub_key(self): - res = self.lncli("getinfo") - return res["id"] - - def getURI(self): - res = self.lncli("getinfo") - if len(res["address"]) < 1: - return None - return f'{res["id"]}@{res["address"][0]["address"]}:{res["address"][0]["port"]}' - - def get_wallet_balance(self) -> int: - 
res = self.lncli("listfunds") - return int(sum(o["amount_msat"] for o in res["outputs"]) / 1000) - - # returns the channel point in the form txid:output_index - def open_channel_to_tank(self, index: int, channel_open_data: str) -> str: - tank = self.warnet.tanks[index] - [pubkey, host] = tank.lnnode.getURI().split("@") - res = self.lncli(f"fundchannel id={pubkey} {channel_open_data}") - if "txid" not in res or "outnum" not in res: - raise ValueError(f"Error opening channel to tank: {res}") - return f"{res['txid']}:{res['outnum']}" - - def update_channel_policy(self, chan_point: str, policy: str) -> str: - return self.lncli(f"setchannel {chan_point} {policy}") - - def get_graph_nodes(self) -> list[str]: - return list(n["nodeid"] for n in self.lncli("listnodes")["nodes"]) - - def get_graph_channels(self) -> list[LNChannel]: - cln_channels = self.lncli("listchannels")["channels"] - # CLN lists channels twice, once for each direction. This finds the unique channel ids. - short_channel_ids = {chan["short_channel_id"]: chan for chan in cln_channels}.keys() - channels = [] - for short_channel_id in short_channel_ids: - nodes = [ - chans for chans in cln_channels if chans["short_channel_id"] == short_channel_id - ] - # CLN has only heard about one side of the channel - if len(nodes) == 1: - channels.append(self.lnchannel_from_json(nodes[0], None)) - continue - channels.append(self.lnchannel_from_json(nodes[0], nodes[1])) - return channels - - @staticmethod - def lnchannel_from_json(node1: object, node2: object) -> LNChannel: - if not node1: - raise ValueError("node1 can't be None") - - node2_policy = ( - LNPolicy( - min_htlc=node2["htlc_minimum_msat"], - max_htlc=node2["htlc_maximum_msat"], - base_fee_msat=node2["base_fee_millisatoshi"], - fee_rate_milli_msat=node2["fee_per_millionth"], - ) - if node2 is not None - else None - ) - - return LNChannel( - node1_pub=node1["source"], - node2_pub=node1["destination"], - capacity_msat=node1["amount_msat"], - 
short_chan_id=node1["short_channel_id"], - node1_policy=LNPolicy( - min_htlc=node1["htlc_minimum_msat"], - max_htlc=node1["htlc_maximum_msat"], - base_fee_msat=node1["base_fee_millisatoshi"], - fee_rate_milli_msat=node1["fee_per_millionth"], - ), - node2_policy=node2_policy, - ) - - def get_peers(self) -> list[str]: - return list(p["id"] for p in self.lncli("listpeers")["peers"]) - - def connect_to_tank(self, index): - return super().connect_to_tank(index) - - def generate_cli_command(self, command: list[str]): - network = f"--network={self.tank.warnet.bitcoin_network}" - cmd = f"{network} {' '.join(command)}" - cmd = f"lightning-cli {cmd}" - return cmd - - def export(self, config: object, tar_file): - # Retrieve the credentials - ca_cert = self.backend.get_file( - self.tank.index, - ServiceType.LIGHTNING, - "/root/.lightning/regtest/ca.pem", - ) - client_cert = self.backend.get_file( - self.tank.index, - ServiceType.LIGHTNING, - "/root/.lightning/regtest/client.pem", - ) - client_key = self.backend.get_file( - self.tank.index, - ServiceType.LIGHTNING, - "/root/.lightning/regtest/client-key.pem", - ) - name = f"ln-{self.tank.index}" - ca_cert_filename = f"{name}_ca_cert.pem" - client_cert_filename = f"{name}_client_cert.pem" - client_key_filename = f"{name}_client_key.pem" - host = self.backend.get_lnnode_hostname(self.tank.index) - - # Add the files to the in-memory tar archive - tarinfo1 = tarfile.TarInfo(name=ca_cert_filename) - tarinfo1.size = len(ca_cert) - fileobj1 = io.BytesIO(ca_cert) - tar_file.addfile(tarinfo=tarinfo1, fileobj=fileobj1) - tarinfo2 = tarfile.TarInfo(name=client_cert_filename) - tarinfo2.size = len(client_cert) - fileobj2 = io.BytesIO(client_cert) - tar_file.addfile(tarinfo=tarinfo2, fileobj=fileobj2) - tarinfo3 = tarfile.TarInfo(name=client_key_filename) - tarinfo3.size = len(client_key) - fileobj3 = io.BytesIO(client_key) - tar_file.addfile(tarinfo=tarinfo3, fileobj=fileobj3) - - config["nodes"].append( - { - "id": name, - "address": 
f"https://{host}:{self.rpc_port}", - "ca_cert": f"/simln/{ca_cert_filename}", - "client_cert": f"/simln/{client_cert_filename}", - "client_key": f"/simln/{client_key_filename}", - } - ) diff --git a/src/warnet/lnchannel.py b/src/warnet/lnchannel.py deleted file mode 100644 index 2d17460fb..000000000 --- a/src/warnet/lnchannel.py +++ /dev/null @@ -1,148 +0,0 @@ -import logging - - -class LNPolicy: - def __init__( - self, - min_htlc: int, - max_htlc: int, - base_fee_msat: int, - fee_rate_milli_msat: int, - time_lock_delta: int = 0, - ) -> None: - self.min_htlc = min_htlc - self.max_htlc = max_htlc - self.base_fee_msat = base_fee_msat - self.fee_rate_milli_msat = fee_rate_milli_msat - self.time_lock_delta = time_lock_delta - - def __str__(self) -> str: - return ( - f"LNPolicy(min_htlc={self.min_htlc}, " - f"max_htlc={self.max_htlc}, " - f"base_fee={self.base_fee_msat}, " - f"fee_rate={self.fee_rate_milli_msat}, " - f"time_lock_delta={self.time_lock_delta})" - ) - - -class LNChannel: - def __init__( - self, - node1_pub: str, - node2_pub: str, - capacity_msat: int = 0, - short_chan_id: str = "", - node1_policy: LNPolicy = None, - node2_policy: LNPolicy = None, - ) -> None: - # Ensure that the node with the lower pubkey is node1 - if node1_pub > node2_pub: - node1_pub, node2_pub = node2_pub, node1_pub - node1_policy, node2_policy = node2_policy, node1_policy - self.node1_pub = node1_pub - self.node2_pub = node2_pub - self.capacity_msat = capacity_msat - self.short_chan_id = short_chan_id - self.node1_policy = node1_policy - self.node2_policy = node2_policy - self.logger = logging.getLogger("lnchan") - - def __str__(self) -> str: - return ( - f"LNChannel(short_chan_id={self.short_chan_id}, " - f"capacity_msat={self.capacity_msat}, " - f"node1_pub={self.node1_pub[:8]}..., " - f"node2_pub={self.node2_pub[:8]}..., " - f"node1_policy=({self.node1_policy.__str__()}), " - f"node2_policy=({self.node2_policy.__str__()}))" - ) - - # Only used to compare warnet channels imported 
from a mainnet source file - # because pubkeys are unpredictable and node 1/2 might be swapped - def flip(self) -> "LNChannel": - return LNChannel( - # Keep the old pubkeys so the constructor doesn't just flip it back - node1_pub=self.node1_pub, - node2_pub=self.node2_pub, - capacity_msat=self.capacity_msat, - short_chan_id=self.short_chan_id, - node1_policy=self.node2_policy, - node2_policy=self.node1_policy, - ) - - def policy_match(self, ch2: "LNChannel") -> bool: - assert isinstance(ch2, LNChannel) - - node1_policy_match = False - node2_policy_match = False - - if self.node1_policy is None and ch2.node1_policy is None: - node1_policy_match = True - - if self.node2_policy is None and ch2.node2_policy is None: - node2_policy_match = True - - def compare_attributes(attr1, attr2, min_value=0, attr_name=""): - if attr1 == 0 or attr2 == 0: - return True - result = max(int(attr1), min_value) == max(int(attr2), min_value) - if not result: - self.logger.debug(f"Mismatch in {attr_name}: {attr1} != {attr2}") - return result - - if self.node1_policy is not None and ch2.node1_policy is not None: - attributes_to_compare = [ - ( - self.node1_policy.time_lock_delta, - ch2.node1_policy.time_lock_delta, - 18, - "node1_time_lock_delta", - ), - (self.node1_policy.min_htlc, ch2.node1_policy.min_htlc, 1, "node1_min_htlc"), - ( - self.node1_policy.base_fee_msat, - ch2.node1_policy.base_fee_msat, - 0, - "node1_base_fee_msat", - ), - ( - self.node1_policy.fee_rate_milli_msat, - ch2.node1_policy.fee_rate_milli_msat, - 0, - "node1_fee_rate_milli_msat", - ), - ] - node1_policy_match = all(compare_attributes(*attrs) for attrs in attributes_to_compare) - - if self.node2_policy is not None and ch2.node2_policy is not None: - attributes_to_compare = [ - ( - self.node2_policy.time_lock_delta, - ch2.node2_policy.time_lock_delta, - 18, - "node2_time_lock_delta", - ), - (self.node2_policy.min_htlc, ch2.node2_policy.min_htlc, 1, "node2_min_htlc"), - ( - self.node2_policy.base_fee_msat, - 
ch2.node2_policy.base_fee_msat, - 0, - "node2_base_fee_msat", - ), - ( - self.node2_policy.fee_rate_milli_msat, - ch2.node2_policy.fee_rate_milli_msat, - 0, - "node2_fee_rate_milli_msat", - ), - ] - node2_policy_match = all(compare_attributes(*attrs) for attrs in attributes_to_compare) - - return node1_policy_match and node2_policy_match - - def channel_match(self, ch2: "LNChannel") -> bool: - if self.capacity_msat != ch2.capacity_msat: - self.logger.debug(f"Capacity mismatch: {self.capacity_msat} != {ch2.capacity_msat}") - return False - return self.policy_match(ch2) diff --git a/src/warnet/lnd.py b/src/warnet/lnd.py deleted file mode 100644 index 3282d8253..000000000 --- a/src/warnet/lnd.py +++ /dev/null @@ -1,191 +0,0 @@ -import io -import tarfile - -from warnet.backend.kubernetes_backend import KubernetesBackend -from warnet.services import ServiceType -from warnet.utils import exponential_backoff, generate_ipv4_addr, handle_json - -from .lnchannel import LNChannel, LNPolicy -from .lnnode import LNNode, lnd_to_cl_scid -from .status import RunningStatus - -LND_CONFIG_BASE = " ".join( - [ - "--noseedbackup", - "--norest", - "--debuglevel=debug", - "--accept-keysend", - "--bitcoin.active", - "--bitcoin.regtest", - "--bitcoin.node=bitcoind", - "--maxpendingchannels=64", - "--trickledelay=1", - ] -) - - -class LNDNode(LNNode): - def __init__(self, warnet, tank, backend: KubernetesBackend, options): - self.warnet = warnet - self.tank = tank - self.backend = backend - self.image = options["ln_image"] - self.cb = options["cb_image"] - self.ln_config = options["ln_config"] - self.ipv4 = generate_ipv4_addr(self.warnet.subnet) - self.rpc_port = 10009 - self.impl = "lnd" - - @property - def status(self) -> RunningStatus: - return super().status - - @property - def cb_status(self) -> RunningStatus: - return super().cb_status - - def get_conf(self, ln_container_name, tank_container_name) -> str: - conf = LND_CONFIG_BASE - conf += f" --bitcoind.rpcuser={self.tank.rpc_user}" - 
conf += f" --bitcoind.rpcpass={self.tank.rpc_password}" - conf += f" --bitcoind.rpchost={tank_container_name}:{self.tank.rpc_port}" - conf += f" --bitcoind.zmqpubrawblock=tcp://{tank_container_name}:{self.tank.zmqblockport}" - conf += f" --bitcoind.zmqpubrawtx=tcp://{tank_container_name}:{self.tank.zmqtxport}" - conf += f" --rpclisten=0.0.0.0:{self.rpc_port}" - conf += f" --alias={self.tank.index}" - conf += f" --externalhosts={ln_container_name}" - conf += f" --tlsextradomain={ln_container_name}" - conf += " " + self.ln_config - return conf - - @exponential_backoff(max_retries=20, max_delay=300) - @handle_json - def lncli(self, cmd) -> dict: - cli = "lncli" - cmd = f"{cli} --network=regtest {cmd}" - return self.backend.exec_run(self.tank.index, ServiceType.LIGHTNING, cmd) - - def getnewaddress(self): - return self.lncli("newaddress p2wkh")["address"] - - def get_pub_key(self): - res = self.lncli("getinfo") - return res["identity_pubkey"] - - def getURI(self): - res = self.lncli("getinfo") - if len(res["uris"]) < 1: - return None - return res["uris"][0] - - def get_wallet_balance(self) -> int: - res = self.lncli("walletbalance")["confirmed_balance"] - return res - - # returns the channel point in the form txid:output_index - def open_channel_to_tank(self, index: int, channel_open_data: str) -> str: - tank = self.warnet.tanks[index] - [pubkey, host] = tank.lnnode.getURI().split("@") - txid = self.lncli(f"openchannel --node_key={pubkey} --connect={host} {channel_open_data}")[ - "funding_txid" - ] - # Why doesn't LND return the output index as well? - # Do they charge by the RPC call or something?! 
- pending = self.lncli("pendingchannels") - for chan in pending["pending_open_channels"]: - if txid in chan["channel"]["channel_point"]: - return chan["channel"]["channel_point"] - raise Exception(f"Opened channel with txid {txid} not found in pending channels") - - def update_channel_policy(self, chan_point: str, policy: str) -> str: - ret = self.lncli(f"updatechanpolicy --chan_point={chan_point} {policy}") - if len(ret["failed_updates"]) == 0: - return ret - else: - raise Exception(ret) - - def get_graph_nodes(self) -> list[str]: - return list(n["pub_key"] for n in self.lncli("describegraph")["nodes"]) - - def get_graph_channels(self) -> list[LNChannel]: - edges = self.lncli("describegraph")["edges"] - return [self.lnchannel_from_json(edge) for edge in edges] - - @staticmethod - def lnchannel_from_json(edge: object) -> LNChannel: - node1_policy = ( - LNPolicy( - min_htlc=int(edge["node1_policy"]["min_htlc"]), - max_htlc=int(edge["node1_policy"]["max_htlc_msat"]), - base_fee_msat=int(edge["node1_policy"]["fee_base_msat"]), - fee_rate_milli_msat=int(edge["node1_policy"]["fee_rate_milli_msat"]), - time_lock_delta=int(edge["node1_policy"]["time_lock_delta"]), - ) - if edge["node1_policy"] - else None - ) - - node2_policy = ( - LNPolicy( - min_htlc=int(edge["node2_policy"]["min_htlc"]), - max_htlc=int(edge["node2_policy"]["max_htlc_msat"]), - base_fee_msat=int(edge["node2_policy"]["fee_base_msat"]), - fee_rate_milli_msat=int(edge["node2_policy"]["fee_rate_milli_msat"]), - time_lock_delta=int(edge["node2_policy"]["time_lock_delta"]), - ) - if edge["node2_policy"] - else None - ) - - return LNChannel( - node1_pub=edge["node1_pub"], - node2_pub=edge["node2_pub"], - capacity_msat=(int(edge["capacity"]) * 1000), - short_chan_id=lnd_to_cl_scid(edge["channel_id"]), - node1_policy=node1_policy, - node2_policy=node2_policy, - ) - - def get_peers(self) -> list[str]: - return list(p["pub_key"] for p in self.lncli("listpeers")["peers"]) - - def connect_to_tank(self, index): - 
return super().connect_to_tank(index) - - def generate_cli_command(self, command: list[str]): - network = f"--network={self.tank.warnet.bitcoin_network}" - cmd = f"{network} {' '.join(command)}" - cmd = f"lncli {cmd}" - return cmd - - def export(self, config: object, tar_file): - # Retrieve the credentials - macaroon = self.backend.get_file( - self.tank.index, - ServiceType.LIGHTNING, - "/root/.lnd/data/chain/bitcoin/regtest/admin.macaroon", - ) - cert = self.backend.get_file(self.tank.index, ServiceType.LIGHTNING, "/root/.lnd/tls.cert") - name = f"ln-{self.tank.index}" - macaroon_filename = f"{name}_admin.macaroon" - cert_filename = f"{name}_tls.cert" - host = self.backend.get_lnnode_hostname(self.tank.index) - - # Add the files to the in-memory tar archive - tarinfo1 = tarfile.TarInfo(name=macaroon_filename) - tarinfo1.size = len(macaroon) - fileobj1 = io.BytesIO(macaroon) - tar_file.addfile(tarinfo=tarinfo1, fileobj=fileobj1) - tarinfo2 = tarfile.TarInfo(name=cert_filename) - tarinfo2.size = len(cert) - fileobj2 = io.BytesIO(cert) - tar_file.addfile(tarinfo=tarinfo2, fileobj=fileobj2) - - config["nodes"].append( - { - "id": name, - "address": f"https://{host}:{self.rpc_port}", - "macaroon": f"/simln/{macaroon_filename}", - "cert": f"/simln/{cert_filename}", - } - ) diff --git a/src/warnet/lnnode.py b/src/warnet/lnnode.py deleted file mode 100644 index deda5da20..000000000 --- a/src/warnet/lnnode.py +++ /dev/null @@ -1,99 +0,0 @@ -from abc import ABC, abstractmethod - -from warnet.backend.kubernetes_backend import KubernetesBackend -from warnet.services import ServiceType -from warnet.utils import exponential_backoff, handle_json - -from .status import RunningStatus - - -class LNNode(ABC): - @abstractmethod - def __init__(self, warnet, tank, backend: KubernetesBackend, options): - pass - - @property - def status(self) -> RunningStatus: - return self.warnet.container_interface.get_status(self.tank.index, ServiceType.LIGHTNING) - - @property - def cb_status(self) 
-> RunningStatus: - if not self.cb: - return None - return self.warnet.container_interface.get_status( - self.tank.index, ServiceType.CIRCUITBREAKER - ) - - @abstractmethod - def get_conf(self, ln_container_name, tank_container_name) -> str: - pass - - @exponential_backoff(max_retries=20, max_delay=300) - @handle_json - @abstractmethod - def lncli(self, cmd) -> dict: - pass - - @abstractmethod - def getnewaddress(self): - pass - - @abstractmethod - def get_pub_key(self): - pass - - @abstractmethod - def getURI(self): - pass - - @abstractmethod - def get_wallet_balance(self) -> int: - pass - - @abstractmethod - def open_channel_to_tank(self, index: int, channel_open_data: str) -> str: - """Return the channel point in the form txid:output_index""" - pass - - @abstractmethod - def update_channel_policy(self, chan_point: str, policy: str) -> str: - pass - - @abstractmethod - def get_graph_nodes(self) -> list[str]: - pass - - @abstractmethod - def get_graph_channels(self) -> list[dict]: - pass - - @abstractmethod - def get_peers(self) -> list[str]: - pass - - def connect_to_tank(self, index): - tank = self.warnet.tanks[index] - uri = tank.lnnode.getURI() - res = self.lncli(f"connect {uri}") - return res - - @abstractmethod - def generate_cli_command(self, command: list[str]): - pass - - @abstractmethod - def export(self, config: object, tar_file): - pass - - -def lnd_to_cl_scid(id) -> str: - s = int(id, 10) - block = s >> 40 - tx = s >> 16 & 0xFFFFFF - output = s & 0xFFFF - return f"{block}x{tx}x{output}" - - -def cl_to_lnd_scid(s) -> int: - s = [int(i) for i in s.split("x")] - return (s[0] << 40) | (s[1] << 16) | s[2] diff --git a/src/warnet/services.py b/src/warnet/services.py deleted file mode 100644 index 562813432..000000000 --- a/src/warnet/services.py +++ /dev/null @@ -1,36 +0,0 @@ -from enum import Enum - -FO_CONF_NAME = "fork_observer_config.toml" -AO_CONF_NAME = "addrman_observer_config.toml" -GRAFANA_PROVISIONING = "grafana-provisioning" -PROM_CONF_NAME = 
"prometheus.yml" - - -class ServiceType(Enum): - BITCOIN = 1 - LIGHTNING = 2 - CIRCUITBREAKER = 3 - - -SERVICES = { - # "forkobserver": { - # "image": "b10c/fork-observer:latest", - # "container_name_suffix": "fork-observer", - # "warnet_port": "23001", - # "container_port": "2323", - # "config_files": [f"{FO_CONF_NAME}:/app/config.toml"], - # }, - # "addrmanobserver": { - # "image": "b10c/addrman-observer:latest", - # "container_name_suffix": "addrman-observer", - # "warnet_port": "23005", - # "container_port": "3882", - # "config_files": [f"{AO_CONF_NAME}:/app/config.toml"], - # }, - "simln": { - "image": "bitcoindevproject/simln:0.2.0", - "container_name_suffix": "simln", - "environment": ["LOG_LEVEL=debug", "SIMFILE_PATH=/simln/sim.json"], - "config_files": ["simln/:/simln"], - }, -} diff --git a/src/warnet/status.py b/src/warnet/status.py deleted file mode 100644 index ac83d4140..000000000 --- a/src/warnet/status.py +++ /dev/null @@ -1,9 +0,0 @@ -from enum import Enum - - -class RunningStatus(Enum): - PENDING = 1 - RUNNING = 2 - STOPPED = 3 - FAILED = 4 - UNKNOWN = 5 diff --git a/src/warnet/tank.py b/src/warnet/tank.py deleted file mode 100644 index ac04d3f70..000000000 --- a/src/warnet/tank.py +++ /dev/null @@ -1,198 +0,0 @@ -""" -Tanks are containerized bitcoind nodes -""" - -import logging - -from .services import ServiceType -from .status import RunningStatus -from .utils import ( - SUPPORTED_TAGS, - exponential_backoff, - generate_ipv4_addr, - sanitize_tc_netem_command, -) - -CONTAINER_PREFIX_PROMETHEUS = "prometheus_exporter" - -logger = logging.getLogger("tank") - -CONFIG_BASE = " ".join( - [ - "-regtest=1", - "-checkmempool=0", - "-acceptnonstdtxn=1", - "-debuglogfile=0", - "-logips=1", - "-logtimemicros=1", - "-capturemessages=1", - "-rpcallowip=0.0.0.0/0", - "-rpcbind=0.0.0.0", - "-fallbackfee=0.00001000", - "-listen=1", - ] -) - - -class Tank: - DEFAULT_BUILD_ARGS = "--disable-tests --with-incompatible-bdb --without-gui --disable-bench 
--disable-fuzz-binary --enable-suppress-external-warnings --enable-debug " - - def __init__(self, index: int, warnet): - from warnet.lnnode import LNNode - - self.index = index - self.warnet = warnet - self.network_name = warnet.network_name - self.bitcoin_network = warnet.bitcoin_network - self.version: str = "" - self.image: str = "" - self.bitcoin_config = "" - self.netem = None - self.exporter = False - self.metrics = None - self.collect_logs = False - self.build_args = "" - self.lnnode: LNNode | None = None - self.rpc_port = 18443 - self.rpc_user = "warnet_user" - self.rpc_password = "2themoon" - self.zmqblockport = 28332 - self.zmqtxport = 28333 - self._suffix = None - self._ipv4 = None - self._exporter_name = None - # index of integers imported from graph file - # indicating which tanks to initially connect to - self.init_peers = [] - - def _parse_version(self, version): - if not version: - return - if version not in SUPPORTED_TAGS and not ("/" in version and "#" in version): - raise Exception( - f"Unsupported version: can't be generated from Docker images: {self.version}" - ) - self.version = version - - def parse_graph_node(self, node): - # Dynamically parse properties based on the schema - graph_properties = {} - for property, specs in self.warnet.graph_schema["node"]["properties"].items(): - value = node.get(property, specs.get("default")) - if property == "version": - self._parse_version(value) - setattr(self, property, value) - graph_properties[property] = value - - if self.version and self.image: - raise Exception( - f"Tank has {self.version=:} and {self.image=:} supplied and can't be built. Provide one or the other." 
- ) - - # Special handling for complex properties - if "ln" in node: - options = { - "impl": node["ln"], - "cb_image": node.get("ln_cb_image", None), - "ln_config": node.get("ln_config", ""), - } - from warnet.cln import CLNNode - from warnet.lnd import LNDNode - - if options["impl"] == "lnd": - options["ln_image"] = node.get("ln_image", "lightninglabs/lnd:v0.18.0-beta") - self.lnnode = LNDNode(self.warnet, self, self.warnet.container_interface, options) - elif options["impl"] == "cln": - options["ln_image"] = node.get("ln_image", "elementsproject/lightningd:v23.11") - self.lnnode = CLNNode(self.warnet, self, self.warnet.container_interface, options) - else: - raise Exception(f"Unsupported Lightning Network implementation: {options['impl']}") - - if "metrics" in node: - self.metrics = node["metrics"] - - logger.debug( - f"Parsed graph node: {self.index} with attributes: {[f'{key}={value}' for key, value in graph_properties.items()]}" - ) - - @classmethod - def from_graph_node(cls, index, warnet, tank=None): - assert index is not None - index = int(index) - self = tank - if self is None: - self = cls(index, warnet) - node = warnet.graph.nodes[index] - self.parse_graph_node(node) - return self - - @property - def suffix(self): - if self._suffix is None: - self._suffix = f"{self.index:06}" - return self._suffix - - @property - def ipv4(self): - if self._ipv4 is None: - self._ipv4 = generate_ipv4_addr(self.warnet.subnet) - return self._ipv4 - - @property - def exporter_name(self): - if self._exporter_name is None: - self._exporter_name = f"{self.network_name}-{CONTAINER_PREFIX_PROMETHEUS}-{self.suffix}" - return self._exporter_name - - @property - def status(self) -> RunningStatus: - return self.warnet.container_interface.get_status(self.index, ServiceType.BITCOIN) - - @exponential_backoff() - def exec(self, cmd: str): - return self.warnet.container_interface.exec_run(self.index, ServiceType.BITCOIN, cmd=cmd) - - def get_dns_addr(self) -> str: - dns_addr = 
self.warnet.container_interface.get_tank_dns_addr(self.index) - return dns_addr - - def get_ip_addr(self) -> str: - ip_addr = self.warnet.container_interface.get_tank_ip_addr(self.index) - return ip_addr - - def get_bitcoin_conf(self, nodes: list[str]) -> str: - conf = CONFIG_BASE - conf += f" -rpcuser={self.rpc_user}" - conf += f" -rpcpassword={self.rpc_password}" - conf += f" -rpcport={self.rpc_port}" - conf += f" -zmqpubrawblock=tcp://0.0.0.0:{self.zmqblockport}" - conf += f" -zmqpubrawtx=tcp://0.0.0.0:{self.zmqtxport}" - conf += " " + self.bitcoin_config - for node in nodes: - conf += f" -addnode={node}" - return conf - - def apply_network_conditions(self): - if self.netem is None: - return - - if not sanitize_tc_netem_command(self.netem): - logger.warning( - f"Not applying unsafe tc-netem conditions to tank {self.index}: `{self.netem}`" - ) - return - - # Apply the network condition to the container - try: - self.exec(self.netem) - logger.info( - f"Successfully applied network conditions to tank {self.index}: `{self.netem}`" - ) - except Exception as e: - logger.error( - f"Error applying network conditions to tank {self.index}: `{self.netem}` ({e})" - ) - - def export(self, config: object, tar_file): - if self.lnnode is not None: - self.lnnode.export(config, tar_file) diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py deleted file mode 100644 index 67cd52002..000000000 --- a/src/warnet/warnet.py +++ /dev/null @@ -1,283 +0,0 @@ -""" -Warnet is the top-level class for a simulated network. 
-""" - -import base64 -import json -import logging -from pathlib import Path - -import networkx - -from .backend.kubernetes_backend import KubernetesBackend -from .tank import Tank -from .utils import gen_config_dir, load_schema, validate_graph_schema - -logger = logging.getLogger("warnet") - - -class Warnet: - def __init__(self, config_dir, network_name: str): - self.config_dir: Path = config_dir - self.config_dir.mkdir(parents=True, exist_ok=True) - self.container_interface = KubernetesBackend(config_dir, network_name) - self.bitcoin_network: str = "regtest" - self.network_name: str = "warnet" - self.subnet: str = "100.0.0.0/8" - self.graph: networkx.Graph | None = None - self.graph_name = "graph.graphml" - self.tanks: list[Tank] = [] - self.deployment_file: Path | None = None - self.graph_schema = load_schema() - self.services = [] - - def _warnet_dict_representation(self) -> dict: - repr = {} - # Warnet - repr["warnet_headers"] = [ - "Temp dir", - "Bitcoin network", - "Docker network", - "Subnet", - "Graph", - ] - repr["warnet"] = [ - [ - str(self.config_dir), - self.bitcoin_network, - self.network_name, - self.subnet, - str(self.graph), - ] - ] - - # Tanks - tank_headers = [ - "Index", - "Version", - "IPv4", - "bitcoin conf", - "tc_netem", - "LN", - "LN Image", - "LN IPv4", - ] - has_ln = any(tank.lnnode and tank.lnnode.impl for tank in self.tanks) - tanks = [] - for tank in self.tanks: - tank_data = [ - tank.index, - tank.version if tank.version else tank.image, - tank.ipv4, - tank.bitcoin_config, - tank.netem, - ] - if has_ln: - tank_data.extend( - [ - tank.lnnode.impl if tank.lnnode else "", - tank.lnnode.image if tank.lnnode else "", - tank.lnnode.ipv4 if tank.lnnode else "", - ] - ) - tanks.append(tank_data) - if not has_ln: - tank_headers.remove("LN") - tank_headers.remove("LN IPv4") - - repr["tank_headers"] = tank_headers - repr["tanks"] = tanks - - return repr - - @classmethod - def from_graph_file( - cls, - base64_graph: str, - config_dir: Path, - 
network: str = "warnet", - ): - self = cls(config_dir, network) - destination = self.config_dir / self.graph_name - destination.parent.mkdir(parents=True, exist_ok=True) - graph_file = base64.b64decode(base64_graph) - with open(destination, "wb") as f: - f.write(graph_file) - self.network_name = network - self.graph = networkx.parse_graphml( - graph_file.decode("utf-8"), node_type=int, force_multigraph=True - ) - validate_graph_schema(self.graph) - self.tanks_from_graph() - if "services" in self.graph.graph: - self.services = self.graph.graph["services"].split() - logger.info(f"Created Warnet using directory {self.config_dir}") - return self - - @classmethod - def from_graph(cls, graph, network="warnet"): - self = cls(Path(), network) - self.graph = graph - validate_graph_schema(self.graph) - self.tanks_from_graph() - if "services" in self.graph.graph: - self.services = self.graph.graph["services"].split() - logger.info(f"Created Warnet using directory {self.config_dir}") - return self - - @classmethod - def from_network(cls, network_name): - config_dir = gen_config_dir(network_name) - self = cls(config_dir, network_name) - self.network_name = network_name - # Get network graph edges from graph file (required for network restarts) - self.graph = networkx.read_graphml( - Path(self.config_dir / self.graph_name), node_type=int, force_multigraph=True - ) - validate_graph_schema(self.graph) - self.tanks_from_graph() - if "services" in self.graph.graph: - self.services = self.graph.graph["services"].split() - for tank in self.tanks: - tank._ipv4 = self.container_interface.get_tank_ipv4(tank.index) - return self - - def tanks_from_graph(self): - if not self.graph: - return - for node_id in self.graph.nodes(): - if int(node_id) != len(self.tanks): - raise Exception( - f"Node ID in graph must be incrementing integers (got '{node_id}', expected '{len(self.tanks)}')" - ) - tank = Tank.from_graph_node(node_id, self) - # import edges as list of destinations to connect to - for 
edge in self.graph.edges(data=True): - (src, dst, data) = edge - # Ignore LN edges for now - if "channel_open" in data: - continue - if src == node_id: - tank.init_peers.append(int(dst)) - self.tanks.append(tank) - logger.info(f"Imported {len(self.tanks)} tanks from graph") - - def apply_network_conditions(self): - for tank in self.tanks: - tank.apply_network_conditions() - - def warnet_build(self): - self.container_interface.build() - - def get_ln_node_from_tank(self, index): - return self.tanks[index].lnnode - - def warnet_up(self): - self.container_interface.up(self) - - def warnet_down(self): - self.container_interface.down(self) - - def generate_deployment(self): - self.container_interface.generate_deployment_file(self) - - # if "forkobserver" in self.services: - # self.write_fork_observer_config() - # if "addrmanobserver" in self.services: - # self.write_addrman_observer_config() - # if "grafana" in self.services: - # self.write_grafana_config() - # if "prometheus" in self.services: - # self.write_prometheus_config() - - # def write_fork_observer_config(self): - # src = FO_CONF_NAME - # dst = self.config_dir / FO_CONF_NAME - # shutil.copy(src, dst) - # with open(dst, "a") as f: - # for tank in self.tanks: - # f.write( - # f""" - # [[networks.nodes]] - # id = {tank.index} - # name = "Node {tank.index}" - # description = "Warnet tank {tank.index}" - # rpc_host = "{tank.ipv4}" - # rpc_port = {tank.rpc_port} - # rpc_user = "{tank.rpc_user}" - # rpc_password = "{tank.rpc_password}" - # """ - # ) - # logger.info(f"Wrote file: {dst}") - - # def write_addrman_observer_config(self): - # src = AO_CONF_NAME - # dst = self.config_dir / AO_CONF_NAME - # shutil.copy(src, dst) - # with open(dst, "a") as f: - # for tank in self.tanks: - # f.write( - # f""" - # [[nodes]] - # id = {tank.index} - # name = "node-{tank.index}" - # rpc_host = "{tank.ipv4}" - # rpc_port = {tank.rpc_port} - # rpc_user = "{tank.rpc_user}" - # rpc_password = "{tank.rpc_password}" - # """ - # ) - # 
logger.info(f"Wrote file: {dst}") - - # def write_grafana_config(self): - # src = GRAFANA_PROVISIONING - # dst = self.config_dir / GRAFANA_PROVISIONING - # shutil.copytree(src, dst, dirs_exist_ok=True) - # logger.info(f"Wrote directory: {dst}") - - # def write_prometheus_config(self): - # scrape_configs = [ - # { - # "job_name": "cadvisor", - # "scrape_interval": "15s", - # "static_configs": [{"targets": [f"{self.network_name}_cadvisor:8080"]}], - # } - # ] - # for tank in self.tanks: - # if tank.exporter: - # scrape_configs.append( - # { - # "job_name": tank.exporter_name, - # "scrape_interval": "5s", - # "static_configs": [{"targets": [f"{tank.exporter_name}:9332"]}], - # } - # ) - # config = {"global": {"scrape_interval": "15s"}, "scrape_configs": scrape_configs} - # prometheus_path = self.config_dir / PROM_CONF_NAME - # try: - # with open(prometheus_path, "w") as file: - # yaml.dump(config, file) - # logger.info(f"Wrote file: {prometheus_path}") - # except Exception as e: - # logger.error(f"An error occurred while writing to {prometheus_path}: {e}") - - def export(self, config: object, tar_file, exclude: list[int]): - for tank in self.tanks: - if tank.index not in exclude: - tank.export(config, tar_file) - - def wait_for_health(self): - self.container_interface.wait_for_healthy_tanks(self) - - def network_connected(self): - for tank in self.tanks: - peerinfo = json.loads(self.container_interface.get_bitcoin_cli(tank, "getpeerinfo")) - manuals = 0 - for peer in peerinfo: - if peer["connection_type"] == "manual": - manuals += 1 - # Even if more edges are specifed, bitcoind only allows - # 8 manual outbound connections - if min(8, len(tank.init_peers)) > manuals: - return False - return True From c0b093e1b69f2d225f0d48a50628109927899874 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 16:25:28 +0200 Subject: [PATCH 008/710] update docs --- docs/warcli.md | 154 ++++--------------------------------------------- 1 file changed, 12 insertions(+), 142 
deletions(-) diff --git a/docs/warcli.md b/docs/warcli.md index 77dddae85..8257bdfc7 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -30,87 +30,6 @@ options: Check Warnet requirements are installed -## Bitcoin - -### `warcli bitcoin debug-log` -Fetch the Bitcoin Core debug log from \ in [network] - -options: -| name | type | required | default | -|---------|--------|------------|-----------| -| node | Int | yes | | -| network | String | | "warnet" | - -### `warcli bitcoin grep-logs` -Grep combined logs via fluentd using regex \ - -options: -| name | type | required | default | -|---------------------|--------|------------|-----------| -| pattern | String | yes | | -| show_k8s_timestamps | Bool | | False | -| no_sort | Bool | | False | -| network | String | | "warnet" | - -### `warcli bitcoin messages` -Fetch messages sent between \ and \ in [network] - -options: -| name | type | required | default | -|---------|--------|------------|-----------| -| node_a | Int | yes | | -| node_b | Int | yes | | -| network | String | | "warnet" | - -### `warcli bitcoin rpc` -Call bitcoin-cli \ [params] on \ in [network] - -options: -| name | type | required | default | -|---------|--------|------------|-----------| -| node | Int | yes | | -| method | String | yes | | -| params | String | | | -| network | String | | "warnet" | - -## Cluster - -### `warcli cluster connect-logging` -Connect kubectl to cluster logging - - -### `warcli cluster deploy` -Deploy Warnet using the current kubectl-configured cluster - -options: -| name | type | required | default | -|--------|--------|------------|-----------| -| dev | Bool | | False | - -### `warcli cluster deploy-logging` -Deploy logging configurations to the cluster using helm - - -### `warcli cluster port-start` -Port forward (runs as a detached process) - - -### `warcli cluster port-stop` -Stop the port forwarding process - - -### `warcli cluster setup-minikube` -Configure a local minikube cluster - -options: -| name | type | 
required | default | -|--------|--------|------------|-----------| -| clean | Bool | | False | - -### `warcli cluster teardown` -Stop the warnet server and tear down the cluster - - ## Graph ### `warcli graph create` @@ -165,36 +84,15 @@ options: | arches | String | | | | action | String | | "load" | -## Ln - -### `warcli ln pubkey` -Get lightning node pub key on \ in [network] - -options: -| name | type | required | default | -|---------|--------|------------|-----------| -| node | Int | yes | | -| network | String | | "warnet" | - -### `warcli ln rpc` -Call lightning cli rpc \ on \ in [network] - -options: -| name | type | required | default | -|---------|--------|------------|-----------| -| node | Int | yes | | -| command | String | yes | | -| network | String | | "warnet" | - ## Network -### `warcli network connected` -Indicate whether the all of the edges in the gaph file are connected in [network] +### `warcli network connect` +Connect nodes based on the edges defined in the graph file. options: -| name | type | required | default | -|---------|--------|------------|-----------| -| network | String | | "warnet" | +| name | type | required | default | +|------------|--------|------------|----------------------------------| +| graph_file | Path | | resources/graphs/default.graphml | ### `warcli network down` Bring down a running warnet named [network] @@ -204,26 +102,14 @@ options: |---------|--------|------------|-----------| | network | String | | "warnet" | -### `warcli network export` -Export all [network] data for a "simln" service running in a container - on the network. Optionally add JSON string [activity] to simln config. - Optionally provide a list of tank indexes to [exclude]. - Returns True on success. 
- -options: -| name | type | required | default | -|----------|--------|------------|-----------| -| network | String | | "warnet" | -| activity | String | | | -| exclude | String | | "[]" | - -### `warcli network info` -Get info about a warnet named [network] +### `warcli network generate-yaml` +Generate a Kubernetes YAML file from a graph file for deploying warnet nodes. options: -| name | type | required | default | -|---------|--------|------------|-----------| -| network | String | | "warnet" | +| name | type | required | default | +|------------|--------|------------|----------------------------------| +| graph_file | Path | | resources/graphs/default.graphml | +| output | String | | "warnet-deployment.yaml" | ### `warcli network logs` Get Kubernetes logs from the RPC server @@ -240,24 +126,8 @@ options: | name | type | required | default | |------------|--------|------------|----------------------------------| | graph_file | Path | | resources/graphs/default.graphml | -| force | Bool | | False | | network | String | | "warnet" | - -### `warcli network status` -Get status of a warnet named [network] - -options: -| name | type | required | default | -|---------|--------|------------|-----------| -| network | String | | "warnet" | - -### `warcli network up` -Bring up a previously-stopped warnet named [network] - -options: -| name | type | required | default | -|---------|--------|------------|-----------| -| network | String | | "warnet" | +| logging | Bool | | False | ## Scenarios From ea8565b783bf386d3ec9b9bd2b9309e773452013 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 16:50:19 +0200 Subject: [PATCH 009/710] temporarily disable test CI job --- .github/workflows/deploy.yml | 4 ++- .github/workflows/publish-dist.yml | 5 +-- .github/workflows/test.yml | 56 +++++++----------------------- src/warnet/cli/main.py | 1 + src/warnet/cli/utils.py | 1 - 5 files changed, 20 insertions(+), 47 deletions(-) diff --git a/.github/workflows/deploy.yml 
b/.github/workflows/deploy.yml index 021b70d66..12fae71e5 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -8,7 +8,9 @@ on: jobs: deploy-to-dockerhub: runs-on: ubuntu-latest - if: github.event.workflow_run.conclusion == 'success' + # if: github.event.workflow_run.conclusion == 'success' + # DISABLE FOR REWRITE + if: false steps: - uses: actions/checkout@v4 - name: Set up QEMU diff --git a/.github/workflows/publish-dist.yml b/.github/workflows/publish-dist.yml index d228464e0..35c41ae39 100644 --- a/.github/workflows/publish-dist.yml +++ b/.github/workflows/publish-dist.yml @@ -6,7 +6,6 @@ jobs: build: name: Build distribution 📦 runs-on: ubuntu-latest - steps: - uses: actions/checkout@v4 - name: Set up Python @@ -30,7 +29,9 @@ jobs: publish-to-pypi: name: >- Publish Python 🐍 distribution 📦 to PyPI - if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes + # if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes + # DISABLE FOR REWRITE + if: false needs: - build runs-on: ubuntu-latest diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 0524bed9c..bd5e7425f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,48 +7,25 @@ on: - main jobs: - # ruff: - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v4 - # - uses: chartboost/ruff-action@v1 - # with: - # args: 'check .' - ruff-format: + ruff: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: chartboost/ruff-action@v1 - with: - args: 'format --check .' - build-image: - # needs: [ruff, ruff-format] - needs: [ruff-format] + - uses: hynek/setup-cached-uv@v1 + - run: uv venv + - run: uv pip install ruff + - run: source .venv/bin/activate; ruff check . 
+ ruff-format: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Build and export - uses: docker/build-push-action@v5 - with: - file: resources/images/rpc/Dockerfile_prod - context: . - tags: warnet/dev - cache-from: type=gha - cache-to: type=gha,mode=max - outputs: type=docker,dest=/tmp/warnet.tar - - - name: Upload artifact - uses: actions/upload-artifact@v4 - with: - name: warnet - path: /tmp/warnet.tar + - uses: actions/checkout@v4 + - uses: hynek/setup-cached-uv@v1 + - run: uv venv + - run: uv pip install ruff + - run: source .venv/bin/activate; ruff format . test: - needs: [build-image] + # DISABLE FOR REWRITE + if: false runs-on: ubuntu-latest strategy: matrix: @@ -102,10 +79,3 @@ jobs: run: | source .venv/bin/activate ./test/${{matrix.test}} - # build-test: - # needs: [build-image] - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v4 - # - uses: ./.github/actions/compose - # - run: ./test/build_branch_test.py compose diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index de9595559..a1dda555c 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -8,6 +8,7 @@ # from .bitcoin import bitcoin from .graph import graph from .image import image + # from .ln import ln from .network import network from .scenarios import scenarios diff --git a/src/warnet/cli/utils.py b/src/warnet/cli/utils.py index 9b9662f4c..c9d43f616 100644 --- a/src/warnet/cli/utils.py +++ b/src/warnet/cli/utils.py @@ -21,7 +21,6 @@ logger = logging.getLogger("utils") - SUPPORTED_TAGS = ["27.0", "26.0", "25.1", "24.2", "23.2", "22.2"] DEFAULT_TAG = SUPPORTED_TAGS[0] WEIGHTED_TAGS = [ From 8998a86492f6a6b5da4bd16c6a7d8457fbd29f2e Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Tue, 20 Aug 2024 18:08:03 +0200 Subject: [PATCH 010/710] fix unmatched params --- src/warnet/cli/network.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/src/warnet/cli/network.py b/src/warnet/cli/network.py index ff104e620..90ac36812 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -225,7 +225,8 @@ def generate_node_config(node: int, data: dict) -> str: zmqpubrawtx=tcp://0.0.0.0:28333 """ node_specific_config = data.get("bitcoin_config", "") - return f"{base_config}\n{node_specific_config.replace(",", "\n")}" + node_specific_config = node_specific_config.replace(",", "\n") + return f"{base_config}\n{node_specific_config}" def set_kubectl_context(namespace: str): From 18c88f2bdc08609dd4e6c691365689e2a8b1f0d7 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 20 Aug 2024 18:26:44 +0200 Subject: [PATCH 011/710] k8s utils --- src/warnet/cli/k8s.py | 99 +++++++++++++++++++++++++++++++++++++++ src/warnet/cli/network.py | 45 +++--------------- 2 files changed, 106 insertions(+), 38 deletions(-) create mode 100644 src/warnet/cli/k8s.py diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py new file mode 100644 index 000000000..a8c7d2da6 --- /dev/null +++ b/src/warnet/cli/k8s.py @@ -0,0 +1,99 @@ +import os +import subprocess + +from importlib.resources import files + +from kubernetes import client, config +from kubernetes.dynamic import DynamicClient + + +WAR_MANIFESTS = files("manifests") + +def get_static_client(): + config.load_kube_config() + return client.CoreV1Api() + +def get_dynamic_client(): + config.load_kube_config() + return DynamicClient(client.ApiClient()) + +def get_pods(): + sclient = get_static_client() + return sclient.list_namespaced_pod("warnet") + +def run_command(command, stream_output=False, env=None): + # Merge the current environment with the provided env + full_env = os.environ.copy() + if env: + # Convert all env values to strings (only a safeguard) + env = {k: str(v) for k, v in env.items()} + full_env.update(env) + + if stream_output: + process = subprocess.Popen( + ["/bin/bash", "-c", command], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + 
text=True, + bufsize=1, + universal_newlines=True, + env=full_env, + ) + + for line in iter(process.stdout.readline, ""): + print(line, end="") + + process.stdout.close() + return_code = process.wait() + + if return_code != 0: + print(f"Command failed with return code {return_code}") + return False + return True + else: + result = subprocess.run( + command, shell=True, capture_output=True, text=True, executable="/bin/bash" + ) + if result.returncode != 0: + print(f"Error: {result.stderr}") + return False + print(result.stdout) + return True + + +def set_kubectl_context(namespace: str): + """ + Set the default kubectl context to the specified namespace. + """ + command = f"kubectl config set-context --current --namespace={namespace}" + result = run_command(command, stream_output=True) + if result: + print(f"Kubectl context set to namespace: {namespace}") + else: + print(f"Failed to set kubectl context to namespace: {namespace}") + return result + + +def deploy_base_configurations(): + base_configs = [ + "namespace.yaml", + "rbac-config.yaml", + ] + + for config in base_configs: + command = f"kubectl apply -f {WAR_MANIFESTS}/{config}" + if not run_command(command, stream_output=True): + print(f"Failed to apply {config}") + return False + return True + + +def apply_kubernetes_yaml(yaml_file: str): + command = f"kubectl apply -f {yaml_file}" + return run_command(command, stream_output=True) + + +def delete_namespace(namespace: str): + command = f"kubectl delete namespace {namespace}" + return run_command(command, stream_output=True) + diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 90ac36812..6c521029a 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -8,7 +8,13 @@ import yaml from rich import print -from .util import run_command +from .k8s import ( + run_command, + set_kubectl_context, + deploy_base_configurations, + apply_kubernetes_yaml, + delete_namespace +) DEFAULT_GRAPH_FILE = 
files("graphs").joinpath("default.graphml") WAR_MANIFESTS = files("manifests") @@ -229,43 +235,6 @@ def generate_node_config(node: int, data: dict) -> str: return f"{base_config}\n{node_specific_config}" -def set_kubectl_context(namespace: str): - """ - Set the default kubectl context to the specified namespace. - """ - command = f"kubectl config set-context --current --namespace={namespace}" - result = run_command(command, stream_output=True) - if result: - print(f"Kubectl context set to namespace: {namespace}") - else: - print(f"Failed to set kubectl context to namespace: {namespace}") - return result - - -def deploy_base_configurations(): - base_configs = [ - "namespace.yaml", - "rbac-config.yaml", - ] - - for config in base_configs: - command = f"kubectl apply -f {WAR_MANIFESTS}/{config}" - if not run_command(command, stream_output=True): - print(f"Failed to apply {config}") - return False - return True - - -def apply_kubernetes_yaml(yaml_file: str): - command = f"kubectl apply -f {yaml_file}" - return run_command(command, stream_output=True) - - -def delete_namespace(namespace: str): - command = f"kubectl delete namespace {namespace}" - return run_command(command, stream_output=True) - - def setup_logging_helm(): """ Run the required Helm commands for setting up Grafana, Prometheus, and Loki. 
From f0d121438b6e04aad8f631236bae10255fb8ac31 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 19:56:37 +0200 Subject: [PATCH 012/710] remove cli:utils --- src/warnet/cli/utils.py | 480 ---------------------------------------- 1 file changed, 480 deletions(-) delete mode 100644 src/warnet/cli/utils.py diff --git a/src/warnet/cli/utils.py b/src/warnet/cli/utils.py deleted file mode 100644 index c9d43f616..000000000 --- a/src/warnet/cli/utils.py +++ /dev/null @@ -1,480 +0,0 @@ -import functools -import ipaddress -import json -import logging -import os -import random -import re -import stat -import subprocess -import sys -import time -from io import BytesIO -from pathlib import Path - -import networkx as nx -from jsonschema import validate -from test_framework.messages import ser_uint256 -from test_framework.p2p import MESSAGEMAP -from warnet import SRC_DIR - -logger = logging.getLogger("utils") - - -SUPPORTED_TAGS = ["27.0", "26.0", "25.1", "24.2", "23.2", "22.2"] -DEFAULT_TAG = SUPPORTED_TAGS[0] -WEIGHTED_TAGS = [ - tag for index, tag in enumerate(reversed(SUPPORTED_TAGS)) for _ in range(index + 1) -] - - -class NonErrorFilter(logging.Filter): - def filter(self, record: logging.LogRecord) -> bool | logging.LogRecord: - return record.levelno <= logging.INFO - - -def exponential_backoff(max_retries=5, base_delay=1, max_delay=32): - """ - A decorator for exponential backoff. - - Parameters: - - max_retries: Maximum number of retries before giving up. - - base_delay: Initial delay in seconds. - - max_delay: Maximum delay in seconds. 
- """ - - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - retries = 0 - while retries < max_retries: - try: - return func(*args, **kwargs) - except Exception as e: - error_msg = str(e).replace("\n", " ").replace("\t", " ") - logger.error(f"rpc error: {error_msg}") - retries += 1 - if retries == max_retries: - raise e - delay = min(base_delay * (2**retries), max_delay) - logger.warning(f"exponential_backoff: retry in {delay} seconds...") - time.sleep(delay) - - return wrapper - - return decorator - - -def handle_json(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = "" - try: - result = func(*args, **kwargs) - logger.debug(f"{result=:}") - if isinstance(result, dict): - return result - parsed_result = json.loads(result) - return parsed_result - except json.JSONDecodeError as e: - logging.error( - f"JSON parsing error in {func.__name__}: {e}. Undecodable result: {result}" - ) - raise - except Exception as e: - logger.error(f"Error in {func.__name__}: {e}") - raise - - return wrapper - - -def get_architecture(): - """ - Get the architecture of the machine. - :return: The architecture of the machine or None if an error occurred - """ - result = subprocess.run(["uname", "-m"], stdout=subprocess.PIPE) - arch = result.stdout.decode("utf-8").strip() - if arch == "x86_64": - arch = "amd64" - if arch is None: - raise Exception("Failed to detect architecture.") - return arch - - -def generate_ipv4_addr(subnet): - """ - Generate a valid random IPv4 address within the given subnet. 
- - :param subnet: Subnet in CIDR notation (e.g., '100.0.0.0/8') - :return: Random IP address within the subnet - """ - reserved_ips = [ - "0.0.0.0/8", - "10.0.0.0/8", - "100.64.0.0/10", - "127.0.0.0/8", - "169.254.0.0/16", - "172.16.0.0/12", - "192.0.0.0/24", - "192.0.2.0/24", - "192.88.99.0/24", - "192.168.0.0/16", - "198.18.0.0/15", - "198.51.100.0/24", - "203.0.113.0/24", - "224.0.0.0/4", - ] - - def is_public(ip): - for reserved in reserved_ips: - if ipaddress.ip_address(ip) in ipaddress.ip_network(reserved, strict=False): - return False - return True - - network = ipaddress.ip_network(subnet, strict=False) - - # Generate a random IP within the subnet range - while True: - ip_int = random.randint(int(network.network_address), int(network.broadcast_address)) - ip_str = str(ipaddress.ip_address(ip_int)) - if is_public(ip_str): - return ip_str - - -def sanitize_tc_netem_command(command: str) -> bool: - """ - Sanitize the tc-netem command to ensure it's valid and safe to execute, as we run it as root on a container. - - Args: - - command (str): The tc-netem command to sanitize. - - Returns: - - bool: True if the command is valid and safe, False otherwise. 
- """ - if not command.startswith("tc qdisc add dev eth0 root netem"): - return False - - tokens = command.split()[7:] # Skip the prefix - - # Valid tc-netem parameters and their patterns - valid_params = { - "delay": r"^\d+ms(\s\d+ms)?(\sdistribution\s(normal|pareto|paretonormal|uniform))?$", - "loss": r"^\d+(\.\d+)?%$", - "duplicate": r"^\d+(\.\d+)?%$", - "corrupt": r"^\d+(\.\d+)?%$", - "reorder": r"^\d+(\.\d+)?%\s\d+(\.\d+)?%$", - "rate": r"^\d+(kbit|mbit|gbit)$", - } - - # Validate each param - i = 0 - while i < len(tokens): - param = tokens[i] - if param not in valid_params: - return False - i += 1 - value_tokens = [] - while i < len(tokens) and tokens[i] not in valid_params: - value_tokens.append(tokens[i]) - i += 1 - value = " ".join(value_tokens) - if not re.match(valid_params[param], value): - return False - - return True - - -def parse_bitcoin_conf(file_content): - """ - Custom parser for INI-style bitcoin.conf - - Args: - - file_content (str): The content of the INI-style file. - - Returns: - - dict: A dictionary representation of the file content. - Key-value pairs are stored as tuples so one key may have - multiple values. Sections are represented as arrays of these tuples. - """ - current_section = None - result = {current_section: []} - - for line in file_content.splitlines(): - line = line.strip() - if not line or line.startswith("#"): - continue - - if line.startswith("[") and line.endswith("]"): - current_section = line[1:-1] - result[current_section] = [] - elif "=" in line: - key, value = line.split("=", 1) - result[current_section].append((key.strip(), value.strip())) - - return result - - -def dump_bitcoin_conf(conf_dict, for_graph=False): - """ - Converts a dictionary representation of bitcoin.conf content back to INI-style string. - - Args: - - conf_dict (dict): A dictionary representation of the file content. - - Returns: - - str: The INI-style string representation of the input dictionary. 
- """ - result = [] - - # Print global section at the top first - values = conf_dict[None] - for sub_key, sub_value in values: - result.append(f"{sub_key}={sub_value}") - - # Then print any named subsections - for section, values in conf_dict.items(): - if section is not None: - result.append(f"\n[{section}]") - else: - continue - for sub_key, sub_value in values: - result.append(f"{sub_key}={sub_value}") - - if for_graph: - return ",".join(result) - - # Terminate file with newline - return "\n".join(result) + "\n" - - -def to_jsonable(obj): - HASH_INTS = [ - "blockhash", - "block_hash", - "hash", - "hashMerkleRoot", - "hashPrevBlock", - "hashstop", - "prev_header", - "sha256", - "stop_hash", - ] - - HASH_INT_VECTORS = [ - "hashes", - "headers", - "vHave", - "vHash", - ] - - if hasattr(obj, "__dict__"): - return obj.__dict__ - elif hasattr(obj, "__slots__"): - ret = {} # type: Any - for slot in obj.__slots__: - val = getattr(obj, slot, None) - if slot in HASH_INTS and isinstance(val, int): - ret[slot] = ser_uint256(val).hex() - elif slot in HASH_INT_VECTORS and all(isinstance(a, int) for a in val): - ret[slot] = [ser_uint256(a).hex() for a in val] - else: - ret[slot] = to_jsonable(val) - return ret - elif isinstance(obj, list): - return [to_jsonable(a) for a in obj] - elif isinstance(obj, bytes): - return obj.hex() - else: - return obj - - -# This function is a hacked-up copy of process_file() from -# Bitcoin Core contrib/message-capture/message-capture-parser.py -def parse_raw_messages(blob, outbound): - TIME_SIZE = 8 - LENGTH_SIZE = 4 - MSGTYPE_SIZE = 12 - - messages = [] - offset = 0 - while True: - # Read the Header - header_len = TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE - tmp_header_raw = blob[offset : offset + header_len] - - offset = offset + header_len - if not tmp_header_raw: - break - tmp_header = BytesIO(tmp_header_raw) - time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") # type: int - msgtype = tmp_header.read(MSGTYPE_SIZE).split(b"\x00", 1)[0] # 
type: bytes - length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # type: int - - # Start converting the message to a dictionary - msg_dict = {} - msg_dict["outbound"] = outbound - msg_dict["time"] = time - msg_dict["size"] = length # "size" is less readable here, but more readable in the output - - msg_ser = BytesIO(blob[offset : offset + length]) - offset = offset + length - - # Determine message type - if msgtype not in MESSAGEMAP: - # Unrecognized message type - try: - msgtype_tmp = msgtype.decode() - if not msgtype_tmp.isprintable(): - raise UnicodeDecodeError - msg_dict["msgtype"] = msgtype_tmp - except UnicodeDecodeError: - msg_dict["msgtype"] = "UNREADABLE" - msg_dict["body"] = msg_ser.read().hex() - msg_dict["error"] = "Unrecognized message type." - messages.append(msg_dict) - print(f"WARNING - Unrecognized message type {msgtype}", file=sys.stderr) - continue - - # Deserialize the message - msg = MESSAGEMAP[msgtype]() - msg_dict["msgtype"] = msgtype.decode() - - try: - msg.deserialize(msg_ser) - except KeyboardInterrupt: - raise - except Exception: - # Unable to deserialize message body - msg_ser.seek(0, os.SEEK_SET) - msg_dict["body"] = msg_ser.read().hex() - msg_dict["error"] = "Unable to deserialize message." 
- messages.append(msg_dict) - print("WARNING - Unable to deserialize message", file=sys.stderr) - continue - - # Convert body of message into a jsonable object - if length: - msg_dict["body"] = to_jsonable(msg) - messages.append(msg_dict) - return messages - - -def gen_config_dir(network: str) -> Path: - """ - Determine a config dir based on network name - """ - config_dir = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.warnet")) - config_dir = Path(config_dir) / "warnet" / network - return config_dir - - -def remove_version_prefix(version_str): - if version_str.startswith("0."): - return version_str[2:] - return version_str - - -def set_execute_permission(file_path): - current_permissions = os.stat(file_path).st_mode - os.chmod(file_path, current_permissions | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) - - -def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_version: bool): - try: - # Use nx.MultiDiGraph() so we get directed edges (source->target) - # and still allow parallel edges (L1 p2p connections + LN channels) - graph = nx.generators.cycle_graph(n, nx.MultiDiGraph()) - except TypeError as e: - msg = f"Failed to create graph: {e}" - logger.error(msg) - return msg - - # Graph is a simply cycle graph with all nodes connected in a loop, including both ends. 
- # Ensure each node has at least 8 outbound connections by making 7 more outbound connections - for src_node in graph.nodes(): - logger.debug(f"Creating additional connections for node {src_node}") - for _ in range(8): - # Choose a random node to connect to - # Make sure it's not the same node and they aren't already connected in either direction - potential_nodes = [ - dst_node - for dst_node in range(n) - if dst_node != src_node - and not graph.has_edge(dst_node, src_node) - and not graph.has_edge(src_node, dst_node) - ] - if potential_nodes: - chosen_node = random.choice(potential_nodes) - graph.add_edge(src_node, chosen_node) - logger.debug(f"Added edge: {src_node}:{chosen_node}") - logger.debug(f"Node {src_node} edges: {graph.edges(src_node)}") - - # parse and process conf file - conf_contents = "" - if bitcoin_conf is not None: - conf = Path(bitcoin_conf) - if conf.is_file(): - with open(conf) as f: - # parse INI style conf then dump using for_graph - conf_dict = parse_bitcoin_conf(f.read()) - conf_contents = dump_bitcoin_conf(conf_dict, for_graph=True) - - # populate our custom fields - for i, node in enumerate(graph.nodes()): - if random_version: - graph.nodes[node]["version"] = random.choice(WEIGHTED_TAGS) - else: - # One node demoing the image tag - if i == 1: - graph.nodes[node]["image"] = f"bitcoindevproject/bitcoin:{version}" - else: - graph.nodes[node]["version"] = version - graph.nodes[node]["bitcoin_config"] = conf_contents - graph.nodes[node]["tc_netem"] = "" - graph.nodes[node]["build_args"] = "" - graph.nodes[node]["exporter"] = False - graph.nodes[node]["collect_logs"] = False - graph.nodes[node]["resources"] = None - - convert_unsupported_attributes(graph) - return graph - - -def convert_unsupported_attributes(graph: nx.Graph): - # Sometimes networkx complains about invalid types when writing the graph - # (it just generated itself!). Try to convert them here just in case. 
- for _, node_data in graph.nodes(data=True): - for key, value in node_data.items(): - if isinstance(value, set): - node_data[key] = list(value) - elif isinstance(value, int | float | str): - continue - else: - node_data[key] = str(value) - - for _, _, edge_data in graph.edges(data=True): - for key, value in edge_data.items(): - if isinstance(value, set): - edge_data[key] = list(value) - elif isinstance(value, int | float | str): - continue - else: - edge_data[key] = str(value) - - -def load_schema(): - with open(SRC_DIR / "graph_schema.json") as schema_file: - return json.load(schema_file) - - -def validate_graph_schema(graph: nx.Graph): - """ - Validate a networkx.Graph against the node schema - """ - graph_schema = load_schema() - validate(instance=graph.graph, schema=graph_schema["graph"]) - for n in list(graph.nodes): - validate(instance=graph.nodes[n], schema=graph_schema["node"]) - for e in list(graph.edges): - validate(instance=graph.edges[e], schema=graph_schema["edge"]) From 2f050050e5b43475934466b5a7ba33d51d3bfe09 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 20:11:32 +0200 Subject: [PATCH 013/710] cli: implement 'bitcoin rpc' using kubectl --- src/warnet/cli/bitcoin.py | 94 +++++++++++++++++++-------------------- src/warnet/cli/main.py | 4 +- 2 files changed, 49 insertions(+), 49 deletions(-) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index 0c01f7d0d..39a98b4fa 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -1,6 +1,6 @@ import click -from .rpc import rpc_call +from .util import run_command @click.group(name="bitcoin") @@ -17,51 +17,51 @@ def rpc(node, method, params, network): """ Call bitcoin-cli [params] on in [network] """ - print( - rpc_call( - "tank_bcli", {"network": network, "node": node, "method": method, "params": params} - ) - ) + if params: + cmd = f"kubectl exec warnet-node-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, 
params))}" + else: + cmd = f"kubectl exec warnet-node-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" + run_command(cmd) -@bitcoin.command() -@click.argument("node", type=int, required=True) -@click.option("--network", default="warnet", show_default=True) -def debug_log(node, network): - """ - Fetch the Bitcoin Core debug log from in [network] - """ - print(rpc_call("tank_debug_log", {"node": node, "network": network})) - - -@bitcoin.command() -@click.argument("node_a", type=int, required=True) -@click.argument("node_b", type=int, required=True) -@click.option("--network", default="warnet", show_default=True) -def messages(node_a, node_b, network): - """ - Fetch messages sent between and in [network] - """ - print(rpc_call("tank_messages", {"network": network, "node_a": node_a, "node_b": node_b})) - - -@bitcoin.command() -@click.argument("pattern", type=str, required=True) -@click.option("--show-k8s-timestamps", is_flag=True, default=False, show_default=True) -@click.option("--no-sort", is_flag=True, default=False, show_default=True) -@click.option("--network", default="warnet", show_default=True) -def grep_logs(pattern, network, show_k8s_timestamps, no_sort): - """ - Grep combined logs via fluentd using regex - """ - print( - rpc_call( - "logs_grep", - { - "network": network, - "pattern": pattern, - "k8s_timestamps": show_k8s_timestamps, - "no_sort": no_sort, - }, - ) - ) +# @bitcoin.command() +# @click.argument("node", type=int, required=True) +# @click.option("--network", default="warnet", show_default=True) +# def debug_log(node, network): +# """ +# Fetch the Bitcoin Core debug log from in [network] +# """ +# print(rpc_call("tank_debug_log", {"node": node, "network": network})) +# +# +# @bitcoin.command() +# @click.argument("node_a", type=int, required=True) +# @click.argument("node_b", type=int, required=True) +# @click.option("--network", default="warnet", show_default=True) +# def messages(node_a, node_b, network): +# """ +# 
Fetch messages sent between and in [network] +# """ +# print(rpc_call("tank_messages", {"network": network, "node_a": node_a, "node_b": node_b})) +# +# +# @bitcoin.command() +# @click.argument("pattern", type=str, required=True) +# @click.option("--show-k8s-timestamps", is_flag=True, default=False, show_default=True) +# @click.option("--no-sort", is_flag=True, default=False, show_default=True) +# @click.option("--network", default="warnet", show_default=True) +# def grep_logs(pattern, network, show_k8s_timestamps, no_sort): +# """ +# Grep combined logs via fluentd using regex +# """ +# print( +# rpc_call( +# "logs_grep", +# { +# "network": network, +# "pattern": pattern, +# "k8s_timestamps": show_k8s_timestamps, +# "no_sort": no_sort, +# }, +# ) +# ) diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index a1dda555c..95197bd29 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -5,7 +5,7 @@ import click from rich import print as richprint -# from .bitcoin import bitcoin +from .bitcoin import bitcoin from .graph import graph from .image import image @@ -21,7 +21,7 @@ def cli(): pass -# cli.add_command(bitcoin) +cli.add_command(bitcoin) cli.add_command(graph) cli.add_command(image) # cli.add_command(ln) From 1736635a045214c5d45f6032a77eaa613c469d14 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 20 Aug 2024 21:57:31 +0200 Subject: [PATCH 014/710] scenarios working --- resources/images/commander/Dockerfile | 11 + .../images/commander/src}/__init__.py | 0 resources/images/commander/src/commander.py | 392 ++++++++++++++++++ .../commander/src/test_framework/__init__.py | 0 .../commander/src}/test_framework/address.py | 0 .../src}/test_framework/authproxy.py | 0 .../commander/src}/test_framework/bdb.py | 0 .../test_framework/bip340_test_vectors.csv | 0 .../src}/test_framework/blockfilter.py | 0 .../src}/test_framework/blocktools.py | 0 .../commander/src}/test_framework/coverage.py | 0 .../src}/test_framework/descriptors.py | 0 
.../commander/src}/test_framework/ellswift.py | 0 .../ellswift_decode_test_vectors.csv | 0 .../commander/src}/test_framework/key.py | 0 .../commander/src}/test_framework/messages.py | 0 .../commander/src}/test_framework/muhash.py | 0 .../commander/src}/test_framework/netutil.py | 0 .../commander/src}/test_framework/p2p.py | 0 .../commander/src}/test_framework/psbt.py | 0 .../src}/test_framework/ripemd160.py | 0 .../commander/src}/test_framework/script.py | 0 .../src}/test_framework/script_util.py | 0 .../src}/test_framework/secp256k1.py | 0 .../src}/test_framework/segwit_addr.py | 0 .../commander/src}/test_framework/siphash.py | 0 .../commander/src}/test_framework/socks5.py | 0 .../src}/test_framework/test_framework.py | 0 .../src}/test_framework/test_node.py | 0 .../src}/test_framework/test_shell.py | 0 .../commander/src}/test_framework/util.py | 0 .../commander/src}/test_framework/wallet.py | 0 .../src}/test_framework/wallet_util.py | 0 .../xswiftec_inv_test_vectors.csv | 0 src/warnet/cli/k8s.py | 27 +- src/warnet/cli/network.py | 11 +- src/warnet/cli/scenarios.py | 97 ++++- src/warnet/scenarios/miner_std.py | 11 +- src/warnet/scenarios/utils.py | 5 - 39 files changed, 525 insertions(+), 29 deletions(-) create mode 100644 resources/images/commander/Dockerfile rename {src/test_framework => resources/images/commander/src}/__init__.py (100%) create mode 100644 resources/images/commander/src/commander.py create mode 100644 resources/images/commander/src/test_framework/__init__.py rename {src => resources/images/commander/src}/test_framework/address.py (100%) rename {src => resources/images/commander/src}/test_framework/authproxy.py (100%) rename {src => resources/images/commander/src}/test_framework/bdb.py (100%) rename {src => resources/images/commander/src}/test_framework/bip340_test_vectors.csv (100%) rename {src => resources/images/commander/src}/test_framework/blockfilter.py (100%) rename {src => resources/images/commander/src}/test_framework/blocktools.py 
(100%) rename {src => resources/images/commander/src}/test_framework/coverage.py (100%) rename {src => resources/images/commander/src}/test_framework/descriptors.py (100%) rename {src => resources/images/commander/src}/test_framework/ellswift.py (100%) rename {src => resources/images/commander/src}/test_framework/ellswift_decode_test_vectors.csv (100%) rename {src => resources/images/commander/src}/test_framework/key.py (100%) rename {src => resources/images/commander/src}/test_framework/messages.py (100%) rename {src => resources/images/commander/src}/test_framework/muhash.py (100%) rename {src => resources/images/commander/src}/test_framework/netutil.py (100%) rename {src => resources/images/commander/src}/test_framework/p2p.py (100%) rename {src => resources/images/commander/src}/test_framework/psbt.py (100%) rename {src => resources/images/commander/src}/test_framework/ripemd160.py (100%) rename {src => resources/images/commander/src}/test_framework/script.py (100%) rename {src => resources/images/commander/src}/test_framework/script_util.py (100%) rename {src => resources/images/commander/src}/test_framework/secp256k1.py (100%) rename {src => resources/images/commander/src}/test_framework/segwit_addr.py (100%) rename {src => resources/images/commander/src}/test_framework/siphash.py (100%) rename {src => resources/images/commander/src}/test_framework/socks5.py (100%) rename {src => resources/images/commander/src}/test_framework/test_framework.py (100%) rename {src => resources/images/commander/src}/test_framework/test_node.py (100%) rename {src => resources/images/commander/src}/test_framework/test_shell.py (100%) rename {src => resources/images/commander/src}/test_framework/util.py (100%) rename {src => resources/images/commander/src}/test_framework/wallet.py (100%) rename {src => resources/images/commander/src}/test_framework/wallet_util.py (100%) rename {src => resources/images/commander/src}/test_framework/xswiftec_inv_test_vectors.csv (100%) delete mode 
100644 src/warnet/scenarios/utils.py diff --git a/resources/images/commander/Dockerfile b/resources/images/commander/Dockerfile new file mode 100644 index 000000000..243f88a5e --- /dev/null +++ b/resources/images/commander/Dockerfile @@ -0,0 +1,11 @@ +# Use an official Python runtime as the base image +FROM python:3.12-slim + +# Python dependencies +#RUN pip install --no-cache-dir prometheus_client + +# Prometheus exporter script for bitcoind +COPY src / + +# -u: force the stdout and stderr streams to be unbuffered +CMD ["python", "-u", "/scenario.py"] diff --git a/src/test_framework/__init__.py b/resources/images/commander/src/__init__.py similarity index 100% rename from src/test_framework/__init__.py rename to resources/images/commander/src/__init__.py diff --git a/resources/images/commander/src/commander.py b/resources/images/commander/src/commander.py new file mode 100644 index 000000000..dfdb75a31 --- /dev/null +++ b/resources/images/commander/src/commander.py @@ -0,0 +1,392 @@ +import argparse +import configparser +import ipaddress +import json +import logging +import os +import pathlib +import random +import signal +import sys +import tempfile + +from pathlib import Path +from test_framework.authproxy import AuthServiceProxy +from test_framework.p2p import NetworkThread +from test_framework.test_framework import ( + TMPDIR_PREFIX, + BitcoinTestFramework, + TestStatus, +) +from test_framework.test_node import TestNode +from test_framework.util import PortSeed, get_rpc_proxy + +WARNET_FILE = Path(os.path.dirname(__file__)) / "warnet.json" +with open(WARNET_FILE, "r") as file: + WARNET = json.load(file) + +# Ensure that all RPC calls are made with brand new http connections +def auth_proxy_request(self, method, path, postdata): + self._set_conn() # creates new http client connection + return self.oldrequest(method, path, postdata) +AuthServiceProxy.oldrequest = AuthServiceProxy._request +AuthServiceProxy._request = auth_proxy_request + + +class 
Commander(BitcoinTestFramework): + # required by subclasses of BitcoinTestFramework + def set_test_params(self): + pass + + def run_test(self): + pass + + # Utility functions for Warnet scenarios + @staticmethod + def ensure_miner(node): + wallets = node.listwallets() + if "miner" not in wallets: + node.createwallet("miner", descriptors=True) + return node.get_wallet_rpc("miner") + + def network_connected(self): + for tank in self.nodes: + peerinfo = tank.getpeerinfo() + manuals = 0 + for peer in peerinfo: + if peer["connection_type"] == "manual": + manuals += 1 + # Even if more edges are specifed, bitcoind only allows + # 8 manual outbound connections + if min(8, len(tank.init_peers)) > manuals: + return False + return True + + def handle_sigterm(self, signum, frame): + print("SIGTERM received, stopping...") + self.shutdown() + sys.exit(0) + + # The following functions are chopped-up hacks of + # the original methods from BitcoinTestFramework + + def setup(self): + signal.signal(signal.SIGTERM, self.handle_sigterm) + + # hacked from _start_logging() + # Scenarios will log plain messages to stdout only, which will can redirected by warnet + self.log = logging.getLogger(self.__class__.__name__) + self.log.setLevel(logging.INFO) # set this to DEBUG to see ALL RPC CALLS + + # Because scenarios run in their own subprocess, the logger here + # is not the same as the warnet server or other global loggers. + # Scenarios log directly to stdout which gets picked up by the + # subprocess manager in the server, and reprinted to the global log. 
+ ch = logging.StreamHandler(sys.stdout) + formatter = logging.Formatter(fmt="%(name)-8s %(message)s") + ch.setFormatter(formatter) + self.log.addHandler(ch) + + for i, tank in enumerate(WARNET): + self.log.info(f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}") + node = TestNode( + i, + pathlib.Path(), # datadir path + chain=tank['chain'], + rpchost=tank['rpc_host'], + timewait=60, + timeout_factor=self.options.timeout_factor, + bitcoind=None, + bitcoin_cli=None, + cwd=self.options.tmpdir, + coverage_dir=self.options.coveragedir, + ) + node.rpc = get_rpc_proxy( + f"http://{tank['rpc_user']}:{tank['rpc_password']}@{tank['rpc_host']}:{tank['rpc_port']}", + i, + timeout=60, + coveragedir=self.options.coveragedir, + ) + node.rpc_connected = True + node.init_peers = tank['init_peers'] + self.nodes.append(node) + + self.num_nodes = len(self.nodes) + + # Set up temp directory and start logging + if self.options.tmpdir: + self.options.tmpdir = os.path.abspath(self.options.tmpdir) + os.makedirs(self.options.tmpdir, exist_ok=False) + else: + self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) + + seed = self.options.randomseed + if seed is None: + seed = random.randrange(sys.maxsize) + else: + self.log.info(f"User supplied random seed {seed}") + random.seed(seed) + self.log.info(f"PRNG seed is: {seed}") + + self.log.debug("Setting up network thread") + self.network_thread = NetworkThread() + self.network_thread.start() + + self.success = TestStatus.PASSED + + def parse_args(self): + previous_releases_path = "" + parser = argparse.ArgumentParser(usage="%(prog)s [options]") + parser.add_argument( + "--nocleanup", + dest="nocleanup", + default=False, + action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error", + ) + parser.add_argument( + "--nosandbox", + dest="nosandbox", + default=False, + action="store_true", + help="Don't use the syscall sandbox", + ) + parser.add_argument( + "--noshutdown", + dest="noshutdown", + 
default=False, + action="store_true", + help="Don't stop bitcoinds after the test execution", + ) + parser.add_argument( + "--cachedir", + dest="cachedir", + default=None, + help="Directory for caching pregenerated datadirs (default: %(default)s)", + ) + parser.add_argument( + "--tmpdir", dest="tmpdir", default=None, help="Root directory for datadirs" + ) + parser.add_argument( + "-l", + "--loglevel", + dest="loglevel", + default="DEBUG", + help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.", + ) + parser.add_argument( + "--tracerpc", + dest="trace_rpc", + default=False, + action="store_true", + help="Print out all RPC calls as they are made", + ) + parser.add_argument( + "--portseed", + dest="port_seed", + default=0, + help="The seed to use for assigning port numbers (default: current process id)", + ) + parser.add_argument( + "--previous-releases", + dest="prev_releases", + default=None, + action="store_true", + help="Force test of previous releases (default: %(default)s)", + ) + parser.add_argument( + "--coveragedir", + dest="coveragedir", + default=None, + help="Write tested RPC commands into this directory", + ) + parser.add_argument( + "--configfile", + dest="configfile", + default=None, + help="Location of the test framework config file (default: %(default)s)", + ) + parser.add_argument( + "--pdbonfailure", + dest="pdbonfailure", + default=False, + action="store_true", + help="Attach a python debugger if test fails", + ) + parser.add_argument( + "--usecli", + dest="usecli", + default=False, + action="store_true", + help="use bitcoin-cli instead of RPC for all commands", + ) + parser.add_argument( + "--perf", + dest="perf", + default=False, + action="store_true", + help="profile running nodes with perf for the duration of the 
test", + ) + parser.add_argument( + "--valgrind", + dest="valgrind", + default=False, + action="store_true", + help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required.", + ) + parser.add_argument( + "--randomseed", + default=0x7761726E6574, # "warnet" ascii + help="set a random seed for deterministically reproducing a previous test run", + ) + parser.add_argument( + "--timeout-factor", + dest="timeout_factor", + default=1, + help="adjust test timeouts by a factor. Setting it to 0 disables all timeouts", + ) + parser.add_argument( + "--network", + dest="network", + default="warnet", + help="Designate which warnet this should run on (default: warnet)", + ) + parser.add_argument( + "--v2transport", + dest="v2transport", + default=False, + action="store_true", + help="use BIP324 v2 connections between all nodes by default", + ) + + self.add_options(parser) + # Running TestShell in a Jupyter notebook causes an additional -f argument + # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument + # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168 + parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1") + self.options = parser.parse_args() + if self.options.timeout_factor == 0: + self.options.timeout_factor = 99999 + self.options.timeout_factor = self.options.timeout_factor or ( + 4 if self.options.valgrind else 1 + ) + self.options.previous_releases_path = previous_releases_path + config = configparser.ConfigParser() + if self.options.configfile is not None: + with open(self.options.configfile) as f: + config.read_file(f) + + config["environment"] = {"PACKAGE_BUGREPORT": ""} + + self.config = config + + if "descriptors" not in self.options: + # Wallet is not required by the test at all and the value of 
self.options.descriptors won't matter. + # It still needs to exist and be None in order for tests to work however. + # So set it to None to force -disablewallet, because the wallet is not needed. + self.options.descriptors = None + elif self.options.descriptors is None: + # Some wallet is either required or optionally used by the test. + # Prefer SQLite unless it isn't available + if self.is_sqlite_compiled(): + self.options.descriptors = True + elif self.is_bdb_compiled(): + self.options.descriptors = False + else: + # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter + # It still needs to exist and be None in order for tests to work however. + # So set it to None, which will also set -disablewallet. + self.options.descriptors = None + + PortSeed.n = self.options.port_seed + + def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool = True): + """ + Kwargs: + wait_for_connect: if True, block until the nodes are verified as connected. You might + want to disable this when using -stopatheight with one of the connected nodes, + since there will be a race between the actual connection and performing + the assertions before one node shuts down. 
+ """ + from_connection = self.nodes[a] + to_connection = self.nodes[b] + + to_ip_port = self.warnet.tanks[b].get_dns_addr() + from_ip_port = self.warnet.tanks[a].get_ip_addr() + + if peer_advertises_v2 is None: + peer_advertises_v2 = self.options.v2transport + + if peer_advertises_v2: + from_connection.addnode(node=to_ip_port, command="onetry", v2transport=True) + else: + # skip the optional third argument (default false) for + # compatibility with older clients + from_connection.addnode(to_ip_port, "onetry") + + if not wait_for_connect: + return + + def get_peer_ip(peer): + try: # we encounter a regular ip address + ip_addr = str(ipaddress.ip_address(peer["addr"].split(":")[0])) + return ip_addr + except ValueError as err: # or we encounter a service name + try: + # NETWORK-tank-TANK_INDEX-service + # NETWORK-test-TEST-tank-TANK_INDEX-service + tank_index = int(peer["addr"].split("-")[-2]) + except (ValueError, IndexError) as inner_err: + raise ValueError( + "could not derive tank index from service name: {} {}".format( + peer["addr"], inner_err + ) + ) from err + + ip_addr = self.warnet.tanks[tank_index].get_ip_addr() + return ip_addr + + # poll until version handshake complete to avoid race conditions + # with transaction relaying + # See comments in net_processing: + # * Must have a version message before anything else + # * Must have a verack message before anything else + self.wait_until( + lambda: any( + peer["addr"] == to_ip_port and peer["version"] != 0 + for peer in from_connection.getpeerinfo() + ) + ) + self.wait_until( + lambda: any( + get_peer_ip(peer) == from_ip_port and peer["version"] != 0 + for peer in to_connection.getpeerinfo() + ) + ) + self.wait_until( + lambda: any( + peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 + for peer in from_connection.getpeerinfo() + ) + ) + self.wait_until( + lambda: any( + get_peer_ip(peer) == from_ip_port + and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 + for peer in 
to_connection.getpeerinfo() + ) + ) + # The message bytes are counted before processing the message, so make + # sure it was fully processed by waiting for a ping. + self.wait_until( + lambda: any( + peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 + for peer in from_connection.getpeerinfo() + ) + ) + self.wait_until( + lambda: any( + get_peer_ip(peer) == from_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 + for peer in to_connection.getpeerinfo() + ) + ) diff --git a/resources/images/commander/src/test_framework/__init__.py b/resources/images/commander/src/test_framework/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/test_framework/address.py b/resources/images/commander/src/test_framework/address.py similarity index 100% rename from src/test_framework/address.py rename to resources/images/commander/src/test_framework/address.py diff --git a/src/test_framework/authproxy.py b/resources/images/commander/src/test_framework/authproxy.py similarity index 100% rename from src/test_framework/authproxy.py rename to resources/images/commander/src/test_framework/authproxy.py diff --git a/src/test_framework/bdb.py b/resources/images/commander/src/test_framework/bdb.py similarity index 100% rename from src/test_framework/bdb.py rename to resources/images/commander/src/test_framework/bdb.py diff --git a/src/test_framework/bip340_test_vectors.csv b/resources/images/commander/src/test_framework/bip340_test_vectors.csv similarity index 100% rename from src/test_framework/bip340_test_vectors.csv rename to resources/images/commander/src/test_framework/bip340_test_vectors.csv diff --git a/src/test_framework/blockfilter.py b/resources/images/commander/src/test_framework/blockfilter.py similarity index 100% rename from src/test_framework/blockfilter.py rename to resources/images/commander/src/test_framework/blockfilter.py diff --git a/src/test_framework/blocktools.py 
b/resources/images/commander/src/test_framework/blocktools.py similarity index 100% rename from src/test_framework/blocktools.py rename to resources/images/commander/src/test_framework/blocktools.py diff --git a/src/test_framework/coverage.py b/resources/images/commander/src/test_framework/coverage.py similarity index 100% rename from src/test_framework/coverage.py rename to resources/images/commander/src/test_framework/coverage.py diff --git a/src/test_framework/descriptors.py b/resources/images/commander/src/test_framework/descriptors.py similarity index 100% rename from src/test_framework/descriptors.py rename to resources/images/commander/src/test_framework/descriptors.py diff --git a/src/test_framework/ellswift.py b/resources/images/commander/src/test_framework/ellswift.py similarity index 100% rename from src/test_framework/ellswift.py rename to resources/images/commander/src/test_framework/ellswift.py diff --git a/src/test_framework/ellswift_decode_test_vectors.csv b/resources/images/commander/src/test_framework/ellswift_decode_test_vectors.csv similarity index 100% rename from src/test_framework/ellswift_decode_test_vectors.csv rename to resources/images/commander/src/test_framework/ellswift_decode_test_vectors.csv diff --git a/src/test_framework/key.py b/resources/images/commander/src/test_framework/key.py similarity index 100% rename from src/test_framework/key.py rename to resources/images/commander/src/test_framework/key.py diff --git a/src/test_framework/messages.py b/resources/images/commander/src/test_framework/messages.py similarity index 100% rename from src/test_framework/messages.py rename to resources/images/commander/src/test_framework/messages.py diff --git a/src/test_framework/muhash.py b/resources/images/commander/src/test_framework/muhash.py similarity index 100% rename from src/test_framework/muhash.py rename to resources/images/commander/src/test_framework/muhash.py diff --git a/src/test_framework/netutil.py 
b/resources/images/commander/src/test_framework/netutil.py similarity index 100% rename from src/test_framework/netutil.py rename to resources/images/commander/src/test_framework/netutil.py diff --git a/src/test_framework/p2p.py b/resources/images/commander/src/test_framework/p2p.py similarity index 100% rename from src/test_framework/p2p.py rename to resources/images/commander/src/test_framework/p2p.py diff --git a/src/test_framework/psbt.py b/resources/images/commander/src/test_framework/psbt.py similarity index 100% rename from src/test_framework/psbt.py rename to resources/images/commander/src/test_framework/psbt.py diff --git a/src/test_framework/ripemd160.py b/resources/images/commander/src/test_framework/ripemd160.py similarity index 100% rename from src/test_framework/ripemd160.py rename to resources/images/commander/src/test_framework/ripemd160.py diff --git a/src/test_framework/script.py b/resources/images/commander/src/test_framework/script.py similarity index 100% rename from src/test_framework/script.py rename to resources/images/commander/src/test_framework/script.py diff --git a/src/test_framework/script_util.py b/resources/images/commander/src/test_framework/script_util.py similarity index 100% rename from src/test_framework/script_util.py rename to resources/images/commander/src/test_framework/script_util.py diff --git a/src/test_framework/secp256k1.py b/resources/images/commander/src/test_framework/secp256k1.py similarity index 100% rename from src/test_framework/secp256k1.py rename to resources/images/commander/src/test_framework/secp256k1.py diff --git a/src/test_framework/segwit_addr.py b/resources/images/commander/src/test_framework/segwit_addr.py similarity index 100% rename from src/test_framework/segwit_addr.py rename to resources/images/commander/src/test_framework/segwit_addr.py diff --git a/src/test_framework/siphash.py b/resources/images/commander/src/test_framework/siphash.py similarity index 100% rename from 
src/test_framework/siphash.py rename to resources/images/commander/src/test_framework/siphash.py diff --git a/src/test_framework/socks5.py b/resources/images/commander/src/test_framework/socks5.py similarity index 100% rename from src/test_framework/socks5.py rename to resources/images/commander/src/test_framework/socks5.py diff --git a/src/test_framework/test_framework.py b/resources/images/commander/src/test_framework/test_framework.py similarity index 100% rename from src/test_framework/test_framework.py rename to resources/images/commander/src/test_framework/test_framework.py diff --git a/src/test_framework/test_node.py b/resources/images/commander/src/test_framework/test_node.py similarity index 100% rename from src/test_framework/test_node.py rename to resources/images/commander/src/test_framework/test_node.py diff --git a/src/test_framework/test_shell.py b/resources/images/commander/src/test_framework/test_shell.py similarity index 100% rename from src/test_framework/test_shell.py rename to resources/images/commander/src/test_framework/test_shell.py diff --git a/src/test_framework/util.py b/resources/images/commander/src/test_framework/util.py similarity index 100% rename from src/test_framework/util.py rename to resources/images/commander/src/test_framework/util.py diff --git a/src/test_framework/wallet.py b/resources/images/commander/src/test_framework/wallet.py similarity index 100% rename from src/test_framework/wallet.py rename to resources/images/commander/src/test_framework/wallet.py diff --git a/src/test_framework/wallet_util.py b/resources/images/commander/src/test_framework/wallet_util.py similarity index 100% rename from src/test_framework/wallet_util.py rename to resources/images/commander/src/test_framework/wallet_util.py diff --git a/src/test_framework/xswiftec_inv_test_vectors.csv b/resources/images/commander/src/test_framework/xswiftec_inv_test_vectors.csv similarity index 100% rename from src/test_framework/xswiftec_inv_test_vectors.csv 
rename to resources/images/commander/src/test_framework/xswiftec_inv_test_vectors.csv diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index a8c7d2da6..ba45c1762 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -21,6 +21,23 @@ def get_pods(): sclient = get_static_client() return sclient.list_namespaced_pod("warnet") +def get_tanks(): + pods = get_pods() + tanks = [] + # TODO: filter tanks only!!!! + for pod in pods.items: + if "rank" in pod.metadata.labels and pod.metadata.labels["rank"] == "tank": + tanks.append({ + "tank": pod.metadata.name, + "chain": "regtest", + "rpc_host": pod.status.pod_ip, + "rpc_port": 18443, + "rpc_user": "user", + "rpc_password": "password", + "init_peers": [] + }) + return tanks + def run_command(command, stream_output=False, env=None): # Merge the current environment with the provided env full_env = os.environ.copy() @@ -61,6 +78,10 @@ def run_command(command, stream_output=False, env=None): return True +def create_namespace() -> dict: + return {"apiVersion": "v1", "kind": "Namespace", "metadata": {"name": "warnet"}} + + def set_kubectl_context(namespace: str): """ Set the default kubectl context to the specified namespace. 
@@ -80,10 +101,10 @@ def deploy_base_configurations(): "rbac-config.yaml", ] - for config in base_configs: - command = f"kubectl apply -f {WAR_MANIFESTS}/{config}" + for bconfig in base_configs: + command = f"kubectl apply -f {WAR_MANIFESTS}/{bconfig}" if not run_command(command, stream_output=True): - print(f"Failed to apply {config}") + print(f"Failed to apply {bconfig}") return False return True diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 6c521029a..f080d1535 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -13,7 +13,8 @@ set_kubectl_context, deploy_base_configurations, apply_kubernetes_yaml, - delete_namespace + delete_namespace, + create_namespace ) DEFAULT_GRAPH_FILE = files("graphs").joinpath("default.graphml") @@ -140,10 +141,6 @@ def generate_kubernetes_yaml(graph: nx.Graph) -> list: return kubernetes_objects -def create_namespace() -> dict: - return {"apiVersion": "v1", "kind": "Namespace", "metadata": {"name": "warnet"}} - - def create_node_deployment(node: int, data: dict) -> dict: image = data.get("image", "bitcoindevproject/bitcoin:27.0") version = data.get("version", "27.0") @@ -152,9 +149,9 @@ def create_node_deployment(node: int, data: dict) -> dict: "apiVersion": "v1", "kind": "Pod", "metadata": { - "name": f"warnet-node-{node}", + "name": f"warnet-tank-{node}", "namespace": "warnet", - "labels": {"app": "warnet", "node": str(node)}, + "labels": {"rank": "tank", "index": str(node)}, }, "spec": { "containers": [ diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index fb40be6e7..255a67f89 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -1,15 +1,23 @@ import base64 +import importlib +import json import os import sys - +import time +import tempfile +import yaml import click from rich import print from rich.console import Console from rich.table import Table +from .k8s import ( + get_tanks, + create_namespace, + apply_kubernetes_yaml +) from .rpc 
import rpc_call - @click.group(name="scenarios") def scenarios(): """Manage scenarios on a running network""" @@ -44,12 +52,85 @@ def run(scenario, network, additional_args): """ Run from the Warnet Test Framework on [network] with optional arguments """ - params = { - "scenario": scenario, - "additional_args": additional_args, - "network": network, - } - print(rpc_call("scenarios_run", params)) + + # Use importlib.resources to get the scenario path + scenario_package = "warnet.scenarios" + scenario_filename = f"{scenario}.py" + + # Ensure the scenario file exists within the package + with importlib.resources.path(scenario_package, scenario_filename) as scenario_path: + scenario_path = str(scenario_path) # Convert Path object to string + + if not os.path.exists(scenario_path): + raise Exception(f"Scenario {scenario} not found at {scenario_path}.") + + with open(scenario_path, "r") as file: + scenario_text = file.read() + + name = f"commander-{scenario.replace('_', '')}-{int(time.time())}" + + tanks = get_tanks() + kubernetes_objects = [create_namespace()] + kubernetes_objects.extend( + [ + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "warnetjson", + "namespace": "warnet", + }, + "data": {"warnet.json": json.dumps(tanks)}, + }, + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "scnaeriopy", + "namespace": "warnet", + }, + "data": {"scenario.py": scenario_text}, + }, + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": name, + "namespace": "warnet", + "labels": {"app": "warnet"}, + }, + "spec": { + "containers": [ + { + "name": name, + "image": "warnet-commander:latest", + "imagePullPolicy": "Never", + "volumeMounts": [ + { + "name": "warnetjson", + "mountPath": "warnet.json", + "subPath": "warnet.json", + }, + { + "name": "scnaeriopy", + "mountPath": "scenario.py", + "subPath": "scenario.py", + } + ], + } + ], + "volumes": [ + {"name": "warnetjson", "configMap": {"name": "warnetjson"}}, + {"name": 
"scnaeriopy", "configMap": {"name": "scnaeriopy"}} + ], + } + } + ] + ) + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: + yaml.dump_all(kubernetes_objects, temp_file) + temp_file_path = temp_file.name + apply_kubernetes_yaml(temp_file_path) @scenarios.command(context_settings={"ignore_unknown_options": True}) diff --git a/src/warnet/scenarios/miner_std.py b/src/warnet/scenarios/miner_std.py index 063e07f5e..73e91a112 100755 --- a/src/warnet/scenarios/miner_std.py +++ b/src/warnet/scenarios/miner_std.py @@ -2,23 +2,22 @@ from time import sleep -from warnet.scenarios.utils import ensure_miner -from warnet.test_framework_bridge import WarnetTestFramework +# The base class exists inside the commander container +from commander import Commander def cli_help(): return "Generate blocks over time. Options: [--allnodes | --interval= | --mature ]" - class Miner: def __init__(self, node, mature): self.node = node - self.wallet = ensure_miner(self.node) + self.wallet = Commander.ensure_miner(self.node) self.addr = self.wallet.getnewaddress() self.mature = mature -class MinerStd(WarnetTestFramework): +class MinerStd(Commander): def set_test_params(self): # This is just a minimum self.num_nodes = 0 @@ -46,7 +45,7 @@ def add_options(self, parser): ) def run_test(self): - while not self.warnet.network_connected(): + while not self.network_connected(): self.log.info("Waiting for complete network connection...") sleep(5) self.log.info("Network connected. 
Starting miners.") diff --git a/src/warnet/scenarios/utils.py b/src/warnet/scenarios/utils.py deleted file mode 100644 index b0204d461..000000000 --- a/src/warnet/scenarios/utils.py +++ /dev/null @@ -1,5 +0,0 @@ -def ensure_miner(node): - wallets = node.listwallets() - if "miner" not in wallets: - node.createwallet("miner", descriptors=True) - return node.get_wallet_rpc("miner") From 985218d2769d3c4992f81f3702fee108ca491ccc Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 20:15:39 +0200 Subject: [PATCH 015/710] cli: implement 'bitcoin debug_log' using kubectl --- src/warnet/cli/bitcoin.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index 39a98b4fa..e9ad5ad1e 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -24,16 +24,17 @@ def rpc(node, method, params, network): run_command(cmd) -# @bitcoin.command() -# @click.argument("node", type=int, required=True) -# @click.option("--network", default="warnet", show_default=True) -# def debug_log(node, network): -# """ -# Fetch the Bitcoin Core debug log from in [network] -# """ -# print(rpc_call("tank_debug_log", {"node": node, "network": network})) -# -# +@bitcoin.command() +@click.argument("node", type=int, required=True) +@click.option("--network", default="warnet", show_default=True) +def debug_log(node, network): + """ + Fetch the Bitcoin Core debug log from in [network] + """ + cmd = f"kubectl logs warnet-node-{node}" + run_command(cmd) + + # @bitcoin.command() # @click.argument("node_a", type=int, required=True) # @click.argument("node_b", type=int, required=True) From 5eb7870c9fe16abf64812d687b5ea1e20a7acbd5 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 22:18:22 +0200 Subject: [PATCH 016/710] k8s: add a 'get_all_tanks' command --- src/warnet/cli/k8s.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/warnet/cli/k8s.py 
b/src/warnet/cli/k8s.py index ba45c1762..4649885c0 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -1,22 +1,23 @@ import os import subprocess - from importlib.resources import files from kubernetes import client, config from kubernetes.dynamic import DynamicClient - WAR_MANIFESTS = files("manifests") + def get_static_client(): config.load_kube_config() return client.CoreV1Api() + def get_dynamic_client(): config.load_kube_config() return DynamicClient(client.ApiClient()) + def get_pods(): sclient = get_static_client() return sclient.list_namespaced_pod("warnet") From 300a8aa3ea136d962d241948862c4ae8a16d862a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 22:19:11 +0200 Subject: [PATCH 017/710] cli: add grep_logs function --- src/warnet/cli/bitcoin.py | 94 ++++++++++++++++++++++++++++++--------- src/warnet/cli/util.py | 25 ++++++----- 2 files changed, 89 insertions(+), 30 deletions(-) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index e9ad5ad1e..32d5b4550 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -1,3 +1,6 @@ +import json +import re + import click from .util import run_command @@ -46,23 +49,74 @@ def debug_log(node, network): # print(rpc_call("tank_messages", {"network": network, "node_a": node_a, "node_b": node_b})) # # -# @bitcoin.command() -# @click.argument("pattern", type=str, required=True) -# @click.option("--show-k8s-timestamps", is_flag=True, default=False, show_default=True) -# @click.option("--no-sort", is_flag=True, default=False, show_default=True) -# @click.option("--network", default="warnet", show_default=True) -# def grep_logs(pattern, network, show_k8s_timestamps, no_sort): -# """ -# Grep combined logs via fluentd using regex -# """ -# print( -# rpc_call( -# "logs_grep", -# { -# "network": network, -# "pattern": pattern, -# "k8s_timestamps": show_k8s_timestamps, -# "no_sort": no_sort, -# }, -# ) -# ) + +@bitcoin.command() +@click.argument("pattern", type=str, 
required=True) +@click.option("--show-k8s-timestamps", is_flag=True, default=False, show_default=True) +@click.option("--no-sort", is_flag=True, default=False, show_default=True) +@click.option("--network", default="warnet", show_default=True) +def grep_logs(pattern, network, show_k8s_timestamps, no_sort): + """ + Grep combined bitcoind logs using regex + """ + + # Get all pods in the namespace + command = f"kubectl get pods -n {network} -o json" + pods_json = run_command(command, return_output=True) + + if pods_json is False: + print("Error: Failed to get pods information") + return + + try: + pods = json.loads(pods_json) + except json.JSONDecodeError as e: + print(f"Error decoding JSON: {e}") + return + + matching_logs = [] + + for pod in pods.get("items", []): + pod_name = pod.get("metadata", {}).get("name", "") + if "warnet" in pod_name: + # Get container names for this pod + containers = pod.get("spec", {}).get("containers", []) + if not containers: + continue + + # Use the first container name + container_name = containers[0].get("name", "") + if not container_name: + continue + + # Get logs from the specific container + command = f"kubectl logs {pod_name} -c {container_name} -n {network} --timestamps" + logs = run_command(command, return_output=True) + + if logs is not False: + # Process logs + for log_entry in logs.splitlines(): + if re.search(pattern, log_entry): + matching_logs.append((log_entry, pod_name)) + + # Sort logs if needed + if not no_sort: + matching_logs.sort(key=lambda x: x[0]) + + # Print matching logs + for log_entry, pod_name in matching_logs: + try: + # Split the log entry into Kubernetes timestamp, Bitcoin timestamp, and the rest of the log + k8s_timestamp, rest = log_entry.split(' ', 1) + bitcoin_timestamp, log_message = rest.split(' ', 1) + + # Format the output based on the show_k8s_timestamps option + if show_k8s_timestamps: + print(f"{pod_name}: {k8s_timestamp} {bitcoin_timestamp} {log_message}") + else: + print(f"{pod_name}: 
{bitcoin_timestamp} {log_message}") + except ValueError: + # If we can't parse the timestamps, just print the original log entry + print(f"{pod_name}: {log_entry}") + + return matching_logs diff --git a/src/warnet/cli/util.py b/src/warnet/cli/util.py index 718ee6fb9..18ce7c004 100644 --- a/src/warnet/cli/util.py +++ b/src/warnet/cli/util.py @@ -19,7 +19,7 @@ SRC_DIR = files("warnet") -def run_command(command, stream_output=False, env=None): +def run_command(command, stream_output=False, env=None, return_output=False): # Merge the current environment with the provided env full_env = os.environ.copy() if env: @@ -37,26 +37,31 @@ def run_command(command, stream_output=False, env=None): universal_newlines=True, env=full_env, ) - + output = [] for line in iter(process.stdout.readline, ""): print(line, end="") - + output.append(line) process.stdout.close() return_code = process.wait() - if return_code != 0: print(f"Command failed with return code {return_code}") - return False - return True + return False if not return_output else "".join(output) + return True if not return_output else "".join(output) else: result = subprocess.run( - command, shell=True, capture_output=True, text=True, executable="/bin/bash" + command, + shell=True, + capture_output=True, + text=True, + executable="/bin/bash", + env=full_env, ) if result.returncode != 0: print(f"Error: {result.stderr}") - return False - print(result.stdout) - return True + return False if not return_output else result.stderr + if not return_output: + print(result.stdout) + return True if not return_output else result.stdout def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_version: bool): From a550da16b8820acd39eb35b0183a26c4134653e9 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 20 Aug 2024 22:36:04 +0200 Subject: [PATCH 018/710] ruff --- resources/images/commander/src/commander.py | 17 +++++++++------ ruff.toml | 2 +- src/warnet/cli/k8s.py | 23 ++++++++++++--------- 
src/warnet/cli/network.py | 8 +++---- src/warnet/cli/scenarios.py | 22 +++++++++----------- src/warnet/scenarios/ln_init.py | 18 ++++++++-------- src/warnet/scenarios/miner_std.py | 1 + src/warnet/scenarios/sens_relay.py | 12 +++++------ src/warnet/scenarios/tx_flood.py | 8 +++---- 9 files changed, 59 insertions(+), 52 deletions(-) diff --git a/resources/images/commander/src/commander.py b/resources/images/commander/src/commander.py index dfdb75a31..98c9cdc71 100644 --- a/resources/images/commander/src/commander.py +++ b/resources/images/commander/src/commander.py @@ -9,8 +9,8 @@ import signal import sys import tempfile - from pathlib import Path + from test_framework.authproxy import AuthServiceProxy from test_framework.p2p import NetworkThread from test_framework.test_framework import ( @@ -22,13 +22,16 @@ from test_framework.util import PortSeed, get_rpc_proxy WARNET_FILE = Path(os.path.dirname(__file__)) / "warnet.json" -with open(WARNET_FILE, "r") as file: +with open(WARNET_FILE) as file: WARNET = json.load(file) + # Ensure that all RPC calls are made with brand new http connections def auth_proxy_request(self, method, path, postdata): self._set_conn() # creates new http client connection return self.oldrequest(method, path, postdata) + + AuthServiceProxy.oldrequest = AuthServiceProxy._request AuthServiceProxy._request = auth_proxy_request @@ -88,12 +91,14 @@ def setup(self): self.log.addHandler(ch) for i, tank in enumerate(WARNET): - self.log.info(f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}") + self.log.info( + f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}" + ) node = TestNode( i, pathlib.Path(), # datadir path - chain=tank['chain'], - rpchost=tank['rpc_host'], + chain=tank["chain"], + rpchost=tank["rpc_host"], timewait=60, timeout_factor=self.options.timeout_factor, bitcoind=None, @@ -108,7 +113,7 @@ def setup(self): coveragedir=self.options.coveragedir, ) node.rpc_connected = True - node.init_peers = 
tank['init_peers'] + node.init_peers = tank["init_peers"] self.nodes.append(node) self.num_nodes = len(self.nodes) diff --git a/ruff.toml b/ruff.toml index b940c5694..1cef47129 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,5 +1,5 @@ extend-exclude = [ - "src/test_framework/*.py", + "resources/images/commander/src/test_framework", "resources/images/exporter/authproxy.py", ] line-length = 100 diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 4649885c0..33aa638c0 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -22,23 +22,27 @@ def get_pods(): sclient = get_static_client() return sclient.list_namespaced_pod("warnet") + def get_tanks(): pods = get_pods() tanks = [] # TODO: filter tanks only!!!! for pod in pods.items: if "rank" in pod.metadata.labels and pod.metadata.labels["rank"] == "tank": - tanks.append({ - "tank": pod.metadata.name, - "chain": "regtest", - "rpc_host": pod.status.pod_ip, - "rpc_port": 18443, - "rpc_user": "user", - "rpc_password": "password", - "init_peers": [] - }) + tanks.append( + { + "tank": pod.metadata.name, + "chain": "regtest", + "rpc_host": pod.status.pod_ip, + "rpc_port": 18443, + "rpc_user": "user", + "rpc_password": "password", + "init_peers": [], + } + ) return tanks + def run_command(command, stream_output=False, env=None): # Merge the current environment with the provided env full_env = os.environ.copy() @@ -118,4 +122,3 @@ def apply_kubernetes_yaml(yaml_file: str): def delete_namespace(namespace: str): command = f"kubectl delete namespace {namespace}" return run_command(command, stream_output=True) - diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index f080d1535..0143ab116 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -9,12 +9,12 @@ from rich import print from .k8s import ( - run_command, - set_kubectl_context, - deploy_base_configurations, apply_kubernetes_yaml, + create_namespace, delete_namespace, - create_namespace + deploy_base_configurations, + 
run_command, + set_kubectl_context, ) DEFAULT_GRAPH_FILE = files("graphs").joinpath("default.graphml") diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 255a67f89..be1e9f4d1 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -3,21 +3,19 @@ import json import os import sys -import time import tempfile -import yaml +import time + import click +import yaml from rich import print from rich.console import Console from rich.table import Table -from .k8s import ( - get_tanks, - create_namespace, - apply_kubernetes_yaml -) +from .k8s import apply_kubernetes_yaml, create_namespace, get_tanks from .rpc import rpc_call + @click.group(name="scenarios") def scenarios(): """Manage scenarios on a running network""" @@ -64,7 +62,7 @@ def run(scenario, network, additional_args): if not os.path.exists(scenario_path): raise Exception(f"Scenario {scenario} not found at {scenario_path}.") - with open(scenario_path, "r") as file: + with open(scenario_path) as file: scenario_text = file.read() name = f"commander-{scenario.replace('_', '')}-{int(time.time())}" @@ -115,16 +113,16 @@ def run(scenario, network, additional_args): "name": "scnaeriopy", "mountPath": "scenario.py", "subPath": "scenario.py", - } + }, ], } ], "volumes": [ {"name": "warnetjson", "configMap": {"name": "warnetjson"}}, - {"name": "scnaeriopy", "configMap": {"name": "scnaeriopy"}} + {"name": "scnaeriopy", "configMap": {"name": "scnaeriopy"}}, ], - } - } + }, + }, ] ) with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: diff --git a/src/warnet/scenarios/ln_init.py b/src/warnet/scenarios/ln_init.py index 98d8179df..4894e1dc0 100644 --- a/src/warnet/scenarios/ln_init.py +++ b/src/warnet/scenarios/ln_init.py @@ -2,23 +2,23 @@ from time import sleep -from warnet.scenarios.utils import ensure_miner -from warnet.test_framework_bridge import WarnetTestFramework +# The base class exists inside the commander container +from commander import 
Commander def cli_help(): return "Fund LN wallets and open channels" -class LNInit(WarnetTestFramework): +class LNInit(Commander): def set_test_params(self): self.num_nodes = None def run_test(self): self.log.info("Lock out of IBD") - miner = ensure_miner(self.nodes[0]) + miner = self.ensure_miner(self.nodes[0]) miner_addr = miner.getnewaddress() - self.generatetoaddress(self.nodes[0], 1, miner_addr) + self.generatetoaddress(self.nodes[0], 1, miner_addr, sync_fun=self.no_op) self.log.info("Get LN nodes and wallet addresses") ln_nodes = [] @@ -29,10 +29,10 @@ def run_test(self): ln_nodes.append(tank.index) self.log.info("Fund LN wallets") - miner = ensure_miner(self.nodes[0]) + miner = self.ensure_miner(self.nodes[0]) miner_addr = miner.getnewaddress() # 298 block base - self.generatetoaddress(self.nodes[0], 297, miner_addr) + self.generatetoaddress(self.nodes[0], 297, miner_addr, sync_fun=self.no_op) # divvy up the goods split = (miner.getbalance() - 1) // len(recv_addrs) sends = {} @@ -40,7 +40,7 @@ def run_test(self): sends[addr] = split miner.sendmany("", sends) # confirm funds in block 299 - self.generatetoaddress(self.nodes[0], 1, miner_addr) + self.generatetoaddress(self.nodes[0], 1, miner_addr, sync_fun=self.no_op) self.log.info( f"Waiting for funds to be spendable: {split} BTC each for {len(recv_addrs)} LN nodes" @@ -110,7 +110,7 @@ def funded_lnnodes(): ) # Ensure all channel opens are sufficiently confirmed - self.generatetoaddress(self.nodes[0], 10, miner_addr) + self.generatetoaddress(self.nodes[0], 10, miner_addr, sync_fun=self.no_op) ln_nodes_gossip = ln_nodes.copy() while len(ln_nodes_gossip) > 0: self.log.info(f"Waiting for graph gossip sync, LN nodes remaining: {ln_nodes_gossip}") diff --git a/src/warnet/scenarios/miner_std.py b/src/warnet/scenarios/miner_std.py index 73e91a112..64c0db993 100755 --- a/src/warnet/scenarios/miner_std.py +++ b/src/warnet/scenarios/miner_std.py @@ -9,6 +9,7 @@ def cli_help(): return "Generate blocks over time. 
Options: [--allnodes | --interval= | --mature ]" + class Miner: def __init__(self, node, mature): self.node = node diff --git a/src/warnet/scenarios/sens_relay.py b/src/warnet/scenarios/sens_relay.py index 41cddf929..0fa4ed55a 100644 --- a/src/warnet/scenarios/sens_relay.py +++ b/src/warnet/scenarios/sens_relay.py @@ -1,25 +1,25 @@ #!/usr/bin/env python3 -from warnet.scenarios.utils import ensure_miner -from warnet.test_framework_bridge import WarnetTestFramework +# The base class exists inside the commander container +from commander import Commander def cli_help(): return "Send a transaction using sensitive relay" -class MinerStd(WarnetTestFramework): +class MinerStd(Commander): def set_test_params(self): self.num_nodes = 12 def run_test(self): # PR branch node test_node = self.nodes[11] - test_wallet = ensure_miner(test_node) + test_wallet = self.ensure_miner(test_node) addr = test_wallet.getnewaddress() self.log.info("generating 110 blocks...") - self.generatetoaddress(test_node, 110, addr) + self.generatetoaddress(test_node, 110, addr, sync_fun=self.no_op) self.log.info("adding onion addresses from all peers...") for i in range(11): @@ -32,7 +32,7 @@ def run_test(self): self.log.info("getting address from recipient...") # some other node recip = self.nodes[5] - recip_wallet = ensure_miner(recip) + recip_wallet = self.ensure_miner(recip) recip_addr = recip_wallet.getnewaddress() self.log.info("sending transaction...") diff --git a/src/warnet/scenarios/tx_flood.py b/src/warnet/scenarios/tx_flood.py index e4fe3ce2f..69b00b460 100755 --- a/src/warnet/scenarios/tx_flood.py +++ b/src/warnet/scenarios/tx_flood.py @@ -3,15 +3,15 @@ from random import choice, randrange from time import sleep -from warnet.scenarios.utils import ensure_miner -from warnet.test_framework_bridge import WarnetTestFramework +# The base class exists inside the commander container +from commander import Commander def cli_help(): return "Make a big transaction mess. 
Options: [--interval=]" -class TXFlood(WarnetTestFramework): +class TXFlood(Commander): def set_test_params(self): self.num_nodes = 1 self.addrs = [] @@ -27,7 +27,7 @@ def add_options(self, parser): ) def orders(self, node): - wallet = ensure_miner(node) + wallet = self.ensure_miner(node) for address_type in ["legacy", "p2sh-segwit", "bech32", "bech32m"]: self.addrs.append(wallet.getnewaddress(address_type=address_type)) while True: From ee31f0dbde67dad35b4db9a4856a3c6dc826d641 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 22:59:20 +0200 Subject: [PATCH 019/710] migrate 'node' to 'tank' --- src/warnet/cli/bitcoin.py | 10 +++++----- src/warnet/cli/network.py | 9 +++++---- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index 32d5b4550..f6bc9ca94 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -21,9 +21,9 @@ def rpc(node, method, params, network): Call bitcoin-cli [params] on in [network] """ if params: - cmd = f"kubectl exec warnet-node-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" + cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" else: - cmd = f"kubectl exec warnet-node-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" + cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" run_command(cmd) @@ -34,7 +34,7 @@ def debug_log(node, network): """ Fetch the Bitcoin Core debug log from in [network] """ - cmd = f"kubectl logs warnet-node-{node}" + cmd = f"kubectl logs warnet-tank-{node}" run_command(cmd) @@ -107,8 +107,8 @@ def grep_logs(pattern, network, show_k8s_timestamps, no_sort): for log_entry, pod_name in matching_logs: try: # Split the log entry into Kubernetes timestamp, Bitcoin timestamp, and the rest of 
the log - k8s_timestamp, rest = log_entry.split(' ', 1) - bitcoin_timestamp, log_message = rest.split(' ', 1) + k8s_timestamp, rest = log_entry.split(" ", 1) + bitcoin_timestamp, log_message = rest.split(" ", 1) # Format the output based on the show_k8s_timestamps option if show_k8s_timestamps: diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 0143ab116..102cb536a 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -113,7 +113,8 @@ def connect(graph_file: Path): for edge in edges: source = edge.get("source") target = edge.get("target") - command = f"kubectl exec -it warnet-node-{source} -- bitcoin-cli -rpcuser=user -rpcpassword=password addnode warnet-node-{target}-service:8333 add" + command = f"kubectl exec -it warnet-tank-{source} -- bitcoin-cli addnode warnet-tank-{target}:18443 onetry" + print(command) print(f"Connecting node {source} to node {target}") if run_command(command, stream_output=True): @@ -170,7 +171,7 @@ def create_node_deployment(node: int, data: dict) -> dict: ], } ], - "volumes": [{"name": "config", "configMap": {"name": f"bitcoin-config-node-{node}"}}], + "volumes": [{"name": "config", "configMap": {"name": f"bitcoin-config-tank-{node}"}}], }, } @@ -179,7 +180,7 @@ def create_node_service(node: int) -> dict: return { "apiVersion": "v1", "kind": "Service", - "metadata": {"name": f"warnet-node-{node}-service", "namespace": "warnet"}, + "metadata": {"name": f"warnet-tank-{node}-service", "namespace": "warnet"}, "spec": { "selector": {"app": "warnet", "node": str(node)}, "ports": [{"port": 8333, "targetPort": 8333}], @@ -192,7 +193,7 @@ def create_config_map(node: int, config: str) -> dict: "apiVersion": "v1", "kind": "ConfigMap", "metadata": { - "name": f"bitcoin-config-node-{node}", + "name": f"bitcoin-config-tank-{node}", "namespace": "warnet", }, "data": {"bitcoin.conf": config}, From fce7ae21d46efe746957469583a0d52680357157 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 20 Aug 2024 
23:08:29 +0200 Subject: [PATCH 020/710] pass options to scenarios --- resources/images/commander/Dockerfile | 2 +- src/warnet/cli/scenarios.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/resources/images/commander/Dockerfile b/resources/images/commander/Dockerfile index 243f88a5e..c9a3510bf 100644 --- a/resources/images/commander/Dockerfile +++ b/resources/images/commander/Dockerfile @@ -8,4 +8,4 @@ FROM python:3.12-slim COPY src / # -u: force the stdout and stderr streams to be unbuffered -CMD ["python", "-u", "/scenario.py"] +ENTRYPOINT ["python", "-u", "/scenario.py"] diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index be1e9f4d1..81cc90112 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -102,6 +102,7 @@ def run(scenario, network, additional_args): { "name": name, "image": "warnet-commander:latest", + "args": additional_args, "imagePullPolicy": "Never", "volumeMounts": [ { From 840d4b732c4780120aed6e8d9ccb36807ee55858 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 23:26:58 +0200 Subject: [PATCH 021/710] remove requirements --- requirements.in | 5 ----- requirements.txt | 21 +-------------------- 2 files changed, 1 insertion(+), 25 deletions(-) diff --git a/requirements.in b/requirements.in index ada695b50..d18f137d3 100644 --- a/requirements.in +++ b/requirements.in @@ -1,14 +1,9 @@ click docker flask -Flask-JSONRPC jsonschema -jsonrpcserver -jsonrpcclient kubernetes networkx -numpy -requests<2.30 rich tabulate PyYAML diff --git a/requirements.txt b/requirements.txt index 2455034f3..0b7ef495b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,10 +21,6 @@ click==8.1.7 docker==7.0.0 # via -r requirements.in flask==3.0.0 - # via - # -r requirements.in - # flask-jsonrpc -flask-jsonrpc==1.1.0 # via -r requirements.in google-auth==2.25.2 # via kubernetes @@ -34,14 +30,8 @@ itsdangerous==2.1.2 # via flask jinja2==3.1.2 # via flask -jsonrpcclient==4.0.3 - # via -r 
requirements.in -jsonrpcserver==5.0.9 - # via -r requirements.in jsonschema==4.20.0 - # via - # -r requirements.in - # jsonrpcserver + # via -r requirements.in jsonschema-specifications==2023.12.1 # via jsonschema kubernetes==28.1.0 @@ -56,14 +46,10 @@ mdurl==0.1.2 # via markdown-it-py networkx==3.2.1 # via -r requirements.in -numpy==1.26.2 - # via -r requirements.in oauthlib==3.2.2 # via # kubernetes # requests-oauthlib -oslash==0.6.3 - # via jsonrpcserver packaging==23.2 # via docker pyasn1==0.5.1 @@ -86,7 +72,6 @@ referencing==0.32.0 # jsonschema-specifications requests==2.29.0 # via - # -r requirements.in # docker # kubernetes # requests-oauthlib @@ -106,10 +91,6 @@ six==1.16.0 # python-dateutil tabulate==0.9.0 # via -r requirements.in -typeguard==2.13.3 - # via flask-jsonrpc -typing-extensions==4.9.0 - # via oslash urllib3==1.26.18 # via # docker From a964809bcd572f268c5b2ea8e6513a6931dda0d1 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 23:32:13 +0200 Subject: [PATCH 022/710] DRY network cli --- src/warnet/cli/network.py | 298 ++++++++++++++++++-------------------- 1 file changed, 142 insertions(+), 156 deletions(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 102cb536a..10fb70923 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -2,6 +2,7 @@ import xml.etree.ElementTree as ET from importlib.resources import files from pathlib import Path +from typing import Any, Dict, List import click import networkx as nx @@ -26,15 +27,145 @@ def network(): """Network commands""" -# High-level network operations +def read_graph_file(graph_file: Path) -> nx.Graph: + with open(graph_file) as f: + return nx.parse_graphml(f.read()) + + +def generate_node_config(node: int, data: dict) -> str: + base_config = """ +regtest=1 +checkmempool=0 +acceptnonstdtxn=1 +debuglogfile=0 +logips=1 +logtimemicros=1 +capturemessages=1 +fallbackfee=0.00001000 +listen=1 + +[regtest] +rpcuser=user +rpcpassword=password 
+rpcport=18443 +rpcallowip=0.0.0.0/0 +rpcbind=0.0.0.0 + +zmqpubrawblock=tcp://0.0.0.0:28332 +zmqpubrawtx=tcp://0.0.0.0:28333 +""" + node_specific_config = data.get("bitcoin_config", "").replace(",", "\n") + return f"{base_config}\n{node_specific_config}" + + +def create_kubernetes_object( + kind: str, metadata: Dict[str, Any], spec: Dict[str, Any] = None +) -> Dict[str, Any]: + obj = { + "apiVersion": "v1", + "kind": kind, + "metadata": metadata, + } + if spec is not None: + obj["spec"] = spec + return obj + + +def create_node_deployment(node: int, data: dict) -> Dict[str, Any]: + image = data.get("image", "bitcoindevproject/bitcoin:27.0") + version = data.get("version", "27.0") + + return create_kubernetes_object( + kind="Pod", + metadata={ + "name": f"warnet-tank-{node}", + "namespace": "warnet", + "labels": {"rank": "tank", "index": str(node)}, + }, + spec={ + "containers": [ + { + "name": "bitcoin", + "image": image, + "env": [{"name": "BITCOIN_VERSION", "value": version}], + "volumeMounts": [ + { + "name": "config", + "mountPath": "/root/.bitcoin/bitcoin.conf", + "subPath": "bitcoin.conf", + } + ], + } + ], + "volumes": [{"name": "config", "configMap": {"name": f"bitcoin-config-tank-{node}"}}], + }, + ) + + +def create_node_service(node: int) -> Dict[str, Any]: + return create_kubernetes_object( + kind="Service", + metadata={"name": f"warnet-tank-{node}-service", "namespace": "warnet"}, + spec={ + "selector": {"app": "warnet", "node": str(node)}, + "ports": [{"port": 8333, "targetPort": 8333}], + }, + ) + + +def create_config_map(node: int, config: str) -> Dict[str, Any]: + return create_kubernetes_object( + kind="ConfigMap", + metadata={ + "name": f"bitcoin-config-tank-{node}", + "namespace": "warnet", + }, + # Note: We're not passing a 'spec' here, as ConfigMaps don't have a spec + ) + + +def generate_kubernetes_yaml(graph: nx.Graph) -> List[Dict[str, Any]]: + kubernetes_objects = [create_namespace()] + + for node, data in graph.nodes(data=True): + config = 
generate_node_config(node, data) + config_map = create_config_map(node, config) + config_map["data"] = {"bitcoin.conf": config} # Add data directly to the ConfigMap + kubernetes_objects.extend( + [ + config_map, + create_node_deployment(node, data), + create_node_service(node), + ] + ) + + return kubernetes_objects + + +def setup_logging_helm() -> bool: + helm_commands = [ + "helm repo add grafana https://grafana.github.io/helm-charts", + "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", + "helm repo update", + f"helm upgrade --install --namespace warnet-logging --create-namespace --values {WAR_MANIFESTS}/loki_values.yaml loki grafana/loki --version 5.47.2", + "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", + "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", + f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {WAR_MANIFESTS}/grafana_values.yaml", + ] + + for command in helm_commands: + if not run_command(command, stream_output=True): + print(f"Failed to run Helm command: {command}") + return False + return True + + @network.command() @click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) @click.option("--network", default="warnet", show_default=True) @click.option("--logging/--no-logging", default=False) def start(graph_file: Path, logging: bool, network: str): - """ - Start a warnet with topology loaded from a into [network] - """ + """Start a warnet with topology loaded from a into [network]""" graph = read_graph_file(graph_file) kubernetes_yaml = generate_kubernetes_yaml(graph) @@ -49,13 +180,8 @@ def start(graph_file: Path, logging: bool, network: str): print( "Warning: Failed to set kubectl context. You may need to manually switch to the warnet namespace." 
) - if not logging: - print("Skipping install of logging charts") - else: - if setup_logging_helm(): - print("Helm charts installed successfully.") - else: - print("Failed to install Helm charts.") + if logging and not setup_logging_helm(): + print("Failed to install Helm charts.") else: print(f"Failed to start warnet '{network}'.") finally: @@ -65,9 +191,7 @@ def start(graph_file: Path, logging: bool, network: str): @network.command() @click.option("--network", default="warnet", show_default=True) def down(network: str): - """ - Bring down a running warnet named [network] - """ + """Bring down a running warnet named [network]""" if delete_namespace(network) and delete_namespace("warnet-logging"): print(f"Warnet '{network}' has been successfully brought down and the namespaces deleted.") else: @@ -78,9 +202,7 @@ def down(network: str): @click.option("--follow", "-f", is_flag=True, help="Follow logs") def logs(follow: bool): """Get Kubernetes logs from the RPC server""" - command = "kubectl logs rpc-0" - if follow: - command += " --follow" + command = f"kubectl logs rpc-0{' --follow' if follow else ''}" run_command(command, stream_output=follow) @@ -88,9 +210,7 @@ def logs(follow: bool): @click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) @click.option("--output", "-o", default="warnet-deployment.yaml", help="Output YAML file") def generate_yaml(graph_file: Path, output: str): - """ - Generate a Kubernetes YAML file from a graph file for deploying warnet nodes. - """ + """Generate a Kubernetes YAML file from a graph file for deploying warnet nodes.""" graph = read_graph_file(graph_file) kubernetes_yaml = generate_kubernetes_yaml(graph) @@ -103,19 +223,14 @@ def generate_yaml(graph_file: Path, output: str): @network.command() @click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path(exists=True)) def connect(graph_file: Path): - """ - Connect nodes based on the edges defined in the graph file. 
- """ + """Connect nodes based on the edges defined in the graph file.""" tree = ET.parse(graph_file) root = tree.getroot() edges = root.findall(".//{http://graphml.graphdrawing.org/xmlns}edge") for edge in edges: - source = edge.get("source") - target = edge.get("target") + source, target = edge.get("source"), edge.get("target") command = f"kubectl exec -it warnet-tank-{source} -- bitcoin-cli addnode warnet-tank-{target}:18443 onetry" - print(command) - print(f"Connecting node {source} to node {target}") if run_command(command, stream_output=True): print(f"Successfully connected node {source} to node {target}") @@ -123,132 +238,3 @@ def connect(graph_file: Path): print(f"Failed to connect node {source} to node {target}") print("All connections attempted.") - - -# Kubernetes object generation -def generate_kubernetes_yaml(graph: nx.Graph) -> list: - kubernetes_objects = [create_namespace()] - - for node, data in graph.nodes(data=True): - config = generate_node_config(node, data) - kubernetes_objects.extend( - [ - create_config_map(node, config), - create_node_deployment(node, data), - create_node_service(node), - ] - ) - - return kubernetes_objects - - -def create_node_deployment(node: int, data: dict) -> dict: - image = data.get("image", "bitcoindevproject/bitcoin:27.0") - version = data.get("version", "27.0") - - return { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": f"warnet-tank-{node}", - "namespace": "warnet", - "labels": {"rank": "tank", "index": str(node)}, - }, - "spec": { - "containers": [ - { - "name": "bitcoin", - "image": image, - "env": [ - {"name": "BITCOIN_VERSION", "value": version}, - ], - "volumeMounts": [ - { - "name": "config", - "mountPath": "/root/.bitcoin/bitcoin.conf", - "subPath": "bitcoin.conf", - } - ], - } - ], - "volumes": [{"name": "config", "configMap": {"name": f"bitcoin-config-tank-{node}"}}], - }, - } - - -def create_node_service(node: int) -> dict: - return { - "apiVersion": "v1", - 
"kind": "Service", - "metadata": {"name": f"warnet-tank-{node}-service", "namespace": "warnet"}, - "spec": { - "selector": {"app": "warnet", "node": str(node)}, - "ports": [{"port": 8333, "targetPort": 8333}], - }, - } - - -def create_config_map(node: int, config: str) -> dict: - return { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": f"bitcoin-config-tank-{node}", - "namespace": "warnet", - }, - "data": {"bitcoin.conf": config}, - } - - -# Utility functions -def read_graph_file(graph_file: Path) -> nx.Graph: - with open(graph_file) as f: - return nx.parse_graphml(f.read()) - - -def generate_node_config(node: int, data: dict) -> str: - base_config = """ -regtest=1 -checkmempool=0 -acceptnonstdtxn=1 -debuglogfile=0 -logips=1 -logtimemicros=1 -capturemessages=1 -fallbackfee=0.00001000 -listen=1 - -[regtest] -rpcuser=user -rpcpassword=password -rpcport=18443 -rpcallowip=0.0.0.0/0 -rpcbind=0.0.0.0 - -zmqpubrawblock=tcp://0.0.0.0:28332 -zmqpubrawtx=tcp://0.0.0.0:28333 -""" - node_specific_config = data.get("bitcoin_config", "") - node_specific_config = node_specific_config.replace(",", "\n") - return f"{base_config}\n{node_specific_config}" - - -def setup_logging_helm(): - """ - Run the required Helm commands for setting up Grafana, Prometheus, and Loki. 
- """ - helm_commands = [ - "helm repo add grafana https://grafana.github.io/helm-charts", - "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", - "helm repo update", - f"helm upgrade --install --namespace warnet-logging --create-namespace --values {WAR_MANIFESTS}/loki_values.yaml loki grafana/loki --version 5.47.2", - "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", - "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", - f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {WAR_MANIFESTS}/grafana_values.yaml", - ] - - for command in helm_commands: - if not run_command(command, stream_output=True): - print(f"Failed to run Helm command: {command}") - return False - return True From 4895f6fab4121a136a9d7a5b965c94c8cf3176fe Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 20 Aug 2024 23:43:24 +0200 Subject: [PATCH 023/710] warcli scenarios active --- src/warnet/cli/k8s.py | 21 +++++---------------- src/warnet/cli/network.py | 1 - src/warnet/cli/scenarios.py | 33 ++++++++++++++++++++++----------- 3 files changed, 27 insertions(+), 28 deletions(-) diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 33aa638c0..9a9439184 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -23,24 +23,13 @@ def get_pods(): return sclient.list_namespaced_pod("warnet") -def get_tanks(): +def get_mission(mission): pods = get_pods() - tanks = [] - # TODO: filter tanks only!!!! 
+ crew = [] for pod in pods.items: - if "rank" in pod.metadata.labels and pod.metadata.labels["rank"] == "tank": - tanks.append( - { - "tank": pod.metadata.name, - "chain": "regtest", - "rpc_host": pod.status.pod_ip, - "rpc_port": 18443, - "rpc_user": "user", - "rpc_password": "password", - "init_peers": [], - } - ) - return tanks + if "mission" in pod.metadata.labels and pod.metadata.labels["mission"] == mission: + crew.append(pod) + return crew def run_command(command, stream_output=False, env=None): diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 10fb70923..a3fb0eaea 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -26,7 +26,6 @@ def network(): """Network commands""" - def read_graph_file(graph_file: Path) -> nx.Graph: with open(graph_file) as f: return nx.parse_graphml(f.read()) diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 81cc90112..5cb3181bc 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -12,7 +12,7 @@ from rich.console import Console from rich.table import Table -from .k8s import apply_kubernetes_yaml, create_namespace, get_tanks +from .k8s import apply_kubernetes_yaml, create_namespace, get_mission from .rpc import rpc_call @@ -67,7 +67,18 @@ def run(scenario, network, additional_args): name = f"commander-{scenario.replace('_', '')}-{int(time.time())}" - tanks = get_tanks() + tankpods = get_mission("tank") + tanks = [ + { + "tank": tank.metadata.name, + "chain": "regtest", + "rpc_host": tank.status.pod_ip, + "rpc_port": 18443, + "rpc_user": "user", + "rpc_password": "password", + "init_peers": [], + } for tank in tankpods + ] kubernetes_objects = [create_namespace()] kubernetes_objects.extend( [ @@ -95,9 +106,10 @@ def run(scenario, network, additional_args): "metadata": { "name": name, "namespace": "warnet", - "labels": {"app": "warnet"}, + "labels": {"mission": "commander"}, }, "spec": { + "restartPolicy": "Never", "containers": [ { "name": 
name, @@ -163,20 +175,19 @@ def active(): """ List running scenarios "name": "pid" pairs """ - console = Console() - result = rpc_call("scenarios_list_running", {}) - if not result: + commanders = get_mission("commander") + if len(commanders) == 0: print("No scenarios running") return - assert isinstance(result, list) # Make mypy happy table = Table(show_header=True, header_style="bold") - for key in result[0].keys(): # noqa: SIM118 - table.add_column(key.capitalize()) + table.add_column("Commander") + table.add_column("Status") - for scenario in result: - table.add_row(*[str(scenario[key]) for key in scenario]) + for commander in commanders: + table.add_row(commander.metadata.name, commander.status.phase) + console = Console() console.print(table) From 64d6c56a1be67da2ec58a21c2ca4ca5d42b2dadc Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 20 Aug 2024 23:46:17 +0200 Subject: [PATCH 024/710] addnode in config rather than later --- src/warnet/cli/network.py | 41 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 29 deletions(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index a3fb0eaea..aa065303d 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -1,5 +1,4 @@ import tempfile -import xml.etree.ElementTree as ET from importlib.resources import files from pathlib import Path from typing import Any, Dict, List @@ -31,7 +30,7 @@ def read_graph_file(graph_file: Path) -> nx.Graph: return nx.parse_graphml(f.read()) -def generate_node_config(node: int, data: dict) -> str: +def generate_node_config(node: int, data: dict, graph: nx.Graph) -> str: base_config = """ regtest=1 checkmempool=0 @@ -54,7 +53,12 @@ def generate_node_config(node: int, data: dict) -> str: zmqpubrawtx=tcp://0.0.0.0:28333 """ node_specific_config = data.get("bitcoin_config", "").replace(",", "\n") - return f"{base_config}\n{node_specific_config}" + + # Add addnode configurations for connected nodes + connected_nodes = 
list(graph.neighbors(node)) + addnode_configs = [f"addnode=warnet-tank-{index}" for index in connected_nodes] + + return f"{base_config}\n{node_specific_config}\n" + "\n".join(addnode_configs) def create_kubernetes_object( @@ -113,26 +117,25 @@ def create_node_service(node: int) -> Dict[str, Any]: def create_config_map(node: int, config: str) -> Dict[str, Any]: - return create_kubernetes_object( + config_map = create_kubernetes_object( kind="ConfigMap", metadata={ "name": f"bitcoin-config-tank-{node}", "namespace": "warnet", }, - # Note: We're not passing a 'spec' here, as ConfigMaps don't have a spec ) + config_map["data"] = {"bitcoin.conf": config} + return config_map def generate_kubernetes_yaml(graph: nx.Graph) -> List[Dict[str, Any]]: kubernetes_objects = [create_namespace()] for node, data in graph.nodes(data=True): - config = generate_node_config(node, data) - config_map = create_config_map(node, config) - config_map["data"] = {"bitcoin.conf": config} # Add data directly to the ConfigMap + config = generate_node_config(node, data, graph) kubernetes_objects.extend( [ - config_map, + create_config_map(node, config), create_node_deployment(node, data), create_node_service(node), ] @@ -217,23 +220,3 @@ def generate_yaml(graph_file: Path, output: str): yaml.dump_all(kubernetes_yaml, f) print(f"Kubernetes YAML file generated: {output}") - - -@network.command() -@click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path(exists=True)) -def connect(graph_file: Path): - """Connect nodes based on the edges defined in the graph file.""" - tree = ET.parse(graph_file) - root = tree.getroot() - edges = root.findall(".//{http://graphml.graphdrawing.org/xmlns}edge") - - for edge in edges: - source, target = edge.get("source"), edge.get("target") - command = f"kubectl exec -it warnet-tank-{source} -- bitcoin-cli addnode warnet-tank-{target}:18443 onetry" - print(f"Connecting node {source} to node {target}") - if run_command(command, 
stream_output=True): - print(f"Successfully connected node {source} to node {target}") - else: - print(f"Failed to connect node {source} to node {target}") - - print("All connections attempted.") From 184b2589b3cd8df01517891c4afc1a538296a4c5 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 20 Aug 2024 23:54:07 +0200 Subject: [PATCH 025/710] rank -> mission --- src/warnet/cli/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index aa065303d..e46acc02e 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -83,7 +83,7 @@ def create_node_deployment(node: int, data: dict) -> Dict[str, Any]: metadata={ "name": f"warnet-tank-{node}", "namespace": "warnet", - "labels": {"rank": "tank", "index": str(node)}, + "labels": {"mission": "tank", "index": str(node)}, }, spec={ "containers": [ From da6fb2826e4e3dd45dc0a6f3420dc279daa57ed4 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 00:04:14 +0200 Subject: [PATCH 026/710] fix addnode connections --- src/warnet/cli/network.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index e46acc02e..3bf410718 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -25,6 +25,7 @@ def network(): """Network commands""" + def read_graph_file(graph_file: Path) -> nx.Graph: with open(graph_file) as f: return nx.parse_graphml(f.read()) @@ -56,7 +57,7 @@ def generate_node_config(node: int, data: dict, graph: nx.Graph) -> str: # Add addnode configurations for connected nodes connected_nodes = list(graph.neighbors(node)) - addnode_configs = [f"addnode=warnet-tank-{index}" for index in connected_nodes] + addnode_configs = [f"addnode=warnet-tank-{index}-service" for index in connected_nodes] return f"{base_config}\n{node_specific_config}\n" + "\n".join(addnode_configs) @@ -83,7 +84,7 @@ def create_node_deployment(node: 
int, data: dict) -> Dict[str, Any]: metadata={ "name": f"warnet-tank-{node}", "namespace": "warnet", - "labels": {"mission": "tank", "index": str(node)}, + "labels": {"app": "warnet", "mission": "tank", "index": str(node)}, }, spec={ "containers": [ @@ -98,6 +99,10 @@ def create_node_deployment(node: int, data: dict) -> Dict[str, Any]: "subPath": "bitcoin.conf", } ], + "ports": [ + {"containerPort": 18444}, + {"containerPort": 18443}, + ], } ], "volumes": [{"name": "config", "configMap": {"name": f"bitcoin-config-tank-{node}"}}], @@ -110,8 +115,11 @@ def create_node_service(node: int) -> Dict[str, Any]: kind="Service", metadata={"name": f"warnet-tank-{node}-service", "namespace": "warnet"}, spec={ - "selector": {"app": "warnet", "node": str(node)}, - "ports": [{"port": 8333, "targetPort": 8333}], + "selector": {"app": "warnet", "mission": "tank", "index": str(node)}, + "ports": [ + {"name": "p2p", "port": 18444, "targetPort": 18444}, + {"name": "rpc", "port": 18443, "targetPort": 18443}, + ], }, ) From 8e1d2637556d68f570198a0949682c425917d2c7 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 00:16:31 +0200 Subject: [PATCH 027/710] fix warcli scenarios available and remove scenarios stop --- resources/graphs/default.graphml | 2 +- src/warnet/cli/scenarios.py | 26 ++++++++------------------ 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/resources/graphs/default.graphml b/resources/graphs/default.graphml index ec591b000..aad5448ad 100644 --- a/resources/graphs/default.graphml +++ b/resources/graphs/default.graphml @@ -18,7 +18,7 @@ 27.0 - uacomment=w0 + uacomment=w0,debug=net true true diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 5cb3181bc..a93345f8a 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -2,6 +2,7 @@ import importlib import json import os +import pkgutil import sys import tempfile import time @@ -11,7 +12,7 @@ from rich import print from rich.console import 
Console from rich.table import Table - +from warnet import scenarios as SCENARIOS from .k8s import apply_kubernetes_yaml, create_namespace, get_mission from .rpc import rpc_call @@ -27,18 +28,17 @@ def available(): List available scenarios in the Warnet Test Framework """ console = Console() - result = rpc_call("scenarios_available", None) - if not isinstance(result, list): # Make mypy happy - print(f"Error. Expected list but got {type(result)}: {result}") - sys.exit(1) + + scenario_list = [] + for s in pkgutil.iter_modules(SCENARIOS.__path__): + scenario_list.append(s.name) # Create the table table = Table(show_header=True, header_style="bold") table.add_column("Name") - table.add_column("Description") - for scenario in result: - table.add_row(scenario[0], scenario[1]) + for scenario in scenario_list: + table.add_row(scenario) console.print(table) @@ -189,13 +189,3 @@ def active(): console = Console() console.print(table) - - -@scenarios.command() -@click.argument("pid", type=int) -def stop(pid: int): - """ - Stop scenario with PID from running - """ - params = {"pid": pid} - print(rpc_call("scenarios_stop", params)) From 2e8aa1bf6565ce7c56be18666c8e44e8aecc7991 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 00:18:19 +0200 Subject: [PATCH 028/710] disable ruff fail --- .github/workflows/test.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bd5e7425f..e287d3aa7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -8,6 +8,8 @@ on: jobs: ruff: + # DISABLE FOR REWRITE + if: false runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 From d9c85266621d7a6f8a54dba8bfc5029252fe2997 Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Tue, 20 Aug 2024 23:27:36 +0200 Subject: [PATCH 029/710] initial bitcoincore helm chart --- resources/charts/bitcoincore/.helmignore | 23 ++++ resources/charts/bitcoincore/Chart.yaml | 24 ++++ 
.../charts/bitcoincore/templates/NOTES.txt | 1 + .../charts/bitcoincore/templates/_helpers.tpl | 62 +++++++++ .../bitcoincore/templates/configmap.yaml | 9 ++ .../charts/bitcoincore/templates/pod.yaml | 59 ++++++++ .../bitcoincore/templates/serviceaccount.yaml | 13 ++ .../templates/tests/test-connection.yaml | 15 ++ resources/charts/bitcoincore/values.yaml | 129 ++++++++++++++++++ 9 files changed, 335 insertions(+) create mode 100644 resources/charts/bitcoincore/.helmignore create mode 100644 resources/charts/bitcoincore/Chart.yaml create mode 100644 resources/charts/bitcoincore/templates/NOTES.txt create mode 100644 resources/charts/bitcoincore/templates/_helpers.tpl create mode 100644 resources/charts/bitcoincore/templates/configmap.yaml create mode 100644 resources/charts/bitcoincore/templates/pod.yaml create mode 100644 resources/charts/bitcoincore/templates/serviceaccount.yaml create mode 100644 resources/charts/bitcoincore/templates/tests/test-connection.yaml create mode 100644 resources/charts/bitcoincore/values.yaml diff --git a/resources/charts/bitcoincore/.helmignore b/resources/charts/bitcoincore/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/resources/charts/bitcoincore/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/resources/charts/bitcoincore/Chart.yaml b/resources/charts/bitcoincore/Chart.yaml new file mode 100644 index 000000000..f99064472 --- /dev/null +++ b/resources/charts/bitcoincore/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: bitcoincore +description: A Helm chart for Bitcoin Core + +# A chart can be either an 'application' or a 'library' chart. 
+# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: 0.1.0 diff --git a/resources/charts/bitcoincore/templates/NOTES.txt b/resources/charts/bitcoincore/templates/NOTES.txt new file mode 100644 index 000000000..a362b81c4 --- /dev/null +++ b/resources/charts/bitcoincore/templates/NOTES.txt @@ -0,0 +1 @@ +Thank you for installing {{ include "bitcoincore.fullname" . }}. \ No newline at end of file diff --git a/resources/charts/bitcoincore/templates/_helpers.tpl b/resources/charts/bitcoincore/templates/_helpers.tpl new file mode 100644 index 000000000..de333a17b --- /dev/null +++ b/resources/charts/bitcoincore/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "bitcoincore.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "bitcoincore.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "bitcoincore.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "bitcoincore.labels" -}} +helm.sh/chart: {{ include "bitcoincore.chart" . }} +{{ include "bitcoincore.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "bitcoincore.selectorLabels" -}} +app.kubernetes.io/name: {{ include "bitcoincore.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "bitcoincore.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "bitcoincore.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/resources/charts/bitcoincore/templates/configmap.yaml b/resources/charts/bitcoincore/templates/configmap.yaml new file mode 100644 index 000000000..9f25c8e5e --- /dev/null +++ b/resources/charts/bitcoincore/templates/configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "bitcoincore.fullname" . }} + labels: + {{- include "bitcoincore.labels" . 
| nindent 4 }} +data: + bitcoin-conf: | + {{ tpl .Values.config . | nindent 2 }} \ No newline at end of file diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml new file mode 100644 index 000000000..4b1b784e0 --- /dev/null +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "bitcoincore.fullname" . }} + labels: + {{- include "bitcoincore.labels" . | nindent 4 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} +spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 4 }} + {{- end }} + serviceAccountName: {{ include "bitcoincore.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 4 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 8 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 8 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 8 }} + resources: + {{- toYaml .Values.resources | nindent 8 }} + volumeMounts: + {{- with .Values.volumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} + - mountPath: /root/.bitcoin/bitcoin.conf + name: config + subPath: bitcoin.conf + volumes: + {{- with .Values.volumes }} + {{- toYaml . | nindent 4 }} + {{- end }} + - configMap: + name: {{ include "bitcoincore.fullname" . }} + name: config + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 4 }} + {{- end }} diff --git a/resources/charts/bitcoincore/templates/serviceaccount.yaml b/resources/charts/bitcoincore/templates/serviceaccount.yaml new file mode 100644 index 000000000..af2605409 --- /dev/null +++ b/resources/charts/bitcoincore/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "bitcoincore.serviceAccountName" . }} + labels: + {{- include "bitcoincore.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} diff --git a/resources/charts/bitcoincore/templates/tests/test-connection.yaml b/resources/charts/bitcoincore/templates/tests/test-connection.yaml new file mode 100644 index 000000000..a0855da45 --- /dev/null +++ b/resources/charts/bitcoincore/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "bitcoincore.fullname" . }}-test-connection" + labels: + {{- include "bitcoincore.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "bitcoincore.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml new file mode 100644 index 000000000..d1d875417 --- /dev/null +++ b/resources/charts/bitcoincore/values.yaml @@ -0,0 +1,129 @@ +# Default values for bitcoincore. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "27.0" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: false + # Automatically mount a ServiceAccount's API credentials? + automount: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podLabels: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +livenessProbe: + exec: + command: + - pidof + - bitcoind + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 +readinessProbe: + failureThreshold: 1 + periodSeconds: 1 + successThreshold: 1 + tcpSocket: + port: 18443 + timeoutSeconds: 1 + + +# Additional volumes on the output Deployment definition. +volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +# Additional volumeMounts on the output Deployment definition. 
+volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +config: |2+ + regtest=1 + checkmempool=0 + acceptnonstdtxn=1 + debuglogfile=0 + logips=1 + logtimemicros=1 + capturemessages=1 + fallbackfee=0.00001000 + listen=1 + + [regtest] + rpcuser=user + rpcpassword=password + rpcport=18443 + rpcallowip=0.0.0.0/0 + rpcbind=0.0.0.0 + + zmqpubrawblock=tcp://0.0.0.0:28332 + zmqpubrawtx=tcp://0.0.0.0:28333 \ No newline at end of file From 7869606cfffcd8cd0cc68b66218b8f9728e1985d Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 12:04:47 +0200 Subject: [PATCH 030/710] warcli network connected and fix run_command / stream_command --- old/server.py | 611 -------------------------------------- src/warnet/cli/bitcoin.py | 14 +- src/warnet/cli/k8s.py | 88 +++--- src/warnet/cli/network.py | 74 +++-- 4 files changed, 116 insertions(+), 671 deletions(-) delete mode 100644 old/server.py diff --git a/old/server.py b/old/server.py deleted file mode 100644 index 55a7371db..000000000 --- a/old/server.py +++ /dev/null @@ -1,611 +0,0 @@ -import argparse -import base64 -import importlib -import io -import json -import logging -import logging.config -import os -import pkgutil -import platform -import shutil -import subprocess -import sys -import tarfile -import tempfile -import threading -import time -import traceback -from datetime import datetime - -import warnet.scenarios as scenarios -from flask import Flask, jsonify, request -from flask_jsonrpc.app import JSONRPC -from flask_jsonrpc.exceptions import ServerError -from warnet import SRC_DIR - -from .services import ServiceType -from .utils import gen_config_dir -from .warnet import Warnet - -WARNET_SERVER_PORT = 9276 -CONFIG_DIR_ALREADY_EXISTS = 32001 - - -class Server: - def __init__(self): - system = os.name - if system == "nt" or platform.system() == "Windows": - self.basedir = os.path.join(os.path.expanduser("~"), "warnet") - elif system 
== "posix" or platform.system() == "Linux" or platform.system() == "Darwin": - self.basedir = os.environ.get("XDG_STATE_HOME") - if self.basedir is None: - # ~/.warnet/warnet.log - self.basedir = os.path.join(os.environ["HOME"], ".warnet") - else: - # XDG_STATE_HOME / warnet / warnet.log - self.basedir = os.path.join(self.basedir, "warnet") - else: - raise NotImplementedError("Unsupported operating system") - - self.running_scenarios = [] - - self.app = Flask(__name__) - self.jsonrpc = JSONRPC(self.app, "/api") - - self.log_file_path = os.path.join(self.basedir, "warnet.log") - self.setup_global_exception_handler() - self.setup_logging() - self.setup_rpc() - self.warnets: dict = dict() - self.logger.info("Started server") - - # register a well known /-/healthy endpoint for liveness tests - # we regard warnet as healthy if the http server is up - # /-/healthy and /-/ready are often used (e.g. by the prometheus server) - self.app.add_url_rule("/-/healthy", view_func=self.healthy) - - # This is set while we bring a warnet up, which may include building a new image - # After warnet is up this will be released. 
- # This is used to delay api calls which rely on and image being built dynamically - # before the config dir is populated with the deployment info - self.image_build_lock = threading.Lock() - - def setup_global_exception_handler(self): - """ - Use flask to log traceback of unhandled exceptions - """ - - @self.app.errorhandler(Exception) - def handle_exception(e): - trace = traceback.format_exc() - self.logger.error(f"Unhandled exception: {e}\n{trace}") - response = { - "jsonrpc": "2.0", - "error": { - "code": -32603, - "message": "Internal server error", - "data": str(e), - }, - "id": request.json.get("id", None) if request.json else None, - } - return jsonify(response), 500 - - def healthy(self): - return "warnet is healthy" - - def setup_logging(self): - os.makedirs(os.path.dirname(self.log_file_path), exist_ok=True) - with open(SRC_DIR / "logging_config.json") as f: - logging_config = json.load(f) - logging_config["handlers"]["file"]["filename"] = str(self.log_file_path) - logging.config.dictConfig(logging_config) - self.logger = logging.getLogger("server") - self.scenario_logger = logging.getLogger("scenario") - self.logger.info("Logging started") - - def log_request(): - if "healthy" in request.path: - return # No need to log all these - if not request.path.startswith("/api"): - self.logger.debug(request.path) - else: - self.logger.debug(request.json) - - def build_check(): - timeout = 600 - check_interval = 10 - time_elapsed = 0 - - while time_elapsed < timeout: - # Attempt to acquire the lock without blocking - lock_acquired = self.image_build_lock.acquire(blocking=False) - # If we get the lock, release it and continue - if lock_acquired: - self.image_build_lock.release() - return - # Otherwise wait before trying again - else: - time.sleep(check_interval) - time_elapsed += check_interval - - # If we've reached here, the lock wasn't acquired in time - raise Exception( - f"Failed to acquire the build lock within {timeout} seconds, aborting RPC." 
- ) - - self.app.before_request(log_request) - self.app.before_request(build_check) - - def setup_rpc(self): - # Tanks - self.jsonrpc.register(self.tank_bcli) - self.jsonrpc.register(self.tank_lncli) - self.jsonrpc.register(self.tank_debug_log) - self.jsonrpc.register(self.tank_messages) - self.jsonrpc.register(self.tank_ln_pub_key) - # Scenarios - self.jsonrpc.register(self.scenarios_available) - self.jsonrpc.register(self.scenarios_run) - self.jsonrpc.register(self.scenarios_run_file) - self.jsonrpc.register(self.scenarios_stop) - self.jsonrpc.register(self.scenarios_list_running) - # Networks - self.jsonrpc.register(self.network_up) - self.jsonrpc.register(self.network_from_file) - self.jsonrpc.register(self.network_down) - self.jsonrpc.register(self.network_info) - self.jsonrpc.register(self.network_status) - self.jsonrpc.register(self.network_connected) - self.jsonrpc.register(self.network_export) - # Debug - self.jsonrpc.register(self.generate_deployment) - self.jsonrpc.register(self.exec_run) - # Logs - self.jsonrpc.register(self.logs_grep) - - def scenario_log(self, proc): - while not proc.stdout and not proc.stderr: - time.sleep(0.1) - for line in proc.stdout: - self.scenario_logger.info(line.decode().rstrip()) - for line in proc.stderr: - self.scenario_logger.error(line.decode().rstrip()) - - def get_warnet(self, network: str) -> Warnet: - """ - Will get a warnet from the cache if it exists. - Otherwise it will create the network using from_network() and save it - to the cache before returning it. 
- """ - if network in self.warnets: - return self.warnets[network] - wn = Warnet.from_network(network) - if isinstance(wn, Warnet): - self.warnets[network] = wn - return wn - raise ServerError(f"Could not find warnet {network}") - - def tank_bcli( - self, node: int, method: str, params: list[str] | None = None, network: str = "warnet" - ) -> str: - """ - Call bitcoin-cli on in [network] - """ - wn = self.get_warnet(network) - try: - return wn.container_interface.get_bitcoin_cli(wn.tanks[node], method, params) - except Exception as e: - msg = f"Sever error calling bitcoin-cli {method}: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def tank_lncli(self, node: int, command: list[str], network: str = "warnet") -> str: - """ - Call lightning cli on in [network] - """ - wn = self.get_warnet(network) - try: - return wn.container_interface.ln_cli(wn.tanks[node], command) - except Exception as e: - msg = f"Error calling lncli: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def tank_ln_pub_key(self, node: int, network: str = "warnet") -> str: - """ - Get lightning pub key on in [network] - """ - wn = self.get_warnet(network) - try: - return wn.container_interface.ln_pub_key(wn.tanks[node]) - except Exception as e: - msg = f"Error getting pub key: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def tank_debug_log(self, network: str, node: int) -> str: - """ - Fetch the Bitcoin Core debug log from - """ - wn = Warnet.from_network(network) - try: - return wn.container_interface.get_bitcoin_debug_log(wn.tanks[node].index) - except Exception as e: - msg = f"Error fetching debug logs: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def tank_messages(self, network: str, node_a: int, node_b: int) -> str: - """ - Fetch messages sent between and . 
- """ - wn = self.get_warnet(network) - try: - messages = [ - msg - for msg in wn.container_interface.get_messages( - wn.tanks[node_a].index, wn.tanks[node_b].index, wn.bitcoin_network - ) - if msg is not None - ] - if not messages: - msg = f"No messages found between {node_a} and {node_b}" - self.logger.error(msg) - raise ServerError(message=msg) - - messages_str_list = [] - - for message in messages: - # Check if 'time' key exists and its value is a number - if not (message.get("time") and isinstance(message["time"], int | float)): - continue - - timestamp = datetime.utcfromtimestamp(message["time"] / 1e6).strftime( - "%Y-%m-%d %H:%M:%S" - ) - direction = ">>>" if message.get("outbound", False) else "<<<" - msgtype = message.get("msgtype", "") - body_dict = message.get("body", {}) - - if not isinstance(body_dict, dict): # messages will be in dict form - continue - - body_str = ", ".join(f"{key}: {value}" for key, value in body_dict.items()) - messages_str_list.append(f"{timestamp} {direction} {msgtype} {body_str}") - - result_str = "\n".join(messages_str_list) - - return result_str - - except Exception as e: - msg = f"Error fetching messages between nodes {node_a} and {node_b}: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def network_export(self, network: str, activity: str | None, exclude: list[int]) -> bool: - """ - Export all data for a simln container running on the network - """ - wn = self.get_warnet(network) - if "simln" not in wn.services: - raise Exception("No simln service in network") - - # JSON object that will eventually be written to simln config file - config = {"nodes": []} - if activity: - config["activity"] = json.loads(activity) - # In-memory file to build tar archive - tar_buffer = io.BytesIO() - with tarfile.open(fileobj=tar_buffer, mode="w") as tar_file: - # tank LN nodes add their credentials to tar archive - wn.export(config, tar_file, exclude=exclude) - # write config file - config_bytes = 
json.dumps(config).encode("utf-8") - config_stream = io.BytesIO(config_bytes) - tarinfo = tarfile.TarInfo(name="sim.json") - tarinfo.size = len(config_bytes) - tar_file.addfile(tarinfo=tarinfo, fileobj=config_stream) - - # Write the archive to the RPC server's config directory - source_file = wn.config_dir / "simln.tar" - with open(source_file, "wb") as output: - tar_buffer.seek(0) - output.write(tar_buffer.read()) - - # Copy the archive to the "emptydir" volume in the simln pod - wn.container_interface.write_service_config(source_file, "simln", "/simln/") - return True - - def scenarios_available(self) -> list[tuple]: - """ - List available scenarios in the Warnet Test Framework - """ - try: - scenario_list = [] - for s in pkgutil.iter_modules(scenarios.__path__): - module_name = f"warnet.scenarios.{s.name}" - try: - m = importlib.import_module(module_name) - if hasattr(m, "cli_help"): - scenario_list.append((s.name, m.cli_help())) - except ModuleNotFoundError as e: - print(f"Module not found: {module_name}, error: {e}") - raise - return scenario_list - except Exception as e: - msg = f"Error listing scenarios: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def _start_scenario( - self, - scenario_path: str, - scenario_name: str, - additional_args: list[str], - network: str, - ) -> str: - try: - run_cmd = [sys.executable, scenario_path] + additional_args + [f"--network={network}"] - self.logger.debug(f"Running {run_cmd}") - proc = subprocess.Popen( - run_cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - t = threading.Thread(target=lambda: self.scenario_log(proc)) - t.daemon = True - t.start() - cmd = f"{scenario_name} {' '.join(additional_args)}".strip() - self.running_scenarios.append( - { - "pid": proc.pid, - "cmd": cmd, - "proc": proc, - "network": network, - } - ) - return f"Running scenario {scenario_name} with PID {proc.pid} in the background..." 
- except Exception as e: - msg = f"Error running scenario: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def scenarios_run_file( - self, - scenario_base64: str, - scenario_name: str, - additional_args: list[str], - network: str = "warnet", - ) -> str: - # Extract just the filename without path and extension - with tempfile.NamedTemporaryFile( - prefix=scenario_name, - suffix=".py", - delete=False, - ) as temp_file: - scenario_path = temp_file.name - temp_file.write(base64.b64decode(scenario_base64)) - - if not os.path.exists(scenario_path): - raise ServerError(f"Scenario not found at {scenario_path}.") - - return self._start_scenario(scenario_path, scenario_name, additional_args, network) - - def scenarios_run( - self, scenario: str, additional_args: list[str], network: str = "warnet" - ) -> str: - # Use importlib.resources to get the scenario path - scenario_package = "warnet.scenarios" - scenario_filename = f"{scenario}.py" - - # Ensure the scenario file exists within the package - with importlib.resources.path(scenario_package, scenario_filename) as scenario_path: - scenario_path = str(scenario_path) # Convert Path object to string - - if not os.path.exists(scenario_path): - raise ServerError(f"Scenario {scenario} not found at {scenario_path}.") - - return self._start_scenario(scenario_path, scenario, additional_args, network) - - def scenarios_stop(self, pid: int) -> str: - matching_scenarios = [sc for sc in self.running_scenarios if sc["pid"] == pid] - if matching_scenarios: - matching_scenarios[0]["proc"].terminate() # sends SIGTERM - # Remove from running list - self.running_scenarios = [sc for sc in self.running_scenarios if sc["pid"] != pid] - return f"Stopped scenario with PID {pid}." 
- else: - msg = f"Could not find scenario with PID {pid}" - self.logger.error(msg) - raise ServerError(message=msg) - - def scenarios_list_running(self) -> list[dict]: - running = [ - { - "pid": sc["pid"], - "cmd": sc["cmd"], - "active": sc["proc"].poll() is None, - "return_code": sc["proc"].returncode, - "network": sc["network"], - } - for sc in self.running_scenarios - ] - return running - - def network_up(self, network: str = "warnet") -> str: - def thread_start(server: Server, network): - try: - wn = server.get_warnet(network) - wn.apply_network_conditions() - wn.wait_for_health() - server.logger.info( - f"Successfully resumed warnet named '{network}' from config dir {wn.config_dir}" - ) - except Exception as e: - trace = traceback.format_exc() - server.logger.error(f"Unhandled exception bringing network up: {e}\n{trace}") - - try: - t = threading.Thread(target=lambda: thread_start(self, network)) - t.daemon = True - t.start() - return "Resuming warnet..." - except Exception as e: - msg = f"Error bring up warnet: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def network_from_file( - self, graph_file: str, force: bool = False, network: str = "warnet" - ) -> dict: - """ - Run a warnet with topology loaded from a - """ - - def thread_start(server: Server, network): - with server.image_build_lock: - try: - wn = server.get_warnet(network) - wn.generate_deployment() - wn.warnet_build() - wn.warnet_up() - wn.wait_for_health() - wn.apply_network_conditions() - self.logger.info("Warnet started successfully") - except Exception as e: - trace = traceback.format_exc() - self.logger.error(f"Unhandled exception starting warnet: {e}\n{trace}") - - config_dir = gen_config_dir(network) - if config_dir.exists(): - if force: - shutil.rmtree(config_dir) - else: - message = f"Config dir {config_dir} already exists, not overwriting existing warnet without --force" - self.logger.error(message) - raise ServerError(message=message, 
code=CONFIG_DIR_ALREADY_EXISTS) - - try: - self.warnets[network] = Warnet.from_graph_file( - graph_file, - config_dir, - network, - ) - t = threading.Thread(target=lambda: thread_start(self, network)) - t.daemon = True - t.start() - return self.warnets[network]._warnet_dict_representation() - except Exception as e: - msg = f"Error bring up warnet: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def network_down(self, network: str = "warnet") -> str: - """ - Stop all containers in . - """ - wn = self.get_warnet(network) - try: - wn.warnet_down() - return "Stopping warnet" - except Exception as e: - msg = f"Error bringing warnet down: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def network_info(self, network: str = "warnet") -> dict: - """ - Get info about a warnet network named - """ - wn = self.get_warnet(network) - return wn._warnet_dict_representation() - - def network_status(self, network: str = "warnet") -> list[dict]: - """ - Get running status of a warnet network named - """ - try: - wn = self.get_warnet(network) - stats = [] - for tank in wn.tanks: - status = {"tank_index": tank.index, "bitcoin_status": tank.status.name.lower()} - if tank.lnnode is not None: - status["lightning_status"] = tank.lnnode.status.name.lower() - if tank.lnnode.cb is not None: - status["circuitbreaker_status"] = tank.lnnode.cb_status.name.lower() - stats.append(status) - return stats - except Exception as e: - msg = f"Error getting network status: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def network_connected(self, network: str = "warnet") -> bool: - """ - Indicate whether all of the graph edges are connected in - """ - try: - wn = self.get_warnet(network) - return wn.network_connected() - except Exception as e: - self.logger.error(f"{e}") - return False - - def generate_deployment(self, graph_file: str, network: str = "warnet") -> str: - """ - Generate the deployment file for a graph file - """ 
- try: - config_dir = gen_config_dir(network) - if config_dir.exists(): - message = f"Config dir {config_dir} already exists, not overwriting existing warnet without --force" - self.logger.error(message) - raise ServerError(message=message, code=CONFIG_DIR_ALREADY_EXISTS) - wn = self.get_warnet(network) - wn.generate_deployment() - if not wn.deployment_file or not wn.deployment_file.is_file(): - raise ServerError(f"No deployment file found at {wn.deployment_file}") - with open(wn.deployment_file) as f: - return f.read() - except Exception as e: - msg = f"Error generating deployment file: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def logs_grep( - self, pattern: str, network: str = "warnet", k8s_timestamps=False, no_sort=False - ) -> str: - """ - Grep the logs from the fluentd container for a regex pattern - """ - try: - wn = self.get_warnet(network) - return wn.container_interface.logs_grep(pattern, network, k8s_timestamps, no_sort) - except Exception as e: - msg = f"Error grepping logs using pattern {pattern}: {e}" - self.logger.error(msg) - raise ServerError(message=msg) from e - - def exec_run(self, index: int, service_type: int, cmd: str, network: str = "warnet") -> str: - """ - Execute an arbitrary command in an arbitrary container, - identified by tank index and ServiceType - """ - wn = self.get_warnet(network) - return wn.container_interface.exec_run(index, ServiceType(service_type), cmd) - - -def run_server(): - parser = argparse.ArgumentParser(description="Run the server") - parser.add_argument( - "--dev", action="store_true", help="Run in development mode with debug enabled" - ) - args = parser.parse_args() - debug_mode = args.dev - server = Server() - server.app.run(host="0.0.0.0", port=WARNET_SERVER_PORT, debug=debug_mode) - - -if __name__ == "__main__": - run_server() diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index f6bc9ca94..dff595156 100644 --- a/src/warnet/cli/bitcoin.py +++ 
b/src/warnet/cli/bitcoin.py @@ -3,7 +3,7 @@ import click -from .util import run_command +from .k8s import run_command @click.group(name="bitcoin") @@ -20,11 +20,15 @@ def rpc(node, method, params, network): """ Call bitcoin-cli [params] on in [network] """ + print(_rpc(node, method, params, network)) + + +def _rpc(node, method, params, network): if params: cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" else: cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" - run_command(cmd) + return run_command(cmd) @bitcoin.command() @@ -35,7 +39,7 @@ def debug_log(node, network): Fetch the Bitcoin Core debug log from in [network] """ cmd = f"kubectl logs warnet-tank-{node}" - run_command(cmd) + print(run_command(cmd)) # @bitcoin.command() @@ -62,7 +66,7 @@ def grep_logs(pattern, network, show_k8s_timestamps, no_sort): # Get all pods in the namespace command = f"kubectl get pods -n {network} -o json" - pods_json = run_command(command, return_output=True) + pods_json = run_command(command) if pods_json is False: print("Error: Failed to get pods information") @@ -91,7 +95,7 @@ def grep_logs(pattern, network, show_k8s_timestamps, no_sort): # Get logs from the specific container command = f"kubectl logs {pod_name} -c {container_name} -n {network} --timestamps" - logs = run_command(command, return_output=True) + logs = run_command(command) if logs is not False: # Process logs diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 9a9439184..2b7bdbfee 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -1,6 +1,8 @@ import os import subprocess from importlib.resources import files +import json +from typing import Any, Dict from kubernetes import client, config from kubernetes.dynamic import DynamicClient @@ -32,44 +34,54 @@ def get_mission(mission): return crew -def run_command(command, 
stream_output=False, env=None): - # Merge the current environment with the provided env - full_env = os.environ.copy() - if env: - # Convert all env values to strings (only a safeguard) - env = {k: str(v) for k, v in env.items()} - full_env.update(env) - - if stream_output: - process = subprocess.Popen( - ["/bin/bash", "-c", command], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - text=True, - bufsize=1, - universal_newlines=True, - env=full_env, - ) - - for line in iter(process.stdout.readline, ""): - print(line, end="") - - process.stdout.close() - return_code = process.wait() - - if return_code != 0: - print(f"Command failed with return code {return_code}") - return False - return True - else: - result = subprocess.run( - command, shell=True, capture_output=True, text=True, executable="/bin/bash" - ) - if result.returncode != 0: - print(f"Error: {result.stderr}") - return False - print(result.stdout) - return True +def get_edges(): + sclient = get_static_client() + configmap = sclient.read_namespaced_config_map(name="edges", namespace="warnet") + return json.loads(configmap.data["data"]) + + +def run_command(command) -> str: + result = subprocess.run( + command, shell=True, capture_output=True, text=True, executable="/bin/bash" + ) + if result.returncode != 0: + raise Exception(result.stderr) + return result.stdout + + +def stream_command(command, env=None) -> bool: + process = subprocess.Popen( + ["/bin/bash", "-c", command], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1, + universal_newlines=True, + ) + + for line in iter(process.stdout.readline, ""): + print(line, end="") + + process.stdout.close() + return_code = process.wait() + + if return_code != 0: + print(f"Command failed with return code {return_code}") + return False + return True + + +def create_kubernetes_object( + kind: str, metadata: Dict[str, Any], spec: Dict[str, Any] = None +) -> Dict[str, Any]: + obj = { + "apiVersion": "v1", + "kind": kind, + 
"metadata": metadata, + } + if spec is not None: + obj["spec"] = spec + return obj def create_namespace() -> dict: diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 3bf410718..12d396228 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -1,3 +1,4 @@ +import json import tempfile from importlib.resources import files from pathlib import Path @@ -7,14 +8,18 @@ import networkx as nx import yaml from rich import print - +from .bitcoin import _rpc from .k8s import ( apply_kubernetes_yaml, create_namespace, delete_namespace, deploy_base_configurations, run_command, + stream_command, set_kubectl_context, + create_kubernetes_object, + get_edges, + get_mission ) DEFAULT_GRAPH_FILE = files("graphs").joinpath("default.graphml") @@ -62,19 +67,6 @@ def generate_node_config(node: int, data: dict, graph: nx.Graph) -> str: return f"{base_config}\n{node_specific_config}\n" + "\n".join(addnode_configs) -def create_kubernetes_object( - kind: str, metadata: Dict[str, Any], spec: Dict[str, Any] = None -) -> Dict[str, Any]: - obj = { - "apiVersion": "v1", - "kind": kind, - "metadata": metadata, - } - if spec is not None: - obj["spec"] = spec - return obj - - def create_node_deployment(node: int, data: dict) -> Dict[str, Any]: image = data.get("image", "bitcoindevproject/bitcoin:27.0") version = data.get("version", "27.0") @@ -84,7 +76,11 @@ def create_node_deployment(node: int, data: dict) -> Dict[str, Any]: metadata={ "name": f"warnet-tank-{node}", "namespace": "warnet", - "labels": {"app": "warnet", "mission": "tank", "index": str(node)}, + "labels": {"app": "warnet", "mission": "tank"}, + "annotations": { + "index": node, + "data": json.dumps(data) + } }, spec={ "containers": [ @@ -136,6 +132,25 @@ def create_config_map(node: int, config: str) -> Dict[str, Any]: return config_map +def create_edges_map(graph): + edges = [] + for src, dst, data in graph.edges(data=True): + edges.append({ + "src": src, + "dst": dst, + "data": data + }) + 
config_map = create_kubernetes_object( + kind="ConfigMap", + metadata={ + "name": "edges", + "namespace": "warnet", + }, + ) + config_map["data"] = {"data": json.dumps(edges)} + return config_map + + def generate_kubernetes_yaml(graph: nx.Graph) -> List[Dict[str, Any]]: kubernetes_objects = [create_namespace()] @@ -148,6 +163,7 @@ def generate_kubernetes_yaml(graph: nx.Graph) -> List[Dict[str, Any]]: create_node_service(node), ] ) + kubernetes_objects.append(create_edges_map(graph)) return kubernetes_objects @@ -164,7 +180,7 @@ def setup_logging_helm() -> bool: ] for command in helm_commands: - if not run_command(command, stream_output=True): + if not stream_command(command): print(f"Failed to run Helm command: {command}") return False return True @@ -213,9 +229,33 @@ def down(network: str): def logs(follow: bool): """Get Kubernetes logs from the RPC server""" command = f"kubectl logs rpc-0{' --follow' if follow else ''}" - run_command(command, stream_output=follow) + stream_command(command) +@network.command() +def connected(): + """Determine if all p2p conenctions defined in graph are established""" + tanks = get_mission("tank") + edges = get_edges() + for tank in tanks: + # Get actual + index = tank.metadata.annotations["index"] + peerinfo = json.loads(_rpc(int(index), "getpeerinfo", "", "warnet")) + manuals = 0 + for peer in peerinfo: + if peer["connection_type"] == "manual": + manuals += 1 + # Get expected + init_peers = sum(1 for edge in edges if edge["src"] == index) + print(f"Tank {index} connections: expected={init_peers} actual={manuals}") + # Even if more edges are specifed, bitcoind only allows + # 8 manual outbound connections + if min(8, init_peers) > manuals: + print("Network not connected") + return False + print("Network connected") + return True + @network.command() @click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) @click.option("--output", "-o", default="warnet-deployment.yaml", help="Output YAML file") From 
bd7ca0aa57d2115eb6a615d1337be8c8c757407a Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 12:06:17 +0200 Subject: [PATCH 031/710] fix more stream_command --- src/warnet/cli/k8s.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 2b7bdbfee..a234f0f9e 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -93,7 +93,7 @@ def set_kubectl_context(namespace: str): Set the default kubectl context to the specified namespace. """ command = f"kubectl config set-context --current --namespace={namespace}" - result = run_command(command, stream_output=True) + result = stream_command(command) if result: print(f"Kubectl context set to namespace: {namespace}") else: @@ -109,7 +109,7 @@ def deploy_base_configurations(): for bconfig in base_configs: command = f"kubectl apply -f {WAR_MANIFESTS}/{bconfig}" - if not run_command(command, stream_output=True): + if not stream_command(command): print(f"Failed to apply {bconfig}") return False return True @@ -117,9 +117,9 @@ def deploy_base_configurations(): def apply_kubernetes_yaml(yaml_file: str): command = f"kubectl apply -f {yaml_file}" - return run_command(command, stream_output=True) + return stream_command(command) def delete_namespace(namespace: str): command = f"kubectl delete namespace {namespace}" - return run_command(command, stream_output=True) + return stream_command(command) From f2b4bb04d48f58ffae210bb9d45ff1fbd929ff70 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 12:21:09 +0200 Subject: [PATCH 032/710] delete more unused stuff --- resources/images/rpc/Dockerfile_dev | 29 ---- resources/images/rpc/Dockerfile_prod | 29 ---- .../images/rpc/Dockerfile_rpc.dockerignore | 16 --- resources/images/rpc/entrypoint.sh | 38 ------ resources/images/sidecar/Dockerfile | 12 -- resources/images/tor/Dockerfile_tor_da | 18 --- resources/images/tor/Dockerfile_tor_relay | 17 --- 
resources/images/tor/tor-entrypoint.sh | 7 - .../images/tor/tor-keys/authority_certificate | 45 ------- .../tor/tor-keys/authority_identity_key | 41 ------ .../images/tor/tor-keys/authority_signing_key | 27 ---- .../tor/tor-keys/ed25519_master_id_public_key | Bin 64 -> 0 bytes .../tor/tor-keys/ed25519_master_id_secret_key | Bin 96 -> 0 bytes .../images/tor/tor-keys/ed25519_signing_cert | Bin 172 -> 0 bytes .../tor/tor-keys/ed25519_signing_secret_key | Bin 96 -> 0 bytes resources/images/tor/tor-keys/secret_id_key | 15 --- .../images/tor/tor-keys/secret_onion_key | 15 --- .../images/tor/tor-keys/secret_onion_key_ntor | Bin 96 -> 0 bytes resources/images/tor/torrc | 34 ----- resources/images/tor/torrc.da | 41 ------ resources/images/tor/torrc.relay | 37 ----- resources/manifests/warnet-rpc-service.yaml | 21 --- .../manifests/warnet-rpc-statefulset-dev.yaml | 57 -------- .../manifests/warnet-rpc-statefulset.yaml | 23 ---- resources/scripts/build-k8s-rpc.sh | 45 ------- resources/scripts/deploy.sh | 50 ------- resources/scripts/startd.sh | 20 --- resources/scripts/stop.sh | 14 -- src/warnet/cli/cluster.py | 127 ------------------ src/warnet/cli/util.py | 45 ------- 30 files changed, 823 deletions(-) delete mode 100644 resources/images/rpc/Dockerfile_dev delete mode 100644 resources/images/rpc/Dockerfile_prod delete mode 100644 resources/images/rpc/Dockerfile_rpc.dockerignore delete mode 100755 resources/images/rpc/entrypoint.sh delete mode 100644 resources/images/sidecar/Dockerfile delete mode 100644 resources/images/tor/Dockerfile_tor_da delete mode 100644 resources/images/tor/Dockerfile_tor_relay delete mode 100755 resources/images/tor/tor-entrypoint.sh delete mode 100644 resources/images/tor/tor-keys/authority_certificate delete mode 100644 resources/images/tor/tor-keys/authority_identity_key delete mode 100644 resources/images/tor/tor-keys/authority_signing_key delete mode 100644 resources/images/tor/tor-keys/ed25519_master_id_public_key delete mode 100644 
resources/images/tor/tor-keys/ed25519_master_id_secret_key delete mode 100644 resources/images/tor/tor-keys/ed25519_signing_cert delete mode 100644 resources/images/tor/tor-keys/ed25519_signing_secret_key delete mode 100644 resources/images/tor/tor-keys/secret_id_key delete mode 100644 resources/images/tor/tor-keys/secret_onion_key delete mode 100644 resources/images/tor/tor-keys/secret_onion_key_ntor delete mode 100644 resources/images/tor/torrc delete mode 100644 resources/images/tor/torrc.da delete mode 100644 resources/images/tor/torrc.relay delete mode 100644 resources/manifests/warnet-rpc-service.yaml delete mode 100644 resources/manifests/warnet-rpc-statefulset-dev.yaml delete mode 100644 resources/manifests/warnet-rpc-statefulset.yaml delete mode 100755 resources/scripts/build-k8s-rpc.sh delete mode 100755 resources/scripts/deploy.sh delete mode 100755 resources/scripts/startd.sh delete mode 100755 resources/scripts/stop.sh delete mode 100644 src/warnet/cli/cluster.py diff --git a/resources/images/rpc/Dockerfile_dev b/resources/images/rpc/Dockerfile_dev deleted file mode 100644 index 48f348228..000000000 --- a/resources/images/rpc/Dockerfile_dev +++ /dev/null @@ -1,29 +0,0 @@ -# Use an official Python runtime as the base image -FROM python:3.12-slim - -# Install procps, which includes pgrep -RUN apt-get update && \ - apt-get install -y procps openssh-client && \ - rm -rf /var/lib/apt/lists/* - -# Install `uv` package installer (https://github.com/astral-sh/uv) -RUN pip install uv - -# Set the working directory in the container -WORKDIR /root/warnet - -# Make port 9276 available to the world outside this container -# Change the port if your server is running on a different port -EXPOSE 9276 - -# Instead of copying the source code and installing dependencies at build time, -# we defer this to the entrypoint script for dev mode to enable hot-reloading. 
- -# Copy the entrypoint script into the container -COPY entrypoint.sh / - -# Set the entrypoint script to run when the container launches -ENTRYPOINT ["/entrypoint.sh"] - -# Default command -CMD ["warnet", "--dev"] diff --git a/resources/images/rpc/Dockerfile_prod b/resources/images/rpc/Dockerfile_prod deleted file mode 100644 index f34f0833f..000000000 --- a/resources/images/rpc/Dockerfile_prod +++ /dev/null @@ -1,29 +0,0 @@ -# Use an official Python runtime as the base image -FROM python:3.12-slim - -# Install procps, which includes pgrep -RUN apt-get update && \ - apt-get install -y procps openssh-client && \ - rm -rf /var/lib/apt/lists/* - -# Install `uv` package installer (https://github.com/astral-sh/uv) -RUN pip install uv - -# Set the working directory in the container -WORKDIR /root/warnet - -# Get better caching by installing before copying code -COPY requirements.txt . -RUN uv pip install --system --no-cache -r requirements.txt - -# Copy the source directory contents into the container -COPY . /root/warnet -# Install Warnet scripts -RUN uv pip install --system . 
- -# Make port 9276 available to the world outside this container -# Change the port if your server is running on a different port -EXPOSE 9276 - -# Run server.py when the container launches -CMD ["warnet"] diff --git a/resources/images/rpc/Dockerfile_rpc.dockerignore b/resources/images/rpc/Dockerfile_rpc.dockerignore deleted file mode 100644 index d19d3d622..000000000 --- a/resources/images/rpc/Dockerfile_rpc.dockerignore +++ /dev/null @@ -1,16 +0,0 @@ -.git - -**/__pycache__ -.trunk -.venv -build -dist -**/*.egg-info -**/*.egg/ -**/*.pyc -**/*.swp - -frontend -.ruff_cache -.idea -.ignored_extras diff --git a/resources/images/rpc/entrypoint.sh b/resources/images/rpc/entrypoint.sh deleted file mode 100755 index bb9d0e7d2..000000000 --- a/resources/images/rpc/entrypoint.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -set -e - -SOURCE_DIR="/root/warnet" -MAX_ATTEMPTS=30 -SLEEP_DURATION=1 - -echo "Checking for mounted source code at ${SOURCE_DIR}..." - -check_setup_toml() { - if [ -f "${SOURCE_DIR}/pyproject.toml" ]; then - return 0 - else - return 1 - fi -} - -attempt=1 -while ! check_setup_toml; do - echo "Waiting for source code to be mounted (attempt: ${attempt}/${MAX_ATTEMPTS})..." - sleep ${SLEEP_DURATION} - ((attempt++)) - - if [ ${attempt} -gt ${MAX_ATTEMPTS} ]; then - echo "Source code not mounted after ${MAX_ATTEMPTS} attempts. Proceeding without installation." - break - fi -done - -# If setup.py is found, install the package -if check_setup_toml; then - echo "Installing package from ${SOURCE_DIR}..." - cd ${SOURCE_DIR} - uv pip install --system --no-cache -e . 
-fi - -# Execute the CMD from the Dockerfile -exec "$@" diff --git a/resources/images/sidecar/Dockerfile b/resources/images/sidecar/Dockerfile deleted file mode 100644 index ec7f6a787..000000000 --- a/resources/images/sidecar/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM alpine:latest - -RUN apk add openssh - -RUN echo "root:" | chpasswd - -RUN ssh-keygen -A - -CMD ["/usr/sbin/sshd", "-D", \ - "-o", "PasswordAuthentication=yes", \ - "-o", "PermitEmptyPasswords=yes", \ - "-o", "PermitRootLogin=yes"] \ No newline at end of file diff --git a/resources/images/tor/Dockerfile_tor_da b/resources/images/tor/Dockerfile_tor_da deleted file mode 100644 index bbc2ecd2d..000000000 --- a/resources/images/tor/Dockerfile_tor_da +++ /dev/null @@ -1,18 +0,0 @@ -FROM debian:bookworm-slim -ENV DEBIAN_FRONTEND=noninteractive - -RUN apt-get update && apt-get install -y tor - -RUN mkdir -p /home/debian-tor/.tor/keys -RUN chown -R debian-tor:debian-tor /home/debian-tor -RUN mkdir -p /var/log/tor -RUN chown -R debian-tor:debian-tor /var/log/tor - -COPY tor/tor-keys /home/debian-tor/.tor/keys -RUN chown -R debian-tor:debian-tor /home/debian-tor/.tor/keys -COPY tor/torrc.da /etc/tor/torrc - -EXPOSE 9050 - -USER debian-tor -CMD ["tor", "-f", "/etc/tor/torrc"] diff --git a/resources/images/tor/Dockerfile_tor_relay b/resources/images/tor/Dockerfile_tor_relay deleted file mode 100644 index bf03346a4..000000000 --- a/resources/images/tor/Dockerfile_tor_relay +++ /dev/null @@ -1,17 +0,0 @@ -FROM debian:bookworm-slim -ENV DEBIAN_FRONTEND=noninteractive - -RUN apt-get update && apt-get install -y tor iproute2 gosu - -RUN mkdir -p /home/debian-tor/.tor/keys -RUN chown -R debian-tor:debian-tor /home/debian-tor -RUN mkdir -p /var/log/tor -RUN chown -R debian-tor:debian-tor /var/log/tor - -COPY tor/torrc.relay /etc/tor/torrc - -EXPOSE 9050 - -COPY tor/tor-entrypoint.sh /entrypoint.sh -ENTRYPOINT /entrypoint.sh -CMD ["tor", "-f", "/etc/tor/torrc"] diff --git a/resources/images/tor/tor-entrypoint.sh 
b/resources/images/tor/tor-entrypoint.sh deleted file mode 100755 index bebaef89b..000000000 --- a/resources/images/tor/tor-entrypoint.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -e - -echo "Address $(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)" >> /etc/tor/torrc -# mkdir -p /home/debian-tor/.tor/keys -# chown -R debian-tor:debian-tor /home/debian-tor -gosu debian-tor tor -f /etc/tor/torrc diff --git a/resources/images/tor/tor-keys/authority_certificate b/resources/images/tor/tor-keys/authority_certificate deleted file mode 100644 index 4c4bcaad9..000000000 --- a/resources/images/tor/tor-keys/authority_certificate +++ /dev/null @@ -1,45 +0,0 @@ -dir-key-certificate-version 3 -fingerprint 15E09A6BE3619593076D8324A2E1DBEEAD4539CD -dir-key-published 2023-09-08 18:28:10 -dir-key-expires 2025-09-08 18:28:10 -dir-identity-key ------BEGIN RSA PUBLIC KEY----- -MIIBigKCAYEAs1eKRRP+mWy2XpLbkY3dPkEfdKIfPMDDiG3o/Xu0c3fin1aJ32uG -BY3PGtwS2KQZHEJETjSaACq+2x9+fb4RV5bWhaptbt6l7dvDn15NbEklnkV3sx86 -VxiKqybUp5IQ3cNkM0AxUmQ1M/N+zzIe95T1XplaHVBSQN4xhJgYlDkDkNexIHpP -0uFxjO+2hro3cMRotjqR6xZw5OWKVJh5Is8jiSUs57xomeUy5HaUsADw46b88Ff9 -iVfTP975jzuFF7H0kKOxRpIZGRnfnuDdAtYXWHYXtIVwKBdSeHeK/N53++kxtGET -AcyWoHqO2xlvhtPbiKLHkwq0nNFFfwnhgJJSWHqYs6YI300pEMRHJMFWNsZwgwVE -dplmxnOLv9/TnJsD06d+kc05/JTfZXE/3JFs4rMci0jIhMlw+HtFfRwpDMjGI00u -VyADI0DMYTFWr+wFWrhs5N17BdQVSZUZlfxL5trZFdQpUZhxuKJowNLV1EdWzHFJ -srwV+C0LLWr/AgMBAAE= ------END RSA PUBLIC KEY----- -dir-signing-key ------BEGIN RSA PUBLIC KEY----- -MIIBCgKCAQEAyARJ1Nano6bZsTf3UplBhaP8BfrhvDLrJmmk8x4sAot0aHPG/eOH -qGak3y3CM1I+uxozyE58w9mBOuueUIzZelUZVjgg91dsqT5/3lIYEhB4riV29Of0 -AJeh2uibEVrv1ecXo5HYFKEcCHMRTvZaWIWSKjV6TPqPbpXhDBwIcZ+/tHpAml8h -CfGMFRYIEmisNL9xjoNU1R4Iyu07xQw85+xeMU/9UJgsXnvkqkAPbAhwxZq9/8yj -/9V4jcE5NR0KdDuPblEcD5ZNjMckUeTzuVDLgdsazOROD05zaTx8kP1UixJdon8Z -oJ/fsNWsgpzNv6ns8BIwwEAOnd7seXlmfQIDAQAB ------END RSA PUBLIC KEY----- -dir-key-crosscert ------BEGIN ID SIGNATURE----- 
-DJzp0QmipBz3IJ6zWziyBEkNBPA5J6QDhq2XDFNjdq2RYcOh1hZ1D089587wP3Xg -FoQehBDQccZCrmerEegdZ89dk+QXeNWrGGVpRwp7v+ok7lRPooC1IV3BIHcLs0BQ -fm7d3kYByVl1IaY7D9mpKG6COS/WOrKEnxp75YW18KEH7oIA60c395DfEkUZvDWt -a2ba2eBizM76cFUknQNlExWkw32DNtj1Axz9QyS6IQmyGxvlA1kWL6hiIPAV16NS -E5FcU/wQHL7a7tqAM3UkuLKT3nhcbdYGcaYpbzU/4jF61bzm7ETcqqL40EMQg5Hs -y3kcXZ4ItPFBn/LISp/zCg== ------END ID SIGNATURE----- -dir-key-certification ------BEGIN SIGNATURE----- -e9OQlThM7Y1jBIvZHYsm00ZcCR4L0JRGnzPtZhPtrczAi7dT7gdLhJZSYY5BM1VU -EB24flAJXeP03BtnOuFOURaazvW7J154sVHGQf96OyuCjSOjTlDNQLc6kNL1f0WD -bIXoJ0mPfRJzGX5NmGUa3KmW7/PgNO49VJSCuQQpBmC2qzQBPTCkATehJxCfiB8p -B6toi/ODKmXtFce1J0K9TRmAEIhmA5jzxqd3JISgGx0iP1iXmnMPHESXMS8QJ13e -U4t46NbobZDCAk7xkF/DNjuu0ZSV/IFg7EbB7f/qdIGvvK9bu7Esq5FoVr8sFZ50 -jxeNBEJgNk7Z/DwmNyBODqUfWVSnOydaqRCAxyHIbHGJGHLXKOOdym4sqOBe1O4+ -lyXjp/iIcrTN8rCoKFI9uT4DZj9rzzRv2tzQYl7Iqlt1fImaPN4PAnFpG/LOMrxe -v8S85DhkLpzl5uhojmVKAp810T9ege5MqXXGMDsisUtCyXDtqeQE6xqBDvydTm1m ------END SIGNATURE----- diff --git a/resources/images/tor/tor-keys/authority_identity_key b/resources/images/tor/tor-keys/authority_identity_key deleted file mode 100644 index e3b484bae..000000000 --- a/resources/images/tor/tor-keys/authority_identity_key +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN ENCRYPTED PRIVATE KEY----- -MIIHKjAcBgoqhkiG9w0BDAEDMA4ECBw6S2JG/GCKAgIIAASCBwgnCUGiqRPi2GZW -UBCZMd3jnzhts341Ry1AfHZgpn+kKa1gDbkTbW6aqEsmQJS/janksZnw2W+446fE -01JOo1tQ0OBdWbd++lbDv3sQLVHX4xDVB7HDnUG6ddrnTq2Dc2YS99A6E6Ss9mgp -Ua4XLFyB6gXp3QOdO5QT6Q50DGmGind2xmjqDO+fhupv/dXHh/DWgwhxFMHmqb+y -a7HTZuHOVsMDc/a6ZrGIq85l3NQIWm8+kXcNTHZidG3n6ydakZIvV6Jzh+1R1rEa -9TO9OZDUTVj2PHO95WCVJLWB7JmQINl1VZkEtcvz+LcyIegW3zr9b10026eWe0sj -ymK+CE9hh9Ia2JJA7KZOKqvgzZrWSKPF4Bu1BIRlnJ28iKmUzh25Fq28P+T0m+r/ -qvyuDxmmaDOUetUYXT8YRNd3Jfh6Fb+JNu6E35tyBloNDwQNB/WHgZiGpJ+TRSJr -7I+9v2pWZovI/7TwcZwxrLeHBRX56SD4wp26Ac7yD9RZk138EBPzHtuRww4Yo8XY -FJ0HFX/kGoFJo0GaKaWNMxcFEnefI3KxJB/fYcawTnkHFB/u8LqtjJ7n40zMsyib 
-jQhBQqmJ5asEarOQRrBwjRi59CvA7GHJEsl+WFTrMcpaL/UpPrAFYRtIHKVHorcI -iqtt8vVASE8Y9dfArA67FwPEhemwvVv1yPGBMewxvJkkMoNHU2NMd7lT8tQEGs9B -kTamf33NZcRjfoBw2apK2vxP8WqiardgzFZW1zNvKQCbsCcQodtjKNegh0AcVOZr -7rs2dX8dK02OPJ66/MkButMvOzxjf9Lou7nUDxo4zBE4LkWmy3gNtZwXoAHfWd60 -GNGKLyw9cK5hjEMfJobt3u/i2pRKsHxc/ZKv+aOCp8U6q/jdLW2uUYuw2bL0LVUD -K4Yu4iEpWYgQA1kXJHOxh4+3iZlmEQhF+PDD1w+M+5kysuOn0ZjG4jEwa1umVyjq -DPw59qrzG7U5ud6ZWjAys5OMM54tYFTbiRtwNkTIFZu7/gUHoPpiBSfD5qGxMcUw -ZC2NoLB6Z0ijiQLJcU52xmFlcipV6GCYAPcJOumGw+czPurSM5UvMuWb8G+UIer6 -T/iIyXYrhOWtYfGOs7pzNWx3USaZYQblnx+gHmD5LxR7YXRmwqLfadlsRATsw6Qj -q1/0hDWB+j3Ckf+alBzbSmDsX2/b43kskAZRPBpxecG4VW3HwQbDIckzFtM6NWFv -cYP2ENzeNR3Qy+8E87l0ZZPLXWXykH6Dsas94oKesTHN86LSpznODZMIMMjHntzo -IGR82pi6O+01ntcWXeJBhamy4eG0fl9Klfu6wJ3r67pe/9jHlWzl8JaulZ15+Myx -c3e/qtG3emT07diOXo+9ChalIYwvmiL87DlhoqREu+MsoYJ5NgnA6aQQA0BoeIEi -ZRjTzPjx+vNsk2leEoWEb34e+ft7ebgZEdak1zaINJpFeayBMYyFoCtdegb9wBzy -tZMgCjNDCb2hbpcvyXx+0HXPDW4iP7SP92lfaDSP2tgvaxeI+mjDfX2xGTaaAzzJ -wOV6FmwlurwCOZt8uGHJnBNoTWaMumf5oXKmFP4LYskDhto66lEgr0mbj1X3f6NR -8zRKtKxaFKDKQL8ddotamei+TxVajm+lyp349AocCQD8It7h497xi5C1NZPIFdWU -bKAKTuwf+ZWX2vt/Dli0YpObmD+hG+SKU39t9vpznJC6BmWneNkZ7BRabMdDlJPO -h6HSSkcSqkqwsCbjiu73n1Wj0tLUgdqvdEwjZdmGjCm6+tffcrYZZEUVzR8ErcCY -EGq/4tOWuhZc6UdsUh8JBMJAX35xXFDHbJBEMKaKD8flZBcnV+bgDTnTKEAJAcvD -WJgtJWRr0QN+BUXumqh8exzGJJVFbb47qJiCUSu3wm4XKWv15gZgBH0ZjpI/qnHu -QtijZz7pvNx/d6jilo4ph1esIwmmSHUfXF1IFshs6BfRnajpp0d5+44p4k6U2GhB -mv2nhp5iXvCn0v79GV1iO3MNzLYCuOwm0q+YqoFiYnS67BriWqfQdgupnE0iLINE -fD6jAhIgOIQ9GQ7SdmWGtAXNFm1INDxgxTgTbdTQcBkVoTVIefqp5Prgt76qmIrg -03MZqayUA9WIItHaKMXgvoDxnUlI4wWVQ44LQnBBxsIw+Wi6GXlqpjIYQ34ICjEi -xB7+ux/Wv12heEk+VeugvB2+ZKLQoq+dtKyNsgfc+emIPWBfufDS7bg+0g8evCYt -u6e99Mm6RJp9BGaEwAPiQfd03FbAnLJmH2I0U5P5R8h7ec0H01e/flG2wqD+/ejh -keYCQIG3obSCKj7ps0GUY496aaL1OZzqDepPzliBf59sXB3myQw7IYUxGwKenrpD -AO5X54JZORhV3fvT0QE= ------END ENCRYPTED PRIVATE KEY----- diff --git a/resources/images/tor/tor-keys/authority_signing_key 
b/resources/images/tor/tor-keys/authority_signing_key deleted file mode 100644 index 7c4723395..000000000 --- a/resources/images/tor/tor-keys/authority_signing_key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAyARJ1Nano6bZsTf3UplBhaP8BfrhvDLrJmmk8x4sAot0aHPG -/eOHqGak3y3CM1I+uxozyE58w9mBOuueUIzZelUZVjgg91dsqT5/3lIYEhB4riV2 -9Of0AJeh2uibEVrv1ecXo5HYFKEcCHMRTvZaWIWSKjV6TPqPbpXhDBwIcZ+/tHpA -ml8hCfGMFRYIEmisNL9xjoNU1R4Iyu07xQw85+xeMU/9UJgsXnvkqkAPbAhwxZq9 -/8yj/9V4jcE5NR0KdDuPblEcD5ZNjMckUeTzuVDLgdsazOROD05zaTx8kP1UixJd -on8ZoJ/fsNWsgpzNv6ns8BIwwEAOnd7seXlmfQIDAQABAoIBAAm+YGdgfpb5EOfy -cUICP8AgzS1Fu7s/4sHYCdD4cmM8WRMhOBDUWvPamOOwtmIVeq4Bgy4Z7reEEBN8 -o2rKoGnhHTnHRF8wOyr30GGrmksU/NVaSLQlBuIEK6kURZY/7xOP7VBKpvNYUYXd -hHrA2Fqxb72j0HL8DhfbGspiJOIotHMHVKcPN/qb7pEPg9UOlapE8HjZf+G82l0f -CPo3tLSOVCQdn4y4DSFbC/KlwVmvcJYEdBfs/XdjbGN0ytEcLihFgoPKnLDqnl3U -jy5VVL0VO1yY9r7Vq9UmKPfWnwntAE3P5FZv/ZnYSIMNG0JYGKVzSOdco8X3qyTX -69I87LECgYEA/0zcKCzRDOu8gUPAQjttSKeFXoR0uJMonMwiGWm8mhsuB8B+Tzqn -vL3zPSdfjnhhQVmLhhVLOJddKcg8gWECmu3UB/hX9hm/J8sZ355/pdEs+zOkw6WB -MHWXZ+JsqCflGMQKB1GCvnA00IfQnHBC2K6oVkMhSjh6eWU8IrYpU20CgYEAyJCj -C7UOxGFbS/7814j3w0B3niHCzYa0td5aYo8AvT8t6fC6Suba9wqt0qfcGbDGXMpq -O9yL5+SbQohm/nd7brGQfOKzpgqjHhpXctRgvBGqSHpJ0yhKDQJ8L5cfW3JHiArh -fQ2YvgSc03Y3RIRmj4OVfV1647cbLdWAPCDsZVECgYEAhaSzfuhvCseAp15TD5jS -TX08SM0n2NNYKDSICSubykQ+JVq0BD+dPSVmZnXtBMSpjK8WZbtR5C8AWvXyDnw9 -A+NJ4l4zlaXGtksQoUn0YlYMqPdQ4gYKidaUypHx9VjlCcDdyxT1T0GntB3Uq3/s -zkcn4fhEPfkwy8md4EHhgkUCgYARxwg8tGq/q2V9QffFXwWfD+rKYHG05/jCmhfm -3ogRPjVipAzPMNE9znuDzY8r08hxVxu9fJoGDvRYHGEMsyiEskZ9W1bTI+Q7edhA -fGSqpuIyFGzQw6R0rMC3Myz7XRDMFTLRc9ATH7OK5tKVRysUE3S/rPaEkqldEayR -J9XsUQKBgQCBidpMXIxmL0DLaOX45h7l9QWK2g0l6zWhmE1XjJ+fBbkyE1jLcnaN -fPvWJEATyKn/7hbwe+ay+bp04U+jwK37XoRmHCCA+WdNlfkO8qGY+RDDVH62WKS9 -MC5pE3n4didM8kSm1OCDBHWEL3tUPoLs09zHauaqnGFy8KYAN7Y5JA== ------END RSA PRIVATE KEY----- diff --git a/resources/images/tor/tor-keys/ed25519_master_id_public_key 
b/resources/images/tor/tor-keys/ed25519_master_id_public_key deleted file mode 100644 index b05aa494f48a7577c12d4b03c3fd1a7619aae7b6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 64 zcmV-G0Kfk|Js@RdGBq_ZId(BEaCKsAX=6Gdba`-PFd#iW0000W|M*!iNr6+Jscakg W`084K7tz$t8ByZaJZ->B+rPKRp&F9_ diff --git a/resources/images/tor/tor-keys/ed25519_master_id_secret_key b/resources/images/tor/tor-keys/ed25519_master_id_secret_key deleted file mode 100644 index ccc1f80398c211cccc9aab1819561fa1866f5892..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 96 zcmV-m0H6OoJs@RdGBq_ZId(BEb7f<4Wpp|qba`-PFd#iW0001xB_15BiR)IfcOLui zcwfJb2U78I@G=Qomh=M~x;{x-7m=#fG+9KMeBx!^LdS!AZ5M4ZFvla&(_KOZ#yY5x C6edgn diff --git a/resources/images/tor/tor-keys/ed25519_signing_cert b/resources/images/tor/tor-keys/ed25519_signing_cert deleted file mode 100644 index b3736275e2d317b8b3e720fb71f710e36f7e6aa0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 172 zcmV;d08{@xJs@RdGBq_ZId(BEV`Xx5Iv{j;aAhB+rPJtny3C!riU$(g~ONjDi@TJS}qO(bGy*^jU9q&i@s^b@PB?PSU?`F aDh*0XfAy>mx+V1B!|;f{Mw{bh*pUs1FiXP# diff --git a/resources/images/tor/tor-keys/ed25519_signing_secret_key b/resources/images/tor/tor-keys/ed25519_signing_secret_key deleted file mode 100644 index c65350151b2fc412de5aa52f37f7e7b6af2e0ba8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 96 zcmV-m0H6OoJs@RdGBq_ZId(BEb7f<4Wpp|qba`-PG$1`a0001x^xD=qLU`+hR8rCK zUnKkMRi1>0(wM))lM#i>^v-1vnWXXeeJGqh63&&$luQ?RvVK!A#pw|0qD;SP%8FU=}!6+hfR7GqP+mz(ZT tV75K#Rc!tF2k}~og?r4-8az2?yTdPjr{EOkfXOvq+TSK{MRA?<0sxq+BM|@q diff --git a/resources/images/tor/torrc b/resources/images/tor/torrc deleted file mode 100644 index e1b3bf675..000000000 --- a/resources/images/tor/torrc +++ /dev/null @@ -1,34 +0,0 @@ -# Common -Log err file /var/log/tor/debug.log -DataDirectory /home/debian-tor/.tor/ -RunAsDaemon 1 -ControlPort 9051 
-CookieAuthentication 1 -CookieAuthFileGroupReadable 1 -DataDirectoryGroupReadable 1 -TestingTorNetwork 1 -ClientUseIPv6 0 -ClientUseIPv4 1 - -# Relay -DirAuthority orport=9001 no-v2 v3ident=15E09A6BE3619593076D8324A2E1DBEEAD4539CD 100.20.15.18:9030 03E942A4F12D85B2CF7CBA4E910F321AE98EC233 -AssumeReachable 1 -ExitRelay 0 - -# Reduce resource usage -CircuitPadding 0 -MaxMemInQueues 10 Mbytes -BridgeRecordUsageByCountry 0 -DirReqStatistics 0 -ExtraInfoStatistics 0 -HiddenServiceStatistics 0 -OverloadStatistics 0 -PaddingStatistics 0 -# BandwidthBurst 10 Mbytes -# BandwidthRate 10 Mbytes -ConstrainedSockets 1 -ConstrainedSockSize 8192 Bytes -NumEntryGuards 1 -NumDirectoryGuards 1 - -# `Address ` will be added by docker_entrypoint.sh diff --git a/resources/images/tor/torrc.da b/resources/images/tor/torrc.da deleted file mode 100644 index a5ef0c893..000000000 --- a/resources/images/tor/torrc.da +++ /dev/null @@ -1,41 +0,0 @@ -# Common -Log err stdout -DataDirectory /home/debian-tor/.tor -RunAsDaemon 0 -ControlPort 9051 -ORPort 9001 IPv4Only -CookieAuthentication 1 -CookieAuthFileGroupReadable 1 -DataDirectoryGroupReadable 1 - -ExitPolicy accept *:* -TestingTorNetwork 1 -ClientUseIPv6 0 -ClientUseIPv4 1 - -# Relay -DirAuthority orport=9001 no-v2 v3ident=15E09A6BE3619593076D8324A2E1DBEEAD4539CD 100.20.15.18:9030 03E942A4F12D85B2CF7CBA4E910F321AE98EC233 -AssumeReachable 1 - -# Directory Authority -DirPort 9030 IPv4Only -AuthoritativeDirectory 1 -V3AuthoritativeDirectory 1 -PathsNeededToBuildCircuits 0.25 -TestingDirAuthVoteExit * -TestingDirAuthVoteHSDir * -V3AuthNIntervalsValid 2 -ContactInfo winston_churchill@warnet.dev - -# Reduce resource usage -MaxMemInQueues 200 Mbytes -BridgeRecordUsageByCountry 0 -DirReqStatistics 0 -ExtraInfoStatistics 0 -HiddenServiceStatistics 0 -OverloadStatistics 0 -PaddingStatistics 0 -ConstrainedSockets 1 -ConstrainedSockSize 8192 Bytes - -Address 100.20.15.18 diff --git a/resources/images/tor/torrc.relay b/resources/images/tor/torrc.relay 
deleted file mode 100644 index 712f51ef3..000000000 --- a/resources/images/tor/torrc.relay +++ /dev/null @@ -1,37 +0,0 @@ -# Common -Log err stdout -DataDirectory /home/debian-tor/.tor -RunAsDaemon 0 -ControlPort 9051 -CookieAuthentication 1 -CookieAuthFileGroupReadable 1 -DataDirectoryGroupReadable 1 -ORPort 9001 -ExitPolicy accept *:* -TestingTorNetwork 1 -ClientUseIPv6 0 -ClientUseIPv4 1 - -# Relay -DirAuthority orport=9001 no-v2 v3ident=15E09A6BE3619593076D8324A2E1DBEEAD4539CD 100.20.15.18:9030 03E942A4F12D85B2CF7CBA4E910F321AE98EC233 -AssumeReachable 1 -PathsNeededToBuildCircuits 0.25 -TestingDirAuthVoteExit * -TestingDirAuthVoteHSDir * -V3AuthNIntervalsValid 2 - -# Reduce resource usage -MaxMemInQueues 64 Mbytes -BridgeRecordUsageByCountry 0 -DirReqStatistics 0 -ExtraInfoStatistics 0 -HiddenServiceStatistics 0 -OverloadStatistics 0 -PaddingStatistics 0 -ConstrainedSockets 1 -ConstrainedSockSize 8192 Bytes -# NumEntryGuards 1 -# NumDirectoryGuards 1 -# UseMicrodescriptors 1 - -# `Address ` will be added by tor-entrypoint.sh diff --git a/resources/manifests/warnet-rpc-service.yaml b/resources/manifests/warnet-rpc-service.yaml deleted file mode 100644 index 4dc390959..000000000 --- a/resources/manifests/warnet-rpc-service.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - namespace: warnet - annotations: - kompose.cmd: kompose convert --controller statefulset - kompose.version: 1.31.2 (a92241f79) - creationTimestamp: null - labels: - io.kompose.service: rpc - name: rpc -spec: - clusterIP: None - ports: - - name: headless - port: 9276 - targetPort: 0 - selector: - io.kompose.service: rpc -status: - loadBalancer: {} diff --git a/resources/manifests/warnet-rpc-statefulset-dev.yaml b/resources/manifests/warnet-rpc-statefulset-dev.yaml deleted file mode 100644 index 91692e35a..000000000 --- a/resources/manifests/warnet-rpc-statefulset-dev.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: rpc - 
namespace: warnet -spec: - serviceName: "rpc" - replicas: 1 - selector: - matchLabels: - io.kompose.service: rpc - template: - metadata: - labels: - io.kompose.service: rpc - spec: - containers: - - name: warnet-rpc - imagePullPolicy: Never - image: warnet/dev - ports: - - containerPort: 9276 - volumeMounts: - - name: source-code - mountPath: /root/warnet - livenessProbe: - # fail (restart) the pod if we can't find pid of warnet - exec: - command: - - /bin/sh - - -c - - | - if pgrep -f warnet > /dev/null; then - exit 0 - else - exit 1 - fi - initialDelaySeconds: 20 - periodSeconds: 5 - failureThreshold: 3 - readinessProbe: - # mark the pod as ready if we can get a 200 response from - # the /-/healthy endpoint on port 9276. - # If we can't, don't send traffic to the pod - httpGet: - path: /-/healthy - port: 9276 - initialDelaySeconds: 1 - periodSeconds: 2 - failureThreshold: 2 - timeoutSeconds: 2 - volumes: - - name: source-code - hostPath: - path: /mnt/src - type: Directory - diff --git a/resources/manifests/warnet-rpc-statefulset.yaml b/resources/manifests/warnet-rpc-statefulset.yaml deleted file mode 100644 index 3d899ab62..000000000 --- a/resources/manifests/warnet-rpc-statefulset.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: rpc - namespace: warnet -spec: - serviceName: "rpc" - replicas: 1 - selector: - matchLabels: - io.kompose.service: rpc - template: - metadata: - labels: - io.kompose.service: rpc - spec: - containers: - - name: warnet-rpc - imagePullPolicy: Always - image: bitcoindevproject/warnet-rpc:latest - ports: - - containerPort: 9276 - diff --git a/resources/scripts/build-k8s-rpc.sh b/resources/scripts/build-k8s-rpc.sh deleted file mode 100755 index a61291291..000000000 --- a/resources/scripts/build-k8s-rpc.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -# Run with e.g.: -# $ DOCKER_REGISTRY=bitcoindevproject/warnet-rpc TAG=0.1 LATEST=1 ./scripts/build-k8s-rpc.sh Dockerfile_prod - -# Fail on 
any step -set -ex - -# Create a new builder to enable building multi-platform images -BUILDER_NAME="warnet-rpc-builder" -docker buildx create --name "$BUILDER_NAME" --use - -# Read DOCKER_REGISTRY and TAG from the environment -: "${DOCKER_REGISTRY?Need to set DOCKER_REGISTRY}" -: "${TAG?Need to set TAG}" -: "${LATEST:=0}" - -# Architectures for building -ARCHS="linux/amd64,linux/arm64" - -# Read Dockerfile from the first argument -DOCKERFILE_PATH=$1 -if [[ ! -f "$DOCKERFILE_PATH" ]]; then - echo "Dockerfile does not exist at the specified path: $DOCKERFILE_PATH" - exit 1 -fi - -# Determine the image tags -IMAGE_FULL_NAME="$DOCKER_REGISTRY:$TAG" -TAGS="--tag $IMAGE_FULL_NAME" - -# If LATEST=1, add the latest tag -if [[ "$LATEST" -eq 1 ]]; then - LATEST_TAG_IMAGE="$DOCKER_REGISTRY:latest" - TAGS="$TAGS --tag $LATEST_TAG_IMAGE" -fi - -# Use Buildx to build the image for the specified architectures and tag it accordingly -docker buildx build --platform "$ARCHS" \ - --file "$DOCKERFILE_PATH" \ - --progress=plain \ - $TAGS \ - . --push - -docker buildx rm "$BUILDER_NAME" diff --git a/resources/scripts/deploy.sh b/resources/scripts/deploy.sh deleted file mode 100755 index f6d74da28..000000000 --- a/resources/scripts/deploy.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash -set -euxo pipefail - - -if [ -z "${WAR_MANIFESTS+x}" ]; then - echo "WAR_MANIFESTS is unset. Please provide a path to warnet manifests." 
- exit 1 -fi - -# Function to check if warnet-rpc container is already running -check_warnet_rpc() { - if kubectl get pods --all-namespaces | grep -q "bitcoindevproject/warnet-rpc"; then - echo "warnet-rpc pod found" - exit 1 - fi -} - -# Deploy base configurations -kubectl apply -f "$WAR_MANIFESTS/namespace.yaml" -kubectl apply -f "$WAR_MANIFESTS/rbac-config.yaml" -kubectl apply -f "$WAR_MANIFESTS/warnet-rpc-service.yaml" - -# Deploy rpc server -if [ -n "${WAR_DEV+x}" ]; then # Dev mode selector - # Build image in local registry - docker build -t warnet/dev -f "$WAR_RPC/Dockerfile_dev" "$WAR_RPC" --load - if [ "$(kubectl config current-context)" = "docker-desktop" ]; then - sed "s?/mnt/src?$(pwd)?g" "$WAR_MANIFESTS/warnet-rpc-statefulset-dev.yaml" | kubectl apply -f - - else # assuming minikube - minikube image load warnet/dev - kubectl apply -f "$WAR_MANIFESTS/warnet-rpc-statefulset-dev.yaml" - fi -else - kubectl apply -f "$WAR_MANIFESTS/warnet-rpc-statefulset.yaml" -fi - -kubectl config set-context --current --namespace=warnet - -# Check for warnet-rpc container -check_warnet_rpc - -until kubectl get pod rpc-0 --namespace=warnet; do - echo "Waiting for server to find pod rpc-0..." - sleep 4 -done - -echo "⏲️ This could take a minute or so." -kubectl wait --for=condition=Ready --timeout=2m pod rpc-0 - -echo Done... diff --git a/resources/scripts/startd.sh b/resources/scripts/startd.sh deleted file mode 100755 index 84343510b..000000000 --- a/resources/scripts/startd.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -euxo pipefail - -if [ $# -eq 0 ]; then - echo "Please provide a path as an argument." 
- exit 1 -fi -RPC_PATH="$1" - -docker build -t warnet/dev -f "$RPC_PATH/Dockerfile_rpc_dev src/warnet/templates/rpc" --load -kubectl apply -f "$RPC_PATH/namespace.yaml" -kubectl apply -f "$RPC_PATH/rbac-config.yaml" -kubectl apply -f "$RPC_PATH/warnet-rpc-service.yaml" -sed "s?/mnt/src?$(pwd)?g" "$RPC_PATH/warnet-rpc-statefulset-dev.yaml" | kubectl apply -f - -kubectl config set-context --current --namespace=warnet - -echo waiting for rpc to come online -kubectl wait --for=condition=Ready --timeout=2m pod rpc-0 - -echo Done... diff --git a/resources/scripts/stop.sh b/resources/scripts/stop.sh deleted file mode 100755 index 7ea07cd1e..000000000 --- a/resources/scripts/stop.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -set -euxo pipefail - -# Delete namespaces -kubectl delete namespace warnet --ignore-not-found -kubectl delete namespace warnet-logging --ignore-not-found - -# Set the context to default namespace -kubectl config set-context --current --namespace=default - -# Delete minikube, if it exists -if command -v minikube &> /dev/null; then - minikube delete || true -fi diff --git a/src/warnet/cli/cluster.py b/src/warnet/cli/cluster.py deleted file mode 100644 index f9a26f230..000000000 --- a/src/warnet/cli/cluster.py +++ /dev/null @@ -1,127 +0,0 @@ -import os -import subprocess -import sys -from importlib.resources import files - -import click - -from .util import run_command - -MANIFEST_PATH = files("manifests") -RPC_PATH = files("images").joinpath("rpc") - -SCRIPTS_PATH = files("scripts") -START_SCRIPT = SCRIPTS_PATH / "start.sh" -DEPLOY_SCRIPT = SCRIPTS_PATH / "deploy.sh" -INSTALL_LOGGING_SCRIPT = SCRIPTS_PATH / "install_logging.sh" -CONNECT_LOGGING_SCRIPT = SCRIPTS_PATH / "connect_logging.sh" - - -@click.group(name="cluster", chain=True) -def cluster(): - """Start, configure and stop a warnet k8s cluster\n - \b - Supports chaining, e.g: - warcli cluster deploy-logging connect-logging - """ - pass - - -@cluster.command() -@click.option("--clean", 
is_flag=True, help="Remove configuration files") -def setup_minikube(clean): - """Configure a local minikube cluster""" - memory = click.prompt( - "How much RAM should we assign to the minikube cluster? (MB)", - type=int, - default=4000, - ) - cpu = click.prompt( - "How many CPUs should we assign to the minikube cluster?", type=int, default=4 - ) - env = {"WAR_MEM": str(memory), "WAR_CPU": str(cpu), "WAR_RPC": RPC_PATH} - run_command(SCRIPTS_PATH / "setup_minikube.sh", stream_output=True, env=env) - - -# TODO: Add a --dev flag to this -@cluster.command() -@click.option("--dev", is_flag=True, help="Remove configuration files") -def deploy(dev: bool): - """Deploy Warnet using the current kubectl-configured cluster""" - env = {"WAR_MANIFESTS": str(MANIFEST_PATH), "WAR_RPC": RPC_PATH} - if dev: - env["WAR_DEV"] = 1 - res = run_command(SCRIPTS_PATH / "deploy.sh", stream_output=True, env=env) - if res: - _port_start_internal() - - -@cluster.command() -def teardown(): - """Stop the warnet server and tear down the cluster""" - run_command(SCRIPTS_PATH / "stop.sh", stream_output=True) - _port_stop_internal() - - -@cluster.command() -def deploy_logging(): - """Deploy logging configurations to the cluster using helm""" - run_command(SCRIPTS_PATH / "install_logging.sh", stream_output=True) - - -@cluster.command() -def connect_logging(): - """Connect kubectl to cluster logging""" - run_command(CONNECT_LOGGING_SCRIPT, stream_output=True) - - -def is_windows(): - return sys.platform.startswith("win") - - -def run_detached_process(command): - if is_windows(): - # For Windows, use CREATE_NEW_PROCESS_GROUP and DETACHED_PROCESS - subprocess.Popen( - command, - shell=True, - stdin=None, - stdout=None, - stderr=None, - close_fds=True, - creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.DETACHED_PROCESS, - ) - else: - # For Unix-like systems, use nohup and redirect output - command = f"nohup {command} > /dev/null 2>&1 &" - subprocess.Popen(command, shell=True, stdin=None, 
stdout=None, stderr=None, close_fds=True) - - print(f"Started detached process: {command}") - - -def _port_start_internal(): - command = "kubectl port-forward svc/rpc 9276:9276" - run_detached_process(command) - print( - "Port forwarding on port 9276 started in the background. Use 'warcli' (or 'kubectl') to manage the warnet." - ) - - -@cluster.command() -def port_start(): - """Port forward (runs as a detached process)""" - _port_start_internal() - - -def _port_stop_internal(): - if is_windows(): - os.system("taskkill /F /IM kubectl.exe") - else: - os.system("pkill -f 'kubectl port-forward svc/rpc 9276:9276'") - print("Port forwarding stopped.") - - -@cluster.command() -def port_stop(): - """Stop the port forwarding process""" - _port_stop_internal() diff --git a/src/warnet/cli/util.py b/src/warnet/cli/util.py index 18ce7c004..d57ab5c55 100644 --- a/src/warnet/cli/util.py +++ b/src/warnet/cli/util.py @@ -19,51 +19,6 @@ SRC_DIR = files("warnet") -def run_command(command, stream_output=False, env=None, return_output=False): - # Merge the current environment with the provided env - full_env = os.environ.copy() - if env: - # Convert all env values to strings (only a safeguard) - env = {k: str(v) for k, v in env.items()} - full_env.update(env) - - if stream_output: - process = subprocess.Popen( - ["/bin/bash", "-c", command], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - text=True, - bufsize=1, - universal_newlines=True, - env=full_env, - ) - output = [] - for line in iter(process.stdout.readline, ""): - print(line, end="") - output.append(line) - process.stdout.close() - return_code = process.wait() - if return_code != 0: - print(f"Command failed with return code {return_code}") - return False if not return_output else "".join(output) - return True if not return_output else "".join(output) - else: - result = subprocess.run( - command, - shell=True, - capture_output=True, - text=True, - executable="/bin/bash", - env=full_env, - ) - if result.returncode != 0: - 
print(f"Error: {result.stderr}") - return False if not return_output else result.stderr - if not return_output: - print(result.stdout) - return True if not return_output else result.stdout - - def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_version: bool): try: # Use nx.MultiDiGraph() so we get directed edges (source->target) From 3058844c48515ca7c4cf335b4f7964e32f73411e Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 12:28:09 +0200 Subject: [PATCH 033/710] more deletes --- src/warnet/cli/rpc.py | 36 ------------------------------------ src/warnet/cli/scenarios.py | 4 ++-- 2 files changed, 2 insertions(+), 38 deletions(-) delete mode 100644 src/warnet/cli/rpc.py diff --git a/src/warnet/cli/rpc.py b/src/warnet/cli/rpc.py deleted file mode 100644 index 4ecd6bada..000000000 --- a/src/warnet/cli/rpc.py +++ /dev/null @@ -1,36 +0,0 @@ -import logging -import sys -from typing import Any - -import requests -from jsonrpcclient.requests import request -from jsonrpcclient.responses import Error, Ok, parse - -WARNET_SERVER_PORT = 9276 - - -class JSONRPCException(Exception): - def __init__(self, code, message): - try: - errmsg = f"{code} {message}" - except (KeyError, TypeError): - errmsg = "" - super().__init__(errmsg) - - -def rpc_call(rpc_method, params: dict[str, Any] | tuple[Any, ...] | None): - payload = request(rpc_method, params) - url = f"http://127.0.0.1:{WARNET_SERVER_PORT}/api" - try: - response = requests.post(url, json=payload) - except ConnectionRefusedError as e: - print(f"Error connecting to {url}. 
Is the server running and using matching API URL?") - logging.debug(e) - return - match parse(response.json()): - case Ok(result, _): - return result - case Error(code, message, _, _): - print(f"{code}: {message}") - sys.exit(1) - # raise JSONRPCException(code, message) diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index a93345f8a..f063cf018 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -14,7 +14,6 @@ from rich.table import Table from warnet import scenarios as SCENARIOS from .k8s import apply_kubernetes_yaml, create_namespace, get_mission -from .rpc import rpc_call @click.group(name="scenarios") @@ -167,7 +166,8 @@ def run_file(scenario_path, network, additional_args, name=""): "additional_args": additional_args, "network": network, } - print(rpc_call("scenarios_run_file", params)) + # TODO + # print(rpc_call("scenarios_run_file", params)) @scenarios.command() From 53e72e8ecf0827fe2c6ec62cb09dac6a0263e12f Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 12:37:36 +0200 Subject: [PATCH 034/710] move index back to label --- src/warnet/cli/network.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 12d396228..b0378d0d7 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -76,9 +76,8 @@ def create_node_deployment(node: int, data: dict) -> Dict[str, Any]: metadata={ "name": f"warnet-tank-{node}", "namespace": "warnet", - "labels": {"app": "warnet", "mission": "tank"}, + "labels": {"app": "warnet", "mission": "tank", "index": str(node)}, "annotations": { - "index": node, "data": json.dumps(data) } }, From ae7c979394d434833726005d72f1b46ea05bf67f Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 12:38:31 +0200 Subject: [PATCH 035/710] fix network connected --- src/warnet/cli/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/cli/network.py 
b/src/warnet/cli/network.py index b0378d0d7..b5fbb348b 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -238,7 +238,7 @@ def connected(): edges = get_edges() for tank in tanks: # Get actual - index = tank.metadata.annotations["index"] + index = tank.metadata.labels["index"] peerinfo = json.loads(_rpc(int(index), "getpeerinfo", "", "warnet")) manuals = 0 for peer in peerinfo: From 25ee58d9e54cce9751a6a56fc2241e315d5e5e62 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 15:09:21 +0200 Subject: [PATCH 036/710] extract test_framework lib --- resources/images/commander/Dockerfile | 3 ++- .../images/commander/src => src}/test_framework/__init__.py | 0 .../images/commander/src => src}/test_framework/address.py | 0 .../images/commander/src => src}/test_framework/authproxy.py | 0 {resources/images/commander/src => src}/test_framework/bdb.py | 0 .../src => src}/test_framework/bip340_test_vectors.csv | 0 .../images/commander/src => src}/test_framework/blockfilter.py | 0 .../images/commander/src => src}/test_framework/blocktools.py | 0 .../images/commander/src => src}/test_framework/coverage.py | 0 .../images/commander/src => src}/test_framework/descriptors.py | 0 .../images/commander/src => src}/test_framework/ellswift.py | 0 .../test_framework/ellswift_decode_test_vectors.csv | 0 {resources/images/commander/src => src}/test_framework/key.py | 0 .../images/commander/src => src}/test_framework/messages.py | 0 .../images/commander/src => src}/test_framework/muhash.py | 0 .../images/commander/src => src}/test_framework/netutil.py | 0 {resources/images/commander/src => src}/test_framework/p2p.py | 0 {resources/images/commander/src => src}/test_framework/psbt.py | 0 .../images/commander/src => src}/test_framework/ripemd160.py | 0 .../images/commander/src => src}/test_framework/script.py | 0 .../images/commander/src => src}/test_framework/script_util.py | 0 .../images/commander/src => src}/test_framework/secp256k1.py | 0 
.../images/commander/src => src}/test_framework/segwit_addr.py | 0 .../images/commander/src => src}/test_framework/siphash.py | 0 .../images/commander/src => src}/test_framework/socks5.py | 0 .../commander/src => src}/test_framework/test_framework.py | 0 .../images/commander/src => src}/test_framework/test_node.py | 0 .../images/commander/src => src}/test_framework/test_shell.py | 0 {resources/images/commander/src => src}/test_framework/util.py | 0 .../images/commander/src => src}/test_framework/wallet.py | 0 .../images/commander/src => src}/test_framework/wallet_util.py | 0 .../src => src}/test_framework/xswiftec_inv_test_vectors.csv | 0 32 files changed, 2 insertions(+), 1 deletion(-) rename {resources/images/commander/src => src}/test_framework/__init__.py (100%) rename {resources/images/commander/src => src}/test_framework/address.py (100%) rename {resources/images/commander/src => src}/test_framework/authproxy.py (100%) rename {resources/images/commander/src => src}/test_framework/bdb.py (100%) rename {resources/images/commander/src => src}/test_framework/bip340_test_vectors.csv (100%) rename {resources/images/commander/src => src}/test_framework/blockfilter.py (100%) rename {resources/images/commander/src => src}/test_framework/blocktools.py (100%) rename {resources/images/commander/src => src}/test_framework/coverage.py (100%) rename {resources/images/commander/src => src}/test_framework/descriptors.py (100%) rename {resources/images/commander/src => src}/test_framework/ellswift.py (100%) rename {resources/images/commander/src => src}/test_framework/ellswift_decode_test_vectors.csv (100%) rename {resources/images/commander/src => src}/test_framework/key.py (100%) rename {resources/images/commander/src => src}/test_framework/messages.py (100%) rename {resources/images/commander/src => src}/test_framework/muhash.py (100%) rename {resources/images/commander/src => src}/test_framework/netutil.py (100%) rename {resources/images/commander/src => 
src}/test_framework/p2p.py (100%) rename {resources/images/commander/src => src}/test_framework/psbt.py (100%) rename {resources/images/commander/src => src}/test_framework/ripemd160.py (100%) rename {resources/images/commander/src => src}/test_framework/script.py (100%) rename {resources/images/commander/src => src}/test_framework/script_util.py (100%) rename {resources/images/commander/src => src}/test_framework/secp256k1.py (100%) rename {resources/images/commander/src => src}/test_framework/segwit_addr.py (100%) rename {resources/images/commander/src => src}/test_framework/siphash.py (100%) rename {resources/images/commander/src => src}/test_framework/socks5.py (100%) rename {resources/images/commander/src => src}/test_framework/test_framework.py (100%) rename {resources/images/commander/src => src}/test_framework/test_node.py (100%) rename {resources/images/commander/src => src}/test_framework/test_shell.py (100%) rename {resources/images/commander/src => src}/test_framework/util.py (100%) rename {resources/images/commander/src => src}/test_framework/wallet.py (100%) rename {resources/images/commander/src => src}/test_framework/wallet_util.py (100%) rename {resources/images/commander/src => src}/test_framework/xswiftec_inv_test_vectors.csv (100%) diff --git a/resources/images/commander/Dockerfile b/resources/images/commander/Dockerfile index c9a3510bf..489c19793 100644 --- a/resources/images/commander/Dockerfile +++ b/resources/images/commander/Dockerfile @@ -5,7 +5,8 @@ FROM python:3.12-slim #RUN pip install --no-cache-dir prometheus_client # Prometheus exporter script for bitcoind -COPY src / +COPY resources/images/commander/src / +COPY src/test_framework /test_framework # -u: force the stdout and stderr streams to be unbuffered ENTRYPOINT ["python", "-u", "/scenario.py"] diff --git a/resources/images/commander/src/test_framework/__init__.py b/src/test_framework/__init__.py similarity index 100% rename from 
resources/images/commander/src/test_framework/__init__.py rename to src/test_framework/__init__.py diff --git a/resources/images/commander/src/test_framework/address.py b/src/test_framework/address.py similarity index 100% rename from resources/images/commander/src/test_framework/address.py rename to src/test_framework/address.py diff --git a/resources/images/commander/src/test_framework/authproxy.py b/src/test_framework/authproxy.py similarity index 100% rename from resources/images/commander/src/test_framework/authproxy.py rename to src/test_framework/authproxy.py diff --git a/resources/images/commander/src/test_framework/bdb.py b/src/test_framework/bdb.py similarity index 100% rename from resources/images/commander/src/test_framework/bdb.py rename to src/test_framework/bdb.py diff --git a/resources/images/commander/src/test_framework/bip340_test_vectors.csv b/src/test_framework/bip340_test_vectors.csv similarity index 100% rename from resources/images/commander/src/test_framework/bip340_test_vectors.csv rename to src/test_framework/bip340_test_vectors.csv diff --git a/resources/images/commander/src/test_framework/blockfilter.py b/src/test_framework/blockfilter.py similarity index 100% rename from resources/images/commander/src/test_framework/blockfilter.py rename to src/test_framework/blockfilter.py diff --git a/resources/images/commander/src/test_framework/blocktools.py b/src/test_framework/blocktools.py similarity index 100% rename from resources/images/commander/src/test_framework/blocktools.py rename to src/test_framework/blocktools.py diff --git a/resources/images/commander/src/test_framework/coverage.py b/src/test_framework/coverage.py similarity index 100% rename from resources/images/commander/src/test_framework/coverage.py rename to src/test_framework/coverage.py diff --git a/resources/images/commander/src/test_framework/descriptors.py b/src/test_framework/descriptors.py similarity index 100% rename from 
resources/images/commander/src/test_framework/descriptors.py rename to src/test_framework/descriptors.py diff --git a/resources/images/commander/src/test_framework/ellswift.py b/src/test_framework/ellswift.py similarity index 100% rename from resources/images/commander/src/test_framework/ellswift.py rename to src/test_framework/ellswift.py diff --git a/resources/images/commander/src/test_framework/ellswift_decode_test_vectors.csv b/src/test_framework/ellswift_decode_test_vectors.csv similarity index 100% rename from resources/images/commander/src/test_framework/ellswift_decode_test_vectors.csv rename to src/test_framework/ellswift_decode_test_vectors.csv diff --git a/resources/images/commander/src/test_framework/key.py b/src/test_framework/key.py similarity index 100% rename from resources/images/commander/src/test_framework/key.py rename to src/test_framework/key.py diff --git a/resources/images/commander/src/test_framework/messages.py b/src/test_framework/messages.py similarity index 100% rename from resources/images/commander/src/test_framework/messages.py rename to src/test_framework/messages.py diff --git a/resources/images/commander/src/test_framework/muhash.py b/src/test_framework/muhash.py similarity index 100% rename from resources/images/commander/src/test_framework/muhash.py rename to src/test_framework/muhash.py diff --git a/resources/images/commander/src/test_framework/netutil.py b/src/test_framework/netutil.py similarity index 100% rename from resources/images/commander/src/test_framework/netutil.py rename to src/test_framework/netutil.py diff --git a/resources/images/commander/src/test_framework/p2p.py b/src/test_framework/p2p.py similarity index 100% rename from resources/images/commander/src/test_framework/p2p.py rename to src/test_framework/p2p.py diff --git a/resources/images/commander/src/test_framework/psbt.py b/src/test_framework/psbt.py similarity index 100% rename from resources/images/commander/src/test_framework/psbt.py rename to 
src/test_framework/psbt.py diff --git a/resources/images/commander/src/test_framework/ripemd160.py b/src/test_framework/ripemd160.py similarity index 100% rename from resources/images/commander/src/test_framework/ripemd160.py rename to src/test_framework/ripemd160.py diff --git a/resources/images/commander/src/test_framework/script.py b/src/test_framework/script.py similarity index 100% rename from resources/images/commander/src/test_framework/script.py rename to src/test_framework/script.py diff --git a/resources/images/commander/src/test_framework/script_util.py b/src/test_framework/script_util.py similarity index 100% rename from resources/images/commander/src/test_framework/script_util.py rename to src/test_framework/script_util.py diff --git a/resources/images/commander/src/test_framework/secp256k1.py b/src/test_framework/secp256k1.py similarity index 100% rename from resources/images/commander/src/test_framework/secp256k1.py rename to src/test_framework/secp256k1.py diff --git a/resources/images/commander/src/test_framework/segwit_addr.py b/src/test_framework/segwit_addr.py similarity index 100% rename from resources/images/commander/src/test_framework/segwit_addr.py rename to src/test_framework/segwit_addr.py diff --git a/resources/images/commander/src/test_framework/siphash.py b/src/test_framework/siphash.py similarity index 100% rename from resources/images/commander/src/test_framework/siphash.py rename to src/test_framework/siphash.py diff --git a/resources/images/commander/src/test_framework/socks5.py b/src/test_framework/socks5.py similarity index 100% rename from resources/images/commander/src/test_framework/socks5.py rename to src/test_framework/socks5.py diff --git a/resources/images/commander/src/test_framework/test_framework.py b/src/test_framework/test_framework.py similarity index 100% rename from resources/images/commander/src/test_framework/test_framework.py rename to src/test_framework/test_framework.py diff --git 
a/resources/images/commander/src/test_framework/test_node.py b/src/test_framework/test_node.py similarity index 100% rename from resources/images/commander/src/test_framework/test_node.py rename to src/test_framework/test_node.py diff --git a/resources/images/commander/src/test_framework/test_shell.py b/src/test_framework/test_shell.py similarity index 100% rename from resources/images/commander/src/test_framework/test_shell.py rename to src/test_framework/test_shell.py diff --git a/resources/images/commander/src/test_framework/util.py b/src/test_framework/util.py similarity index 100% rename from resources/images/commander/src/test_framework/util.py rename to src/test_framework/util.py diff --git a/resources/images/commander/src/test_framework/wallet.py b/src/test_framework/wallet.py similarity index 100% rename from resources/images/commander/src/test_framework/wallet.py rename to src/test_framework/wallet.py diff --git a/resources/images/commander/src/test_framework/wallet_util.py b/src/test_framework/wallet_util.py similarity index 100% rename from resources/images/commander/src/test_framework/wallet_util.py rename to src/test_framework/wallet_util.py diff --git a/resources/images/commander/src/test_framework/xswiftec_inv_test_vectors.csv b/src/test_framework/xswiftec_inv_test_vectors.csv similarity index 100% rename from resources/images/commander/src/test_framework/xswiftec_inv_test_vectors.csv rename to src/test_framework/xswiftec_inv_test_vectors.csv From 10d3774f8b70034866a7b0bed2d9713fdff2c0ae Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 15:09:46 +0200 Subject: [PATCH 037/710] cli: enable 'bitcoin messages' --- src/warnet/cli/bitcoin.py | 250 +++++++++++++++++++++++++++++++++----- 1 file changed, 218 insertions(+), 32 deletions(-) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index dff595156..ceb9c2aae 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -1,8 +1,14 @@ import json +import os 
import re +from datetime import datetime +from io import BytesIO import click +from test_framework.messages import ser_uint256 +from test_framework.p2p import MESSAGEMAP + from .k8s import run_command @@ -11,26 +17,6 @@ def bitcoin(): """Control running bitcoin nodes""" -@bitcoin.command(context_settings={"ignore_unknown_options": True}) -@click.argument("node", type=int) -@click.argument("method", type=str) -@click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments -@click.option("--network", default="warnet", show_default=True) -def rpc(node, method, params, network): - """ - Call bitcoin-cli [params] on in [network] - """ - print(_rpc(node, method, params, network)) - - -def _rpc(node, method, params, network): - if params: - cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" - else: - cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" - return run_command(cmd) - - @bitcoin.command() @click.argument("node", type=int, required=True) @click.option("--network", default="warnet", show_default=True) @@ -42,18 +28,6 @@ def debug_log(node, network): print(run_command(cmd)) -# @bitcoin.command() -# @click.argument("node_a", type=int, required=True) -# @click.argument("node_b", type=int, required=True) -# @click.option("--network", default="warnet", show_default=True) -# def messages(node_a, node_b, network): -# """ -# Fetch messages sent between and in [network] -# """ -# print(rpc_call("tank_messages", {"network": network, "node_a": node_a, "node_b": node_b})) -# -# - @bitcoin.command() @click.argument("pattern", type=str, required=True) @click.option("--show-k8s-timestamps", is_flag=True, default=False, show_default=True) @@ -124,3 +98,215 @@ def grep_logs(pattern, network, show_k8s_timestamps, no_sort): print(f"{pod_name}: {log_entry}") return matching_logs + + +@bitcoin.command() 
+@click.argument("node_a", type=int, required=True) +@click.argument("node_b", type=int, required=True) +@click.option("--network", default="regtest", show_default=True) +def messages(node_a, node_b, network): + """ + Fetch messages sent between and in [network] + """ + try: + # Get the messages + messages = get_messages(node_a, node_b, network) + + if not messages: + print(f"No messages found between {node_a} and {node_b}") + return + + # Process and print messages + for message in messages: + if not (message.get("time") and isinstance(message["time"], (int, float))): + continue + + timestamp = datetime.utcfromtimestamp(message["time"] / 1e6).strftime( + "%Y-%m-%d %H:%M:%S" + ) + direction = ">>>" if message.get("outbound", False) else "<<<" + msgtype = message.get("msgtype", "") + body_dict = message.get("body", {}) + + if not isinstance(body_dict, dict): + continue + + body_str = ", ".join(f"{key}: {value}" for key, value in body_dict.items()) + print(f"{timestamp} {direction} {msgtype} {body_str}") + + except Exception as e: + print(f"Error fetching messages between nodes {node_a} and {node_b}: {e}") + + +@bitcoin.command(context_settings={"ignore_unknown_options": True}) +@click.argument("node", type=int) +@click.argument("method", type=str) +@click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments +@click.option("--network", default="warnet", show_default=True) +def rpc(node, method, params, network): + """ + Call bitcoin-cli [params] on in [network] + """ + print(_rpc(node, method, params, network)) + + +def get_messages(node_a, node_b, network): + """ + Fetch messages from the message capture files + """ + subdir = "" if network == "main" else f"{network}/" + base_dir = f"/root/.bitcoin/{subdir}message_capture" + + # Get the IP of node_b + cmd = f"kubectl get pod warnet-tank-{node_b} -o jsonpath='{{.status.podIP}}'" + node_b_ip = run_command(cmd).strip() + + # Get the service IP of node_b + cmd = f"kubectl get service 
warnet-tank-{node_b}-service -o jsonpath='{{.spec.clusterIP}}'" + node_b_service_ip = run_command(cmd).strip() + + # List directories in the message capture folder + cmd = f"kubectl exec warnet-tank-{node_a} -- ls {base_dir}" + dirs = run_command(cmd).splitlines() + + messages = [] + + for dir_name in dirs: + if node_b_ip in dir_name or node_b_service_ip in dir_name: + for file, outbound in [["msgs_recv.dat", False], ["msgs_sent.dat", True]]: + file_path = f"{base_dir}/{dir_name}/{file}" + + # Fetch the file contents from the container + cmd = f"kubectl exec warnet-tank-{node_a} -- cat {file_path}" + import subprocess + + blob = subprocess.run( + cmd, shell=True, capture_output=True, executable="/bin/bash" + ).stdout + + # Parse the blob + json = parse_raw_messages(blob, outbound) + messages = messages + json + + messages.sort(key=lambda x: x["time"]) + return messages + + +# This function is a hacked-up copy of process_file() from +# Bitcoin Core contrib/message-capture/message-capture-parser.py +def parse_raw_messages(blob, outbound): + TIME_SIZE = 8 + LENGTH_SIZE = 4 + MSGTYPE_SIZE = 12 + + messages = [] + offset = 0 + while True: + # Read the Header + header_len = TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE + tmp_header_raw = blob[offset : offset + header_len] + + offset = offset + header_len + if not tmp_header_raw: + break + tmp_header = BytesIO(tmp_header_raw) + time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") # type: int + msgtype = tmp_header.read(MSGTYPE_SIZE).split(b"\x00", 1)[0] # type: bytes + length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # type: int + + # Start converting the message to a dictionary + msg_dict = {} + msg_dict["outbound"] = outbound + msg_dict["time"] = time + msg_dict["size"] = length # "size" is less readable here, but more readable in the output + + msg_ser = BytesIO(blob[offset : offset + length]) + offset = offset + length + + # Determine message type + if msgtype not in MESSAGEMAP: + # Unrecognized message 
type + try: + msgtype_tmp = msgtype.decode() + if not msgtype_tmp.isprintable(): + raise UnicodeDecodeError + msg_dict["msgtype"] = msgtype_tmp + except UnicodeDecodeError: + msg_dict["msgtype"] = "UNREADABLE" + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unrecognized message type." + messages.append(msg_dict) + # print(f"WARNING - Unrecognized message type {msgtype}", file=sys.stderr) + continue + + # Deserialize the message + msg = MESSAGEMAP[msgtype]() + msg_dict["msgtype"] = msgtype.decode() + + try: + msg.deserialize(msg_ser) + except KeyboardInterrupt: + raise + except Exception: + # Unable to deserialize message body + msg_ser.seek(0, os.SEEK_SET) + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unable to deserialize message." + messages.append(msg_dict) + # print("WARNING - Unable to deserialize message", file=sys.stderr) + continue + + # Convert body of message into a jsonable object + if length: + msg_dict["body"] = to_jsonable(msg) + messages.append(msg_dict) + return messages + + +def to_jsonable(obj): + HASH_INTS = [ + "blockhash", + "block_hash", + "hash", + "hashMerkleRoot", + "hashPrevBlock", + "hashstop", + "prev_header", + "sha256", + "stop_hash", + ] + + HASH_INT_VECTORS = [ + "hashes", + "headers", + "vHave", + "vHash", + ] + + if hasattr(obj, "__dict__"): + return obj.__dict__ + elif hasattr(obj, "__slots__"): + ret = {} # type: Any + for slot in obj.__slots__: + val = getattr(obj, slot, None) + if slot in HASH_INTS and isinstance(val, int): + ret[slot] = ser_uint256(val).hex() + elif slot in HASH_INT_VECTORS and all(isinstance(a, int) for a in val): + ret[slot] = [ser_uint256(a).hex() for a in val] + else: + ret[slot] = to_jsonable(val) + return ret + elif isinstance(obj, list): + return [to_jsonable(a) for a in obj] + elif isinstance(obj, bytes): + return obj.hex() + else: + return obj + + +def _rpc(node, method, params, network): + if params: + cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest 
-rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" + else: + cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" + return run_command(cmd) From 4dd3d6a7efbc56690373f44f8985a851b27484a5 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 15:22:36 +0200 Subject: [PATCH 038/710] rpc_test passing --- src/warnet/cli/bitcoin.py | 51 ++-- src/warnet/cli/network.py | 43 ++- src/warnet/cli/scenarios.py | 11 +- src/warnet/logging_config.json | 6 - src/warnet/utils.py | 479 +++++++++++++++++++++++++++++++ test/data/12_node_ring.graphml | 14 +- test/data/build_v24_test.graphml | 2 - test/data/ln.graphml | 5 - test/data/services.graphml | 2 +- test/rpc_test.py | 1 - test/test_base.py | 76 +---- 11 files changed, 552 insertions(+), 138 deletions(-) create mode 100644 src/warnet/utils.py diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index ceb9c2aae..c7d0166f4 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -17,12 +17,30 @@ def bitcoin(): """Control running bitcoin nodes""" +@bitcoin.command(context_settings={"ignore_unknown_options": True}) +@click.argument("node", type=int) +@click.argument("method", type=str) +@click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments +def rpc(node, method, params): + """ + Call bitcoin-cli [params] on + """ + print(_rpc(node, method, params)) + + +def _rpc(node, method, params): + if params: + cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" + else: + cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" + return run_command(cmd) + + @bitcoin.command() @click.argument("node", type=int, required=True) -@click.option("--network", default="warnet", show_default=True) -def debug_log(node, network): +def 
debug_log(node): """ - Fetch the Bitcoin Core debug log from in [network] + Fetch the Bitcoin Core debug log from """ cmd = f"kubectl logs warnet-tank-{node}" print(run_command(cmd)) @@ -32,14 +50,13 @@ def debug_log(node, network): @click.argument("pattern", type=str, required=True) @click.option("--show-k8s-timestamps", is_flag=True, default=False, show_default=True) @click.option("--no-sort", is_flag=True, default=False, show_default=True) -@click.option("--network", default="warnet", show_default=True) -def grep_logs(pattern, network, show_k8s_timestamps, no_sort): +def grep_logs(pattern, show_k8s_timestamps, no_sort): """ Grep combined bitcoind logs using regex """ # Get all pods in the namespace - command = f"kubectl get pods -n {network} -o json" + command = f"kubectl get pods -n warnet -o json" pods_json = run_command(command) if pods_json is False: @@ -68,7 +85,7 @@ def grep_logs(pattern, network, show_k8s_timestamps, no_sort): continue # Get logs from the specific container - command = f"kubectl logs {pod_name} -c {container_name} -n {network} --timestamps" + command = f"kubectl logs {pod_name} -c {container_name} -n warnet --timestamps" logs = run_command(command) if logs is not False: @@ -138,18 +155,6 @@ def messages(node_a, node_b, network): print(f"Error fetching messages between nodes {node_a} and {node_b}: {e}") -@bitcoin.command(context_settings={"ignore_unknown_options": True}) -@click.argument("node", type=int) -@click.argument("method", type=str) -@click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments -@click.option("--network", default="warnet", show_default=True) -def rpc(node, method, params, network): - """ - Call bitcoin-cli [params] on in [network] - """ - print(_rpc(node, method, params, network)) - - def get_messages(node_a, node_b, network): """ Fetch messages from the message capture files @@ -302,11 +307,3 @@ def to_jsonable(obj): return obj.hex() else: return obj - - -def _rpc(node, method, params, 
network): - if params: - cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" - else: - cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" - return run_command(cmd) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index b5fbb348b..9b0f56091 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -187,10 +187,9 @@ def setup_logging_helm() -> bool: @network.command() @click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) -@click.option("--network", default="warnet", show_default=True) @click.option("--logging/--no-logging", default=False) -def start(graph_file: Path, logging: bool, network: str): - """Start a warnet with topology loaded from a into [network]""" +def start(graph_file: Path, logging: bool): + """Start a warnet with topology loaded from a """ graph = read_graph_file(graph_file) kubernetes_yaml = generate_kubernetes_yaml(graph) @@ -200,27 +199,26 @@ def start(graph_file: Path, logging: bool, network: str): try: if deploy_base_configurations() and apply_kubernetes_yaml(temp_file_path): - print(f"Warnet '{network}' started successfully.") - if not set_kubectl_context(network): + print(f"Warnet network started successfully.") + if not set_kubectl_context("warnet"): print( "Warning: Failed to set kubectl context. You may need to manually switch to the warnet namespace." 
) if logging and not setup_logging_helm(): print("Failed to install Helm charts.") else: - print(f"Failed to start warnet '{network}'.") + print(f"Failed to start warnet network.") finally: Path(temp_file_path).unlink() @network.command() -@click.option("--network", default="warnet", show_default=True) -def down(network: str): - """Bring down a running warnet named [network]""" - if delete_namespace(network) and delete_namespace("warnet-logging"): - print(f"Warnet '{network}' has been successfully brought down and the namespaces deleted.") +def down(): + """Bring down a running warnet""" + if delete_namespace("warnet") and delete_namespace("warnet-logging"): + print(f"Warnet network has been successfully brought down and the namespaces deleted.") else: - print(f"Failed to bring down warnet '{network}' or delete the namespaces.") + print(f"Failed to bring down warnet network or delete the namespaces.") @network.command() @@ -234,12 +232,15 @@ def logs(follow: bool): @network.command() def connected(): """Determine if all p2p conenctions defined in graph are established""" + print(_connected()) + +def _connected(): tanks = get_mission("tank") edges = get_edges() for tank in tanks: # Get actual index = tank.metadata.labels["index"] - peerinfo = json.loads(_rpc(int(index), "getpeerinfo", "", "warnet")) + peerinfo = json.loads(_rpc(int(index), "getpeerinfo", "")) manuals = 0 for peer in peerinfo: if peer["connection_type"] == "manual": @@ -255,6 +256,22 @@ def connected(): print("Network connected") return True + +@network.command() +def status(): + """Return pod status""" + # TODO: make it a pretty table + print(_status()) + +def _status(): + tanks = get_mission("tank") + stats = [] + for tank in tanks: + status = {"tank_index": tank.metadata.labels["index"], "bitcoin_status": tank.status.phase.lower()} + stats.append(status) + return stats + + @network.command() @click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) @click.option("--output", 
"-o", default="warnet-deployment.yaml", help="Output YAML file") diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index f063cf018..438152c11 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -44,10 +44,9 @@ def available(): @scenarios.command(context_settings={"ignore_unknown_options": True}) @click.argument("scenario", type=str) @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) -@click.option("--network", default="warnet", show_default=True) -def run(scenario, network, additional_args): +def run(scenario, additional_args): """ - Run from the Warnet Test Framework on [network] with optional arguments + Run from the Warnet Test Framework with optional arguments """ # Use importlib.resources to get the scenario path @@ -147,10 +146,9 @@ def run(scenario, network, additional_args): @click.argument("scenario_path", type=str) @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) @click.option("--name", type=str) -@click.option("--network", default="warnet", show_default=True) -def run_file(scenario_path, network, additional_args, name=""): +def run_file(scenario_path, additional_args, name=""): """ - Run from the Warnet Test Framework on [network] with optional arguments + Run from the Warnet Test Framework with optional arguments """ if not scenario_path.endswith(".py"): print("Error. 
Currently only python scenarios are supported") @@ -164,7 +162,6 @@ def run_file(scenario_path, network, additional_args, name=""): "scenario_base64": scenario_base64, "scenario_name": scenario_name, "additional_args": additional_args, - "network": network, } # TODO # print(rpc_call("scenarios_run_file", params)) diff --git a/src/warnet/logging_config.json b/src/warnet/logging_config.json index 9ab9cabca..3aec3ad34 100644 --- a/src/warnet/logging_config.json +++ b/src/warnet/logging_config.json @@ -11,17 +11,11 @@ "datefmt": "%Y-%m-%d %H:%M:%S" } }, - "filters": { - "no_errors": { - "()": "warnet.utils.NonErrorFilter" - } - }, "handlers": { "stdout": { "class": "logging.StreamHandler", "level": "DEBUG", "formatter": "simple", - "filters": ["no_errors"], "stream": "ext://sys.stdout" }, "stderr": { diff --git a/src/warnet/utils.py b/src/warnet/utils.py new file mode 100644 index 000000000..23dad566b --- /dev/null +++ b/src/warnet/utils.py @@ -0,0 +1,479 @@ +import functools +import ipaddress +import json +import logging +import os +import random +import re +import stat +import subprocess +import sys +import time +from io import BytesIO +from pathlib import Path + +import networkx as nx +from jsonschema import validate +from test_framework.messages import ser_uint256 +from test_framework.p2p import MESSAGEMAP +from warnet import SRC_DIR + +logger = logging.getLogger("utils") + + +SUPPORTED_TAGS = ["27.0", "26.0", "25.1", "24.2", "23.2", "22.2"] +DEFAULT_TAG = SUPPORTED_TAGS[0] +WEIGHTED_TAGS = [ + tag for index, tag in enumerate(reversed(SUPPORTED_TAGS)) for _ in range(index + 1) +] + + +class NonErrorFilter(logging.Filter): + def filter(self, record: logging.LogRecord) -> bool | logging.LogRecord: + return record.levelno <= logging.INFO + + +def exponential_backoff(max_retries=5, base_delay=1, max_delay=32): + """ + A decorator for exponential backoff. + + Parameters: + - max_retries: Maximum number of retries before giving up. 
+ - base_delay: Initial delay in seconds. + - max_delay: Maximum delay in seconds. + """ + + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + retries = 0 + while retries < max_retries: + try: + return func(*args, **kwargs) + except Exception as e: + error_msg = str(e).replace("\n", " ").replace("\t", " ") + logger.error(f"rpc error: {error_msg}") + retries += 1 + if retries == max_retries: + raise e + delay = min(base_delay * (2**retries), max_delay) + logger.warning(f"exponential_backoff: retry in {delay} seconds...") + time.sleep(delay) + + return wrapper + + return decorator + + +def handle_json(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + result = "" + try: + result = func(*args, **kwargs) + logger.debug(f"{result=:}") + if isinstance(result, dict): + return result + parsed_result = json.loads(result) + return parsed_result + except json.JSONDecodeError as e: + logging.error( + f"JSON parsing error in {func.__name__}: {e}. Undecodable result: {result}" + ) + raise + except Exception as e: + logger.error(f"Error in {func.__name__}: {e}") + raise + + return wrapper + + +def get_architecture(): + """ + Get the architecture of the machine. + :return: The architecture of the machine or None if an error occurred + """ + result = subprocess.run(["uname", "-m"], stdout=subprocess.PIPE) + arch = result.stdout.decode("utf-8").strip() + if arch == "x86_64": + arch = "amd64" + if arch is None: + raise Exception("Failed to detect architecture.") + return arch + + +def generate_ipv4_addr(subnet): + """ + Generate a valid random IPv4 address within the given subnet. 
+ + :param subnet: Subnet in CIDR notation (e.g., '100.0.0.0/8') + :return: Random IP address within the subnet + """ + reserved_ips = [ + "0.0.0.0/8", + "10.0.0.0/8", + "100.64.0.0/10", + "127.0.0.0/8", + "169.254.0.0/16", + "172.16.0.0/12", + "192.0.0.0/24", + "192.0.2.0/24", + "192.88.99.0/24", + "192.168.0.0/16", + "198.18.0.0/15", + "198.51.100.0/24", + "203.0.113.0/24", + "224.0.0.0/4", + ] + + def is_public(ip): + for reserved in reserved_ips: + if ipaddress.ip_address(ip) in ipaddress.ip_network(reserved, strict=False): + return False + return True + + network = ipaddress.ip_network(subnet, strict=False) + + # Generate a random IP within the subnet range + while True: + ip_int = random.randint(int(network.network_address), int(network.broadcast_address)) + ip_str = str(ipaddress.ip_address(ip_int)) + if is_public(ip_str): + return ip_str + + +def sanitize_tc_netem_command(command: str) -> bool: + """ + Sanitize the tc-netem command to ensure it's valid and safe to execute, as we run it as root on a container. + + Args: + - command (str): The tc-netem command to sanitize. + + Returns: + - bool: True if the command is valid and safe, False otherwise. 
+ """ + if not command.startswith("tc qdisc add dev eth0 root netem"): + return False + + tokens = command.split()[7:] # Skip the prefix + + # Valid tc-netem parameters and their patterns + valid_params = { + "delay": r"^\d+ms(\s\d+ms)?(\sdistribution\s(normal|pareto|paretonormal|uniform))?$", + "loss": r"^\d+(\.\d+)?%$", + "duplicate": r"^\d+(\.\d+)?%$", + "corrupt": r"^\d+(\.\d+)?%$", + "reorder": r"^\d+(\.\d+)?%\s\d+(\.\d+)?%$", + "rate": r"^\d+(kbit|mbit|gbit)$", + } + + # Validate each param + i = 0 + while i < len(tokens): + param = tokens[i] + if param not in valid_params: + return False + i += 1 + value_tokens = [] + while i < len(tokens) and tokens[i] not in valid_params: + value_tokens.append(tokens[i]) + i += 1 + value = " ".join(value_tokens) + if not re.match(valid_params[param], value): + return False + + return True + + +def parse_bitcoin_conf(file_content): + """ + Custom parser for INI-style bitcoin.conf + + Args: + - file_content (str): The content of the INI-style file. + + Returns: + - dict: A dictionary representation of the file content. + Key-value pairs are stored as tuples so one key may have + multiple values. Sections are represented as arrays of these tuples. + """ + current_section = None + result = {current_section: []} + + for line in file_content.splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + + if line.startswith("[") and line.endswith("]"): + current_section = line[1:-1] + result[current_section] = [] + elif "=" in line: + key, value = line.split("=", 1) + result[current_section].append((key.strip(), value.strip())) + + return result + + +def dump_bitcoin_conf(conf_dict, for_graph=False): + """ + Converts a dictionary representation of bitcoin.conf content back to INI-style string. + + Args: + - conf_dict (dict): A dictionary representation of the file content. + + Returns: + - str: The INI-style string representation of the input dictionary. 
+ """ + result = [] + + # Print global section at the top first + values = conf_dict[None] + for sub_key, sub_value in values: + result.append(f"{sub_key}={sub_value}") + + # Then print any named subsections + for section, values in conf_dict.items(): + if section is not None: + result.append(f"\n[{section}]") + else: + continue + for sub_key, sub_value in values: + result.append(f"{sub_key}={sub_value}") + + if for_graph: + return ",".join(result) + + # Terminate file with newline + return "\n".join(result) + "\n" + + +def to_jsonable(obj): + HASH_INTS = [ + "blockhash", + "block_hash", + "hash", + "hashMerkleRoot", + "hashPrevBlock", + "hashstop", + "prev_header", + "sha256", + "stop_hash", + ] + + HASH_INT_VECTORS = [ + "hashes", + "headers", + "vHave", + "vHash", + ] + + if hasattr(obj, "__dict__"): + return obj.__dict__ + elif hasattr(obj, "__slots__"): + ret = {} # type: Any + for slot in obj.__slots__: + val = getattr(obj, slot, None) + if slot in HASH_INTS and isinstance(val, int): + ret[slot] = ser_uint256(val).hex() + elif slot in HASH_INT_VECTORS and all(isinstance(a, int) for a in val): + ret[slot] = [ser_uint256(a).hex() for a in val] + else: + ret[slot] = to_jsonable(val) + return ret + elif isinstance(obj, list): + return [to_jsonable(a) for a in obj] + elif isinstance(obj, bytes): + return obj.hex() + else: + return obj + + +# This function is a hacked-up copy of process_file() from +# Bitcoin Core contrib/message-capture/message-capture-parser.py +def parse_raw_messages(blob, outbound): + TIME_SIZE = 8 + LENGTH_SIZE = 4 + MSGTYPE_SIZE = 12 + + messages = [] + offset = 0 + while True: + # Read the Header + header_len = TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE + tmp_header_raw = blob[offset : offset + header_len] + + offset = offset + header_len + if not tmp_header_raw: + break + tmp_header = BytesIO(tmp_header_raw) + time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") # type: int + msgtype = tmp_header.read(MSGTYPE_SIZE).split(b"\x00", 1)[0] # 
type: bytes + length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # type: int + + # Start converting the message to a dictionary + msg_dict = {} + msg_dict["outbound"] = outbound + msg_dict["time"] = time + msg_dict["size"] = length # "size" is less readable here, but more readable in the output + + msg_ser = BytesIO(blob[offset : offset + length]) + offset = offset + length + + # Determine message type + if msgtype not in MESSAGEMAP: + # Unrecognized message type + try: + msgtype_tmp = msgtype.decode() + if not msgtype_tmp.isprintable(): + raise UnicodeDecodeError + msg_dict["msgtype"] = msgtype_tmp + except UnicodeDecodeError: + msg_dict["msgtype"] = "UNREADABLE" + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unrecognized message type." + messages.append(msg_dict) + print(f"WARNING - Unrecognized message type {msgtype}", file=sys.stderr) + continue + + # Deserialize the message + msg = MESSAGEMAP[msgtype]() + msg_dict["msgtype"] = msgtype.decode() + + try: + msg.deserialize(msg_ser) + except KeyboardInterrupt: + raise + except Exception: + # Unable to deserialize message body + msg_ser.seek(0, os.SEEK_SET) + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unable to deserialize message." 
+ messages.append(msg_dict) + print("WARNING - Unable to deserialize message", file=sys.stderr) + continue + + # Convert body of message into a jsonable object + if length: + msg_dict["body"] = to_jsonable(msg) + messages.append(msg_dict) + return messages + + +def gen_config_dir(network: str) -> Path: + """ + Determine a config dir based on network name + """ + config_dir = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.warnet")) + config_dir = Path(config_dir) / "warnet" / network + return config_dir + + +def remove_version_prefix(version_str): + if version_str.startswith("0."): + return version_str[2:] + return version_str + + +def set_execute_permission(file_path): + current_permissions = os.stat(file_path).st_mode + os.chmod(file_path, current_permissions | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) + + +def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_version: bool): + try: + # Use nx.MultiDiGraph() so we get directed edges (source->target) + # and still allow parallel edges (L1 p2p connections + LN channels) + graph = nx.generators.cycle_graph(n, nx.MultiDiGraph()) + except TypeError as e: + msg = f"Failed to create graph: {e}" + logger.error(msg) + return msg + + # Graph is a simply cycle graph with all nodes connected in a loop, including both ends. 
+ # Ensure each node has at least 8 outbound connections by making 7 more outbound connections + for src_node in graph.nodes(): + logger.debug(f"Creating additional connections for node {src_node}") + for _ in range(8): + # Choose a random node to connect to + # Make sure it's not the same node and they aren't already connected in either direction + potential_nodes = [ + dst_node + for dst_node in range(n) + if dst_node != src_node + and not graph.has_edge(dst_node, src_node) + and not graph.has_edge(src_node, dst_node) + ] + if potential_nodes: + chosen_node = random.choice(potential_nodes) + graph.add_edge(src_node, chosen_node) + logger.debug(f"Added edge: {src_node}:{chosen_node}") + logger.debug(f"Node {src_node} edges: {graph.edges(src_node)}") + + # parse and process conf file + conf_contents = "" + if bitcoin_conf is not None: + conf = Path(bitcoin_conf) + if conf.is_file(): + with open(conf) as f: + # parse INI style conf then dump using for_graph + conf_dict = parse_bitcoin_conf(f.read()) + conf_contents = dump_bitcoin_conf(conf_dict, for_graph=True) + + # populate our custom fields + for i, node in enumerate(graph.nodes()): + if random_version: + graph.nodes[node]["version"] = random.choice(WEIGHTED_TAGS) + else: + # One node demoing the image tag + if i == 1: + graph.nodes[node]["image"] = f"bitcoindevproject/bitcoin:{version}" + else: + graph.nodes[node]["version"] = version + graph.nodes[node]["bitcoin_config"] = conf_contents + graph.nodes[node]["tc_netem"] = "" + graph.nodes[node]["build_args"] = "" + graph.nodes[node]["exporter"] = False + graph.nodes[node]["collect_logs"] = False + + convert_unsupported_attributes(graph) + return graph + + +def convert_unsupported_attributes(graph: nx.Graph): + # Sometimes networkx complains about invalid types when writing the graph + # (it just generated itself!). Try to convert them here just in case. 
+ for _, node_data in graph.nodes(data=True): + for key, value in node_data.items(): + if isinstance(value, set): + node_data[key] = list(value) + elif isinstance(value, int | float | str): + continue + else: + node_data[key] = str(value) + + for _, _, edge_data in graph.edges(data=True): + for key, value in edge_data.items(): + if isinstance(value, set): + edge_data[key] = list(value) + elif isinstance(value, int | float | str): + continue + else: + edge_data[key] = str(value) + + +def load_schema(): + with open(SRC_DIR / "graph_schema.json") as schema_file: + return json.load(schema_file) + + +def validate_graph_schema(graph: nx.Graph): + """ + Validate a networkx.Graph against the node schema + """ + graph_schema = load_schema() + validate(instance=graph.graph, schema=graph_schema["graph"]) + for n in list(graph.nodes): + validate(instance=graph.nodes[n], schema=graph_schema["node"]) + for e in list(graph.edges): + validate(instance=graph.edges[e], schema=graph_schema["edge"]) diff --git a/test/data/12_node_ring.graphml b/test/data/12_node_ring.graphml index 7cdf3a7f7..a45889765 100644 --- a/test/data/12_node_ring.graphml +++ b/test/data/12_node_ring.graphml @@ -18,47 +18,39 @@ 27.0 - -uacomment=w0 -debug=validation + debug=validation 27.0 - -uacomment=w1 -debug=validation + debug=validation 27.0 - -uacomment=w2 -debug=validation + debug=validation 27.0 - -uacomment=w3 27.0 - -uacomment=w4 27.0 - -uacomment=w5 27.0 - -uacomment=w6 27.0 - -uacomment=w7 27.0 - -uacomment=w8 27.0 - -uacomment=w9 27.0 - -uacomment=w10 27.0 diff --git a/test/data/build_v24_test.graphml b/test/data/build_v24_test.graphml index 5dc8c7297..d55c3611c 100644 --- a/test/data/build_v24_test.graphml +++ b/test/data/build_v24_test.graphml @@ -18,11 +18,9 @@ 27.0 - -uacomment=w0 bitcoin/bitcoin#24.x - -uacomment=v24_build --disable-zmq diff --git a/test/data/ln.graphml b/test/data/ln.graphml index e0606c93f..efd0c359f 100644 --- a/test/data/ln.graphml +++ b/test/data/ln.graphml @@ -19,34 
+19,29 @@ simln 27.0 - -uacomment=w0 lnd lightninglabs/lnd:v0.17.5-beta true 27.0 - -uacomment=w1 lnd pinheadmz/circuitbreaker:278737d true 27.0 - -uacomment=w2 lnd pinheadmz/circuitbreaker:278737d --bitcoin.timelockdelta=33 27.0 - -uacomment=w2 cln --cltv-delta=33 27.0 - -uacomment=w3 diff --git a/test/data/services.graphml b/test/data/services.graphml index c9e0a0d01..25a9fa44e 100644 --- a/test/data/services.graphml +++ b/test/data/services.graphml @@ -19,7 +19,7 @@ 27.0 - -uacomment=w0 -debug=validation + debug=validation true lnd diff --git a/test/rpc_test.py b/test/rpc_test.py index 642151e5f..b53ccb0fb 100755 --- a/test/rpc_test.py +++ b/test/rpc_test.py @@ -13,7 +13,6 @@ def __init__(self): self.graph_file_path = Path(os.path.dirname(__file__)) / "data" / "12_node_ring.graphml" def run_test(self): - self.start_server() try: self.setup_network() self.test_rpc_commands() diff --git a/test/test_base.py b/test/test_base.py index 5ad3c9125..895c7032a 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -6,14 +6,13 @@ import re import threading from pathlib import Path -from subprocess import PIPE, Popen, run +from subprocess import run from tempfile import mkdtemp from time import sleep from warnet import SRC_DIR -from warnet.cli.rpc import rpc_call -from warnet.utils import exponential_backoff -from warnet.warnet import Warnet +from warnet.cli.network import _status as network_status +from warnet.cli.network import _connected as network_connected class TestBase: @@ -30,11 +29,6 @@ def setup_environment(self): self.tmpdir = Path(mkdtemp(prefix="warnet-test-")) os.environ["XDG_STATE_HOME"] = str(self.tmpdir) self.logfilepath = self.tmpdir / "warnet.log" - # Use the same dir name for the warnet network name - # replacing underscores which throws off k8s - self.network_name = self.tmpdir.name.replace("_", "") - self.server = None - self.server_thread = None self.stop_threads = threading.Event() self.network = True @@ -47,8 +41,6 @@ def setup_logging(self): 
self.log.info("Logging started") def cleanup(self, signum=None, frame=None): - if self.server is None: - return try: self.log.info("Stopping network") if self.network: @@ -58,10 +50,6 @@ def cleanup(self, signum=None, frame=None): self.log.error(f"Error bringing network down: {e}") finally: self.stop_threads.set() - self.server.terminate() - self.server.wait() - self.server_thread.join() - self.server = None def _print_and_assert_msgs(self, message): print(message) @@ -76,61 +64,20 @@ def assert_log_msgs(self): ), f"Log assertion failed. Expected message not found: {self.log_expected_msgs}" self.log_msg_assertions_passed = False - def warcli(self, cmd, network=True): + def warcli(self, cmd): self.log.debug(f"Executing warcli command: {cmd}") command = ["warcli"] + cmd.split() - if network: - command += ["--network", self.network_name] proc = run(command, capture_output=True) if proc.stderr: raise Exception(proc.stderr.decode().strip()) return proc.stdout.decode().strip() - def rpc(self, method, params=None) -> dict | list: - """Execute a warnet RPC API call directly""" - self.log.debug(f"Executing RPC method: {method}") - return rpc_call(method, params) - - @exponential_backoff(max_retries=20) - def wait_for_rpc(self, method, params=None): - """Repeatedly execute an RPC until it succeeds""" - return self.rpc(method, params) - def output_reader(self, pipe, func): while not self.stop_threads.is_set(): line = pipe.readline().strip() if line: func(line) - def start_server(self): - """Start the Warnet server and wait for RPC interface to respond""" - - if self.server is not None: - raise Exception("Server is already running") - - # TODO: check for conflicting warnet process - # maybe also ensure that no conflicting docker networks exist - - # For kubernetes we assume the server is started outside test base, - # but we can still read its log output - self.log.info("Starting Warnet server") - self.server = Popen( - ["kubectl", "logs", "-f", "rpc-0", "--since=1s"], - 
stdout=PIPE, - stderr=PIPE, - bufsize=1, - universal_newlines=True, - ) - - self.server_thread = threading.Thread( - target=self.output_reader, args=(self.server.stdout, self._print_and_assert_msgs) - ) - self.server_thread.daemon = True - self.server_thread.start() - - self.log.info("Waiting for RPC") - self.wait_for_rpc("scenarios_available") - def stop_server(self): self.cleanup() @@ -148,8 +95,8 @@ def wait_for_predicate(self, predicate, timeout=5 * 60, interval=5): ) def get_tank(self, index): - wn = Warnet.from_network(self.network_name) - return wn.tanks[index] + # TODO + return None def wait_for_all_tanks_status(self, target="running", timeout=20 * 60, interval=5): """Poll the warnet server for container status @@ -157,8 +104,11 @@ def wait_for_all_tanks_status(self, target="running", timeout=20 * 60, interval= """ def check_status(): - tanks = self.wait_for_rpc("network_status", {"network": self.network_name}) + tanks = network_status() stats = {"total": 0} + # "Probably" means all tanks are stopped and deleted + if len(tanks) == 0: + return True for tank in tanks: for service in ["bitcoin", "lightning", "circuitbreaker"]: status = tank.get(f"{service}_status") @@ -174,11 +124,7 @@ def wait_for_all_edges(self, timeout=20 * 60, interval=5): """Ensure all tanks have all the connections they are supposed to have Block until all success """ - - def check_status(): - return self.wait_for_rpc("network_connected", {"network": self.network_name}) - - self.wait_for_predicate(check_status, timeout, interval) + self.wait_for_predicate(network_connected, timeout, interval) def wait_for_all_scenarios(self): def check_scenarios(): From 8e53c3a822df046486d16f00d767f23a7fa22646 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 15:27:39 +0200 Subject: [PATCH 039/710] ci: build commander image --- .github/workflows/deploy.yml | 80 ++++++++++++++++++------------------ 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/.github/workflows/deploy.yml 
b/.github/workflows/deploy.yml index 12fae71e5..62ef02a69 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -1,60 +1,60 @@ -name: deploy +name: Publish Commander Docker image + on: - workflow_run: - workflows: ["test"] - types: - - completed + push: + branches: + - dev + paths: + - resources/images/commander/Dockerfile + tags-ignore: + - "*" jobs: - deploy-to-dockerhub: + push_to_registry: + name: Push commander Docker image to Docker Hub runs-on: ubuntu-latest - # if: github.event.workflow_run.conclusion == 'success' - # DISABLE FOR REWRITE - if: false + permissions: + packages: write + contents: read + attestations: write + id-token: write steps: - - uses: actions/checkout@v4 - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + - name: Check out the repo + uses: actions/checkout@v4 + + - name: Log in to Docker Hub + uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Docker meta id: meta uses: docker/metadata-action@v5 with: - images: ${{ secrets.DOCKERHUB_USERNAME }}/${{ secrets.DOCKERHUB_RPC_REPO }} + images: bitcoindevproject/warnet-commander tags: | type=ref,event=tag type=ref,event=pr type=raw,value=latest,enable={{is_default_branch}} labels: | maintainer=bitcoindevproject - org.opencontainers.image.title=warnet-rpc - org.opencontainers.image.description=Warnet RPC server - - name: Login to Docker Hub - if: github.ref == 'refs/heads/main' - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build production RPC image - uses: docker/build-push-action@v5 + org.opencontainers.image.title=warnet-commander + org.opencontainers.image.description=Warnet Commander + + - name: Build and push Docker image + id: push + uses: 
docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 with: - file: resources/images/rpc/Dockerfile_prod - platforms: linux/amd64,linux/arm64 context: . - push: ${{ github.ref == 'refs/heads/main' }} + file: resources/images/commander/Dockerfile + push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - cache-from: type=gha - cache-to: type=gha,mode=max - - name: Build dev RPC image - uses: docker/build-push-action@v5 + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@v1 with: - file: resources/images/rpc/Dockerfile_dev - platforms: linux/amd64,linux/arm64 - context: resources/images/rpc - push: ${{ github.ref == 'refs/heads/main' }} - tags: ${{ secrets.DOCKERHUB_USERNAME }}/${{ secrets.DOCKERHUB_RPC_REPO }}:dev - labels: ${{ steps.meta.outputs.labels }} - cache-from: type=gha - cache-to: type=gha,mode=max + subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME}} + subject-digest: ${{ steps.push.outputs.digest }} + push-to-registry: true From 3d2e02cb9aa18bb8f6cc5457b62044433404886a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 15:31:36 +0200 Subject: [PATCH 040/710] ci: re-enable apidocs job --- .github/workflows/apidocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/apidocs.yml b/.github/workflows/apidocs.yml index 4f8b6731a..78f07d76e 100644 --- a/.github/workflows/apidocs.yml +++ b/.github/workflows/apidocs.yml @@ -2,7 +2,7 @@ name: Format-api-docs on: push: branches: - - main + - dev jobs: format-api-docs: runs-on: ubuntu-latest From 0d08babdbcfbcee7277c2fcb6ab14b46a8bc61fd Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 13:32:07 +0000 Subject: [PATCH 041/710] Update apidocs and/or graphdocs --- docs/warcli.md | 75 +++++++++++++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 25 deletions(-) diff --git a/docs/warcli.md b/docs/warcli.md index 8257bdfc7..9cd9f6c66 100644 --- 
a/docs/warcli.md +++ b/docs/warcli.md @@ -30,6 +30,46 @@ options: Check Warnet requirements are installed +## Bitcoin + +### `warcli bitcoin debug-log` +Fetch the Bitcoin Core debug log from \ + +options: +| name | type | required | default | +|--------|--------|------------|-----------| +| node | Int | yes | | + +### `warcli bitcoin grep-logs` +Grep combined bitcoind logs using regex \ + +options: +| name | type | required | default | +|---------------------|--------|------------|-----------| +| pattern | String | yes | | +| show_k8s_timestamps | Bool | | False | +| no_sort | Bool | | False | + +### `warcli bitcoin messages` +Fetch messages sent between \ and \ in [network] + +options: +| name | type | required | default | +|---------|--------|------------|-----------| +| node_a | Int | yes | | +| node_b | Int | yes | | +| network | String | | "regtest" | + +### `warcli bitcoin rpc` +Call bitcoin-cli \ [params] on \ + +options: +| name | type | required | default | +|--------|--------|------------|-----------| +| node | Int | yes | | +| method | String | yes | | +| params | String | | | + ## Graph ### `warcli graph create` @@ -86,21 +126,13 @@ options: ## Network -### `warcli network connect` -Connect nodes based on the edges defined in the graph file. +### `warcli network connected` +Determine if all p2p conenctions defined in graph are established -options: -| name | type | required | default | -|------------|--------|------------|----------------------------------| -| graph_file | Path | | resources/graphs/default.graphml | ### `warcli network down` -Bring down a running warnet named [network] +Bring down a running warnet -options: -| name | type | required | default | -|---------|--------|------------|-----------| -| network | String | | "warnet" | ### `warcli network generate-yaml` Generate a Kubernetes YAML file from a graph file for deploying warnet nodes. 
@@ -120,15 +152,18 @@ options: | follow | Bool | | False | ### `warcli network start` -Start a warnet with topology loaded from a \ into [network] +Start a warnet with topology loaded from a \ options: | name | type | required | default | |------------|--------|------------|----------------------------------| | graph_file | Path | | resources/graphs/default.graphml | -| network | String | | "warnet" | | logging | Bool | | False | +### `warcli network status` +Return pod status + + ## Scenarios ### `warcli scenarios active` @@ -140,17 +175,16 @@ List available scenarios in the Warnet Test Framework ### `warcli scenarios run` -Run \ from the Warnet Test Framework on [network] with optional arguments +Run \ from the Warnet Test Framework with optional arguments options: | name | type | required | default | |-----------------|--------|------------|-----------| | scenario | String | yes | | | additional_args | String | | | -| network | String | | "warnet" | ### `warcli scenarios run-file` -Run \ from the Warnet Test Framework on [network] with optional arguments +Run \ from the Warnet Test Framework with optional arguments options: | name | type | required | default | @@ -158,14 +192,5 @@ options: | scenario_path | String | yes | | | additional_args | String | | | | name | String | | | -| network | String | | "warnet" | - -### `warcli scenarios stop` -Stop scenario with PID \ from running - -options: -| name | type | required | default | -|--------|--------|------------|-----------| -| pid | Int | yes | | From 0913c8d69f7d42b2d674192fe36ccb03fc887fd4 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 15:33:50 +0200 Subject: [PATCH 042/710] ci: move disabled jobs to dev branch --- .github/workflows/publish-dist.yml | 2 +- .github/workflows/test.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish-dist.yml b/.github/workflows/publish-dist.yml index 35c41ae39..e78b04f93 100644 --- 
a/.github/workflows/publish-dist.yml +++ b/.github/workflows/publish-dist.yml @@ -1,4 +1,4 @@ -name: Publish Python 🐍 distribution 📦 to PyPI +name: Publish 🐍 📦 to PyPI on: push diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e287d3aa7..6d25ac925 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,7 +4,7 @@ on: pull_request: push: branches: - - main + - dev jobs: ruff: From b0c3a861192cd09911132c8d39b47ac88b58ec87 Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Wed, 21 Aug 2024 13:56:23 +0200 Subject: [PATCH 043/710] hooked up network start to helm --- networks/6_node_bitcoin/defaults.yaml | 41 +++++++++++++++++ networks/6_node_bitcoin/network.yaml | 10 +++++ src/warnet/cli/k8s.py | 37 ++-------------- src/warnet/cli/main.py | 2 + src/warnet/cli/network.py | 28 ++++++------ src/warnet/cli/network2.py | 63 +++++++++++++++++++++++++++ src/warnet/cli/process.py | 33 ++++++++++++++ 7 files changed, 166 insertions(+), 48 deletions(-) create mode 100644 networks/6_node_bitcoin/defaults.yaml create mode 100644 networks/6_node_bitcoin/network.yaml create mode 100644 src/warnet/cli/network2.py create mode 100644 src/warnet/cli/process.py diff --git a/networks/6_node_bitcoin/defaults.yaml b/networks/6_node_bitcoin/defaults.yaml new file mode 100644 index 000000000..438b33d8c --- /dev/null +++ b/networks/6_node_bitcoin/defaults.yaml @@ -0,0 +1,41 @@ +collectLogs: true +metricsExport: true + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "27.0" + +config: |2+ + regtest=1 + checkmempool=0 + acceptnonstdtxn=1 + debuglogfile=0 + logips=1 + logtimemicros=1 + capturemessages=1 + fallbackfee=0.00001000 + listen=1 + + [regtest] + rpcuser=user + rpcpassword=password + rpcport=18443 + rpcallowip=0.0.0.0/0 + rpcbind=0.0.0.0 + + zmqpubrawblock=tcp://0.0.0.0:28332 + zmqpubrawtx=tcp://0.0.0.0:28333 \ No newline at end of file diff --git a/networks/6_node_bitcoin/network.yaml b/networks/6_node_bitcoin/network.yaml new file mode 100644 index 000000000..c7c01095e --- /dev/null +++ b/networks/6_node_bitcoin/network.yaml @@ -0,0 +1,10 @@ +nodes: + - name: tank-0001 + config: + image: + tag: "26.0" + - name: tank-0002 + - name: tank-0003 + - name: tank-0004 + - name: tank-0005 + - name: tank-0006 \ No newline at end of file diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index a234f0f9e..5dcd5cd65 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -1,12 +1,12 @@ -import os -import subprocess -from importlib.resources import files import json +from importlib.resources import files from typing import Any, Dict from kubernetes import client, config from kubernetes.dynamic import DynamicClient +from .process import stream_command + WAR_MANIFESTS = files("manifests") @@ -40,37 +40,6 @@ def get_edges(): return json.loads(configmap.data["data"]) -def run_command(command) -> str: - result = subprocess.run( - command, shell=True, capture_output=True, text=True, executable="/bin/bash" - ) - if result.returncode != 0: - raise Exception(result.stderr) - return result.stdout - - -def stream_command(command, env=None) -> bool: - process = subprocess.Popen( - ["/bin/bash", "-c", command], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - text=True, - bufsize=1, - 
universal_newlines=True, - ) - - for line in iter(process.stdout.readline, ""): - print(line, end="") - - process.stdout.close() - return_code = process.wait() - - if return_code != 0: - print(f"Command failed with return code {return_code}") - return False - return True - - def create_kubernetes_object( kind: str, metadata: Dict[str, Any], spec: Dict[str, Any] = None ) -> Dict[str, Any]: diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index 95197bd29..ed8744190 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -11,6 +11,7 @@ # from .ln import ln from .network import network +from .network2 import network2 from .scenarios import scenarios QUICK_START_PATH = files("scripts").joinpath("quick_start.sh") @@ -26,6 +27,7 @@ def cli(): cli.add_command(image) # cli.add_command(ln) cli.add_command(network) +cli.add_command(network2) cli.add_command(scenarios) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 9b0f56091..23b4b065c 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -8,20 +8,21 @@ import networkx as nx import yaml from rich import print + from .bitcoin import _rpc from .k8s import ( apply_kubernetes_yaml, + create_kubernetes_object, create_namespace, delete_namespace, deploy_base_configurations, - run_command, - stream_command, - set_kubectl_context, - create_kubernetes_object, get_edges, - get_mission + get_mission, + set_kubectl_context, ) +from .process import stream_command + DEFAULT_GRAPH_FILE = files("graphs").joinpath("default.graphml") WAR_MANIFESTS = files("manifests") @@ -77,9 +78,7 @@ def create_node_deployment(node: int, data: dict) -> Dict[str, Any]: "name": f"warnet-tank-{node}", "namespace": "warnet", "labels": {"app": "warnet", "mission": "tank", "index": str(node)}, - "annotations": { - "data": json.dumps(data) - } + "annotations": {"data": json.dumps(data)}, }, spec={ "containers": [ @@ -134,11 +133,7 @@ def create_config_map(node: int, config: str) -> Dict[str, Any]: 
def create_edges_map(graph): edges = [] for src, dst, data in graph.edges(data=True): - edges.append({ - "src": src, - "dst": dst, - "data": data - }) + edges.append({"src": src, "dst": dst, "data": data}) config_map = create_kubernetes_object( kind="ConfigMap", metadata={ @@ -234,6 +229,7 @@ def connected(): """Determine if all p2p conenctions defined in graph are established""" print(_connected()) + def _connected(): tanks = get_mission("tank") edges = get_edges() @@ -263,11 +259,15 @@ def status(): # TODO: make it a pretty table print(_status()) + def _status(): tanks = get_mission("tank") stats = [] for tank in tanks: - status = {"tank_index": tank.metadata.labels["index"], "bitcoin_status": tank.status.phase.lower()} + status = { + "tank_index": tank.metadata.labels["index"], + "bitcoin_status": tank.status.phase.lower(), + } stats.append(status) return stats diff --git a/src/warnet/cli/network2.py b/src/warnet/cli/network2.py new file mode 100644 index 000000000..8a439c618 --- /dev/null +++ b/src/warnet/cli/network2.py @@ -0,0 +1,63 @@ +import os +import tempfile +from pathlib import Path + +import click +import yaml + +from .process import run_command + +NETWORK_DIR = Path("networks") +DEFAULT_NETWORK = "6_node_bitcoin" +NETWORK_FILE = "network.yaml" +DEFAULTS_FILE = "defaults.yaml" +HELM_COMMAND = "helm upgrade --install --create-namespace" +BITCOIN_CHART_LOCATION = "./resources/charts/bitcoincore" +NAMESPACE = "warnet" + + +@click.group(name="network2") +def network2(): + """Network commands""" + + +@network2.command() +@click.argument("network_name", default=DEFAULT_NETWORK) +@click.option("--network", default="warnet", show_default=True) +@click.option("--logging/--no-logging", default=False) +def start2(network_name: str, logging: bool, network: str): + """Start a warnet with topology loaded from into [network]""" + full_path = os.path.join(NETWORK_DIR, network_name) + network_file_path = os.path.join(full_path, NETWORK_FILE) + defaults_file_path = 
os.path.join(full_path, DEFAULTS_FILE) + + network_file = {} + with open(network_file_path) as f: + network_file = yaml.safe_load(f) + + for node in network_file["nodes"]: + print(f"Starting node: {node.get('name')}") + try: + temp_override_file_path = "" + node_name = node.get("name") + node_config_override = node.get("config") + + cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {NAMESPACE} -f {defaults_file_path}" + + if node_config_override: + with tempfile.NamedTemporaryFile( + mode="w", suffix=".yaml", delete=False + ) as temp_file: + yaml.dump(node_config_override, temp_file) + temp_override_file_path = temp_file.name + cmd = f"{cmd} -f {temp_override_file_path}" + + if not run_command(cmd, stream_output=True): + print(f"Failed to run Helm command: {cmd}") + return + except Exception as e: + print(f"Error: {e}") + return + finally: + if temp_override_file_path: + Path(temp_override_file_path).unlink() diff --git a/src/warnet/cli/process.py b/src/warnet/cli/process.py new file mode 100644 index 000000000..4131b89b7 --- /dev/null +++ b/src/warnet/cli/process.py @@ -0,0 +1,33 @@ +import os +import subprocess + + +def run_command(command) -> str: + result = subprocess.run( + command, shell=True, capture_output=True, text=True, executable="/bin/bash" + ) + if result.returncode != 0: + raise Exception(result.stderr) + return result.stdout + + +def stream_command(command, env=None) -> bool: + process = subprocess.Popen( + ["/bin/bash", "-c", command], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1, + universal_newlines=True, + ) + + for line in iter(process.stdout.readline, ""): + print(line, end="") + + process.stdout.close() + return_code = process.wait() + + if return_code != 0: + print(f"Command failed with return code {return_code}") + return False + return True From 9d3206a9407b86be284600d22f222c8494a2a886 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 15:59:55 +0200 Subject: [PATCH 
044/710] fix run_command import --- src/warnet/cli/bitcoin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index c7d0166f4..d00c0e91a 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -9,7 +9,7 @@ from test_framework.messages import ser_uint256 from test_framework.p2p import MESSAGEMAP -from .k8s import run_command +from .process import run_command @click.group(name="bitcoin") From 4620ff9a0f2331b24daf838c542323d1acf54593 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 14:00:59 +0000 Subject: [PATCH 045/710] Update apidocs and/or graphdocs --- docs/warcli.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/warcli.md b/docs/warcli.md index 9cd9f6c66..2b55f4a07 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -164,6 +164,18 @@ options: Return pod status +## Network2 + +### `warcli network2 start2` +Start a warnet with topology loaded from \ into [network] + +options: +| name | type | required | default | +|--------------|--------|------------|------------------| +| network_name | String | | "6_node_bitcoin" | +| network | String | | "warnet" | +| logging | Bool | | False | + ## Scenarios ### `warcli scenarios active` From 3877210b76b4a10b377b1732cab7aa866f8cfbe3 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 16:06:53 +0200 Subject: [PATCH 046/710] ci: re-enable rpc_test.py --- .github/workflows/test.yml | 46 ++++++-------------------------------- 1 file changed, 7 insertions(+), 39 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6d25ac925..84e8348d7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,6 +7,7 @@ on: - dev jobs: + ruff: # DISABLE FOR REWRITE if: false @@ -17,6 +18,7 @@ jobs: - run: uv venv - run: uv pip install ruff - run: source .venv/bin/activate; ruff check . 
+ ruff-format: runs-on: ubuntu-latest steps: @@ -25,58 +27,24 @@ jobs: - run: uv venv - run: uv pip install ruff - run: source .venv/bin/activate; ruff format . + test: - # DISABLE FOR REWRITE - if: false runs-on: ubuntu-latest strategy: matrix: - test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] + # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] + test: [rpc_test.py] steps: - uses: actions/checkout@v4 - uses: hynek/setup-cached-uv@v1 - uses: azure/setup-helm@v4.2.0 - uses: medyagh/setup-minikube@master - with: - mount-path: ${{ github.workspace }}:/mnt/src - - uses: actions/download-artifact@v4 - with: - name: warnet - path: /tmp - - run: | - echo loading the image directly into minikube docker - eval $(minikube -p minikube docker-env) - docker load --input /tmp/warnet.tar - docker image ls -a - + - name: Run tests + run: | echo Installing warnet python package for cli uv venv uv pip install -e . - - echo "Contents of warnet-rpc-statefulset-dev.yaml being used:" - cat resources/manifests/warnet-rpc-statefulset-dev.yaml - - echo Setting up k8s - kubectl apply -f resources/manifests/namespace.yaml - kubectl apply -f resources/manifests/rbac-config.yaml - kubectl apply -f resources/manifests/warnet-rpc-service.yaml - kubectl apply -f resources/manifests/warnet-rpc-statefulset-dev.yaml - kubectl config set-context --current --namespace=warnet - - echo sleeping for 30s to give k8s time to boot - sleep 30 - kubectl describe pod rpc-0 - kubectl logs rpc-0 - - echo Waiting for rpc-0 to come online - until kubectl get pod rpc-0 --namespace=warnet; do - echo "Waiting for server to find pod rpc-0..." 
- sleep 4 - done - kubectl wait --for=condition=Ready --timeout=2m pod rpc-0 shell: bash - - run: | - kubectl port-forward svc/rpc 9276:9276 & - name: Run tests run: | source .venv/bin/activate From 1c5c2aea8f65cc0e9b50178edef03e8ba9986a6f Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 16:52:03 +0200 Subject: [PATCH 047/710] scenarios_test passing --- resources/images/commander/src/commander.py | 13 ---- src/warnet/cli/k8s.py | 5 ++ src/warnet/cli/scenarios.py | 65 +++++++++--------- src/warnet/scenarios/miner_std.py | 5 +- test/data/scenario_p2p_interface.py | 12 ++-- test/scenarios_test.py | 73 +++++++++------------ 6 files changed, 75 insertions(+), 98 deletions(-) diff --git a/resources/images/commander/src/commander.py b/resources/images/commander/src/commander.py index 98c9cdc71..fa1761ea9 100644 --- a/resources/images/commander/src/commander.py +++ b/resources/images/commander/src/commander.py @@ -52,19 +52,6 @@ def ensure_miner(node): node.createwallet("miner", descriptors=True) return node.get_wallet_rpc("miner") - def network_connected(self): - for tank in self.nodes: - peerinfo = tank.getpeerinfo() - manuals = 0 - for peer in peerinfo: - if peer["connection_type"] == "manual": - manuals += 1 - # Even if more edges are specifed, bitcoind only allows - # 8 manual outbound connections - if min(8, len(tank.init_peers)) > manuals: - return False - return True - def handle_sigterm(self, signum, frame): print("SIGTERM received, stopping...") self.shutdown() diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 5dcd5cd65..727ada6ee 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -92,3 +92,8 @@ def apply_kubernetes_yaml(yaml_file: str): def delete_namespace(namespace: str): command = f"kubectl delete namespace {namespace}" return stream_command(command) + + +def delete_pod(pod_name: str): + command = f"kubectl delete pod {pod_name}" + return stream_command(command) diff --git a/src/warnet/cli/scenarios.py 
b/src/warnet/cli/scenarios.py index 438152c11..20322025a 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -28,7 +28,7 @@ def available(): """ console = Console() - scenario_list = [] + scenario_list = _available() for s in pkgutil.iter_modules(SCENARIOS.__path__): scenario_list.append(s.name) @@ -41,6 +41,13 @@ def available(): console.print(table) +def _available(): + scenario_list = [] + for s in pkgutil.iter_modules(SCENARIOS.__path__): + scenario_list.append(s.name) + return scenario_list + + @scenarios.command(context_settings={"ignore_unknown_options": True}) @click.argument("scenario", type=str) @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) @@ -56,14 +63,32 @@ def run(scenario, additional_args): # Ensure the scenario file exists within the package with importlib.resources.path(scenario_package, scenario_filename) as scenario_path: scenario_path = str(scenario_path) # Convert Path object to string + return run_scenario(scenario_path, additional_args) + + +@scenarios.command(context_settings={"ignore_unknown_options": True}) +@click.argument("scenario_path", type=str) +@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) +def run_file(scenario_path, additional_args): + """ + Run from the Warnet Test Framework with optional arguments + """ + if not scenario_path.endswith(".py"): + print("Error. 
Currently only python scenarios are supported") + sys.exit(1) + return run_scenario(scenario_path, additional_args) + +def run_scenario(scenario_path, additional_args): if not os.path.exists(scenario_path): - raise Exception(f"Scenario {scenario} not found at {scenario_path}.") + raise Exception(f"Scenario file not found at {scenario_path}.") with open(scenario_path) as file: scenario_text = file.read() - name = f"commander-{scenario.replace('_', '')}-{int(time.time())}" + scenario_name = os.path.splitext(os.path.basename(scenario_path))[0] + + name = f"commander-{scenario_name.replace('_', '')}-{int(time.time())}" tankpods = get_mission("tank") tanks = [ @@ -142,37 +167,12 @@ def run(scenario, additional_args): apply_kubernetes_yaml(temp_file_path) -@scenarios.command(context_settings={"ignore_unknown_options": True}) -@click.argument("scenario_path", type=str) -@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) -@click.option("--name", type=str) -def run_file(scenario_path, additional_args, name=""): - """ - Run from the Warnet Test Framework with optional arguments - """ - if not scenario_path.endswith(".py"): - print("Error. 
Currently only python scenarios are supported") - sys.exit(1) - scenario_name = name if name else os.path.splitext(os.path.basename(scenario_path))[0] - scenario_base64 = "" - with open(scenario_path, "rb") as f: - scenario_base64 = base64.b64encode(f.read()).decode("utf-8") - - params = { - "scenario_base64": scenario_base64, - "scenario_name": scenario_name, - "additional_args": additional_args, - } - # TODO - # print(rpc_call("scenarios_run_file", params)) - - @scenarios.command() def active(): """ List running scenarios "name": "pid" pairs """ - commanders = get_mission("commander") + commanders = _active() if len(commanders) == 0: print("No scenarios running") return @@ -182,7 +182,12 @@ def active(): table.add_column("Status") for commander in commanders: - table.add_row(commander.metadata.name, commander.status.phase) + table.add_row(commander["commander"], commander["status"]) console = Console() console.print(table) + + +def _active(): + commanders = get_mission("commander") + return [{"commander": c.metadata.name, "status": c.status.phase.lower()} for c in commanders] diff --git a/src/warnet/scenarios/miner_std.py b/src/warnet/scenarios/miner_std.py index 64c0db993..1f54f86f9 100755 --- a/src/warnet/scenarios/miner_std.py +++ b/src/warnet/scenarios/miner_std.py @@ -46,10 +46,7 @@ def add_options(self, parser): ) def run_test(self): - while not self.network_connected(): - self.log.info("Waiting for complete network connection...") - sleep(5) - self.log.info("Network connected. 
Starting miners.") + self.log.info("Starting miners.") max_miners = 1 if self.options.allnodes: diff --git a/test/data/scenario_p2p_interface.py b/test/data/scenario_p2p_interface.py index 440fb86f7..fe5f4a8d8 100644 --- a/test/data/scenario_p2p_interface.py +++ b/test/data/scenario_p2p_interface.py @@ -4,8 +4,9 @@ from test_framework.messages import CInv, msg_getdata from test_framework.p2p import P2PInterface -from warnet.test_framework_bridge import WarnetTestFramework +# The base class exists inside the commander container +from commander import Commander def cli_help(): return "Run P2P GETDATA test" @@ -21,20 +22,15 @@ def on_block(self, message): self.blocks[message.block.sha256] += 1 -class GetdataTest(WarnetTestFramework): +class GetdataTest(Commander): def set_test_params(self): self.num_nodes = 1 def run_test(self): - while not self.warnet.network_connected(): - self.log.info("Waiting for complete network connection...") - sleep(5) - self.log.info("Network connected") - self.log.info("Adding the p2p connection") p2p_block_store = self.nodes[0].add_p2p_connection( - P2PStoreBlock(), dstaddr=self.warnet.tanks[0].ipv4, dstport=18444 + P2PStoreBlock(), dstaddr=self.nodes[0].rpchost, dstport=18444 ) self.log.info("test that an invalid GETDATA doesn't prevent processing of future messages") diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 734b2cae4..69499b2e9 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -2,7 +2,9 @@ import os from pathlib import Path - +from warnet.cli.scenarios import _available as scenarios_available +from warnet.cli.scenarios import _active as scenarios_active +from warnet.cli.k8s import delete_pod from test_base import TestBase @@ -13,7 +15,6 @@ def __init__(self): def run_test(self): try: - self.start_server() self.setup_network() self.test_scenarios() finally: @@ -23,85 +24,71 @@ def setup_network(self): self.log.info("Setting up network") self.log.info(self.warcli(f"network start 
{self.graph_file_path}")) self.wait_for_all_tanks_status(target="running") + self.wait_for_all_edges() def test_scenarios(self): self.check_available_scenarios() - self.run_and_check_miner_scenario("miner_std") - self.run_and_check_miner_scenario_from_file("src/warnet/scenarios/miner_std.py") - self.run_and_check_scenario_from_file("test/data/scenario_p2p_interface.py") + self.run_and_check_miner_scenario() + self.run_and_check_miner_scenario_from_file() + self.run_and_check_scenario_from_file() def check_available_scenarios(self): self.log.info("Checking available scenarios") # Use rpc instead of warcli so we get raw JSON object - scenarios = self.rpc("scenarios_available") + scenarios = scenarios_available() assert len(scenarios) == 4, f"Expected 4 available scenarios, got {len(scenarios)}" self.log.info(f"Found {len(scenarios)} available scenarios") def scenario_running(self, scenario_name: str): """Check that we are only running a single scenario of the correct name""" - active = self.rpc("scenarios_list_running") - running = scenario_name in active[0]["cmd"] - return running and len(active) == 1 + active = scenarios_active() + assert len(active) == 1 + return scenario_name in active[0]["commander"] - def run_and_check_scenario_from_file(self, scenario_file): - scenario_name = self.get_scenario_name_from_path(scenario_file) + def run_and_check_scenario_from_file(self): + scenario_file = "test/data/scenario_p2p_interface.py" def check_scenario_clean_exit(): - running = self.rpc("scenarios_list_running") - scenarios = [s for s in running if s["cmd"].strip() == scenario_name] - if not scenarios: - return False - scenario = scenarios[0] - if scenario["active"]: - return False - if scenario["return_code"] != 0: - raise Exception( - f"Scenario {scenario_name} failed with return code {scenario['return_code']}" - ) - return True - - self.log.info(f"Running scenario: {scenario_name}") + active = scenarios_active() + assert len(active) == 1 + return active[0]["status"] 
== "succeeded" + + self.log.info(f"Running scenario from: {scenario_file}") self.warcli(f"scenarios run-file {scenario_file}") self.wait_for_predicate(lambda: check_scenario_clean_exit()) - def run_and_check_miner_scenario(self, scenario_name): - self.log.info(f"Running scenario: {scenario_name}") - self.warcli(f"scenarios run {scenario_name} --allnodes --interval=1") - self.wait_for_predicate(lambda: self.scenario_running(scenario_name)) + def run_and_check_miner_scenario(self): + sc = "miner_std" + self.log.info(f"Running scenario {sc}") + self.warcli(f"scenarios run {sc} --allnodes --interval=1") + self.wait_for_predicate(lambda: self.scenario_running("commander-minerstd")) self.wait_for_predicate(lambda: self.check_blocks(30)) self.stop_scenario() - def run_and_check_miner_scenario_from_file(self, scenario_file): + def run_and_check_miner_scenario_from_file(self): + scenario_file = "src/warnet/scenarios/miner_std.py" self.log.info(f"Running scenario from file: {scenario_file}") self.warcli(f"scenarios run-file {scenario_file} --allnodes --interval=1") start = int(self.warcli("bitcoin rpc 0 getblockcount")) - scenario_name = self.get_scenario_name_from_path(scenario_file) - self.wait_for_predicate(lambda: self.scenario_running(scenario_name)) + self.wait_for_predicate(lambda: self.scenario_running("commander-minerstd")) self.wait_for_predicate(lambda: self.check_blocks(2, start=start)) self.stop_scenario() - def get_scenario_name_from_path(self, scenario_file): - return os.path.splitext(os.path.basename(scenario_file))[0] - def check_blocks(self, target_blocks, start: int = 0): - running = self.rpc("scenarios_list_running") - assert len(running) == 1, f"Expected one running scenario, got {len(running)}" - assert running[0]["active"], "Scenario should be active" - count = int(self.warcli("bitcoin rpc 0 getblockcount")) self.log.debug(f"Current block count: {count}, target: {start + target_blocks}") return count >= start + target_blocks def stop_scenario(self): 
self.log.info("Stopping running scenario") - running = self.rpc("scenarios_list_running") + running = scenarios_active() assert len(running) == 1, f"Expected one running scenario, got {len(running)}" - assert running[0]["active"], "Scenario should be active" - self.warcli(f"scenarios stop {running[0]['pid']}", False) + assert running[0]["status"] == "running", "Scenario should be running" + delete_pod(running[0]["commander"]) self.wait_for_predicate(self.check_scenario_stopped) def check_scenario_stopped(self): - running = self.rpc("scenarios_list_running") + running = scenarios_active() self.log.debug(f"Checking if scenario stopped. Running scenarios: {len(running)}") return len(running) == 0 From 3474ea71c2df108af5e00013836b3a4b57c23f19 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 16:53:01 +0200 Subject: [PATCH 048/710] ci: run scenarios test --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 84e8348d7..65c538ca7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -33,7 +33,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [rpc_test.py] + test: [rpc_test.py, scenarios_test.py] steps: - uses: actions/checkout@v4 - uses: hynek/setup-cached-uv@v1 From 4b5cbadde6cf293f5d997cf31b309dd388ff4bed Mon Sep 17 00:00:00 2001 From: pinheadmz Date: Wed, 21 Aug 2024 14:53:36 +0000 Subject: [PATCH 049/710] Update apidocs and/or graphdocs --- docs/warcli.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/warcli.md b/docs/warcli.md index 2b55f4a07..11cbe63c7 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -203,6 +203,5 @@ options: |-----------------|--------|------------|-----------| | scenario_path | String | yes | | | additional_args | String | | | -| name | String | | | From 255ff565f510de5f4a83dae10117491af32e3841 Mon 
Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 17:00:20 +0200 Subject: [PATCH 050/710] ci: re-enable some graph tests --- .github/workflows/test.yml | 2 +- src/warnet/cli/util.py | 1 - test/graph_test.py | 57 +++++++++++++++++++------------------- 3 files changed, 29 insertions(+), 31 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 65c538ca7..caa22e57a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -33,7 +33,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [rpc_test.py, scenarios_test.py] + test: [graph_test.py, rpc_test.py, scenarios_test.py] steps: - uses: actions/checkout@v4 - uses: hynek/setup-cached-uv@v1 diff --git a/src/warnet/cli/util.py b/src/warnet/cli/util.py index d57ab5c55..b455f3934 100644 --- a/src/warnet/cli/util.py +++ b/src/warnet/cli/util.py @@ -74,7 +74,6 @@ def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_ve graph.nodes[node]["build_args"] = "" graph.nodes[node]["exporter"] = False graph.nodes[node]["collect_logs"] = False - graph.nodes[node]["resources"] = None convert_unsupported_attributes(graph) return graph diff --git a/test/graph_test.py b/test/graph_test.py index 68485f93d..973d247e9 100755 --- a/test/graph_test.py +++ b/test/graph_test.py @@ -7,7 +7,8 @@ from pathlib import Path from test_base import TestBase -from warnet.lnd import LNDNode + +# from warnet.lnd import LNDNode from warnet.utils import DEFAULT_TAG @@ -24,12 +25,11 @@ def __init__(self): def run_test(self): self.test_graph_creation_and_import() self.validate_graph_schema() - - self.start_server() try: - self.test_graph_with_optional_services() + # TODO: re-enable these when we add lightning back + # self.test_graph_with_optional_services() self.test_created_graph() - self.test_imported_graph() + # self.test_imported_graph() finally: self.stop_server() @@ -37,7 +37,7 
@@ def test_graph_creation_and_import(self): self.log.info(f"CLI tool creating test graph file: {self.tf_create}") self.log.info( self.warcli( - f"graph create 10 --outfile={self.tf_create} --version={DEFAULT_TAG}", network=False + f"graph create 10 --outfile={self.tf_create} --version={DEFAULT_TAG}" ) ) self.wait_for_predicate(lambda: Path(self.tf_create).exists()) @@ -46,16 +46,15 @@ def test_graph_creation_and_import(self): self.log.info( self.warcli( f"graph import-json {self.json_file_path} --outfile={self.tf_import} --ln_image=carlakirkcohen/lnd:attackathon --cb=carlakirkcohen/circuitbreaker:attackathon-test", - network=False, ) ) self.wait_for_predicate(lambda: Path(self.tf_import).exists()) def validate_graph_schema(self): self.log.info("Validating graph schema") - assert "invalid" not in self.warcli(f"graph validate {Path(self.tf_create)}", False) - assert "invalid" not in self.warcli(f"graph validate {Path(self.tf_import)}", False) - assert "invalid" not in self.warcli(f"graph validate {self.graph_file_path}", False) + assert "invalid" not in self.warcli(f"graph validate {Path(self.tf_create)}") + assert "invalid" not in self.warcli(f"graph validate {Path(self.tf_import)}") + assert "invalid" not in self.warcli(f"graph validate {self.graph_file_path}") def test_graph_with_optional_services(self): self.log.info("Testing graph with optional services...") @@ -70,7 +69,7 @@ def test_graph_with_optional_services(self): def test_created_graph(self): self.log.info("Testing created graph...") - self.log.info(self.warcli(f"network start {Path(self.tf_create)} --force")) + self.log.info(self.warcli(f"network start {Path(self.tf_create)}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() self.warcli("bitcoin rpc 0 getblockcount") @@ -79,7 +78,7 @@ def test_created_graph(self): def test_imported_graph(self): self.log.info("Testing imported graph...") - self.log.info(self.warcli(f"network start {Path(self.tf_import)} --force")) + 
self.log.info(self.warcli(f"network start {Path(self.tf_import)}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() self.warcli("bitcoin rpc 0 getblockcount") @@ -90,23 +89,23 @@ def test_imported_graph(self): def verify_ln_channel_policies(self): self.log.info("Ensuring warnet LN channel policies match imported JSON description") - with open(self.json_file_path) as file: - actual = json.loads(self.warcli("ln rpc 0 describegraph"))["edges"] - expected = json.loads(file.read())["edges"] - expected = sorted(expected, key=lambda chan: int(chan["channel_id"])) - for chan_index, actual_chan_json in enumerate(actual): - expected_chan = LNDNode.lnchannel_from_json(expected[chan_index]) - actual_chan = LNDNode.lnchannel_from_json(actual_chan_json) - if not expected_chan.channel_match(actual_chan): - self.log.info( - f"Channel {chan_index} policy mismatch, testing flipped channel: {actual_chan.short_chan_id}" - ) - if not expected_chan.channel_match(actual_chan.flip()): - raise Exception( - f"Channel policy doesn't match source: {actual_chan.short_chan_id}\n" - + f"Actual:\n{actual_chan}\n" - + f"Expected:\n{expected_chan}\n" - ) + # with open(self.json_file_path) as file: + # actual = json.loads(self.warcli("ln rpc 0 describegraph"))["edges"] + # expected = json.loads(file.read())["edges"] + # expected = sorted(expected, key=lambda chan: int(chan["channel_id"])) + # for chan_index, actual_chan_json in enumerate(actual): + # expected_chan = LNDNode.lnchannel_from_json(expected[chan_index]) + # actual_chan = LNDNode.lnchannel_from_json(actual_chan_json) + # if not expected_chan.channel_match(actual_chan): + # self.log.info( + # f"Channel {chan_index} policy mismatch, testing flipped channel: {actual_chan.short_chan_id}" + # ) + # if not expected_chan.channel_match(actual_chan.flip()): + # raise Exception( + # f"Channel policy doesn't match source: {actual_chan.short_chan_id}\n" + # + f"Actual:\n{actual_chan}\n" + # + f"Expected:\n{expected_chan}\n" 
+ # ) if __name__ == "__main__": From bd7bccf5013ed5aa030f7f649a22ab128f7f8867 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 16:59:49 +0200 Subject: [PATCH 051/710] scenarios: commander image --- src/warnet/cli/scenarios.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 20322025a..3b5f936ca 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -136,7 +136,7 @@ def run_scenario(scenario_path, additional_args): "containers": [ { "name": name, - "image": "warnet-commander:latest", + "image": "bitcoindevproject/warnet-commander:latest", "args": additional_args, "imagePullPolicy": "Never", "volumeMounts": [ From 5e68e5d1a8f6192a8fd677e596e87d584b38ce7e Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 17:13:34 +0200 Subject: [PATCH 052/710] actually pull commander image --- src/warnet/cli/scenarios.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 3b5f936ca..3d5b5dc5b 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -138,7 +138,6 @@ def run_scenario(scenario_path, additional_args): "name": name, "image": "bitcoindevproject/warnet-commander:latest", "args": additional_args, - "imagePullPolicy": "Never", "volumeMounts": [ { "name": "warnetjson", From 9f437e8bea99258d2c32c3661ee8b7b9c4e6f69f Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 17:25:02 +0200 Subject: [PATCH 053/710] ci: run graph test --- .github/workflows/test.yml | 2 +- src/warnet/cli/k8s.py | 2 +- test/logging_test.py | 4 ---- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index caa22e57a..bef355556 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -33,7 +33,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, 
dag_connection_test.py, logging_test.py] - test: [graph_test.py, rpc_test.py, scenarios_test.py] + test: [graph_test.py, rpc_test.py, scenarios_test.py, graph_test.py] steps: - uses: actions/checkout@v4 - uses: hynek/setup-cached-uv@v1 diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 727ada6ee..ac35de212 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -90,7 +90,7 @@ def apply_kubernetes_yaml(yaml_file: str): def delete_namespace(namespace: str): - command = f"kubectl delete namespace {namespace}" + command = f"kubectl delete namespace {namespace} --ignore-not-found" return stream_command(command) diff --git a/test/logging_test.py b/test/logging_test.py index 2b7797970..b24826072 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -21,7 +21,6 @@ def __init__(self): self.connect_logging_logger = logging.getLogger("cnct_log") def run_test(self): - self.start_server() try: self.start_logging() self.setup_network() @@ -53,9 +52,6 @@ def start_logging(self): self.connect_logging_thread.daemon = True self.connect_logging_thread.start() - self.log.info("Waiting for RPC") - self.wait_for_rpc("scenarios_available") - def setup_network(self): self.log.info("Setting up network") self.log.info(self.warcli(f"network start {self.graph_file_path}")) From 9de24ac9b9aa829e893b0569b387969101781951 Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Wed, 21 Aug 2024 17:32:44 +0200 Subject: [PATCH 054/710] bitcoin tanks now connect to each other --- networks/6_node_bitcoin/defaults.yaml | 24 ++------ networks/6_node_bitcoin/network.yaml | 27 ++++++++- .../charts/bitcoincore/templates/_helpers.tpl | 7 +-- .../bitcoincore/templates/configmap.yaml | 11 +++- .../charts/bitcoincore/templates/pod.yaml | 13 ++++- .../charts/bitcoincore/templates/service.yaml | 27 +++++++++ resources/charts/bitcoincore/values.yaml | 55 ++++++++++++------- src/warnet/cli/network2.py | 13 +++-- 8 files changed, 117 insertions(+), 60 deletions(-) create mode 100644 
resources/charts/bitcoincore/templates/service.yaml diff --git a/networks/6_node_bitcoin/defaults.yaml b/networks/6_node_bitcoin/defaults.yaml index 438b33d8c..2ff2e554f 100644 --- a/networks/6_node_bitcoin/defaults.yaml +++ b/networks/6_node_bitcoin/defaults.yaml @@ -1,3 +1,5 @@ +chain: regtest + collectLogs: true metricsExport: true @@ -19,23 +21,5 @@ image: # Overrides the image tag whose default is the chart appVersion. tag: "27.0" -config: |2+ - regtest=1 - checkmempool=0 - acceptnonstdtxn=1 - debuglogfile=0 - logips=1 - logtimemicros=1 - capturemessages=1 - fallbackfee=0.00001000 - listen=1 - - [regtest] - rpcuser=user - rpcpassword=password - rpcport=18443 - rpcallowip=0.0.0.0/0 - rpcbind=0.0.0.0 - - zmqpubrawblock=tcp://0.0.0.0:28332 - zmqpubrawtx=tcp://0.0.0.0:28333 \ No newline at end of file +config: | + dns=1 \ No newline at end of file diff --git a/networks/6_node_bitcoin/network.yaml b/networks/6_node_bitcoin/network.yaml index c7c01095e..5269c6a5a 100644 --- a/networks/6_node_bitcoin/network.yaml +++ b/networks/6_node_bitcoin/network.yaml @@ -1,10 +1,31 @@ nodes: - name: tank-0001 - config: - image: - tag: "26.0" + image: + tag: "26.0" + connect: + - tank-0002 + - tank-0003 - name: tank-0002 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + connect: + - tank-0003 + - tank-0004 - name: tank-0003 + connect: + - tank-0004 + - tank-0005 - name: tank-0004 + connect: + - tank-0005 + - tank-0006 - name: tank-0005 + connect: + - tank-0006 + - tank-0007 - name: tank-0006 \ No newline at end of file diff --git a/resources/charts/bitcoincore/templates/_helpers.tpl b/resources/charts/bitcoincore/templates/_helpers.tpl index de333a17b..25622c4e8 100644 --- a/resources/charts/bitcoincore/templates/_helpers.tpl +++ b/resources/charts/bitcoincore/templates/_helpers.tpl @@ -14,12 +14,7 @@ If release name contains chart name it will be used as a full name. 
{{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" }} {{- end }} {{- end }} diff --git a/resources/charts/bitcoincore/templates/configmap.yaml b/resources/charts/bitcoincore/templates/configmap.yaml index 9f25c8e5e..ea21616f0 100644 --- a/resources/charts/bitcoincore/templates/configmap.yaml +++ b/resources/charts/bitcoincore/templates/configmap.yaml @@ -5,5 +5,12 @@ metadata: labels: {{- include "bitcoincore.labels" . | nindent 4 }} data: - bitcoin-conf: | - {{ tpl .Values.config . | nindent 2 }} \ No newline at end of file + bitcoin.conf: | + {{- if eq .Values.chain "regtest" }} + {{- tpl .Values.regtestConfig . | nindent 4 }} + {{- end }} + {{- .Values.baseConfig | nindent 4 }} + {{- .Values.config | nindent 4 }} + {{- range .Values.connect }} + {{- print "connect=" . 
| nindent 4}} + {{- end }} \ No newline at end of file diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index 4b1b784e0..202ed4179 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -22,8 +22,17 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - - name: http - containerPort: {{ .Values.service.port }} + - name: rpc + containerPort: {{ .Values.regtest.RPCPort }} + protocol: TCP + - name: p2p + containerPort: {{ .Values.regtest.P2PPort }} + protocol: TCP + - name: zmq-tx + containerPort: {{ .Values.regtest.ZMQTxPort }} + protocol: TCP + - name: zmq-block + containerPort: {{ .Values.regtest.ZMQBlockPort }} protocol: TCP livenessProbe: {{- toYaml .Values.livenessProbe | nindent 8 }} diff --git a/resources/charts/bitcoincore/templates/service.yaml b/resources/charts/bitcoincore/templates/service.yaml new file mode 100644 index 000000000..f2bb4fdf2 --- /dev/null +++ b/resources/charts/bitcoincore/templates/service.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "bitcoincore.fullname" . }} + labels: + {{- include "bitcoincore.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.regtest.RPCPort }} + targetPort: rpc + protocol: TCP + name: rpc + - port: {{ .Values.regtest.P2PPort }} + targetPort: p2p + protocol: TCP + name: p2p + - port: {{ .Values.regtest.ZMQTxPort }} + targetPort: zmq-tx + protocol: TCP + name: zmq-tx + - port: {{ .Values.regtest.ZMQBlockPort }} + targetPort: zmq-block + protocol: TCP + name: zmq-block + selector: + {{- include "bitcoincore.selectorLabels" . 
| nindent 4 }} diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index d1d875417..f9de5cb30 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -38,7 +38,12 @@ securityContext: {} service: type: ClusterIP - port: 80 + +regtest: + RPCPort: 18443 + P2PPort: 18444 + ZMQTxPort: 28333 + ZMQBlockPort: 28332 ingress: enabled: false @@ -107,23 +112,31 @@ tolerations: [] affinity: {} -config: |2+ - regtest=1 - checkmempool=0 - acceptnonstdtxn=1 - debuglogfile=0 - logips=1 - logtimemicros=1 - capturemessages=1 - fallbackfee=0.00001000 - listen=1 - - [regtest] - rpcuser=user - rpcpassword=password - rpcport=18443 - rpcallowip=0.0.0.0/0 - rpcbind=0.0.0.0 - - zmqpubrawblock=tcp://0.0.0.0:28332 - zmqpubrawtx=tcp://0.0.0.0:28333 \ No newline at end of file +chain: regtest + +regtestConfig: | + regtest=1 + + [regtest] + rpcuser=user + rpcpassword=password + rpcport=18443 + rpcallowip=0.0.0.0/0 + rpcbind=0.0.0.0 + +baseConfig: | + checkmempool=0 + acceptnonstdtxn=1 + debuglogfile=0 + logips=1 + logtimemicros=1 + capturemessages=1 + fallbackfee=0.00001000 + listen=1 + + zmqpubrawblock=tcp://0.0.0.0:28332 + zmqpubrawtx=tcp://0.0.0.0:28333 + +config: "" + +connect: [] \ No newline at end of file diff --git a/src/warnet/cli/network2.py b/src/warnet/cli/network2.py index 8a439c618..011026de0 100644 --- a/src/warnet/cli/network2.py +++ b/src/warnet/cli/network2.py @@ -5,7 +5,7 @@ import click import yaml -from .process import run_command +from .process import stream_command NETWORK_DIR = Path("networks") DEFAULT_NETWORK = "6_node_bitcoin" @@ -40,7 +40,8 @@ def start2(network_name: str, logging: bool, network: str): try: temp_override_file_path = "" node_name = node.get("name") - node_config_override = node.get("config") + # all the keys apart from name + node_config_override = {k: v for k, v in node.items() if k != "name"} cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} 
--namespace {NAMESPACE} -f {defaults_file_path}" @@ -52,12 +53,12 @@ def start2(network_name: str, logging: bool, network: str): temp_override_file_path = temp_file.name cmd = f"{cmd} -f {temp_override_file_path}" - if not run_command(cmd, stream_output=True): + if not stream_command(cmd): print(f"Failed to run Helm command: {cmd}") return except Exception as e: print(f"Error: {e}") return - finally: - if temp_override_file_path: - Path(temp_override_file_path).unlink() + # finally: + # if temp_override_file_path: + # Path(temp_override_file_path).unlink() From 5fc15f67fca32b65199f45b20fb1728160c8a5e0 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 17:30:24 +0200 Subject: [PATCH 055/710] ci: run dag connection test --- .github/workflows/test.yml | 2 +- test/dag_connection_test.py | 1 - test/data/scenario_connect_dag.py | 5 +++-- test/test_base.py | 12 ++++++++---- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bef355556..27bdc5947 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -33,7 +33,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [graph_test.py, rpc_test.py, scenarios_test.py, graph_test.py] + test: [scenarios_test.py, rpc_test.py, graph_test.py, dag_connection_test.py, logging_test.py] steps: - uses: actions/checkout@v4 - uses: hynek/setup-cached-uv@v1 diff --git a/test/dag_connection_test.py b/test/dag_connection_test.py index 32c2ccc8c..0957ce14f 100755 --- a/test/dag_connection_test.py +++ b/test/dag_connection_test.py @@ -14,7 +14,6 @@ def __init__(self): ) def run_test(self): - self.start_server() try: self.setup_network() self.run_connect_dag_scenario() diff --git a/test/data/scenario_connect_dag.py b/test/data/scenario_connect_dag.py index f0565f9b7..b469b92c4 100644 --- a/test/data/scenario_connect_dag.py +++ 
b/test/data/scenario_connect_dag.py @@ -4,7 +4,8 @@ from enum import Enum, auto, unique from time import sleep -from warnet.test_framework_bridge import WarnetTestFramework +# The base class exists inside the commander container +from commander import Commander def cli_help(): @@ -17,7 +18,7 @@ class ConnectionType(Enum): DNS = auto() -class ConnectDag(WarnetTestFramework): +class ConnectDag(Commander): def set_test_params(self): # This is just a minimum self.num_nodes = 10 diff --git a/test/test_base.py b/test/test_base.py index 895c7032a..861231a9e 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -13,7 +13,7 @@ from warnet import SRC_DIR from warnet.cli.network import _status as network_status from warnet.cli.network import _connected as network_connected - +from warnet.cli.scenarios import _active as scenarios_active class TestBase: def __init__(self): @@ -128,9 +128,13 @@ def wait_for_all_edges(self, timeout=20 * 60, interval=5): def wait_for_all_scenarios(self): def check_scenarios(): - scns = self.rpc("scenarios_list_running") - return all(not scn["active"] for scn in scns) - + scns = scenarios_active() + if len(scns) == 0: + return True + for s in scns: + if s["status"] != "succeeded": + return False + return True self.wait_for_predicate(check_scenarios) def get_scenario_return_code(self, scenario_name): From 93dc8d54ac093262a960855ea25d6b72eaf16ac1 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 17:44:57 +0200 Subject: [PATCH 056/710] remove more stuff --- .github/workflows/test.yml | 2 +- src/warnet/test_framework_bridge.py | 406 ----------------------- src/warnet/utils.py | 479 ---------------------------- 3 files changed, 1 insertion(+), 886 deletions(-) delete mode 100644 src/warnet/test_framework_bridge.py delete mode 100644 src/warnet/utils.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 27bdc5947..4bcf5bad8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -33,7 
+33,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [scenarios_test.py, rpc_test.py, graph_test.py, dag_connection_test.py, logging_test.py] + test: [scenarios_test.py, rpc_test.py, graph_test.py, logging_test.py] steps: - uses: actions/checkout@v4 - uses: hynek/setup-cached-uv@v1 diff --git a/src/warnet/test_framework_bridge.py b/src/warnet/test_framework_bridge.py deleted file mode 100644 index 7872284d2..000000000 --- a/src/warnet/test_framework_bridge.py +++ /dev/null @@ -1,406 +0,0 @@ -import argparse -import configparser -import ipaddress -import logging -import os -import pathlib -import random -import signal -import sys -import tempfile - -from test_framework.authproxy import AuthServiceProxy -from test_framework.p2p import NetworkThread -from test_framework.test_framework import ( - TMPDIR_PREFIX, - BitcoinTestFramework, - TestStatus, -) -from test_framework.test_node import TestNode -from test_framework.util import PortSeed, get_rpc_proxy - -from .warnet import Warnet - - -# Ensure that all RPC calls are made with brand new http connections -def auth_proxy_request(self, method, path, postdata): - self._set_conn() # creates new http client connection - return self.oldrequest(method, path, postdata) - - -AuthServiceProxy.oldrequest = AuthServiceProxy._request -AuthServiceProxy._request = auth_proxy_request - - -class WarnetTestFramework(BitcoinTestFramework): - def set_test_params(self): - pass - - def run_test(self): - pass - - def handle_sigterm(self, signum, frame): - print("SIGTERM received, stopping...") - self.shutdown() - sys.exit(0) - - # The following functions are chopped-up hacks of - # the original methods from BitcoinTestFramework - - def setup(self): - signal.signal(signal.SIGTERM, self.handle_sigterm) - - # Must setup warnet first to avoid double formatting - self.warnet = Warnet.from_network(self.options.network) - # hacked from _start_logging() 
- # Scenarios will log plain messages to stdout only, which will can redirected by warnet - self.log = logging.getLogger(self.__class__.__name__) - self.log.setLevel(logging.INFO) # set this to DEBUG to see ALL RPC CALLS - - # Because scenarios run in their own subprocess, the logger here - # is not the same as the warnet server or other global loggers. - # Scenarios log directly to stdout which gets picked up by the - # subprocess manager in the server, and reprinted to the global log. - ch = logging.StreamHandler(sys.stdout) - formatter = logging.Formatter(fmt="%(name)-8s %(message)s") - ch.setFormatter(formatter) - self.log.addHandler(ch) - - for i, tank in enumerate(self.warnet.tanks): - ip = tank.ipv4 - self.log.info(f"Adding TestNode {i} from tank {tank.index} with IP {ip}") - node = TestNode( - i, - pathlib.Path(), # datadir path - chain=tank.bitcoin_network, - rpchost=ip, - timewait=60, - timeout_factor=self.options.timeout_factor, - bitcoind=None, - bitcoin_cli=None, - cwd=self.options.tmpdir, - coverage_dir=self.options.coveragedir, - ) - node.rpc = get_rpc_proxy( - f"http://{tank.rpc_user}:{tank.rpc_password}@{ip}:{tank.rpc_port}", - i, - timeout=60, - coveragedir=self.options.coveragedir, - ) - node.rpc_connected = True - self.nodes.append(node) - - self.num_nodes = len(self.nodes) - - # Set up temp directory and start logging - if self.options.tmpdir: - self.options.tmpdir = os.path.abspath(self.options.tmpdir) - os.makedirs(self.options.tmpdir, exist_ok=False) - else: - self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) - - # self.options.cachedir = os.path.abspath(self.options.cachedir) - - # config = self.config - - # self.set_binary_paths() - - # os.environ['PATH'] = os.pathsep.join([ - # os.path.join(config['environment']['BUILDDIR'], 'src'), - # os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH'] - # ]) - - # Set up temp directory and start logging - # if self.options.tmpdir: - # self.options.tmpdir = 
os.path.abspath(self.options.tmpdir) - # os.makedirs(self.options.tmpdir, exist_ok=False) - # else: - # self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) - # self._start_logging() - - # Seed the PRNG. Note that test runs are reproducible if and only if - # a single thread accesses the PRNG. For more information, see - # https://docs.python.org/3/library/random.html#notes-on-reproducibility. - # The network thread shouldn't access random. If we need to change the - # network thread to access randomness, it should instantiate its own - # random.Random object. - seed = self.options.randomseed - - if seed is None: - seed = random.randrange(sys.maxsize) - else: - self.log.info(f"User supplied random seed {seed}") - - random.seed(seed) - self.log.info(f"PRNG seed is: {seed}") - - self.log.debug("Setting up network thread") - self.network_thread = NetworkThread() - self.network_thread.start() - - # if self.options.usecli: - # if not self.supports_cli: - # raise SkipTest("--usecli specified but test does not support using CLI") - # self.skip_if_no_cli() - # self.skip_test_if_missing_module() - # self.setup_chain() - # self.setup_network() - - self.success = TestStatus.PASSED - - def parse_args(self): - previous_releases_path = "" - parser = argparse.ArgumentParser(usage="%(prog)s [options]") - parser.add_argument( - "--nocleanup", - dest="nocleanup", - default=False, - action="store_true", - help="Leave bitcoinds and test.* datadir on exit or error", - ) - parser.add_argument( - "--nosandbox", - dest="nosandbox", - default=False, - action="store_true", - help="Don't use the syscall sandbox", - ) - parser.add_argument( - "--noshutdown", - dest="noshutdown", - default=False, - action="store_true", - help="Don't stop bitcoinds after the test execution", - ) - parser.add_argument( - "--cachedir", - dest="cachedir", - default=None, - help="Directory for caching pregenerated datadirs (default: %(default)s)", - ) - parser.add_argument( - 
"--tmpdir", dest="tmpdir", default=None, help="Root directory for datadirs" - ) - parser.add_argument( - "-l", - "--loglevel", - dest="loglevel", - default="DEBUG", - help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.", - ) - parser.add_argument( - "--tracerpc", - dest="trace_rpc", - default=False, - action="store_true", - help="Print out all RPC calls as they are made", - ) - parser.add_argument( - "--portseed", - dest="port_seed", - default=0, - help="The seed to use for assigning port numbers (default: current process id)", - ) - parser.add_argument( - "--previous-releases", - dest="prev_releases", - default=None, - action="store_true", - help="Force test of previous releases (default: %(default)s)", - ) - parser.add_argument( - "--coveragedir", - dest="coveragedir", - default=None, - help="Write tested RPC commands into this directory", - ) - parser.add_argument( - "--configfile", - dest="configfile", - default=None, - help="Location of the test framework config file (default: %(default)s)", - ) - parser.add_argument( - "--pdbonfailure", - dest="pdbonfailure", - default=False, - action="store_true", - help="Attach a python debugger if test fails", - ) - parser.add_argument( - "--usecli", - dest="usecli", - default=False, - action="store_true", - help="use bitcoin-cli instead of RPC for all commands", - ) - parser.add_argument( - "--perf", - dest="perf", - default=False, - action="store_true", - help="profile running nodes with perf for the duration of the test", - ) - parser.add_argument( - "--valgrind", - dest="valgrind", - default=False, - action="store_true", - help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. 
valgrind 3.14 or later required.", - ) - parser.add_argument( - "--randomseed", - default=0x7761726E6574, # "warnet" ascii - help="set a random seed for deterministically reproducing a previous test run", - ) - parser.add_argument( - "--timeout-factor", - dest="timeout_factor", - default=1, - help="adjust test timeouts by a factor. Setting it to 0 disables all timeouts", - ) - parser.add_argument( - "--network", - dest="network", - default="warnet", - help="Designate which warnet this should run on (default: warnet)", - ) - parser.add_argument( - "--v2transport", - dest="v2transport", - default=False, - action="store_true", - help="use BIP324 v2 connections between all nodes by default", - ) - - self.add_options(parser) - # Running TestShell in a Jupyter notebook causes an additional -f argument - # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument - # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168 - parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1") - self.options = parser.parse_args() - if self.options.timeout_factor == 0: - self.options.timeout_factor = 99999 - self.options.timeout_factor = self.options.timeout_factor or ( - 4 if self.options.valgrind else 1 - ) - self.options.previous_releases_path = previous_releases_path - config = configparser.ConfigParser() - if self.options.configfile is not None: - with open(self.options.configfile) as f: - config.read_file(f) - - config["environment"] = {"PACKAGE_BUGREPORT": ""} - - self.config = config - - if "descriptors" not in self.options: - # Wallet is not required by the test at all and the value of self.options.descriptors won't matter. - # It still needs to exist and be None in order for tests to work however. - # So set it to None to force -disablewallet, because the wallet is not needed. 
- self.options.descriptors = None - elif self.options.descriptors is None: - # Some wallet is either required or optionally used by the test. - # Prefer SQLite unless it isn't available - if self.is_sqlite_compiled(): - self.options.descriptors = True - elif self.is_bdb_compiled(): - self.options.descriptors = False - else: - # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter - # It still needs to exist and be None in order for tests to work however. - # So set it to None, which will also set -disablewallet. - self.options.descriptors = None - - PortSeed.n = self.options.port_seed - - def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool = True): - """ - Kwargs: - wait_for_connect: if True, block until the nodes are verified as connected. You might - want to disable this when using -stopatheight with one of the connected nodes, - since there will be a race between the actual connection and performing - the assertions before one node shuts down. 
- """ - from_connection = self.nodes[a] - to_connection = self.nodes[b] - - to_ip_port = self.warnet.tanks[b].get_dns_addr() - from_ip_port = self.warnet.tanks[a].get_ip_addr() - - if peer_advertises_v2 is None: - peer_advertises_v2 = self.options.v2transport - - if peer_advertises_v2: - from_connection.addnode(node=to_ip_port, command="onetry", v2transport=True) - else: - # skip the optional third argument (default false) for - # compatibility with older clients - from_connection.addnode(to_ip_port, "onetry") - - if not wait_for_connect: - return - - def get_peer_ip(peer): - try: # we encounter a regular ip address - ip_addr = str(ipaddress.ip_address(peer["addr"].split(":")[0])) - return ip_addr - except ValueError as err: # or we encounter a service name - try: - # NETWORK-tank-TANK_INDEX-service - # NETWORK-test-TEST-tank-TANK_INDEX-service - tank_index = int(peer["addr"].split("-")[-2]) - except (ValueError, IndexError) as inner_err: - raise ValueError( - "could not derive tank index from service name: {} {}".format( - peer["addr"], inner_err - ) - ) from err - - ip_addr = self.warnet.tanks[tank_index].get_ip_addr() - return ip_addr - - # poll until version handshake complete to avoid race conditions - # with transaction relaying - # See comments in net_processing: - # * Must have a version message before anything else - # * Must have a verack message before anything else - self.wait_until( - lambda: any( - peer["addr"] == to_ip_port and peer["version"] != 0 - for peer in from_connection.getpeerinfo() - ) - ) - self.wait_until( - lambda: any( - get_peer_ip(peer) == from_ip_port and peer["version"] != 0 - for peer in to_connection.getpeerinfo() - ) - ) - self.wait_until( - lambda: any( - peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 - for peer in from_connection.getpeerinfo() - ) - ) - self.wait_until( - lambda: any( - get_peer_ip(peer) == from_ip_port - and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 - for peer in 
to_connection.getpeerinfo() - ) - ) - # The message bytes are counted before processing the message, so make - # sure it was fully processed by waiting for a ping. - self.wait_until( - lambda: any( - peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 - for peer in from_connection.getpeerinfo() - ) - ) - self.wait_until( - lambda: any( - get_peer_ip(peer) == from_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 - for peer in to_connection.getpeerinfo() - ) - ) diff --git a/src/warnet/utils.py b/src/warnet/utils.py deleted file mode 100644 index 23dad566b..000000000 --- a/src/warnet/utils.py +++ /dev/null @@ -1,479 +0,0 @@ -import functools -import ipaddress -import json -import logging -import os -import random -import re -import stat -import subprocess -import sys -import time -from io import BytesIO -from pathlib import Path - -import networkx as nx -from jsonschema import validate -from test_framework.messages import ser_uint256 -from test_framework.p2p import MESSAGEMAP -from warnet import SRC_DIR - -logger = logging.getLogger("utils") - - -SUPPORTED_TAGS = ["27.0", "26.0", "25.1", "24.2", "23.2", "22.2"] -DEFAULT_TAG = SUPPORTED_TAGS[0] -WEIGHTED_TAGS = [ - tag for index, tag in enumerate(reversed(SUPPORTED_TAGS)) for _ in range(index + 1) -] - - -class NonErrorFilter(logging.Filter): - def filter(self, record: logging.LogRecord) -> bool | logging.LogRecord: - return record.levelno <= logging.INFO - - -def exponential_backoff(max_retries=5, base_delay=1, max_delay=32): - """ - A decorator for exponential backoff. - - Parameters: - - max_retries: Maximum number of retries before giving up. - - base_delay: Initial delay in seconds. - - max_delay: Maximum delay in seconds. 
- """ - - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - retries = 0 - while retries < max_retries: - try: - return func(*args, **kwargs) - except Exception as e: - error_msg = str(e).replace("\n", " ").replace("\t", " ") - logger.error(f"rpc error: {error_msg}") - retries += 1 - if retries == max_retries: - raise e - delay = min(base_delay * (2**retries), max_delay) - logger.warning(f"exponential_backoff: retry in {delay} seconds...") - time.sleep(delay) - - return wrapper - - return decorator - - -def handle_json(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = "" - try: - result = func(*args, **kwargs) - logger.debug(f"{result=:}") - if isinstance(result, dict): - return result - parsed_result = json.loads(result) - return parsed_result - except json.JSONDecodeError as e: - logging.error( - f"JSON parsing error in {func.__name__}: {e}. Undecodable result: {result}" - ) - raise - except Exception as e: - logger.error(f"Error in {func.__name__}: {e}") - raise - - return wrapper - - -def get_architecture(): - """ - Get the architecture of the machine. - :return: The architecture of the machine or None if an error occurred - """ - result = subprocess.run(["uname", "-m"], stdout=subprocess.PIPE) - arch = result.stdout.decode("utf-8").strip() - if arch == "x86_64": - arch = "amd64" - if arch is None: - raise Exception("Failed to detect architecture.") - return arch - - -def generate_ipv4_addr(subnet): - """ - Generate a valid random IPv4 address within the given subnet. 
- - :param subnet: Subnet in CIDR notation (e.g., '100.0.0.0/8') - :return: Random IP address within the subnet - """ - reserved_ips = [ - "0.0.0.0/8", - "10.0.0.0/8", - "100.64.0.0/10", - "127.0.0.0/8", - "169.254.0.0/16", - "172.16.0.0/12", - "192.0.0.0/24", - "192.0.2.0/24", - "192.88.99.0/24", - "192.168.0.0/16", - "198.18.0.0/15", - "198.51.100.0/24", - "203.0.113.0/24", - "224.0.0.0/4", - ] - - def is_public(ip): - for reserved in reserved_ips: - if ipaddress.ip_address(ip) in ipaddress.ip_network(reserved, strict=False): - return False - return True - - network = ipaddress.ip_network(subnet, strict=False) - - # Generate a random IP within the subnet range - while True: - ip_int = random.randint(int(network.network_address), int(network.broadcast_address)) - ip_str = str(ipaddress.ip_address(ip_int)) - if is_public(ip_str): - return ip_str - - -def sanitize_tc_netem_command(command: str) -> bool: - """ - Sanitize the tc-netem command to ensure it's valid and safe to execute, as we run it as root on a container. - - Args: - - command (str): The tc-netem command to sanitize. - - Returns: - - bool: True if the command is valid and safe, False otherwise. 
- """ - if not command.startswith("tc qdisc add dev eth0 root netem"): - return False - - tokens = command.split()[7:] # Skip the prefix - - # Valid tc-netem parameters and their patterns - valid_params = { - "delay": r"^\d+ms(\s\d+ms)?(\sdistribution\s(normal|pareto|paretonormal|uniform))?$", - "loss": r"^\d+(\.\d+)?%$", - "duplicate": r"^\d+(\.\d+)?%$", - "corrupt": r"^\d+(\.\d+)?%$", - "reorder": r"^\d+(\.\d+)?%\s\d+(\.\d+)?%$", - "rate": r"^\d+(kbit|mbit|gbit)$", - } - - # Validate each param - i = 0 - while i < len(tokens): - param = tokens[i] - if param not in valid_params: - return False - i += 1 - value_tokens = [] - while i < len(tokens) and tokens[i] not in valid_params: - value_tokens.append(tokens[i]) - i += 1 - value = " ".join(value_tokens) - if not re.match(valid_params[param], value): - return False - - return True - - -def parse_bitcoin_conf(file_content): - """ - Custom parser for INI-style bitcoin.conf - - Args: - - file_content (str): The content of the INI-style file. - - Returns: - - dict: A dictionary representation of the file content. - Key-value pairs are stored as tuples so one key may have - multiple values. Sections are represented as arrays of these tuples. - """ - current_section = None - result = {current_section: []} - - for line in file_content.splitlines(): - line = line.strip() - if not line or line.startswith("#"): - continue - - if line.startswith("[") and line.endswith("]"): - current_section = line[1:-1] - result[current_section] = [] - elif "=" in line: - key, value = line.split("=", 1) - result[current_section].append((key.strip(), value.strip())) - - return result - - -def dump_bitcoin_conf(conf_dict, for_graph=False): - """ - Converts a dictionary representation of bitcoin.conf content back to INI-style string. - - Args: - - conf_dict (dict): A dictionary representation of the file content. - - Returns: - - str: The INI-style string representation of the input dictionary. 
- """ - result = [] - - # Print global section at the top first - values = conf_dict[None] - for sub_key, sub_value in values: - result.append(f"{sub_key}={sub_value}") - - # Then print any named subsections - for section, values in conf_dict.items(): - if section is not None: - result.append(f"\n[{section}]") - else: - continue - for sub_key, sub_value in values: - result.append(f"{sub_key}={sub_value}") - - if for_graph: - return ",".join(result) - - # Terminate file with newline - return "\n".join(result) + "\n" - - -def to_jsonable(obj): - HASH_INTS = [ - "blockhash", - "block_hash", - "hash", - "hashMerkleRoot", - "hashPrevBlock", - "hashstop", - "prev_header", - "sha256", - "stop_hash", - ] - - HASH_INT_VECTORS = [ - "hashes", - "headers", - "vHave", - "vHash", - ] - - if hasattr(obj, "__dict__"): - return obj.__dict__ - elif hasattr(obj, "__slots__"): - ret = {} # type: Any - for slot in obj.__slots__: - val = getattr(obj, slot, None) - if slot in HASH_INTS and isinstance(val, int): - ret[slot] = ser_uint256(val).hex() - elif slot in HASH_INT_VECTORS and all(isinstance(a, int) for a in val): - ret[slot] = [ser_uint256(a).hex() for a in val] - else: - ret[slot] = to_jsonable(val) - return ret - elif isinstance(obj, list): - return [to_jsonable(a) for a in obj] - elif isinstance(obj, bytes): - return obj.hex() - else: - return obj - - -# This function is a hacked-up copy of process_file() from -# Bitcoin Core contrib/message-capture/message-capture-parser.py -def parse_raw_messages(blob, outbound): - TIME_SIZE = 8 - LENGTH_SIZE = 4 - MSGTYPE_SIZE = 12 - - messages = [] - offset = 0 - while True: - # Read the Header - header_len = TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE - tmp_header_raw = blob[offset : offset + header_len] - - offset = offset + header_len - if not tmp_header_raw: - break - tmp_header = BytesIO(tmp_header_raw) - time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") # type: int - msgtype = tmp_header.read(MSGTYPE_SIZE).split(b"\x00", 1)[0] # 
type: bytes - length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # type: int - - # Start converting the message to a dictionary - msg_dict = {} - msg_dict["outbound"] = outbound - msg_dict["time"] = time - msg_dict["size"] = length # "size" is less readable here, but more readable in the output - - msg_ser = BytesIO(blob[offset : offset + length]) - offset = offset + length - - # Determine message type - if msgtype not in MESSAGEMAP: - # Unrecognized message type - try: - msgtype_tmp = msgtype.decode() - if not msgtype_tmp.isprintable(): - raise UnicodeDecodeError - msg_dict["msgtype"] = msgtype_tmp - except UnicodeDecodeError: - msg_dict["msgtype"] = "UNREADABLE" - msg_dict["body"] = msg_ser.read().hex() - msg_dict["error"] = "Unrecognized message type." - messages.append(msg_dict) - print(f"WARNING - Unrecognized message type {msgtype}", file=sys.stderr) - continue - - # Deserialize the message - msg = MESSAGEMAP[msgtype]() - msg_dict["msgtype"] = msgtype.decode() - - try: - msg.deserialize(msg_ser) - except KeyboardInterrupt: - raise - except Exception: - # Unable to deserialize message body - msg_ser.seek(0, os.SEEK_SET) - msg_dict["body"] = msg_ser.read().hex() - msg_dict["error"] = "Unable to deserialize message." 
- messages.append(msg_dict) - print("WARNING - Unable to deserialize message", file=sys.stderr) - continue - - # Convert body of message into a jsonable object - if length: - msg_dict["body"] = to_jsonable(msg) - messages.append(msg_dict) - return messages - - -def gen_config_dir(network: str) -> Path: - """ - Determine a config dir based on network name - """ - config_dir = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.warnet")) - config_dir = Path(config_dir) / "warnet" / network - return config_dir - - -def remove_version_prefix(version_str): - if version_str.startswith("0."): - return version_str[2:] - return version_str - - -def set_execute_permission(file_path): - current_permissions = os.stat(file_path).st_mode - os.chmod(file_path, current_permissions | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) - - -def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_version: bool): - try: - # Use nx.MultiDiGraph() so we get directed edges (source->target) - # and still allow parallel edges (L1 p2p connections + LN channels) - graph = nx.generators.cycle_graph(n, nx.MultiDiGraph()) - except TypeError as e: - msg = f"Failed to create graph: {e}" - logger.error(msg) - return msg - - # Graph is a simply cycle graph with all nodes connected in a loop, including both ends. 
- # Ensure each node has at least 8 outbound connections by making 7 more outbound connections - for src_node in graph.nodes(): - logger.debug(f"Creating additional connections for node {src_node}") - for _ in range(8): - # Choose a random node to connect to - # Make sure it's not the same node and they aren't already connected in either direction - potential_nodes = [ - dst_node - for dst_node in range(n) - if dst_node != src_node - and not graph.has_edge(dst_node, src_node) - and not graph.has_edge(src_node, dst_node) - ] - if potential_nodes: - chosen_node = random.choice(potential_nodes) - graph.add_edge(src_node, chosen_node) - logger.debug(f"Added edge: {src_node}:{chosen_node}") - logger.debug(f"Node {src_node} edges: {graph.edges(src_node)}") - - # parse and process conf file - conf_contents = "" - if bitcoin_conf is not None: - conf = Path(bitcoin_conf) - if conf.is_file(): - with open(conf) as f: - # parse INI style conf then dump using for_graph - conf_dict = parse_bitcoin_conf(f.read()) - conf_contents = dump_bitcoin_conf(conf_dict, for_graph=True) - - # populate our custom fields - for i, node in enumerate(graph.nodes()): - if random_version: - graph.nodes[node]["version"] = random.choice(WEIGHTED_TAGS) - else: - # One node demoing the image tag - if i == 1: - graph.nodes[node]["image"] = f"bitcoindevproject/bitcoin:{version}" - else: - graph.nodes[node]["version"] = version - graph.nodes[node]["bitcoin_config"] = conf_contents - graph.nodes[node]["tc_netem"] = "" - graph.nodes[node]["build_args"] = "" - graph.nodes[node]["exporter"] = False - graph.nodes[node]["collect_logs"] = False - - convert_unsupported_attributes(graph) - return graph - - -def convert_unsupported_attributes(graph: nx.Graph): - # Sometimes networkx complains about invalid types when writing the graph - # (it just generated itself!). Try to convert them here just in case. 
- for _, node_data in graph.nodes(data=True): - for key, value in node_data.items(): - if isinstance(value, set): - node_data[key] = list(value) - elif isinstance(value, int | float | str): - continue - else: - node_data[key] = str(value) - - for _, _, edge_data in graph.edges(data=True): - for key, value in edge_data.items(): - if isinstance(value, set): - edge_data[key] = list(value) - elif isinstance(value, int | float | str): - continue - else: - edge_data[key] = str(value) - - -def load_schema(): - with open(SRC_DIR / "graph_schema.json") as schema_file: - return json.load(schema_file) - - -def validate_graph_schema(graph: nx.Graph): - """ - Validate a networkx.Graph against the node schema - """ - graph_schema = load_schema() - validate(instance=graph.graph, schema=graph_schema["graph"]) - for n in list(graph.nodes): - validate(instance=graph.nodes[n], schema=graph_schema["node"]) - for e in list(graph.edges): - validate(instance=graph.edges[e], schema=graph_schema["edge"]) From 35a11a7b19051fe97ce763e9639196ffc4e8c08f Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 21 Aug 2024 17:06:37 +0200 Subject: [PATCH 057/710] add battalions chart add chart for deploying a battalion to warnet. a battalion is a collection of users in a namespace with roles assigned to the users that allow them to manage resources in their namespace only. after the battalion namespace is created, the bitcoincore chart can be used to deploy tanks to the battalion. 
--- resources/charts/battalions/Chart.yaml | 6 ++++++ .../battalions/templates/namespace.yaml | 4 ++++ .../charts/battalions/templates/role.yaml | 10 ++++++++++ .../battalions/templates/rolebinding.yaml | 18 ++++++++++++++++++ .../battalions/templates/serviceaccount.yaml | 11 +++++++++++ resources/charts/battalions/values.yaml | 19 +++++++++++++++++++ .../warnet-battalion-00-values.yaml | 19 +++++++++++++++++++ .../warnet-battalion-01-values.yaml | 19 +++++++++++++++++++ 8 files changed, 106 insertions(+) create mode 100644 resources/charts/battalions/Chart.yaml create mode 100644 resources/charts/battalions/templates/namespace.yaml create mode 100644 resources/charts/battalions/templates/role.yaml create mode 100644 resources/charts/battalions/templates/rolebinding.yaml create mode 100644 resources/charts/battalions/templates/serviceaccount.yaml create mode 100644 resources/charts/battalions/values.yaml create mode 100644 resources/charts/battalions/warnet-battalion-00-values.yaml create mode 100644 resources/charts/battalions/warnet-battalion-01-values.yaml diff --git a/resources/charts/battalions/Chart.yaml b/resources/charts/battalions/Chart.yaml new file mode 100644 index 000000000..d023219e0 --- /dev/null +++ b/resources/charts/battalions/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: battalion-namespace +description: A Helm chart for creating a battalion namespace +type: application +version: 0.1.0 +appVersion: "1.0.0" diff --git a/resources/charts/battalions/templates/namespace.yaml b/resources/charts/battalions/templates/namespace.yaml new file mode 100644 index 000000000..5e01eebed --- /dev/null +++ b/resources/charts/battalions/templates/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.namespaceName | default .Release.Name }} diff --git a/resources/charts/battalions/templates/role.yaml b/resources/charts/battalions/templates/role.yaml new file mode 100644 index 000000000..8d344cbb6 --- /dev/null +++ 
b/resources/charts/battalions/templates/role.yaml @@ -0,0 +1,10 @@ +{{- range .Values.roles }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .name }} + namespace: {{ $.Values.namespaceName | default $.Release.Name }} +rules: +{{ toYaml .rules | indent 2 }} +{{- end }} diff --git a/resources/charts/battalions/templates/rolebinding.yaml b/resources/charts/battalions/templates/rolebinding.yaml new file mode 100644 index 000000000..5a25d4cc4 --- /dev/null +++ b/resources/charts/battalions/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- range $user := .Values.users }} +{{- range $role := $user.roles }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $.Release.Name }}-{{ $role }}-{{ $user.name }} + namespace: {{ $.Values.namespaceName | default $.Release.Name }} +subjects: +- kind: ServiceAccount + name: {{ $user.name }} + namespace: {{ $.Values.namespaceName | default $.Release.Name }} +roleRef: + kind: Role + name: {{ $role }} + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end }} diff --git a/resources/charts/battalions/templates/serviceaccount.yaml b/resources/charts/battalions/templates/serviceaccount.yaml new file mode 100644 index 000000000..d9d906eee --- /dev/null +++ b/resources/charts/battalions/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- range .Values.users }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .name }} + namespace: {{ $.Values.namespaceName | default $.Release.Name }} + annotations: + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-weight: "-5" +{{- end }} diff --git a/resources/charts/battalions/values.yaml b/resources/charts/battalions/values.yaml new file mode 100644 index 000000000..b2a2d24f5 --- /dev/null +++ b/resources/charts/battalions/values.yaml @@ -0,0 +1,19 @@ +users: + - name: alice + roles: + - pod-viewer + - name: bob + roles: + - pod-viewer + - pod-manager +roles: + - name: pod-viewer + rules: + - apiGroups: [""] + 
resources: ["pods"] + verbs: ["get", "list", "watch"] + - name: pod-manager + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "delete"] diff --git a/resources/charts/battalions/warnet-battalion-00-values.yaml b/resources/charts/battalions/warnet-battalion-00-values.yaml new file mode 100644 index 000000000..b2a2d24f5 --- /dev/null +++ b/resources/charts/battalions/warnet-battalion-00-values.yaml @@ -0,0 +1,19 @@ +users: + - name: alice + roles: + - pod-viewer + - name: bob + roles: + - pod-viewer + - pod-manager +roles: + - name: pod-viewer + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - name: pod-manager + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "delete"] diff --git a/resources/charts/battalions/warnet-battalion-01-values.yaml b/resources/charts/battalions/warnet-battalion-01-values.yaml new file mode 100644 index 000000000..b2a2d24f5 --- /dev/null +++ b/resources/charts/battalions/warnet-battalion-01-values.yaml @@ -0,0 +1,19 @@ +users: + - name: alice + roles: + - pod-viewer + - name: bob + roles: + - pod-viewer + - pod-manager +roles: + - name: pod-viewer + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - name: pod-manager + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "delete"] From 5803fc392b10a07ff844c21a9b59c18cef23cfa6 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 21 Aug 2024 17:07:54 +0200 Subject: [PATCH 058/710] add helper script use credentials add script for generating a token for each user in a battalion. this token is used to create a kubeconfig. the kubeconfigs are placed in a directory and can then be given to the users. 
--- resources/scripts/setup_user_contexts.sh | 86 ++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100755 resources/scripts/setup_user_contexts.sh diff --git a/resources/scripts/setup_user_contexts.sh b/resources/scripts/setup_user_contexts.sh new file mode 100755 index 000000000..8d1a06eec --- /dev/null +++ b/resources/scripts/setup_user_contexts.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# Function to check if a command exists +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +# Function to display usage information +usage() { + echo "Usage: $0 [kubeconfig_directory] [token_duration]" + echo " namespace: The Kubernetes namespace" + echo " kubeconfig_directory: Directory to store kubeconfig files (default: kubeconfigs)" + echo " token_duration: Duration of the token in seconds (default: 600 seconds / 10 minutes)" + exit 1 +} + +# Check for required commands +if ! command_exists kubectl; then + echo "kubectl is not installed. Please install it and try again." + exit 1 +fi + +# Check if namespace argument is provided +if [ $# -eq 0 ]; then + usage +fi + +NAMESPACE=$1 +KUBECONFIG_DIR=${2:-"kubeconfigs"} +TOKEN_DURATION=${3:-600} + +CLUSTER_NAME=$(kubectl config view --minify -o jsonpath='{.clusters[0].name}') +CLUSTER_SERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}') +CLUSTER_CA=$(kubectl config view --minify --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}') + +# Create the directory to store the kubeconfig files +mkdir -p "$KUBECONFIG_DIR" + +# Get all ServiceAccounts in the namespace +SERVICE_ACCOUNTS=$(kubectl get serviceaccounts -n $NAMESPACE -o jsonpath='{.items[*].metadata.name}') + +for SA in $SERVICE_ACCOUNTS; do + echo "Processing ServiceAccount: $SA" + + # Create a token for the ServiceAccount with specified duration + TOKEN=$(kubectl create token $SA -n $NAMESPACE --duration="${TOKEN_DURATION}s") + + if [ -z "$TOKEN" ]; then + echo "Failed to create token for ServiceAccount $SA. 
Skipping..." + continue + fi + + # Create a kubeconfig file for the user + KUBECONFIG_FILE="$KUBECONFIG_DIR/${SA}-${NAMESPACE}-kubeconfig" + + cat << EOF > "$KUBECONFIG_FILE" +apiVersion: v1 +kind: Config +clusters: +- name: ${CLUSTER_NAME} + cluster: + server: ${CLUSTER_SERVER} + certificate-authority-data: ${CLUSTER_CA} +users: +- name: ${SA} + user: + token: ${TOKEN} +contexts: +- name: ${SA}-${NAMESPACE} + context: + cluster: ${CLUSTER_NAME} + namespace: ${NAMESPACE} + user: ${SA} +current-context: ${SA}-${NAMESPACE} +EOF + + echo "Created kubeconfig file for $SA: $KUBECONFIG_FILE" + echo "Token duration: ${TOKEN_DURATION} seconds" + echo "To use this config, run: kubectl --kubeconfig=$KUBECONFIG_FILE get pods" + echo "---" +done + +echo "All kubeconfig files have been created in the '$KUBECONFIG_DIR' directory." +echo "Distribute these files to the respective users." +echo "Users can then use them with kubectl by specifying the --kubeconfig flag or by setting the KUBECONFIG environment variable." +echo "Note: The tokens will expire after ${TOKEN_DURATION} seconds." 
From bfa3654ffedd9de168b8b77957a31cb4b93f6bd3 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 17:50:23 +0200 Subject: [PATCH 059/710] fix graph test --- test/graph_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/graph_test.py b/test/graph_test.py index 973d247e9..4c2ac3c39 100755 --- a/test/graph_test.py +++ b/test/graph_test.py @@ -9,7 +9,7 @@ from test_base import TestBase # from warnet.lnd import LNDNode -from warnet.utils import DEFAULT_TAG +from warnet.cli.util import DEFAULT_TAG class GraphTest(TestBase): From 1f52b038504311430b915421f929d47f3819f8fe Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 17:55:01 +0200 Subject: [PATCH 060/710] ci: use uv more cleanly --- .github/workflows/test.yml | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4bcf5bad8..d1ac5ef72 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -6,6 +6,9 @@ on: branches: - dev +env: + UV_SYSTEM_PYTHON: 1 + jobs: ruff: @@ -14,19 +17,15 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: hynek/setup-cached-uv@v1 - - run: uv venv - - run: uv pip install ruff - - run: source .venv/bin/activate; ruff check . + - run: curl -LsSf https://astral.sh/uv/install.sh | sh + - run: uvx ruff check . ruff-format: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: hynek/setup-cached-uv@v1 - - run: uv venv - - run: uv pip install ruff - - run: source .venv/bin/activate; ruff format . + - run: curl -LsSf https://astral.sh/uv/install.sh | sh + - run: uvx ruff format . 
--check test: runs-on: ubuntu-latest @@ -36,15 +35,13 @@ jobs: test: [scenarios_test.py, rpc_test.py, graph_test.py, logging_test.py] steps: - uses: actions/checkout@v4 - - uses: hynek/setup-cached-uv@v1 - uses: azure/setup-helm@v4.2.0 - uses: medyagh/setup-minikube@master - - name: Run tests + - name: Install uv run: | - echo Installing warnet python package for cli - uv venv - uv pip install -e . - shell: bash + curl -LsSf https://astral.sh/uv/install.sh | sh + - name: Install project + run: uv sync --all-extras --dev - name: Run tests run: | source .venv/bin/activate From c0aae416856fc0e89478bc9dd96f355d08b80cea Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 18:05:42 +0200 Subject: [PATCH 061/710] ruff format --- ruff.toml | 1 + src/warnet/cli/scenarios.py | 21 +++++++++++---------- test/data/scenario_p2p_interface.py | 1 + test/graph_test.py | 4 +--- test/test_base.py | 2 ++ 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/ruff.toml b/ruff.toml index 1cef47129..f66c3e7d9 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,6 +1,7 @@ extend-exclude = [ "resources/images/commander/src/test_framework", "resources/images/exporter/authproxy.py", + "src/test_framework/*", ] line-length = 100 indent-width = 4 diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 3d5b5dc5b..1a9321c08 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -92,16 +92,17 @@ def run_scenario(scenario_path, additional_args): tankpods = get_mission("tank") tanks = [ - { - "tank": tank.metadata.name, - "chain": "regtest", - "rpc_host": tank.status.pod_ip, - "rpc_port": 18443, - "rpc_user": "user", - "rpc_password": "password", - "init_peers": [], - } for tank in tankpods - ] + { + "tank": tank.metadata.name, + "chain": "regtest", + "rpc_host": tank.status.pod_ip, + "rpc_port": 18443, + "rpc_user": "user", + "rpc_password": "password", + "init_peers": [], + } + for tank in tankpods + ] 
kubernetes_objects = [create_namespace()] kubernetes_objects.extend( [ diff --git a/test/data/scenario_p2p_interface.py b/test/data/scenario_p2p_interface.py index fe5f4a8d8..d78bbd817 100644 --- a/test/data/scenario_p2p_interface.py +++ b/test/data/scenario_p2p_interface.py @@ -8,6 +8,7 @@ # The base class exists inside the commander container from commander import Commander + def cli_help(): return "Run P2P GETDATA test" diff --git a/test/graph_test.py b/test/graph_test.py index 4c2ac3c39..95ebd744b 100755 --- a/test/graph_test.py +++ b/test/graph_test.py @@ -36,9 +36,7 @@ def run_test(self): def test_graph_creation_and_import(self): self.log.info(f"CLI tool creating test graph file: {self.tf_create}") self.log.info( - self.warcli( - f"graph create 10 --outfile={self.tf_create} --version={DEFAULT_TAG}" - ) + self.warcli(f"graph create 10 --outfile={self.tf_create} --version={DEFAULT_TAG}") ) self.wait_for_predicate(lambda: Path(self.tf_create).exists()) diff --git a/test/test_base.py b/test/test_base.py index 861231a9e..d63cdafd3 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -15,6 +15,7 @@ from warnet.cli.network import _connected as network_connected from warnet.cli.scenarios import _active as scenarios_active + class TestBase: def __init__(self): self.setup_environment() @@ -135,6 +136,7 @@ def check_scenarios(): if s["status"] != "succeeded": return False return True + self.wait_for_predicate(check_scenarios) def get_scenario_return_code(self, scenario_name): From 5a1441a98ce5857cb0656591038f1b3138c9627a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 18:08:01 +0200 Subject: [PATCH 062/710] ruff check fixes --- resources/scripts/apidocs.py | 1 + resources/scripts/graphdocs.py | 1 + src/warnet/cli/bitcoin.py | 2 +- src/warnet/cli/network.py | 9 ++++----- src/warnet/cli/process.py | 1 - src/warnet/cli/scenarios.py | 3 ++- src/warnet/cli/util.py | 2 -- test/data/scenario_p2p_interface.py | 7 +++---- test/graph_test.py | 1 - 
test/ln_test.py | 1 + test/scenarios_test.py | 8 +++++--- test/test_base.py | 7 ++----- 12 files changed, 20 insertions(+), 23 deletions(-) diff --git a/resources/scripts/apidocs.py b/resources/scripts/apidocs.py index 3815126a2..e02cd9939 100755 --- a/resources/scripts/apidocs.py +++ b/resources/scripts/apidocs.py @@ -6,6 +6,7 @@ from click import Context from tabulate import tabulate + from warnet.cli.main import cli file_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / ".." / "docs" / "warcli.md" diff --git a/resources/scripts/graphdocs.py b/resources/scripts/graphdocs.py index 8f3f80d81..8a88888c4 100755 --- a/resources/scripts/graphdocs.py +++ b/resources/scripts/graphdocs.py @@ -5,6 +5,7 @@ from pathlib import Path from tabulate import tabulate + from warnet.utils import load_schema graph_schema = load_schema() diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index d00c0e91a..2e56dc48a 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -56,7 +56,7 @@ def grep_logs(pattern, show_k8s_timestamps, no_sort): """ # Get all pods in the namespace - command = f"kubectl get pods -n warnet -o json" + command = "kubectl get pods -n warnet -o json" pods_json = run_command(command) if pods_json is False: diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 23b4b065c..b8bc69ef1 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -20,7 +20,6 @@ get_mission, set_kubectl_context, ) - from .process import stream_command DEFAULT_GRAPH_FILE = files("graphs").joinpath("default.graphml") @@ -194,7 +193,7 @@ def start(graph_file: Path, logging: bool): try: if deploy_base_configurations() and apply_kubernetes_yaml(temp_file_path): - print(f"Warnet network started successfully.") + print("Warnet network started successfully.") if not set_kubectl_context("warnet"): print( "Warning: Failed to set kubectl context. You may need to manually switch to the warnet namespace." 
@@ -202,7 +201,7 @@ def start(graph_file: Path, logging: bool): if logging and not setup_logging_helm(): print("Failed to install Helm charts.") else: - print(f"Failed to start warnet network.") + print("Failed to start warnet network.") finally: Path(temp_file_path).unlink() @@ -211,9 +210,9 @@ def start(graph_file: Path, logging: bool): def down(): """Bring down a running warnet""" if delete_namespace("warnet") and delete_namespace("warnet-logging"): - print(f"Warnet network has been successfully brought down and the namespaces deleted.") + print("Warnet network has been successfully brought down and the namespaces deleted.") else: - print(f"Failed to bring down warnet network or delete the namespaces.") + print("Failed to bring down warnet network or delete the namespaces.") @network.command() diff --git a/src/warnet/cli/process.py b/src/warnet/cli/process.py index 4131b89b7..ece252458 100644 --- a/src/warnet/cli/process.py +++ b/src/warnet/cli/process.py @@ -1,4 +1,3 @@ -import os import subprocess diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 1a9321c08..9b91edb1d 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -1,4 +1,3 @@ -import base64 import importlib import json import os @@ -12,7 +11,9 @@ from rich import print from rich.console import Console from rich.table import Table + from warnet import scenarios as SCENARIOS + from .k8s import apply_kubernetes_yaml, create_namespace, get_mission diff --git a/src/warnet/cli/util.py b/src/warnet/cli/util.py index b455f3934..156ccf85f 100644 --- a/src/warnet/cli/util.py +++ b/src/warnet/cli/util.py @@ -1,8 +1,6 @@ import json import logging -import os import random -import subprocess from importlib.resources import files from pathlib import Path diff --git a/test/data/scenario_p2p_interface.py b/test/data/scenario_p2p_interface.py index d78bbd817..54c044c7c 100644 --- a/test/data/scenario_p2p_interface.py +++ b/test/data/scenario_p2p_interface.py @@ -1,13 
+1,12 @@ #!/usr/bin/env python3 from collections import defaultdict -from time import sleep - -from test_framework.messages import CInv, msg_getdata -from test_framework.p2p import P2PInterface # The base class exists inside the commander container from commander import Commander +from test_framework.messages import CInv, msg_getdata +from test_framework.p2p import P2PInterface + def cli_help(): return "Run P2P GETDATA test" diff --git a/test/graph_test.py b/test/graph_test.py index 95ebd744b..5ea73c5f0 100755 --- a/test/graph_test.py +++ b/test/graph_test.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -import json import os import tempfile import uuid diff --git a/test/ln_test.py b/test/ln_test.py index 31e8be112..3534dab27 100755 --- a/test/ln_test.py +++ b/test/ln_test.py @@ -5,6 +5,7 @@ from pathlib import Path from test_base import TestBase + from warnet.services import ServiceType diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 69499b2e9..9d6e17561 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -2,11 +2,13 @@ import os from pathlib import Path -from warnet.cli.scenarios import _available as scenarios_available -from warnet.cli.scenarios import _active as scenarios_active -from warnet.cli.k8s import delete_pod + from test_base import TestBase +from warnet.cli.k8s import delete_pod +from warnet.cli.scenarios import _active as scenarios_active +from warnet.cli.scenarios import _available as scenarios_available + class ScenariosTest(TestBase): def __init__(self): diff --git a/test/test_base.py b/test/test_base.py index d63cdafd3..5e7c6953b 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -11,8 +11,8 @@ from time import sleep from warnet import SRC_DIR -from warnet.cli.network import _status as network_status from warnet.cli.network import _connected as network_connected +from warnet.cli.network import _status as network_status from warnet.cli.scenarios import _active as scenarios_active @@ -132,10 +132,7 @@ def 
check_scenarios(): scns = scenarios_active() if len(scns) == 0: return True - for s in scns: - if s["status"] != "succeeded": - return False - return True + return all(s["status"] == "succeeded" for s in scns) self.wait_for_predicate(check_scenarios) From 7750306429fe2ab583f7947e6cd5b7c27b993a3a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 18:08:29 +0200 Subject: [PATCH 063/710] ci: re-enable ruff check --- .github/workflows/test.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d1ac5ef72..0a8f5d3dc 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -12,8 +12,6 @@ env: jobs: ruff: - # DISABLE FOR REWRITE - if: false runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 From 1e41d6ea215f88979255cc0731602521270ba880 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 18:11:03 +0200 Subject: [PATCH 064/710] fix graphdocs import --- resources/scripts/graphdocs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/scripts/graphdocs.py b/resources/scripts/graphdocs.py index 8a88888c4..631d7df45 100755 --- a/resources/scripts/graphdocs.py +++ b/resources/scripts/graphdocs.py @@ -6,7 +6,7 @@ from tabulate import tabulate -from warnet.utils import load_schema +from warnet.cli.util import load_schema graph_schema = load_schema() From 73fd0327a78597ab1ef4dc6d674f323feac9c31b Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Wed, 21 Aug 2024 18:26:00 +0200 Subject: [PATCH 065/710] remove network2.py and move to network.py --- .../charts/bitcoincore/templates/pod.yaml | 2 +- resources/charts/bitcoincore/values.yaml | 4 +- src/warnet/cli/main.py | 2 - src/warnet/cli/network.py | 203 +++++------------- src/warnet/cli/network2.py | 64 ------ 5 files changed, 52 insertions(+), 223 deletions(-) delete mode 100644 src/warnet/cli/network2.py diff --git a/resources/charts/bitcoincore/templates/pod.yaml 
b/resources/charts/bitcoincore/templates/pod.yaml index 202ed4179..f2954abc9 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -5,7 +5,7 @@ metadata: labels: {{- include "bitcoincore.labels" . | nindent 4 }} {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} + {{- toYaml . | nindent 4 }} {{- end }} spec: {{- with .Values.imagePullSecrets }} diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index f9de5cb30..9f7d15883 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -23,7 +23,9 @@ serviceAccount: # If not set and create is true, a name is generated using the fullname template name: "" -podLabels: {} +podLabels: + app: "warnet" + mission: "tank" podSecurityContext: {} # fsGroup: 2000 diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index ed8744190..95197bd29 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -11,7 +11,6 @@ # from .ln import ln from .network import network -from .network2 import network2 from .scenarios import scenarios QUICK_START_PATH = files("scripts").joinpath("quick_start.sh") @@ -27,7 +26,6 @@ def cli(): cli.add_command(image) # cli.add_command(ln) cli.add_command(network) -cli.add_command(network2) cli.add_command(scenarios) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index b8bc69ef1..0cdf021cf 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -1,4 +1,5 @@ import json +import os import tempfile from importlib.resources import files from pathlib import Path @@ -22,113 +23,20 @@ ) from .process import stream_command -DEFAULT_GRAPH_FILE = files("graphs").joinpath("default.graphml") WAR_MANIFESTS = files("manifests") - +NETWORK_DIR = Path("networks") +DEFAULT_NETWORK = "6_node_bitcoin" +NETWORK_FILE = "network.yaml" +DEFAULTS_FILE = "defaults.yaml" +HELM_COMMAND = "helm upgrade --install 
--create-namespace" +BITCOIN_CHART_LOCATION = "./resources/charts/bitcoincore" +NAMESPACE = "warnet" @click.group(name="network") def network(): """Network commands""" -def read_graph_file(graph_file: Path) -> nx.Graph: - with open(graph_file) as f: - return nx.parse_graphml(f.read()) - - -def generate_node_config(node: int, data: dict, graph: nx.Graph) -> str: - base_config = """ -regtest=1 -checkmempool=0 -acceptnonstdtxn=1 -debuglogfile=0 -logips=1 -logtimemicros=1 -capturemessages=1 -fallbackfee=0.00001000 -listen=1 - -[regtest] -rpcuser=user -rpcpassword=password -rpcport=18443 -rpcallowip=0.0.0.0/0 -rpcbind=0.0.0.0 - -zmqpubrawblock=tcp://0.0.0.0:28332 -zmqpubrawtx=tcp://0.0.0.0:28333 -""" - node_specific_config = data.get("bitcoin_config", "").replace(",", "\n") - - # Add addnode configurations for connected nodes - connected_nodes = list(graph.neighbors(node)) - addnode_configs = [f"addnode=warnet-tank-{index}-service" for index in connected_nodes] - - return f"{base_config}\n{node_specific_config}\n" + "\n".join(addnode_configs) - - -def create_node_deployment(node: int, data: dict) -> Dict[str, Any]: - image = data.get("image", "bitcoindevproject/bitcoin:27.0") - version = data.get("version", "27.0") - - return create_kubernetes_object( - kind="Pod", - metadata={ - "name": f"warnet-tank-{node}", - "namespace": "warnet", - "labels": {"app": "warnet", "mission": "tank", "index": str(node)}, - "annotations": {"data": json.dumps(data)}, - }, - spec={ - "containers": [ - { - "name": "bitcoin", - "image": image, - "env": [{"name": "BITCOIN_VERSION", "value": version}], - "volumeMounts": [ - { - "name": "config", - "mountPath": "/root/.bitcoin/bitcoin.conf", - "subPath": "bitcoin.conf", - } - ], - "ports": [ - {"containerPort": 18444}, - {"containerPort": 18443}, - ], - } - ], - "volumes": [{"name": "config", "configMap": {"name": f"bitcoin-config-tank-{node}"}}], - }, - ) - - -def create_node_service(node: int) -> Dict[str, Any]: - return 
create_kubernetes_object( - kind="Service", - metadata={"name": f"warnet-tank-{node}-service", "namespace": "warnet"}, - spec={ - "selector": {"app": "warnet", "mission": "tank", "index": str(node)}, - "ports": [ - {"name": "p2p", "port": 18444, "targetPort": 18444}, - {"name": "rpc", "port": 18443, "targetPort": 18443}, - ], - }, - ) - - -def create_config_map(node: int, config: str) -> Dict[str, Any]: - config_map = create_kubernetes_object( - kind="ConfigMap", - metadata={ - "name": f"bitcoin-config-tank-{node}", - "namespace": "warnet", - }, - ) - config_map["data"] = {"bitcoin.conf": config} - return config_map - - def create_edges_map(graph): edges = [] for src, dst, data in graph.edges(data=True): @@ -144,23 +52,6 @@ def create_edges_map(graph): return config_map -def generate_kubernetes_yaml(graph: nx.Graph) -> List[Dict[str, Any]]: - kubernetes_objects = [create_namespace()] - - for node, data in graph.nodes(data=True): - config = generate_node_config(node, data, graph) - kubernetes_objects.extend( - [ - create_config_map(node, config), - create_node_deployment(node, data), - create_node_service(node), - ] - ) - kubernetes_objects.append(create_edges_map(graph)) - - return kubernetes_objects - - def setup_logging_helm() -> bool: helm_commands = [ "helm repo add grafana https://grafana.github.io/helm-charts", @@ -180,30 +71,46 @@ def setup_logging_helm() -> bool: @network.command() -@click.argument("graph_file", default=DEFAULT_GRAPH_FILE, type=click.Path()) +@click.argument("network_name", default=DEFAULT_NETWORK) +@click.option("--network", default="warnet", show_default=True) @click.option("--logging/--no-logging", default=False) -def start(graph_file: Path, logging: bool): - """Start a warnet with topology loaded from a """ - graph = read_graph_file(graph_file) - kubernetes_yaml = generate_kubernetes_yaml(graph) - - with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: - 
yaml.dump_all(kubernetes_yaml, temp_file) - temp_file_path = temp_file.name - - try: - if deploy_base_configurations() and apply_kubernetes_yaml(temp_file_path): - print("Warnet network started successfully.") - if not set_kubectl_context("warnet"): - print( - "Warning: Failed to set kubectl context. You may need to manually switch to the warnet namespace." - ) - if logging and not setup_logging_helm(): - print("Failed to install Helm charts.") - else: - print("Failed to start warnet network.") - finally: - Path(temp_file_path).unlink() +def start(network_name: str, logging: bool, network: str): + """Start a warnet with topology loaded from into [network]""" + full_path = os.path.join(NETWORK_DIR, network_name) + network_file_path = os.path.join(full_path, NETWORK_FILE) + defaults_file_path = os.path.join(full_path, DEFAULTS_FILE) + + network_file = {} + with open(network_file_path) as f: + network_file = yaml.safe_load(f) + + for node in network_file["nodes"]: + print(f"Starting node: {node.get('name')}") + try: + temp_override_file_path = "" + node_name = node.get("name") + # all the keys apart from name + node_config_override = {k: v for k, v in node.items() if k != "name"} + + cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {NAMESPACE} -f {defaults_file_path}" + + if node_config_override: + with tempfile.NamedTemporaryFile( + mode="w", suffix=".yaml", delete=False + ) as temp_file: + yaml.dump(node_config_override, temp_file) + temp_override_file_path = temp_file.name + cmd = f"{cmd} -f {temp_override_file_path}" + + if not stream_command(cmd): + print(f"Failed to run Helm command: {cmd}") + return + except Exception as e: + print(f"Error: {e}") + return + finally: + if temp_override_file_path: + Path(temp_override_file_path).unlink() @network.command() @@ -268,18 +175,4 @@ def _status(): "bitcoin_status": tank.status.phase.lower(), } stats.append(status) - return stats - - -@network.command() -@click.argument("graph_file", 
default=DEFAULT_GRAPH_FILE, type=click.Path()) -@click.option("--output", "-o", default="warnet-deployment.yaml", help="Output YAML file") -def generate_yaml(graph_file: Path, output: str): - """Generate a Kubernetes YAML file from a graph file for deploying warnet nodes.""" - graph = read_graph_file(graph_file) - kubernetes_yaml = generate_kubernetes_yaml(graph) - - with open(output, "w") as f: - yaml.dump_all(kubernetes_yaml, f) - - print(f"Kubernetes YAML file generated: {output}") + return stats \ No newline at end of file diff --git a/src/warnet/cli/network2.py b/src/warnet/cli/network2.py deleted file mode 100644 index 011026de0..000000000 --- a/src/warnet/cli/network2.py +++ /dev/null @@ -1,64 +0,0 @@ -import os -import tempfile -from pathlib import Path - -import click -import yaml - -from .process import stream_command - -NETWORK_DIR = Path("networks") -DEFAULT_NETWORK = "6_node_bitcoin" -NETWORK_FILE = "network.yaml" -DEFAULTS_FILE = "defaults.yaml" -HELM_COMMAND = "helm upgrade --install --create-namespace" -BITCOIN_CHART_LOCATION = "./resources/charts/bitcoincore" -NAMESPACE = "warnet" - - -@click.group(name="network2") -def network2(): - """Network commands""" - - -@network2.command() -@click.argument("network_name", default=DEFAULT_NETWORK) -@click.option("--network", default="warnet", show_default=True) -@click.option("--logging/--no-logging", default=False) -def start2(network_name: str, logging: bool, network: str): - """Start a warnet with topology loaded from into [network]""" - full_path = os.path.join(NETWORK_DIR, network_name) - network_file_path = os.path.join(full_path, NETWORK_FILE) - defaults_file_path = os.path.join(full_path, DEFAULTS_FILE) - - network_file = {} - with open(network_file_path) as f: - network_file = yaml.safe_load(f) - - for node in network_file["nodes"]: - print(f"Starting node: {node.get('name')}") - try: - temp_override_file_path = "" - node_name = node.get("name") - # all the keys apart from name - 
node_config_override = {k: v for k, v in node.items() if k != "name"} - - cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {NAMESPACE} -f {defaults_file_path}" - - if node_config_override: - with tempfile.NamedTemporaryFile( - mode="w", suffix=".yaml", delete=False - ) as temp_file: - yaml.dump(node_config_override, temp_file) - temp_override_file_path = temp_file.name - cmd = f"{cmd} -f {temp_override_file_path}" - - if not stream_command(cmd): - print(f"Failed to run Helm command: {cmd}") - return - except Exception as e: - print(f"Error: {e}") - return - # finally: - # if temp_override_file_path: - # Path(temp_override_file_path).unlink() From 1f88f994fba605e34fe97e8719188a34e9c30279 Mon Sep 17 00:00:00 2001 From: m3dwards Date: Wed, 21 Aug 2024 16:27:29 +0000 Subject: [PATCH 066/710] Update apidocs and/or graphdocs --- docs/warcli.md | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/docs/warcli.md b/docs/warcli.md index 11cbe63c7..5d3d6dd7a 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -134,15 +134,6 @@ Determine if all p2p conenctions defined in graph are established Bring down a running warnet -### `warcli network generate-yaml` -Generate a Kubernetes YAML file from a graph file for deploying warnet nodes. 
- -options: -| name | type | required | default | -|------------|--------|------------|----------------------------------| -| graph_file | Path | | resources/graphs/default.graphml | -| output | String | | "warnet-deployment.yaml" | - ### `warcli network logs` Get Kubernetes logs from the RPC server @@ -152,21 +143,6 @@ options: | follow | Bool | | False | ### `warcli network start` -Start a warnet with topology loaded from a \ - -options: -| name | type | required | default | -|------------|--------|------------|----------------------------------| -| graph_file | Path | | resources/graphs/default.graphml | -| logging | Bool | | False | - -### `warcli network status` -Return pod status - - -## Network2 - -### `warcli network2 start2` Start a warnet with topology loaded from \ into [network] options: @@ -176,6 +152,10 @@ options: | network | String | | "warnet" | | logging | Bool | | False | +### `warcli network status` +Return pod status + + ## Scenarios ### `warcli scenarios active` From 61255717ac2966448843ad4b5bffcc9989de4b5c Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 18:33:34 +0200 Subject: [PATCH 067/710] quick start: remove unneeded deps --- resources/scripts/quick_start.sh | 41 ++------------------------------ 1 file changed, 2 insertions(+), 39 deletions(-) diff --git a/resources/scripts/quick_start.sh b/resources/scripts/quick_start.sh index 7d473848c..7d444de75 100755 --- a/resources/scripts/quick_start.sh +++ b/resources/scripts/quick_start.sh @@ -52,15 +52,6 @@ print_message "" "" "" print_message "" " Let's find out if your system has what it takes to run Warnet..." "" print_message "" "" "" -minikube_path=$(command -v minikube || true) -if [ -n "$minikube_path" ]; then - print_partial_message " ⭐️ Found " "minikube" ": $minikube_path " "$BOLD" -else - print_partial_message " 💥 Could not find " "minikube" ". Please follow this link to install it..." 
"$BOLD" - print_message "" " https://minikube.sigs.k8s.io/docs/start/" "$BOLD" - exit 127 -fi - kubectl_path=$(command -v kubectl || true) if [ -n "$kubectl_path" ]; then print_partial_message " ⭐️ Found " "kubectl" ": $kubectl_path " "$BOLD" @@ -79,15 +70,6 @@ else exit 127 fi -current_user=$(whoami) -if id -nG "$current_user" | grep -qw "docker"; then - print_partial_message " ⭐️ Found " "$current_user" " in the docker group" "$BOLD" -else - print_partial_message " 💥 Could not find " "$current_user" " in the docker group. Please add it like this..." "$BOLD" - print_message "" " sudo usermod -aG docker $current_user && newgrp docker" "$BOLD" - exit 1 -fi - helm_path=$(command -v helm || true) if [ -n "$helm_path" ]; then print_partial_message " ⭐️ Found " "helm" ": $helm_path" "$BOLD" @@ -97,14 +79,6 @@ else exit 127 fi -just_path=$(command -v just || true) -if [ -n "$just_path" ]; then - print_partial_message " ⭐️ Found " "just" ": $just_path " "$BOLD" -else - print_partial_message " 💥 Could not find " "just" ". Please follow this link to install it..." "$BOLD" - print_message "" " https://github.com/casey/just?tab=readme-ov-file#pre-built-binaries" "$BOLD" -fi - python_path=$(command -v python3 || true) if [ -n "$python_path" ]; then print_partial_message " ⭐️ Found " "python3" ": $python_path " "$BOLD" @@ -121,16 +95,5 @@ else exit 127 fi -bpf_status=$(grep CONFIG_BPF /boot/config-"$(uname -r)" || true) -if [ -n "$bpf_status" ]; then - config_bpf=$(echo "$bpf_status" | grep CONFIG_BPF=y) - if [ "$config_bpf" = "CONFIG_BPF=y" ]; then - print_partial_message " ⭐️ Found " "BPF" ": Berkeley Packet Filters appear enabled" "$BOLD" - else - print_partial_message " 💥 Could not find " "BPF" ". Please figure out how to enable Berkeley Packet Filters in your kernel." "$BOLD" - exit 1 - fi -else - print_partial_message " 💥 Could not find " "BPF" ". Please figure out how to enable Berkeley Packet Filters in your kernel." 
"$BOLD" - exit 1 -fi +echo " ✅ Everything needed found" + From dd59217b9aeb4652de4f2676999499cb03a259c2 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 21 Aug 2024 19:13:13 +0200 Subject: [PATCH 068/710] reformat network --- src/warnet/cli/network.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 0cdf021cf..8e29afdda 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -3,23 +3,17 @@ import tempfile from importlib.resources import files from pathlib import Path -from typing import Any, Dict, List import click -import networkx as nx import yaml from rich import print from .bitcoin import _rpc from .k8s import ( - apply_kubernetes_yaml, create_kubernetes_object, - create_namespace, delete_namespace, - deploy_base_configurations, get_edges, get_mission, - set_kubectl_context, ) from .process import stream_command @@ -32,6 +26,7 @@ BITCOIN_CHART_LOCATION = "./resources/charts/bitcoincore" NAMESPACE = "warnet" + @click.group(name="network") def network(): """Network commands""" @@ -175,4 +170,4 @@ def _status(): "bitcoin_status": tank.status.phase.lower(), } stats.append(status) - return stats \ No newline at end of file + return stats From 2b92676cf587bb4307f78c6bc9e4a092796b56fa Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 21 Aug 2024 18:29:41 +0200 Subject: [PATCH 069/710] clean up namespaces charts rename to namespaces from battalions and get rid of warnet-* files --- .../warnet-battalion-00-values.yaml | 19 ------------------- .../warnet-battalion-01-values.yaml | 19 ------------------- .../{battalions => namespaces}/Chart.yaml | 0 .../templates/namespace.yaml | 0 .../templates/role.yaml | 0 .../templates/rolebinding.yaml | 0 .../templates/serviceaccount.yaml | 0 .../{battalions => namespaces}/values.yaml | 5 +---- 8 files changed, 1 insertion(+), 42 deletions(-) delete mode 100644 resources/charts/battalions/warnet-battalion-00-values.yaml delete 
mode 100644 resources/charts/battalions/warnet-battalion-01-values.yaml rename resources/charts/{battalions => namespaces}/Chart.yaml (100%) rename resources/charts/{battalions => namespaces}/templates/namespace.yaml (100%) rename resources/charts/{battalions => namespaces}/templates/role.yaml (100%) rename resources/charts/{battalions => namespaces}/templates/rolebinding.yaml (100%) rename resources/charts/{battalions => namespaces}/templates/serviceaccount.yaml (100%) rename resources/charts/{battalions => namespaces}/values.yaml (85%) diff --git a/resources/charts/battalions/warnet-battalion-00-values.yaml b/resources/charts/battalions/warnet-battalion-00-values.yaml deleted file mode 100644 index b2a2d24f5..000000000 --- a/resources/charts/battalions/warnet-battalion-00-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -users: - - name: alice - roles: - - pod-viewer - - name: bob - roles: - - pod-viewer - - pod-manager -roles: - - name: pod-viewer - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] - - name: pod-manager - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch", "create", "update", "delete"] diff --git a/resources/charts/battalions/warnet-battalion-01-values.yaml b/resources/charts/battalions/warnet-battalion-01-values.yaml deleted file mode 100644 index b2a2d24f5..000000000 --- a/resources/charts/battalions/warnet-battalion-01-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -users: - - name: alice - roles: - - pod-viewer - - name: bob - roles: - - pod-viewer - - pod-manager -roles: - - name: pod-viewer - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] - - name: pod-manager - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch", "create", "update", "delete"] diff --git a/resources/charts/battalions/Chart.yaml b/resources/charts/namespaces/Chart.yaml similarity index 100% rename from resources/charts/battalions/Chart.yaml rename to 
resources/charts/namespaces/Chart.yaml diff --git a/resources/charts/battalions/templates/namespace.yaml b/resources/charts/namespaces/templates/namespace.yaml similarity index 100% rename from resources/charts/battalions/templates/namespace.yaml rename to resources/charts/namespaces/templates/namespace.yaml diff --git a/resources/charts/battalions/templates/role.yaml b/resources/charts/namespaces/templates/role.yaml similarity index 100% rename from resources/charts/battalions/templates/role.yaml rename to resources/charts/namespaces/templates/role.yaml diff --git a/resources/charts/battalions/templates/rolebinding.yaml b/resources/charts/namespaces/templates/rolebinding.yaml similarity index 100% rename from resources/charts/battalions/templates/rolebinding.yaml rename to resources/charts/namespaces/templates/rolebinding.yaml diff --git a/resources/charts/battalions/templates/serviceaccount.yaml b/resources/charts/namespaces/templates/serviceaccount.yaml similarity index 100% rename from resources/charts/battalions/templates/serviceaccount.yaml rename to resources/charts/namespaces/templates/serviceaccount.yaml diff --git a/resources/charts/battalions/values.yaml b/resources/charts/namespaces/values.yaml similarity index 85% rename from resources/charts/battalions/values.yaml rename to resources/charts/namespaces/values.yaml index b2a2d24f5..a09f671dd 100644 --- a/resources/charts/battalions/values.yaml +++ b/resources/charts/namespaces/values.yaml @@ -1,8 +1,5 @@ users: - - name: alice - roles: - - pod-viewer - - name: bob + - name: warcli-user roles: - pod-viewer - pod-manager From b154e4df02ea7369ce437e41ac4b8abb1c1c3878 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 21 Aug 2024 18:59:22 +0200 Subject: [PATCH 070/710] add namespaces config defaults --- .../two_namespaces_two_users/defaults.yaml | 16 ++++++++ .../two_namespaces_two_users/namespaces.yaml | 41 +++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 
namespaces/two_namespaces_two_users/defaults.yaml create mode 100644 namespaces/two_namespaces_two_users/namespaces.yaml diff --git a/namespaces/two_namespaces_two_users/defaults.yaml b/namespaces/two_namespaces_two_users/defaults.yaml new file mode 100644 index 000000000..a09f671dd --- /dev/null +++ b/namespaces/two_namespaces_two_users/defaults.yaml @@ -0,0 +1,16 @@ +users: + - name: warcli-user + roles: + - pod-viewer + - pod-manager +roles: + - name: pod-viewer + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - name: pod-manager + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "delete"] diff --git a/namespaces/two_namespaces_two_users/namespaces.yaml b/namespaces/two_namespaces_two_users/namespaces.yaml new file mode 100644 index 000000000..03b31696a --- /dev/null +++ b/namespaces/two_namespaces_two_users/namespaces.yaml @@ -0,0 +1,41 @@ +namespaces: + - name: warnet-red-team + users: + - name: alice + roles: + - pod-viewer + - name: bob + roles: + - pod-viewer + - pod-manager + roles: + - name: pod-viewer + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - name: pod-manager + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - name: warnet-blue-team + users: + - name: mallory + roles: + - pod-viewer + - name: carol + roles: + - pod-viewer + - pod-manager + roles: + - name: pod-viewer + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - name: pod-manager + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "delete"] From adc569a082fe4bbdbb572c13d0174f3c2d245fa3 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 21 Aug 2024 19:00:55 +0200 Subject: [PATCH 071/710] add namespaces cmds to cli --- src/warnet/cli/main.py | 2 + src/warnet/cli/namespaces.py | 115 
+++++++++++++++++++++++++++++++++++ 2 files changed, 117 insertions(+) create mode 100644 src/warnet/cli/namespaces.py diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index 95197bd29..b09738675 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -10,6 +10,7 @@ from .image import image # from .ln import ln +from .namespaces import namespaces from .network import network from .scenarios import scenarios @@ -25,6 +26,7 @@ def cli(): cli.add_command(graph) cli.add_command(image) # cli.add_command(ln) +cli.add_command(namespaces) cli.add_command(network) cli.add_command(scenarios) diff --git a/src/warnet/cli/namespaces.py b/src/warnet/cli/namespaces.py new file mode 100644 index 000000000..dbcaf24a5 --- /dev/null +++ b/src/warnet/cli/namespaces.py @@ -0,0 +1,115 @@ +import os +import tempfile +from pathlib import Path + +import click +import yaml + +from .process import stream_command, run_command + +NAMESPACES_DIR = Path("namespaces") +DEFAULT_NAMESPACES = "two_namespaces_two_users" +NAMESPACES_FILE = "namespaces.yaml" +DEFAULTS_FILE = "defaults.yaml" +HELM_COMMAND = "helm upgrade --install" +BITCOIN_CHART_LOCATION = "./resources/charts/namespaces" + +@click.group(name="namespaces") +def namespaces(): + """Namespaces commands""" + + +@namespaces.command() +@click.argument("namespaces", default=DEFAULT_NAMESPACES) +def deploy(namespaces: str): + """Deploy namespaces with users from a """ + full_path = os.path.join(NAMESPACES_DIR, namespaces) + namespaces_file_path = os.path.join(full_path, NAMESPACES_FILE) + defaults_file_path = os.path.join(full_path, DEFAULTS_FILE) + + namespaces_file = {} + with open(namespaces_file_path) as f: + namespaces_file = yaml.safe_load(f) + + # validate names before deploying + names = [n.get("name") for n in namespaces_file["namespaces"]] + for n in names: + if not n.startswith("warnet-"): + print(f"Failled to create namespace: {n}. 
Namespaces must start with a 'warnet-' prefix.") + + # deploy namespaces + for namespace in namespaces_file["namespaces"]: + print(f"Deploying namespace: {namespace.get('name')}") + try: + temp_override_file_path = "" + namespace_name = namespace.get("name") + # all the keys apart from name + namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} + + cmd = f"{HELM_COMMAND} {namespace_name} {BITCOIN_CHART_LOCATION} -f {defaults_file_path}" + + if namespace_config_override: + with tempfile.NamedTemporaryFile( + mode="w", suffix=".yaml", delete=False + ) as temp_file: + yaml.dump(namespace_config_override, temp_file) + temp_override_file_path = temp_file.name + cmd = f"{cmd} -f {temp_override_file_path}" + + if not stream_command(cmd): + print(f"Failed to run Helm command: {cmd}") + return + except Exception as e: + print(f"Error: {e}") + return + +@namespaces.command() +def list(): + """List all namespaces with 'warnet-' prefix""" + cmd = "kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'" + res = run_command(cmd) + all_namespaces = res.split() + warnet_namespaces = [ns for ns in all_namespaces if ns.startswith("warnet-")] + + if warnet_namespaces: + print("Warnet namespaces:") + for ns in warnet_namespaces: + print(f"- {ns}") + else: + print("No warnet namespaces found.") + +@namespaces.command() +@click.option("--all", "destroy_all", is_flag=True, help="Destroy all warnet- prefixed namespaces") +@click.argument("namespace", required=False) +def destroy(destroy_all: bool, namespace: str): + """Destroy a specific namespace or all warnet- prefixed namespaces""" + if destroy_all: + cmd = "kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'" + res = run_command(cmd) + + # Get the list of namespaces + all_namespaces = res.split() + warnet_namespaces = [ns for ns in all_namespaces if ns.startswith("warnet-")] + + if not warnet_namespaces: + print("No warnet namespaces found to destroy.") + return + + for ns in 
warnet_namespaces: + destroy_cmd = f"kubectl delete namespace {ns}" + if not stream_command(destroy_cmd): + print(f"Failed to destroy namespace: {ns}") + else: + print(f"Destroyed namespace: {ns}") + elif namespace: + if not namespace.startswith("warnet-"): + print("Error: Can only destroy namespaces with 'warnet-' prefix") + return + + destroy_cmd = f"kubectl delete namespace {namespace}" + if not stream_command(destroy_cmd): + print(f"Failed to destroy namespace: {namespace}") + else: + print(f"Destroyed namespace: {namespace}") + else: + print("Error: Please specify a namespace or use --all flag.") From 7d1d54ff892cb4521485e046b5937f4fc6ea493e Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 21 Aug 2024 17:38:43 +0000 Subject: [PATCH 072/710] Update apidocs and/or graphdocs --- docs/warcli.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/docs/warcli.md b/docs/warcli.md index 5d3d6dd7a..ee9d050a7 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -124,6 +124,29 @@ options: | arches | String | | | | action | String | | "load" | +## Namespaces + +### `warcli namespaces deploy` +Deploy namespaces with users from a \ + +options: +| name | type | required | default | +|------------|--------|------------|----------------------------| +| namespaces | String | | "two_namespaces_two_users" | + +### `warcli namespaces destroy` +Destroy a specific namespace or all warnet- prefixed namespaces + +options: +| name | type | required | default | +|-------------|--------|------------|-----------| +| destroy_all | Bool | | False | +| namespace | String | | | + +### `warcli namespaces list` +List all namespaces with 'warnet-' prefix + + ## Network ### `warcli network connected` From 8563954890baa7534d727e6398d849c18a579890 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 21 Aug 2024 19:44:45 +0200 Subject: [PATCH 073/710] run dag connection test --- .github/workflows/test.yml | 2 +- resources/images/commander/src/commander.py | 64 
+++++++-------------- resources/scripts/apidocs.py | 1 - resources/scripts/graphdocs.py | 1 - src/warnet/cli/bitcoin.py | 1 - src/warnet/cli/namespaces.py | 13 ++++- src/warnet/cli/scenarios.py | 1 - test/dag_connection_test.py | 14 ----- test/data/scenario_connect_dag.py | 53 ++++++----------- test/data/scenario_p2p_interface.py | 1 - test/ln_test.py | 1 - test/scenarios_test.py | 1 - 12 files changed, 50 insertions(+), 103 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 0a8f5d3dc..3641b89f7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,7 +30,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [scenarios_test.py, rpc_test.py, graph_test.py, logging_test.py] + test: [scenarios_test.py, rpc_test.py, graph_test.py, dag_connection_test.py, logging_test.py] steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 diff --git a/resources/images/commander/src/commander.py b/resources/images/commander/src/commander.py index fa1761ea9..6d8c0eb96 100644 --- a/resources/images/commander/src/commander.py +++ b/resources/images/commander/src/commander.py @@ -1,6 +1,5 @@ import argparse import configparser -import ipaddress import json import logging import os @@ -93,6 +92,7 @@ def setup(self): cwd=self.options.tmpdir, coverage_dir=self.options.coveragedir, ) + node.tank = tank["tank"] node.rpc = get_rpc_proxy( f"http://{tank['rpc_user']}:{tank['rpc_password']}@{tank['rpc_host']}:{tank['rpc_port']}", i, @@ -302,83 +302,63 @@ def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool """ from_connection = self.nodes[a] to_connection = self.nodes[b] - - to_ip_port = self.warnet.tanks[b].get_dns_addr() - from_ip_port = self.warnet.tanks[a].get_ip_addr() + from_num_peers = 1 + len(from_connection.getpeerinfo()) + to_num_peers = 1 + len(to_connection.getpeerinfo()) + ip_port = 
self.nodes[b].rpchost + ":18444" if peer_advertises_v2 is None: peer_advertises_v2 = self.options.v2transport if peer_advertises_v2: - from_connection.addnode(node=to_ip_port, command="onetry", v2transport=True) + from_connection.addnode(node=ip_port, command="onetry", v2transport=True) else: # skip the optional third argument (default false) for # compatibility with older clients - from_connection.addnode(to_ip_port, "onetry") + from_connection.addnode(ip_port, "onetry") if not wait_for_connect: return - def get_peer_ip(peer): - try: # we encounter a regular ip address - ip_addr = str(ipaddress.ip_address(peer["addr"].split(":")[0])) - return ip_addr - except ValueError as err: # or we encounter a service name - try: - # NETWORK-tank-TANK_INDEX-service - # NETWORK-test-TEST-tank-TANK_INDEX-service - tank_index = int(peer["addr"].split("-")[-2]) - except (ValueError, IndexError) as inner_err: - raise ValueError( - "could not derive tank index from service name: {} {}".format( - peer["addr"], inner_err - ) - ) from err - - ip_addr = self.warnet.tanks[tank_index].get_ip_addr() - return ip_addr - # poll until version handshake complete to avoid race conditions # with transaction relaying # See comments in net_processing: # * Must have a version message before anything else # * Must have a verack message before anything else self.wait_until( - lambda: any( - peer["addr"] == to_ip_port and peer["version"] != 0 - for peer in from_connection.getpeerinfo() - ) + lambda: sum(peer["version"] != 0 for peer in from_connection.getpeerinfo()) + == from_num_peers ) self.wait_until( - lambda: any( - get_peer_ip(peer) == from_ip_port and peer["version"] != 0 - for peer in to_connection.getpeerinfo() - ) + lambda: sum(peer["version"] != 0 for peer in to_connection.getpeerinfo()) + == to_num_peers ) self.wait_until( - lambda: any( - peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 + lambda: sum( + peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 for 
peer in from_connection.getpeerinfo() ) + == from_num_peers ) self.wait_until( - lambda: any( - get_peer_ip(peer) == from_ip_port - and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 + lambda: sum( + peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 for peer in to_connection.getpeerinfo() ) + == to_num_peers ) # The message bytes are counted before processing the message, so make # sure it was fully processed by waiting for a ping. self.wait_until( - lambda: any( - peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 + lambda: sum( + peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in from_connection.getpeerinfo() ) + == from_num_peers ) self.wait_until( - lambda: any( - get_peer_ip(peer) == from_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 + lambda: sum( + peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in to_connection.getpeerinfo() ) + == to_num_peers ) diff --git a/resources/scripts/apidocs.py b/resources/scripts/apidocs.py index e02cd9939..3815126a2 100755 --- a/resources/scripts/apidocs.py +++ b/resources/scripts/apidocs.py @@ -6,7 +6,6 @@ from click import Context from tabulate import tabulate - from warnet.cli.main import cli file_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / ".." 
/ "docs" / "warcli.md" diff --git a/resources/scripts/graphdocs.py b/resources/scripts/graphdocs.py index 631d7df45..513b12091 100755 --- a/resources/scripts/graphdocs.py +++ b/resources/scripts/graphdocs.py @@ -5,7 +5,6 @@ from pathlib import Path from tabulate import tabulate - from warnet.cli.util import load_schema graph_schema = load_schema() diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index 2e56dc48a..356831839 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -5,7 +5,6 @@ from io import BytesIO import click - from test_framework.messages import ser_uint256 from test_framework.p2p import MESSAGEMAP diff --git a/src/warnet/cli/namespaces.py b/src/warnet/cli/namespaces.py index dbcaf24a5..fc550f310 100644 --- a/src/warnet/cli/namespaces.py +++ b/src/warnet/cli/namespaces.py @@ -5,7 +5,7 @@ import click import yaml -from .process import stream_command, run_command +from .process import run_command, stream_command NAMESPACES_DIR = Path("namespaces") DEFAULT_NAMESPACES = "two_namespaces_two_users" @@ -14,6 +14,7 @@ HELM_COMMAND = "helm upgrade --install" BITCOIN_CHART_LOCATION = "./resources/charts/namespaces" + @click.group(name="namespaces") def namespaces(): """Namespaces commands""" @@ -35,7 +36,9 @@ def deploy(namespaces: str): names = [n.get("name") for n in namespaces_file["namespaces"]] for n in names: if not n.startswith("warnet-"): - print(f"Failled to create namespace: {n}. Namespaces must start with a 'warnet-' prefix.") + print( + f"Failled to create namespace: {n}. Namespaces must start with a 'warnet-' prefix." 
+ ) # deploy namespaces for namespace in namespaces_file["namespaces"]: @@ -46,7 +49,9 @@ def deploy(namespaces: str): # all the keys apart from name namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} - cmd = f"{HELM_COMMAND} {namespace_name} {BITCOIN_CHART_LOCATION} -f {defaults_file_path}" + cmd = ( + f"{HELM_COMMAND} {namespace_name} {BITCOIN_CHART_LOCATION} -f {defaults_file_path}" + ) if namespace_config_override: with tempfile.NamedTemporaryFile( @@ -63,6 +68,7 @@ def deploy(namespaces: str): print(f"Error: {e}") return + @namespaces.command() def list(): """List all namespaces with 'warnet-' prefix""" @@ -78,6 +84,7 @@ def list(): else: print("No warnet namespaces found.") + @namespaces.command() @click.option("--all", "destroy_all", is_flag=True, help="Destroy all warnet- prefixed namespaces") @click.argument("namespace", required=False) diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 9b91edb1d..df2c47f1e 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -11,7 +11,6 @@ from rich import print from rich.console import Console from rich.table import Table - from warnet import scenarios as SCENARIOS from .k8s import apply_kubernetes_yaml, create_namespace, get_mission diff --git a/test/dag_connection_test.py b/test/dag_connection_test.py index 0957ce14f..a034a1818 100755 --- a/test/dag_connection_test.py +++ b/test/dag_connection_test.py @@ -17,7 +17,6 @@ def run_test(self): try: self.setup_network() self.run_connect_dag_scenario() - self.run_connect_dag_scenario_post_connection() finally: self.stop_server() @@ -29,21 +28,8 @@ def setup_network(self): def run_connect_dag_scenario(self): self.log.info("Running connect_dag scenario") - self.log_expected_msgs = [ - "Successfully ran the connect_dag.py scenario using a temporary file" - ] - self.log_unexpected_msgs = ["Test failed."] self.warcli("scenarios run-file test/data/scenario_connect_dag.py") self.wait_for_all_scenarios() 
- self.assert_log_msgs() - - def run_connect_dag_scenario_post_connection(self): - self.log.info("Running connect_dag scenario") - self.log_expected_msgs = ["Successfully ran the connect_dag.py scenario"] - self.log_unexpected_msgs = ["Test failed"] - self.warcli("scenarios run-file test/data/scenario_connect_dag.py") - self.wait_for_all_scenarios() - self.assert_log_msgs() if __name__ == "__main__": diff --git a/test/data/scenario_connect_dag.py b/test/data/scenario_connect_dag.py index b469b92c4..47f1ac247 100644 --- a/test/data/scenario_connect_dag.py +++ b/test/data/scenario_connect_dag.py @@ -2,7 +2,6 @@ import os from enum import Enum, auto, unique -from time import sleep # The base class exists inside the commander container from commander import Commander @@ -23,18 +22,7 @@ def set_test_params(self): # This is just a minimum self.num_nodes = 10 - def add_options(self, parser): - parser.add_argument( - "--network_name", - dest="network_name", - default="warnet", - help="", - ) - def run_test(self): - while not self.warnet.network_connected(): - sleep(1) - # All permutations of a directed acyclic graph with zero, one, or two inputs/outputs # # │ Node │ In │ Out │ Con In │ Con Out │ @@ -66,12 +54,6 @@ def run_test(self): self.connect_nodes(5, 4) self.connect_nodes(5, 6) self.connect_nodes(6, 7) - - # Nodes 8 & 9 shall come pre-connected. 
Attempt to connect them anyway to test the handling - # of dns node addresses - self.connect_nodes(8, 9) - self.connect_nodes(9, 8) - self.sync_all() zero_peers = self.nodes[0].getpeerinfo() @@ -85,31 +67,29 @@ def run_test(self): eight_peers = self.nodes[8].getpeerinfo() nine_peers = self.nodes[9].getpeerinfo() - for tank in self.warnet.tanks: - self.log.info( - f"Tank {tank.index}: {tank.warnet.tanks[tank.index].get_dns_addr()} pod:" - f" {tank.warnet.tanks[tank.index].get_ip_addr()}" - ) + for node in self.nodes: + self.log.info(f"Node {node.index}: tank={node.tank} ip={node.rpchost}") - self.assert_connection(zero_peers, 2, ConnectionType.DNS) - self.assert_connection(one_peers, 2, ConnectionType.DNS) - self.assert_connection(one_peers, 3, ConnectionType.DNS) + self.assert_connection(zero_peers, 2, ConnectionType.IP) + self.assert_connection(one_peers, 2, ConnectionType.IP) + self.assert_connection(one_peers, 3, ConnectionType.IP) self.assert_connection(two_peers, 0, ConnectionType.IP) self.assert_connection(two_peers, 1, ConnectionType.IP) - self.assert_connection(two_peers, 3, ConnectionType.DNS) - self.assert_connection(two_peers, 4, ConnectionType.DNS) + self.assert_connection(two_peers, 3, ConnectionType.IP) + self.assert_connection(two_peers, 4, ConnectionType.IP) self.assert_connection(three_peers, 1, ConnectionType.IP) self.assert_connection(three_peers, 2, ConnectionType.IP) - self.assert_connection(three_peers, 5, ConnectionType.DNS) + self.assert_connection(three_peers, 5, ConnectionType.IP) self.assert_connection(four_peers, 2, ConnectionType.IP) self.assert_connection(four_peers, 5, ConnectionType.IP) self.assert_connection(five_peers, 3, ConnectionType.IP) - self.assert_connection(five_peers, 4, ConnectionType.DNS) - self.assert_connection(five_peers, 6, ConnectionType.DNS) + self.assert_connection(five_peers, 4, ConnectionType.IP) + self.assert_connection(five_peers, 6, ConnectionType.IP) self.assert_connection(six_peers, 5, ConnectionType.IP) - 
self.assert_connection(six_peers, 7, ConnectionType.DNS) + self.assert_connection(six_peers, 7, ConnectionType.IP) self.assert_connection(seven_peers, 6, ConnectionType.IP) # Check the pre-connected nodes + # The only connection made by DNS name would be from the initial graph edges self.assert_connection(eight_peers, 9, ConnectionType.DNS) self.assert_connection(nine_peers, 8, ConnectionType.IP) @@ -121,14 +101,15 @@ def run_test(self): def assert_connection(self, connector, connectee_index, connection_type: ConnectionType): if connection_type == ConnectionType.DNS: assert any( - d.get("addr") == self.warnet.tanks[connectee_index].get_dns_addr() + # ignore the ...-service suffix + self.nodes[connectee_index].tank in d.get("addr") for d in connector - ), f"Could not find {self.options.network_name}-tank-00000{connectee_index}-service" + ), "Could not find connectee hostname" elif connection_type == ConnectionType.IP: assert any( - d.get("addr").split(":")[0] == self.warnet.tanks[connectee_index].get_ip_addr() + d.get("addr").split(":")[0] == self.nodes[connectee_index].rpchost for d in connector - ), f"Could not find Tank {connectee_index}'s ip addr" + ), "Could not find connectee ip addr" else: raise ValueError("ConnectionType must be of type DNS or IP") diff --git a/test/data/scenario_p2p_interface.py b/test/data/scenario_p2p_interface.py index 54c044c7c..95a9bd5d5 100644 --- a/test/data/scenario_p2p_interface.py +++ b/test/data/scenario_p2p_interface.py @@ -3,7 +3,6 @@ # The base class exists inside the commander container from commander import Commander - from test_framework.messages import CInv, msg_getdata from test_framework.p2p import P2PInterface diff --git a/test/ln_test.py b/test/ln_test.py index 3534dab27..31e8be112 100755 --- a/test/ln_test.py +++ b/test/ln_test.py @@ -5,7 +5,6 @@ from pathlib import Path from test_base import TestBase - from warnet.services import ServiceType diff --git a/test/scenarios_test.py b/test/scenarios_test.py index
9d6e17561..cce5e0e17 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -4,7 +4,6 @@ from pathlib import Path from test_base import TestBase - from warnet.cli.k8s import delete_pod from warnet.cli.scenarios import _active as scenarios_active from warnet.cli.scenarios import _available as scenarios_available From 3e43db52c27c99b6fad9f84be22f4c2a53f2d576 Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Wed, 21 Aug 2024 19:46:27 +0200 Subject: [PATCH 074/710] added apply_kubernetes_yaml_obj --- src/warnet/cli/k8s.py | 14 ++++++++++++++ src/warnet/cli/network.py | 30 ++++++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index ac35de212..176b70b19 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -1,5 +1,8 @@ import json +import yaml +import tempfile from importlib.resources import files +from pathlib import Path from typing import Any, Dict from kubernetes import client, config @@ -89,6 +92,17 @@ def apply_kubernetes_yaml(yaml_file: str): return stream_command(command) +def apply_kubernetes_yaml_obj(yaml_obj: str): + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: + yaml.dump(yaml_obj, temp_file) + temp_file_path = temp_file.name + + try: + apply_kubernetes_yaml(temp_file_path) + finally: + Path(temp_file_path).unlink() + + def delete_namespace(namespace: str): command = f"kubectl delete namespace {namespace} --ignore-not-found" return stream_command(command) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 8e29afdda..4dd12b0d1 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -10,6 +10,7 @@ from .bitcoin import _rpc from .k8s import ( + apply_kubernetes_yaml_obj, create_kubernetes_object, delete_namespace, get_edges, @@ -32,10 +33,29 @@ def network(): """Network commands""" -def create_edges_map(graph): +class Edge: + def __init__(self, src: str, dst: str, data: 
Dict[str, Any]): + self.src = src + self.dst = dst + self.data = data + + def to_dict(self): + return {"src": self.src, "dst": self.dst, "data": self.data} + + +def edges_from_network_file(network_file: Dict[str, Any]) -> List[Edge]: + edges = [] + for node in network_file["nodes"]: + if "connect" in node: + for connection in node["connect"]: + edges.append(Edge(node["name"], connection, "")) + return edges + + +def create_edges_map(network_file: Dict[str, Any]): edges = [] - for src, dst, data in graph.edges(data=True): - edges.append({"src": src, "dst": dst, "data": data}) + for edge in edges_from_network_file(network_file): + edges.append(edge.to_dict()) config_map = create_kubernetes_object( kind="ConfigMap", metadata={ @@ -44,7 +64,7 @@ def create_edges_map(graph): }, ) config_map["data"] = {"data": json.dumps(edges)} - return config_map + apply_kubernetes_yaml_obj(config_map) def setup_logging_helm() -> bool: @@ -107,6 +127,8 @@ def start(network_name: str, logging: bool, network: str): if temp_override_file_path: Path(temp_override_file_path).unlink() + create_edges_map(network_file) + @network.command() def down(): From 71015e3268fa345a3b897e48067b751036778d1e Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 22 Aug 2024 09:57:27 +0200 Subject: [PATCH 075/710] move commander into main src/ dir --- resources/images/commander/Dockerfile | 2 +- resources/images/commander/src/__init__.py | 0 src/warnet/scenarios/commander.py | 384 +++++++++++++++++++++ 3 files changed, 385 insertions(+), 1 deletion(-) delete mode 100644 resources/images/commander/src/__init__.py create mode 100644 src/warnet/scenarios/commander.py diff --git a/resources/images/commander/Dockerfile b/resources/images/commander/Dockerfile index 489c19793..ed93d4103 100644 --- a/resources/images/commander/Dockerfile +++ b/resources/images/commander/Dockerfile @@ -5,7 +5,7 @@ FROM python:3.12-slim #RUN pip install --no-cache-dir prometheus_client # Prometheus exporter script for bitcoind -COPY 
resources/images/commander/src / +COPY src/warnet/scenarios/commander.py / COPY src/test_framework /test_framework # -u: force the stdout and stderr streams to be unbuffered diff --git a/resources/images/commander/src/__init__.py b/resources/images/commander/src/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/warnet/scenarios/commander.py b/src/warnet/scenarios/commander.py new file mode 100644 index 000000000..fa1761ea9 --- /dev/null +++ b/src/warnet/scenarios/commander.py @@ -0,0 +1,384 @@ +import argparse +import configparser +import ipaddress +import json +import logging +import os +import pathlib +import random +import signal +import sys +import tempfile +from pathlib import Path + +from test_framework.authproxy import AuthServiceProxy +from test_framework.p2p import NetworkThread +from test_framework.test_framework import ( + TMPDIR_PREFIX, + BitcoinTestFramework, + TestStatus, +) +from test_framework.test_node import TestNode +from test_framework.util import PortSeed, get_rpc_proxy + +WARNET_FILE = Path(os.path.dirname(__file__)) / "warnet.json" +with open(WARNET_FILE) as file: + WARNET = json.load(file) + + +# Ensure that all RPC calls are made with brand new http connections +def auth_proxy_request(self, method, path, postdata): + self._set_conn() # creates new http client connection + return self.oldrequest(method, path, postdata) + + +AuthServiceProxy.oldrequest = AuthServiceProxy._request +AuthServiceProxy._request = auth_proxy_request + + +class Commander(BitcoinTestFramework): + # required by subclasses of BitcoinTestFramework + def set_test_params(self): + pass + + def run_test(self): + pass + + # Utility functions for Warnet scenarios + @staticmethod + def ensure_miner(node): + wallets = node.listwallets() + if "miner" not in wallets: + node.createwallet("miner", descriptors=True) + return node.get_wallet_rpc("miner") + + def handle_sigterm(self, signum, frame): + print("SIGTERM received, stopping...") + 
self.shutdown() + sys.exit(0) + + # The following functions are chopped-up hacks of + # the original methods from BitcoinTestFramework + + def setup(self): + signal.signal(signal.SIGTERM, self.handle_sigterm) + + # hacked from _start_logging() + # Scenarios will log plain messages to stdout only, which will can redirected by warnet + self.log = logging.getLogger(self.__class__.__name__) + self.log.setLevel(logging.INFO) # set this to DEBUG to see ALL RPC CALLS + + # Because scenarios run in their own subprocess, the logger here + # is not the same as the warnet server or other global loggers. + # Scenarios log directly to stdout which gets picked up by the + # subprocess manager in the server, and reprinted to the global log. + ch = logging.StreamHandler(sys.stdout) + formatter = logging.Formatter(fmt="%(name)-8s %(message)s") + ch.setFormatter(formatter) + self.log.addHandler(ch) + + for i, tank in enumerate(WARNET): + self.log.info( + f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}" + ) + node = TestNode( + i, + pathlib.Path(), # datadir path + chain=tank["chain"], + rpchost=tank["rpc_host"], + timewait=60, + timeout_factor=self.options.timeout_factor, + bitcoind=None, + bitcoin_cli=None, + cwd=self.options.tmpdir, + coverage_dir=self.options.coveragedir, + ) + node.rpc = get_rpc_proxy( + f"http://{tank['rpc_user']}:{tank['rpc_password']}@{tank['rpc_host']}:{tank['rpc_port']}", + i, + timeout=60, + coveragedir=self.options.coveragedir, + ) + node.rpc_connected = True + node.init_peers = tank["init_peers"] + self.nodes.append(node) + + self.num_nodes = len(self.nodes) + + # Set up temp directory and start logging + if self.options.tmpdir: + self.options.tmpdir = os.path.abspath(self.options.tmpdir) + os.makedirs(self.options.tmpdir, exist_ok=False) + else: + self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) + + seed = self.options.randomseed + if seed is None: + seed = random.randrange(sys.maxsize) + else: + 
self.log.info(f"User supplied random seed {seed}") + random.seed(seed) + self.log.info(f"PRNG seed is: {seed}") + + self.log.debug("Setting up network thread") + self.network_thread = NetworkThread() + self.network_thread.start() + + self.success = TestStatus.PASSED + + def parse_args(self): + previous_releases_path = "" + parser = argparse.ArgumentParser(usage="%(prog)s [options]") + parser.add_argument( + "--nocleanup", + dest="nocleanup", + default=False, + action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error", + ) + parser.add_argument( + "--nosandbox", + dest="nosandbox", + default=False, + action="store_true", + help="Don't use the syscall sandbox", + ) + parser.add_argument( + "--noshutdown", + dest="noshutdown", + default=False, + action="store_true", + help="Don't stop bitcoinds after the test execution", + ) + parser.add_argument( + "--cachedir", + dest="cachedir", + default=None, + help="Directory for caching pregenerated datadirs (default: %(default)s)", + ) + parser.add_argument( + "--tmpdir", dest="tmpdir", default=None, help="Root directory for datadirs" + ) + parser.add_argument( + "-l", + "--loglevel", + dest="loglevel", + default="DEBUG", + help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.", + ) + parser.add_argument( + "--tracerpc", + dest="trace_rpc", + default=False, + action="store_true", + help="Print out all RPC calls as they are made", + ) + parser.add_argument( + "--portseed", + dest="port_seed", + default=0, + help="The seed to use for assigning port numbers (default: current process id)", + ) + parser.add_argument( + "--previous-releases", + dest="prev_releases", + default=None, + action="store_true", + help="Force test of previous releases (default: %(default)s)", + ) + parser.add_argument( + "--coveragedir", + dest="coveragedir", + default=None, + help="Write tested RPC commands into this directory", + ) + parser.add_argument( + "--configfile", + dest="configfile", + default=None, + help="Location of the test framework config file (default: %(default)s)", + ) + parser.add_argument( + "--pdbonfailure", + dest="pdbonfailure", + default=False, + action="store_true", + help="Attach a python debugger if test fails", + ) + parser.add_argument( + "--usecli", + dest="usecli", + default=False, + action="store_true", + help="use bitcoin-cli instead of RPC for all commands", + ) + parser.add_argument( + "--perf", + dest="perf", + default=False, + action="store_true", + help="profile running nodes with perf for the duration of the test", + ) + parser.add_argument( + "--valgrind", + dest="valgrind", + default=False, + action="store_true", + help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required.", + ) + parser.add_argument( + "--randomseed", + default=0x7761726E6574, # "warnet" ascii + help="set a random seed for deterministically reproducing a previous test run", + ) + parser.add_argument( + "--timeout-factor", + dest="timeout_factor", + default=1, + help="adjust test timeouts by a factor. 
Setting it to 0 disables all timeouts", + ) + parser.add_argument( + "--network", + dest="network", + default="warnet", + help="Designate which warnet this should run on (default: warnet)", + ) + parser.add_argument( + "--v2transport", + dest="v2transport", + default=False, + action="store_true", + help="use BIP324 v2 connections between all nodes by default", + ) + + self.add_options(parser) + # Running TestShell in a Jupyter notebook causes an additional -f argument + # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument + # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168 + parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1") + self.options = parser.parse_args() + if self.options.timeout_factor == 0: + self.options.timeout_factor = 99999 + self.options.timeout_factor = self.options.timeout_factor or ( + 4 if self.options.valgrind else 1 + ) + self.options.previous_releases_path = previous_releases_path + config = configparser.ConfigParser() + if self.options.configfile is not None: + with open(self.options.configfile) as f: + config.read_file(f) + + config["environment"] = {"PACKAGE_BUGREPORT": ""} + + self.config = config + + if "descriptors" not in self.options: + # Wallet is not required by the test at all and the value of self.options.descriptors won't matter. + # It still needs to exist and be None in order for tests to work however. + # So set it to None to force -disablewallet, because the wallet is not needed. + self.options.descriptors = None + elif self.options.descriptors is None: + # Some wallet is either required or optionally used by the test. 
+ # Prefer SQLite unless it isn't available + if self.is_sqlite_compiled(): + self.options.descriptors = True + elif self.is_bdb_compiled(): + self.options.descriptors = False + else: + # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter + # It still needs to exist and be None in order for tests to work however. + # So set it to None, which will also set -disablewallet. + self.options.descriptors = None + + PortSeed.n = self.options.port_seed + + def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool = True): + """ + Kwargs: + wait_for_connect: if True, block until the nodes are verified as connected. You might + want to disable this when using -stopatheight with one of the connected nodes, + since there will be a race between the actual connection and performing + the assertions before one node shuts down. + """ + from_connection = self.nodes[a] + to_connection = self.nodes[b] + + to_ip_port = self.warnet.tanks[b].get_dns_addr() + from_ip_port = self.warnet.tanks[a].get_ip_addr() + + if peer_advertises_v2 is None: + peer_advertises_v2 = self.options.v2transport + + if peer_advertises_v2: + from_connection.addnode(node=to_ip_port, command="onetry", v2transport=True) + else: + # skip the optional third argument (default false) for + # compatibility with older clients + from_connection.addnode(to_ip_port, "onetry") + + if not wait_for_connect: + return + + def get_peer_ip(peer): + try: # we encounter a regular ip address + ip_addr = str(ipaddress.ip_address(peer["addr"].split(":")[0])) + return ip_addr + except ValueError as err: # or we encounter a service name + try: + # NETWORK-tank-TANK_INDEX-service + # NETWORK-test-TEST-tank-TANK_INDEX-service + tank_index = int(peer["addr"].split("-")[-2]) + except (ValueError, IndexError) as inner_err: + raise ValueError( + "could not derive tank index from service name: {} {}".format( + peer["addr"], inner_err + ) + ) from err + 
+ ip_addr = self.warnet.tanks[tank_index].get_ip_addr() + return ip_addr + + # poll until version handshake complete to avoid race conditions + # with transaction relaying + # See comments in net_processing: + # * Must have a version message before anything else + # * Must have a verack message before anything else + self.wait_until( + lambda: any( + peer["addr"] == to_ip_port and peer["version"] != 0 + for peer in from_connection.getpeerinfo() + ) + ) + self.wait_until( + lambda: any( + get_peer_ip(peer) == from_ip_port and peer["version"] != 0 + for peer in to_connection.getpeerinfo() + ) + ) + self.wait_until( + lambda: any( + peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 + for peer in from_connection.getpeerinfo() + ) + ) + self.wait_until( + lambda: any( + get_peer_ip(peer) == from_ip_port + and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 + for peer in to_connection.getpeerinfo() + ) + ) + # The message bytes are counted before processing the message, so make + # sure it was fully processed by waiting for a ping. 
+ self.wait_until( + lambda: any( + peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 + for peer in from_connection.getpeerinfo() + ) + ) + self.wait_until( + lambda: any( + get_peer_ip(peer) == from_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 + for peer in to_connection.getpeerinfo() + ) + ) From 42150de4d545d969fba9151674d22f0c4bf70674 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 22 Aug 2024 09:58:23 +0200 Subject: [PATCH 076/710] add missing types --- resources/scripts/apidocs.py | 1 + resources/scripts/graphdocs.py | 1 + src/warnet/cli/bitcoin.py | 17 ++++++++-------- src/warnet/cli/k8s.py | 30 ++++++++++++++--------------- src/warnet/cli/namespaces.py | 2 +- src/warnet/cli/network.py | 8 ++++---- src/warnet/cli/process.py | 4 ++-- src/warnet/cli/scenarios.py | 15 ++++++++------- test/data/scenario_p2p_interface.py | 1 + test/ln_test.py | 1 + test/scenarios_test.py | 1 + 11 files changed, 44 insertions(+), 37 deletions(-) diff --git a/resources/scripts/apidocs.py b/resources/scripts/apidocs.py index 3815126a2..e02cd9939 100755 --- a/resources/scripts/apidocs.py +++ b/resources/scripts/apidocs.py @@ -6,6 +6,7 @@ from click import Context from tabulate import tabulate + from warnet.cli.main import cli file_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / ".." 
/ "docs" / "warcli.md" diff --git a/resources/scripts/graphdocs.py b/resources/scripts/graphdocs.py index 513b12091..631d7df45 100755 --- a/resources/scripts/graphdocs.py +++ b/resources/scripts/graphdocs.py @@ -5,6 +5,7 @@ from pathlib import Path from tabulate import tabulate + from warnet.cli.util import load_schema graph_schema = load_schema() diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index 356831839..397e74066 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -5,6 +5,7 @@ from io import BytesIO import click + from test_framework.messages import ser_uint256 from test_framework.p2p import MESSAGEMAP @@ -20,14 +21,14 @@ def bitcoin(): @click.argument("node", type=int) @click.argument("method", type=str) @click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments -def rpc(node, method, params): +def rpc(node: int, method: str, params: str): """ Call bitcoin-cli [params] on """ print(_rpc(node, method, params)) -def _rpc(node, method, params): +def _rpc(node: int, method: str, params: str): if params: cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" else: @@ -37,7 +38,7 @@ def _rpc(node, method, params): @bitcoin.command() @click.argument("node", type=int, required=True) -def debug_log(node): +def debug_log(node: int): """ Fetch the Bitcoin Core debug log from """ @@ -49,7 +50,7 @@ def debug_log(node): @click.argument("pattern", type=str, required=True) @click.option("--show-k8s-timestamps", is_flag=True, default=False, show_default=True) @click.option("--no-sort", is_flag=True, default=False, show_default=True) -def grep_logs(pattern, show_k8s_timestamps, no_sort): +def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): """ Grep combined bitcoind logs using regex """ @@ -120,7 +121,7 @@ def grep_logs(pattern, show_k8s_timestamps, no_sort): @click.argument("node_a", type=int, 
required=True) @click.argument("node_b", type=int, required=True) @click.option("--network", default="regtest", show_default=True) -def messages(node_a, node_b, network): +def messages(node_a: int, node_b: int, network: str): """ Fetch messages sent between and in [network] """ @@ -154,7 +155,7 @@ def messages(node_a, node_b, network): print(f"Error fetching messages between nodes {node_a} and {node_b}: {e}") -def get_messages(node_a, node_b, network): +def get_messages(node_a: int, node_b: int, network: str): """ Fetch messages from the message capture files """ @@ -198,7 +199,7 @@ def get_messages(node_a, node_b, network): # This function is a hacked-up copy of process_file() from # Bitcoin Core contrib/message-capture/message-capture-parser.py -def parse_raw_messages(blob, outbound): +def parse_raw_messages(blob: bytes, outbound: bool): TIME_SIZE = 8 LENGTH_SIZE = 4 MSGTYPE_SIZE = 12 @@ -267,7 +268,7 @@ def parse_raw_messages(blob, outbound): return messages -def to_jsonable(obj): +def to_jsonable(obj: str): HASH_INTS = [ "blockhash", "block_hash", diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 176b70b19..98db9e60e 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -1,11 +1,11 @@ import json -import yaml import tempfile from importlib.resources import files from pathlib import Path -from typing import Any, Dict +import yaml from kubernetes import client, config +from kubernetes.client.models import CoreV1Event, V1PodList from kubernetes.dynamic import DynamicClient from .process import stream_command @@ -13,22 +13,22 @@ WAR_MANIFESTS = files("manifests") -def get_static_client(): +def get_static_client() -> CoreV1Event: config.load_kube_config() return client.CoreV1Api() -def get_dynamic_client(): +def get_dynamic_client() -> DynamicClient: config.load_kube_config() return DynamicClient(client.ApiClient()) -def get_pods(): +def get_pods() -> V1PodList: sclient = get_static_client() return sclient.list_namespaced_pod("warnet") 
-def get_mission(mission): +def get_mission(mission: str) -> list[V1PodList]: pods = get_pods() crew = [] for pod in pods.items: @@ -37,15 +37,15 @@ def get_mission(mission): return crew -def get_edges(): +def get_edges() -> any: sclient = get_static_client() configmap = sclient.read_namespaced_config_map(name="edges", namespace="warnet") return json.loads(configmap.data["data"]) def create_kubernetes_object( - kind: str, metadata: Dict[str, Any], spec: Dict[str, Any] = None -) -> Dict[str, Any]: + kind: str, metadata: dict[str, any], spec: dict[str, any] = None +) -> dict[str, any]: obj = { "apiVersion": "v1", "kind": kind, @@ -60,7 +60,7 @@ def create_namespace() -> dict: return {"apiVersion": "v1", "kind": "Namespace", "metadata": {"name": "warnet"}} -def set_kubectl_context(namespace: str): +def set_kubectl_context(namespace: str) -> bool: """ Set the default kubectl context to the specified namespace. """ @@ -73,7 +73,7 @@ def set_kubectl_context(namespace: str): return result -def deploy_base_configurations(): +def deploy_base_configurations() -> bool: base_configs = [ "namespace.yaml", "rbac-config.yaml", @@ -87,12 +87,12 @@ def deploy_base_configurations(): return True -def apply_kubernetes_yaml(yaml_file: str): +def apply_kubernetes_yaml(yaml_file: str) -> bool: command = f"kubectl apply -f {yaml_file}" return stream_command(command) -def apply_kubernetes_yaml_obj(yaml_obj: str): +def apply_kubernetes_yaml_obj(yaml_obj: str) -> None: with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: yaml.dump(yaml_obj, temp_file) temp_file_path = temp_file.name @@ -103,11 +103,11 @@ def apply_kubernetes_yaml_obj(yaml_obj: str): Path(temp_file_path).unlink() -def delete_namespace(namespace: str): +def delete_namespace(namespace: str) -> bool: command = f"kubectl delete namespace {namespace} --ignore-not-found" return stream_command(command) -def delete_pod(pod_name: str): +def delete_pod(pod_name: str) -> bool: command = f"kubectl delete 
pod {pod_name}" return stream_command(command) diff --git a/src/warnet/cli/namespaces.py b/src/warnet/cli/namespaces.py index fc550f310..7c208cb94 100644 --- a/src/warnet/cli/namespaces.py +++ b/src/warnet/cli/namespaces.py @@ -37,7 +37,7 @@ def deploy(namespaces: str): for n in names: if not n.startswith("warnet-"): print( - f"Failled to create namespace: {n}. Namespaces must start with a 'warnet-' prefix." + f"Failed to create namespace: {n}. Namespaces must start with a 'warnet-' prefix." ) # deploy namespaces diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 4dd12b0d1..760b78db1 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -34,7 +34,7 @@ def network(): class Edge: - def __init__(self, src: str, dst: str, data: Dict[str, Any]): + def __init__(self, src: str, dst: str, data: dict[str, any]): self.src = src self.dst = dst self.data = data @@ -43,7 +43,7 @@ def to_dict(self): return {"src": self.src, "dst": self.dst, "data": self.data} -def edges_from_network_file(network_file: Dict[str, Any]) -> List[Edge]: +def edges_from_network_file(network_file: dict[str, any]) -> list[Edge]: edges = [] for node in network_file["nodes"]: if "connect" in node: @@ -52,7 +52,7 @@ def edges_from_network_file(network_file: Dict[str, Any]) -> List[Edge]: return edges -def create_edges_map(network_file: Dict[str, Any]): +def create_edges_map(network_file: dict[str, any]): edges = [] for edge in edges_from_network_file(network_file): edges.append(edge.to_dict()) @@ -149,7 +149,7 @@ def logs(follow: bool): @network.command() def connected(): - """Determine if all p2p conenctions defined in graph are established""" + """Determine if all p2p connections defined in graph are established""" print(_connected()) diff --git a/src/warnet/cli/process.py b/src/warnet/cli/process.py index ece252458..2cd4129f5 100644 --- a/src/warnet/cli/process.py +++ b/src/warnet/cli/process.py @@ -1,7 +1,7 @@ import subprocess -def run_command(command) -> str: 
+def run_command(command: str) -> str: result = subprocess.run( command, shell=True, capture_output=True, text=True, executable="/bin/bash" ) @@ -10,7 +10,7 @@ def run_command(command) -> str: return result.stdout -def stream_command(command, env=None) -> bool: +def stream_command(command: str, env=None) -> bool: process = subprocess.Popen( ["/bin/bash", "-c", command], stdout=subprocess.PIPE, diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index df2c47f1e..ea09f1b10 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -11,6 +11,7 @@ from rich import print from rich.console import Console from rich.table import Table + from warnet import scenarios as SCENARIOS from .k8s import apply_kubernetes_yaml, create_namespace, get_mission @@ -51,7 +52,7 @@ def _available(): @scenarios.command(context_settings={"ignore_unknown_options": True}) @click.argument("scenario", type=str) @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) -def run(scenario, additional_args): +def run(scenario: str, additional_args: tuple[str]): """ Run from the Warnet Test Framework with optional arguments """ @@ -69,7 +70,7 @@ def run(scenario, additional_args): @scenarios.command(context_settings={"ignore_unknown_options": True}) @click.argument("scenario_path", type=str) @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) -def run_file(scenario_path, additional_args): +def run_file(scenario_path: str, additional_args: tuple[str]): """ Run from the Warnet Test Framework with optional arguments """ @@ -79,7 +80,7 @@ def run_file(scenario_path, additional_args): return run_scenario(scenario_path, additional_args) -def run_scenario(scenario_path, additional_args): +def run_scenario(scenario_path: str, additional_args: tuple[str]): if not os.path.exists(scenario_path): raise Exception(f"Scenario file not found at {scenario_path}.") @@ -119,7 +120,7 @@ def run_scenario(scenario_path, additional_args): "apiVersion": "v1", 
"kind": "ConfigMap", "metadata": { - "name": "scnaeriopy", + "name": "scenariopy", "namespace": "warnet", }, "data": {"scenario.py": scenario_text}, @@ -146,7 +147,7 @@ def run_scenario(scenario_path, additional_args): "subPath": "warnet.json", }, { - "name": "scnaeriopy", + "name": "scenariopy", "mountPath": "scenario.py", "subPath": "scenario.py", }, @@ -155,7 +156,7 @@ def run_scenario(scenario_path, additional_args): ], "volumes": [ {"name": "warnetjson", "configMap": {"name": "warnetjson"}}, - {"name": "scnaeriopy", "configMap": {"name": "scnaeriopy"}}, + {"name": "scenariopy", "configMap": {"name": "scenariopy"}}, ], }, }, @@ -188,6 +189,6 @@ def active(): console.print(table) -def _active(): +def _active() -> list[str]: commanders = get_mission("commander") return [{"commander": c.metadata.name, "status": c.status.phase.lower()} for c in commanders] diff --git a/test/data/scenario_p2p_interface.py b/test/data/scenario_p2p_interface.py index 95a9bd5d5..54c044c7c 100644 --- a/test/data/scenario_p2p_interface.py +++ b/test/data/scenario_p2p_interface.py @@ -3,6 +3,7 @@ # The base class exists inside the commander container from commander import Commander + from test_framework.messages import CInv, msg_getdata from test_framework.p2p import P2PInterface diff --git a/test/ln_test.py b/test/ln_test.py index 31e8be112..3534dab27 100755 --- a/test/ln_test.py +++ b/test/ln_test.py @@ -5,6 +5,7 @@ from pathlib import Path from test_base import TestBase + from warnet.services import ServiceType diff --git a/test/scenarios_test.py b/test/scenarios_test.py index cce5e0e17..9d6e17561 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -4,6 +4,7 @@ from pathlib import Path from test_base import TestBase + from warnet.cli.k8s import delete_pod from warnet.cli.scenarios import _active as scenarios_active from warnet.cli.scenarios import _available as scenarios_available From 43e59c30cbeebad0fcd657d4ca8c64549d220397 Mon Sep 17 00:00:00 2001 From: willcl-ark 
Date: Thu, 22 Aug 2024 08:00:04 +0000 Subject: [PATCH 077/710] Update apidocs and/or graphdocs --- docs/warcli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/warcli.md b/docs/warcli.md index ee9d050a7..7002d3b7b 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -150,7 +150,7 @@ List all namespaces with 'warnet-' prefix ## Network ### `warcli network connected` -Determine if all p2p conenctions defined in graph are established +Determine if all p2p connections defined in graph are established ### `warcli network down` From 374c9c7a5166b76079e484e92eb4eec0f75d7a96 Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 04:16:21 -0500 Subject: [PATCH 078/710] fix `network status` --- src/warnet/cli/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 760b78db1..11304241c 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -188,7 +188,7 @@ def _status(): stats = [] for tank in tanks: status = { - "tank_index": tank.metadata.labels["index"], + "tank_index": tank.metadata.labels["app.kubernetes.io/instance"], "bitcoin_status": tank.status.phase.lower(), } stats.append(status) From e48e9a878dc9b9b55961df347bdd0ee1f359aa5c Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 06:03:00 -0500 Subject: [PATCH 079/710] fix debug-log --- src/warnet/cli/bitcoin.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index 397e74066..1643d8bbc 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -38,11 +38,13 @@ def _rpc(node: int, method: str, params: str): @bitcoin.command() @click.argument("node", type=int, required=True) -def debug_log(node: int): +@click.option("--namespace", type=str, default="warnet", show_default=True) +def debug_log(node: int, namespace: str): """ - Fetch the Bitcoin Core debug log from + Fetch the Bitcoin Core debug 
log from in """ - cmd = f"kubectl logs warnet-tank-{node}" + node = str(node).zfill(4) + cmd = f"kubectl logs tank-{node} -n {namespace}" print(run_command(cmd)) From d6cfbd5290f49fc4ff9671f9979ba6e9580484da Mon Sep 17 00:00:00 2001 From: mplsgrant Date: Thu, 22 Aug 2024 11:04:59 +0000 Subject: [PATCH 080/710] Update apidocs and/or graphdocs --- docs/warcli.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/warcli.md b/docs/warcli.md index 7002d3b7b..ff6df844f 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -33,12 +33,13 @@ Check Warnet requirements are installed ## Bitcoin ### `warcli bitcoin debug-log` -Fetch the Bitcoin Core debug log from \ +Fetch the Bitcoin Core debug log from \ in \ options: -| name | type | required | default | -|--------|--------|------------|-----------| -| node | Int | yes | | +| name | type | required | default | +|-----------|--------|------------|-----------| +| node | Int | yes | | +| namespace | String | | "warnet" | ### `warcli bitcoin grep-logs` Grep combined bitcoind logs using regex \ From 53dd8b81ee4e06e58f586257c7e47cce9596c134 Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 06:27:31 -0500 Subject: [PATCH 081/710] fix `bitcoin messages` --- src/warnet/cli/bitcoin.py | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index 1643d8bbc..27ad893d1 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -122,14 +122,15 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): @bitcoin.command() @click.argument("node_a", type=int, required=True) @click.argument("node_b", type=int, required=True) -@click.option("--network", default="regtest", show_default=True) -def messages(node_a: int, node_b: int, network: str): +@click.option("--chain", default="regtest", show_default=True) +@click.option("--namespace", default="warnet", show_default=True) +def 
messages(node_a: int, node_b: int, chain: str, namespace: str): """ - Fetch messages sent between and in [network] + Fetch messages sent between and on [chain] in a [namespace] """ try: # Get the messages - messages = get_messages(node_a, node_b, network) + messages = get_messages(node_a, node_b, chain, namespace) if not messages: print(f"No messages found between {node_a} and {node_b}") @@ -157,34 +158,42 @@ def messages(node_a: int, node_b: int, network: str): print(f"Error fetching messages between nodes {node_a} and {node_b}: {e}") -def get_messages(node_a: int, node_b: int, network: str): +def get_messages(node_a: int, node_b: int, chain: str, namespace: str): """ Fetch messages from the message capture files """ - subdir = "" if network == "main" else f"{network}/" + node_a = str(node_a).zfill(4) + node_b = str(node_b).zfill(4) + + subdir = "" if chain == "main" else f"{chain}/" base_dir = f"/root/.bitcoin/{subdir}message_capture" # Get the IP of node_b - cmd = f"kubectl get pod warnet-tank-{node_b} -o jsonpath='{{.status.podIP}}'" + cmd = f"kubectl get pod tank-{node_b} -n {namespace} -o jsonpath='{{.status.podIP}}'" + print(cmd) node_b_ip = run_command(cmd).strip() + print(f"node_b-ip: {node_b_ip}") # Get the service IP of node_b - cmd = f"kubectl get service warnet-tank-{node_b}-service -o jsonpath='{{.spec.clusterIP}}'" + cmd = f"kubectl get service tank-{node_b} -n {namespace} -o jsonpath='{{.spec.clusterIP}}'" + print(cmd) node_b_service_ip = run_command(cmd).strip() # List directories in the message capture folder - cmd = f"kubectl exec warnet-tank-{node_a} -- ls {base_dir}" + cmd = f"kubectl exec tank-{node_a} -n {namespace} -- ls {base_dir}" + print(cmd) dirs = run_command(cmd).splitlines() messages = [] for dir_name in dirs: + dir_name = dir_name.split("_")[0] if node_b_ip in dir_name or node_b_service_ip in dir_name: for file, outbound in [["msgs_recv.dat", False], ["msgs_sent.dat", True]]: - file_path = f"{base_dir}/{dir_name}/{file}" - + 
file_path = f"{base_dir}/{dir_name}_18444/{file}" + print(file_path) # Fetch the file contents from the container - cmd = f"kubectl exec warnet-tank-{node_a} -- cat {file_path}" + cmd = f"kubectl exec tank-{node_a} -n {namespace} -- cat {file_path}" import subprocess blob = subprocess.run( From 708f590027171d7721d1228ce961f5d9a054e526 Mon Sep 17 00:00:00 2001 From: mplsgrant Date: Thu, 22 Aug 2024 11:30:23 +0000 Subject: [PATCH 082/710] Update apidocs and/or graphdocs --- docs/warcli.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/warcli.md b/docs/warcli.md index ff6df844f..867b93d6b 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -52,14 +52,15 @@ options: | no_sort | Bool | | False | ### `warcli bitcoin messages` -Fetch messages sent between \ and \ in [network] +Fetch messages sent between \ and \ on [chain] in a [namespace] options: -| name | type | required | default | -|---------|--------|------------|-----------| -| node_a | Int | yes | | -| node_b | Int | yes | | -| network | String | | "regtest" | +| name | type | required | default | +|-----------|--------|------------|-----------| +| node_a | Int | yes | | +| node_b | Int | yes | | +| chain | String | | "regtest" | +| namespace | String | | "warnet" | ### `warcli bitcoin rpc` Call bitcoin-cli \ [params] on \ From ee57d0d50e44dde9431de3fd857e2e2178f061d0 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 13:42:35 +0200 Subject: [PATCH 083/710] rpc test passing with helm --- .../charts/bitcoincore/templates/pod.yaml | 2 + src/warnet/cli/bitcoin.py | 125 +++++++----------- src/warnet/cli/network.py | 26 +--- test/data/12_node_ring.graphml | 73 ---------- test/data/12_node_ring/defaults.yaml | 4 + test/data/12_node_ring/network.yaml | 61 +++++++++ test/rpc_test.py | 22 +-- 7 files changed, 128 insertions(+), 185 deletions(-) create mode 100644 test/data/12_node_ring/defaults.yaml create mode 100644 test/data/12_node_ring/network.yaml diff --git 
a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index f2954abc9..054c20e85 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -7,6 +7,8 @@ metadata: {{- with .Values.podLabels }} {{- toYaml . | nindent 4 }} {{- end }} + annotations: + init_peers: "{{ .Values.connect | len }}" spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index 27ad893d1..7e30e9259 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -11,6 +11,7 @@ from .process import run_command +from .k8s import get_mission @click.group(name="bitcoin") def bitcoin(): @@ -18,33 +19,31 @@ def bitcoin(): @bitcoin.command(context_settings={"ignore_unknown_options": True}) -@click.argument("node", type=int) +@click.argument("tank", type=str) @click.argument("method", type=str) @click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments -def rpc(node: int, method: str, params: str): +def rpc(tank: str, method: str, params: str): """ - Call bitcoin-cli [params] on + Call bitcoin-cli [params] on """ - print(_rpc(node, method, params)) + print(_rpc(tank, method, params)) -def _rpc(node: int, method: str, params: str): +def _rpc(tank: str, method: str, params: str): if params: - cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" + cmd = f"kubectl exec {tank} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" else: - cmd = f"kubectl exec warnet-tank-{node} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" + cmd = f"kubectl exec {tank} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" return run_command(cmd) @bitcoin.command() -@click.argument("node", type=int, required=True) 
-@click.option("--namespace", type=str, default="warnet", show_default=True) -def debug_log(node: int, namespace: str): +@click.argument("tank", type=str, required=True) +def debug_log(tank: str): """ - Fetch the Bitcoin Core debug log from in + Fetch the Bitcoin Core debug log from """ - node = str(node).zfill(4) - cmd = f"kubectl logs tank-{node} -n {namespace}" + cmd = f"kubectl logs {tank}" print(run_command(cmd)) @@ -57,44 +56,31 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): Grep combined bitcoind logs using regex """ - # Get all pods in the namespace - command = "kubectl get pods -n warnet -o json" - pods_json = run_command(command) - - if pods_json is False: - print("Error: Failed to get pods information") - return - - try: - pods = json.loads(pods_json) - except json.JSONDecodeError as e: - print(f"Error decoding JSON: {e}") - return + tanks = get_mission("tank") matching_logs = [] - for pod in pods.get("items", []): - pod_name = pod.get("metadata", {}).get("name", "") - if "warnet" in pod_name: - # Get container names for this pod - containers = pod.get("spec", {}).get("containers", []) - if not containers: - continue + for tank in tanks: + pod_name = tank.metadata.name + # Get container names for this pod + containers = tank.spec.containers + if not containers: + continue - # Use the first container name - container_name = containers[0].get("name", "") - if not container_name: - continue + # Use the first container name + container_name = containers[0].name + if not container_name: + continue - # Get logs from the specific container - command = f"kubectl logs {pod_name} -c {container_name} -n warnet --timestamps" - logs = run_command(command) + # Get logs from the specific container + command = f"kubectl logs {pod_name} -c {container_name} -n warnet --timestamps" + logs = run_command(command) - if logs is not False: - # Process logs - for log_entry in logs.splitlines(): - if re.search(pattern, log_entry): - 
matching_logs.append((log_entry, pod_name)) + if logs is not False: + # Process logs + for log_entry in logs.splitlines(): + if re.search(pattern, log_entry): + matching_logs.append((log_entry, pod_name)) # Sort logs if needed if not no_sort: @@ -120,20 +106,19 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): @bitcoin.command() -@click.argument("node_a", type=int, required=True) -@click.argument("node_b", type=int, required=True) -@click.option("--chain", default="regtest", show_default=True) -@click.option("--namespace", default="warnet", show_default=True) -def messages(node_a: int, node_b: int, chain: str, namespace: str): +@click.argument("tank_a", type=str, required=True) +@click.argument("tank_b", type=str, required=True) +@click.option("--network", default="regtest", show_default=True) +def messages(tank_a: str, tank_b: str, network: str): """ - Fetch messages sent between and on [chain] in a [namespace] + Fetch messages sent between and in [network] """ try: # Get the messages - messages = get_messages(node_a, node_b, chain, namespace) + messages = get_messages(tank_a, tank_b, network) if not messages: - print(f"No messages found between {node_a} and {node_b}") + print(f"No messages found between {tank_a} and {tank_b}") return # Process and print messages @@ -155,45 +140,31 @@ def messages(node_a: int, node_b: int, chain: str, namespace: str): print(f"{timestamp} {direction} {msgtype} {body_str}") except Exception as e: - print(f"Error fetching messages between nodes {node_a} and {node_b}: {e}") - - -def get_messages(node_a: int, node_b: int, chain: str, namespace: str): - """ - Fetch messages from the message capture files - """ - node_a = str(node_a).zfill(4) - node_b = str(node_b).zfill(4) + print(f"Error fetching messages between nodes {tank_a} and {tank_b}: {e}") - subdir = "" if chain == "main" else f"{chain}/" - base_dir = f"/root/.bitcoin/{subdir}message_capture" - # Get the IP of node_b - cmd = f"kubectl get pod 
tank-{node_b} -n {namespace} -o jsonpath='{{.status.podIP}}'" - print(cmd) - node_b_ip = run_command(cmd).strip() - print(f"node_b-ip: {node_b_ip}") +def get_messages(tank_a: str, tank_b: str, network: str): + cmd = f"kubectl get pod {tank_b} -o jsonpath='{{.status.podIP}}'" + tank_b_ip = run_command(cmd).strip() # Get the service IP of node_b - cmd = f"kubectl get service tank-{node_b} -n {namespace} -o jsonpath='{{.spec.clusterIP}}'" - print(cmd) - node_b_service_ip = run_command(cmd).strip() + cmd = f"kubectl get service {tank_b} -o jsonpath='{{.spec.clusterIP}}'" + tank_b_service_ip = run_command(cmd).strip() # List directories in the message capture folder - cmd = f"kubectl exec tank-{node_a} -n {namespace} -- ls {base_dir}" - print(cmd) + cmd = f"kubectl exec {tank_a} -- ls {base_dir}" + dirs = run_command(cmd).splitlines() messages = [] for dir_name in dirs: - dir_name = dir_name.split("_")[0] - if node_b_ip in dir_name or node_b_service_ip in dir_name: + if tank_b_ip in dir_name or tank_b_service_ip in dir_name: for file, outbound in [["msgs_recv.dat", False], ["msgs_sent.dat", True]]: file_path = f"{base_dir}/{dir_name}_18444/{file}" print(file_path) # Fetch the file contents from the container - cmd = f"kubectl exec tank-{node_a} -n {namespace} -- cat {file_path}" + cmd = f"kubectl exec {tank_a} -- cat {file_path}" import subprocess blob = subprocess.run( diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 11304241c..cc2d35344 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -52,21 +52,6 @@ def edges_from_network_file(network_file: dict[str, any]) -> list[Edge]: return edges -def create_edges_map(network_file: dict[str, any]): - edges = [] - for edge in edges_from_network_file(network_file): - edges.append(edge.to_dict()) - config_map = create_kubernetes_object( - kind="ConfigMap", - metadata={ - "name": "edges", - "namespace": "warnet", - }, - ) - config_map["data"] = {"data": json.dumps(edges)} - 
apply_kubernetes_yaml_obj(config_map) - - def setup_logging_helm() -> bool: helm_commands = [ "helm repo add grafana https://grafana.github.io/helm-charts", @@ -127,8 +112,6 @@ def start(network_name: str, logging: bool, network: str): if temp_override_file_path: Path(temp_override_file_path).unlink() - create_edges_map(network_file) - @network.command() def down(): @@ -155,21 +138,16 @@ def connected(): def _connected(): tanks = get_mission("tank") - edges = get_edges() for tank in tanks: # Get actual - index = tank.metadata.labels["index"] - peerinfo = json.loads(_rpc(int(index), "getpeerinfo", "")) + peerinfo = json.loads(_rpc(tank.metadata.name, "getpeerinfo", "")) manuals = 0 for peer in peerinfo: if peer["connection_type"] == "manual": manuals += 1 - # Get expected - init_peers = sum(1 for edge in edges if edge["src"] == index) - print(f"Tank {index} connections: expected={init_peers} actual={manuals}") # Even if more edges are specifed, bitcoind only allows # 8 manual outbound connections - if min(8, init_peers) > manuals: + if min(8, int(tank.metadata.annotations["init_peers"])) > manuals: print("Network not connected") return False print("Network connected") diff --git a/test/data/12_node_ring.graphml b/test/data/12_node_ring.graphml index a45889765..e69de29bb 100644 --- a/test/data/12_node_ring.graphml +++ b/test/data/12_node_ring.graphml @@ -1,73 +0,0 @@ - - - - - - - - - - - - - - - - - - - - 27.0 - debug=validation - - - 27.0 - debug=validation - - - 27.0 - debug=validation - - - 27.0 - - - 27.0 - - - 27.0 - - - 27.0 - - - 27.0 - - - 27.0 - - - 27.0 - - - 27.0 - - - 27.0 - - - - - - - - - - - - - - - - - diff --git a/test/data/12_node_ring/defaults.yaml b/test/data/12_node_ring/defaults.yaml new file mode 100644 index 000000000..7e021cad1 --- /dev/null +++ b/test/data/12_node_ring/defaults.yaml @@ -0,0 +1,4 @@ +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: "27.0" diff --git 
a/test/data/12_node_ring/network.yaml b/test/data/12_node_ring/network.yaml new file mode 100644 index 000000000..8ae4e752d --- /dev/null +++ b/test/data/12_node_ring/network.yaml @@ -0,0 +1,61 @@ +nodes: + - name: tank-0000 + connect: + - tank-0001 + config: | + debug=rpc + debug=validation + - name: tank-0001 + connect: + - tank-0002 + config: | + debug=net + debug=validation + - name: tank-0002 + connect: + - tank-0003 + config: | + debug=validation + - name: tank-0003 + connect: + - tank-0004 + config: | + debug=validation + - name: tank-0004 + connect: + - tank-0005 + - name: tank-0005 + connect: + - tank-0006 + config: | + debug=validation + - name: tank-0006 + connect: + - tank-0007 + - name: tank-0007 + config: | + debug=validation + connect: + - tank-0008 + config: | + debug=validation + - name: tank-0008 + connect: + - tank-0009 + config: | + debug=validation + - name: tank-0009 + connect: + - tank-0010 + config: | + debug=validation + - name: tank-0010 + connect: + - tank-0011 + config: | + debug=validation + - name: tank-0011 + connect: + - tank-0000 + config: | + debug=validation \ No newline at end of file diff --git a/test/rpc_test.py b/test/rpc_test.py index b53ccb0fb..ae08a57bb 100755 --- a/test/rpc_test.py +++ b/test/rpc_test.py @@ -10,7 +10,7 @@ class RPCTest(TestBase): def __init__(self): super().__init__() - self.graph_file_path = Path(os.path.dirname(__file__)) / "data" / "12_node_ring.graphml" + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "12_node_ring" def run_test(self): try: @@ -24,24 +24,24 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network start {self.graph_file_path}")) + self.log.info(self.warcli(f"network start {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() def test_rpc_commands(self): self.log.info("Testing basic RPC commands") - self.warcli("bitcoin rpc 0 getblockcount") - self.warcli("bitcoin 
rpc 1 createwallet miner") - self.warcli("bitcoin rpc 1 -generate 101") - self.wait_for_predicate(lambda: "101" in self.warcli("bitcoin rpc 0 getblockcount")) + self.warcli("bitcoin rpc tank-0000 getblockcount") + self.warcli("bitcoin rpc tank-0001 createwallet miner") + self.warcli("bitcoin rpc tank-0001 -generate 101") + self.wait_for_predicate(lambda: "101" in self.warcli("bitcoin rpc tank-0000 getblockcount")) def test_transaction_propagation(self): self.log.info("Testing transaction propagation") address = "bcrt1qthmht0k2qnh3wy7336z05lu2km7emzfpm3wg46" - txid = self.warcli(f"bitcoin rpc 1 sendtoaddress {address} 0.1") - self.wait_for_predicate(lambda: txid in self.warcli("bitcoin rpc 0 getrawmempool")) + txid = self.warcli(f"bitcoin rpc tank-0001 sendtoaddress {address} 0.1") + self.wait_for_predicate(lambda: txid in self.warcli("bitcoin rpc tank-0000 getrawmempool")) - node_log = self.warcli("bitcoin debug-log 1") + node_log = self.warcli("bitcoin debug-log tank-0001") assert txid in node_log, "Transaction ID not found in node log" all_logs = self.warcli(f"bitcoin grep-logs {txid}") @@ -50,14 +50,14 @@ def test_transaction_propagation(self): def test_message_exchange(self): self.log.info("Testing message exchange between nodes") - msgs = self.warcli("bitcoin messages 0 1") + msgs = self.warcli("bitcoin messages tank-0000 tank-0001") assert "verack" in msgs, "VERACK message not found in exchange" def test_address_manager(self): self.log.info("Testing address manager") def got_addrs(): - addrman = json.loads(self.warcli("bitcoin rpc 0 getrawaddrman")) + addrman = json.loads(self.warcli("bitcoin rpc tank-0000 getrawaddrman")) for key in ["tried", "new"]: obj = addrman[key] keys = list(obj.keys()) From 85ad4b6e516effb8a866f7193cb763237e453b44 Mon Sep 17 00:00:00 2001 From: pinheadmz Date: Thu, 22 Aug 2024 11:48:27 +0000 Subject: [PATCH 084/710] Update apidocs and/or graphdocs --- docs/warcli.md | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 
deletions(-) diff --git a/docs/warcli.md b/docs/warcli.md index 867b93d6b..915c15e8e 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -33,13 +33,12 @@ Check Warnet requirements are installed ## Bitcoin ### `warcli bitcoin debug-log` -Fetch the Bitcoin Core debug log from \ in \ +Fetch the Bitcoin Core debug log from \ options: -| name | type | required | default | -|-----------|--------|------------|-----------| -| node | Int | yes | | -| namespace | String | | "warnet" | +| name | type | required | default | +|--------|--------|------------|-----------| +| tank | String | yes | | ### `warcli bitcoin grep-logs` Grep combined bitcoind logs using regex \ @@ -52,23 +51,22 @@ options: | no_sort | Bool | | False | ### `warcli bitcoin messages` -Fetch messages sent between \ and \ on [chain] in a [namespace] +Fetch messages sent between \ and \ in [network] options: -| name | type | required | default | -|-----------|--------|------------|-----------| -| node_a | Int | yes | | -| node_b | Int | yes | | -| chain | String | | "regtest" | -| namespace | String | | "warnet" | +| name | type | required | default | +|---------|--------|------------|-----------| +| tank_a | String | yes | | +| tank_b | String | yes | | +| network | String | | "regtest" | ### `warcli bitcoin rpc` -Call bitcoin-cli \ [params] on \ +Call bitcoin-cli \ [params] on \ options: | name | type | required | default | |--------|--------|------------|-----------| -| node | Int | yes | | +| tank | String | yes | | | method | String | yes | | | params | String | | | From 8fc9d91ae5c463391369fbd280f0c3dd53862f50 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 14:23:26 +0200 Subject: [PATCH 085/710] remove graph test and utils, get ci passing --- .github/workflows/test.yml | 2 +- requirements.in | 2 - requirements.txt | 18 - resources/images/commander/src/commander.py | 364 -------------------- src/warnet/cli/bitcoin.py | 23 +- src/warnet/cli/graph.py | 95 +---- 
src/warnet/cli/network.py | 3 - src/warnet/cli/util.py | 103 +----- test/data/12_node_ring.graphml | 0 test/data/services.graphml | 27 -- test/graph_test.py | 110 ------ 11 files changed, 19 insertions(+), 728 deletions(-) delete mode 100644 resources/images/commander/src/commander.py delete mode 100644 test/data/12_node_ring.graphml mode change 100755 => 100644 test/graph_test.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3641b89f7..7d714ecce 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,7 +30,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [scenarios_test.py, rpc_test.py, graph_test.py, dag_connection_test.py, logging_test.py] + test: [ rpc_test.py ] steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 diff --git a/requirements.in b/requirements.in index d18f137d3..c6d4fa1f3 100644 --- a/requirements.in +++ b/requirements.in @@ -1,9 +1,7 @@ click docker flask -jsonschema kubernetes -networkx rich tabulate PyYAML diff --git a/requirements.txt b/requirements.txt index 0b7ef495b..abe91e0e2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,5 @@ # This file was autogenerated by uv via the following command: # uv pip compile requirements.in -o requirements.txt -attrs==23.2.0 - # via - # jsonschema - # referencing blinker==1.7.0 # via flask cachetools==5.3.2 @@ -30,10 +26,6 @@ itsdangerous==2.1.2 # via flask jinja2==3.1.2 # via flask -jsonschema==4.20.0 - # via -r requirements.in -jsonschema-specifications==2023.12.1 - # via jsonschema kubernetes==28.1.0 # via -r requirements.in markdown-it-py==3.0.0 @@ -44,8 +36,6 @@ markupsafe==2.1.3 # werkzeug mdurl==0.1.2 # via markdown-it-py -networkx==3.2.1 - # via -r requirements.in oauthlib==3.2.2 # via # kubernetes @@ -66,10 +56,6 @@ pyyaml==6.0.1 # via # -r requirements.in # kubernetes -referencing==0.32.0 - # via - # jsonschema - # 
jsonschema-specifications requests==2.29.0 # via # docker @@ -79,10 +65,6 @@ requests-oauthlib==1.3.1 # via kubernetes rich==13.7.0 # via -r requirements.in -rpds-py==0.16.2 - # via - # jsonschema - # referencing rsa==4.9 # via google-auth six==1.16.0 diff --git a/resources/images/commander/src/commander.py b/resources/images/commander/src/commander.py deleted file mode 100644 index 6d8c0eb96..000000000 --- a/resources/images/commander/src/commander.py +++ /dev/null @@ -1,364 +0,0 @@ -import argparse -import configparser -import json -import logging -import os -import pathlib -import random -import signal -import sys -import tempfile -from pathlib import Path - -from test_framework.authproxy import AuthServiceProxy -from test_framework.p2p import NetworkThread -from test_framework.test_framework import ( - TMPDIR_PREFIX, - BitcoinTestFramework, - TestStatus, -) -from test_framework.test_node import TestNode -from test_framework.util import PortSeed, get_rpc_proxy - -WARNET_FILE = Path(os.path.dirname(__file__)) / "warnet.json" -with open(WARNET_FILE) as file: - WARNET = json.load(file) - - -# Ensure that all RPC calls are made with brand new http connections -def auth_proxy_request(self, method, path, postdata): - self._set_conn() # creates new http client connection - return self.oldrequest(method, path, postdata) - - -AuthServiceProxy.oldrequest = AuthServiceProxy._request -AuthServiceProxy._request = auth_proxy_request - - -class Commander(BitcoinTestFramework): - # required by subclasses of BitcoinTestFramework - def set_test_params(self): - pass - - def run_test(self): - pass - - # Utility functions for Warnet scenarios - @staticmethod - def ensure_miner(node): - wallets = node.listwallets() - if "miner" not in wallets: - node.createwallet("miner", descriptors=True) - return node.get_wallet_rpc("miner") - - def handle_sigterm(self, signum, frame): - print("SIGTERM received, stopping...") - self.shutdown() - sys.exit(0) - - # The following functions are 
chopped-up hacks of - # the original methods from BitcoinTestFramework - - def setup(self): - signal.signal(signal.SIGTERM, self.handle_sigterm) - - # hacked from _start_logging() - # Scenarios will log plain messages to stdout only, which will can redirected by warnet - self.log = logging.getLogger(self.__class__.__name__) - self.log.setLevel(logging.INFO) # set this to DEBUG to see ALL RPC CALLS - - # Because scenarios run in their own subprocess, the logger here - # is not the same as the warnet server or other global loggers. - # Scenarios log directly to stdout which gets picked up by the - # subprocess manager in the server, and reprinted to the global log. - ch = logging.StreamHandler(sys.stdout) - formatter = logging.Formatter(fmt="%(name)-8s %(message)s") - ch.setFormatter(formatter) - self.log.addHandler(ch) - - for i, tank in enumerate(WARNET): - self.log.info( - f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}" - ) - node = TestNode( - i, - pathlib.Path(), # datadir path - chain=tank["chain"], - rpchost=tank["rpc_host"], - timewait=60, - timeout_factor=self.options.timeout_factor, - bitcoind=None, - bitcoin_cli=None, - cwd=self.options.tmpdir, - coverage_dir=self.options.coveragedir, - ) - node.tank = tank["tank"] - node.rpc = get_rpc_proxy( - f"http://{tank['rpc_user']}:{tank['rpc_password']}@{tank['rpc_host']}:{tank['rpc_port']}", - i, - timeout=60, - coveragedir=self.options.coveragedir, - ) - node.rpc_connected = True - node.init_peers = tank["init_peers"] - self.nodes.append(node) - - self.num_nodes = len(self.nodes) - - # Set up temp directory and start logging - if self.options.tmpdir: - self.options.tmpdir = os.path.abspath(self.options.tmpdir) - os.makedirs(self.options.tmpdir, exist_ok=False) - else: - self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) - - seed = self.options.randomseed - if seed is None: - seed = random.randrange(sys.maxsize) - else: - self.log.info(f"User supplied random seed {seed}") - 
random.seed(seed) - self.log.info(f"PRNG seed is: {seed}") - - self.log.debug("Setting up network thread") - self.network_thread = NetworkThread() - self.network_thread.start() - - self.success = TestStatus.PASSED - - def parse_args(self): - previous_releases_path = "" - parser = argparse.ArgumentParser(usage="%(prog)s [options]") - parser.add_argument( - "--nocleanup", - dest="nocleanup", - default=False, - action="store_true", - help="Leave bitcoinds and test.* datadir on exit or error", - ) - parser.add_argument( - "--nosandbox", - dest="nosandbox", - default=False, - action="store_true", - help="Don't use the syscall sandbox", - ) - parser.add_argument( - "--noshutdown", - dest="noshutdown", - default=False, - action="store_true", - help="Don't stop bitcoinds after the test execution", - ) - parser.add_argument( - "--cachedir", - dest="cachedir", - default=None, - help="Directory for caching pregenerated datadirs (default: %(default)s)", - ) - parser.add_argument( - "--tmpdir", dest="tmpdir", default=None, help="Root directory for datadirs" - ) - parser.add_argument( - "-l", - "--loglevel", - dest="loglevel", - default="DEBUG", - help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.", - ) - parser.add_argument( - "--tracerpc", - dest="trace_rpc", - default=False, - action="store_true", - help="Print out all RPC calls as they are made", - ) - parser.add_argument( - "--portseed", - dest="port_seed", - default=0, - help="The seed to use for assigning port numbers (default: current process id)", - ) - parser.add_argument( - "--previous-releases", - dest="prev_releases", - default=None, - action="store_true", - help="Force test of previous releases (default: %(default)s)", - ) - parser.add_argument( - "--coveragedir", - dest="coveragedir", - default=None, - help="Write tested RPC commands into this directory", - ) - parser.add_argument( - "--configfile", - dest="configfile", - default=None, - help="Location of the test framework config file (default: %(default)s)", - ) - parser.add_argument( - "--pdbonfailure", - dest="pdbonfailure", - default=False, - action="store_true", - help="Attach a python debugger if test fails", - ) - parser.add_argument( - "--usecli", - dest="usecli", - default=False, - action="store_true", - help="use bitcoin-cli instead of RPC for all commands", - ) - parser.add_argument( - "--perf", - dest="perf", - default=False, - action="store_true", - help="profile running nodes with perf for the duration of the test", - ) - parser.add_argument( - "--valgrind", - dest="valgrind", - default=False, - action="store_true", - help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required.", - ) - parser.add_argument( - "--randomseed", - default=0x7761726E6574, # "warnet" ascii - help="set a random seed for deterministically reproducing a previous test run", - ) - parser.add_argument( - "--timeout-factor", - dest="timeout_factor", - default=1, - help="adjust test timeouts by a factor. 
Setting it to 0 disables all timeouts", - ) - parser.add_argument( - "--network", - dest="network", - default="warnet", - help="Designate which warnet this should run on (default: warnet)", - ) - parser.add_argument( - "--v2transport", - dest="v2transport", - default=False, - action="store_true", - help="use BIP324 v2 connections between all nodes by default", - ) - - self.add_options(parser) - # Running TestShell in a Jupyter notebook causes an additional -f argument - # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument - # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168 - parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1") - self.options = parser.parse_args() - if self.options.timeout_factor == 0: - self.options.timeout_factor = 99999 - self.options.timeout_factor = self.options.timeout_factor or ( - 4 if self.options.valgrind else 1 - ) - self.options.previous_releases_path = previous_releases_path - config = configparser.ConfigParser() - if self.options.configfile is not None: - with open(self.options.configfile) as f: - config.read_file(f) - - config["environment"] = {"PACKAGE_BUGREPORT": ""} - - self.config = config - - if "descriptors" not in self.options: - # Wallet is not required by the test at all and the value of self.options.descriptors won't matter. - # It still needs to exist and be None in order for tests to work however. - # So set it to None to force -disablewallet, because the wallet is not needed. - self.options.descriptors = None - elif self.options.descriptors is None: - # Some wallet is either required or optionally used by the test. 
- # Prefer SQLite unless it isn't available - if self.is_sqlite_compiled(): - self.options.descriptors = True - elif self.is_bdb_compiled(): - self.options.descriptors = False - else: - # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter - # It still needs to exist and be None in order for tests to work however. - # So set it to None, which will also set -disablewallet. - self.options.descriptors = None - - PortSeed.n = self.options.port_seed - - def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool = True): - """ - Kwargs: - wait_for_connect: if True, block until the nodes are verified as connected. You might - want to disable this when using -stopatheight with one of the connected nodes, - since there will be a race between the actual connection and performing - the assertions before one node shuts down. - """ - from_connection = self.nodes[a] - to_connection = self.nodes[b] - from_num_peers = 1 + len(from_connection.getpeerinfo()) - to_num_peers = 1 + len(to_connection.getpeerinfo()) - ip_port = self.nodes[b].rpchost + ":18444" - - if peer_advertises_v2 is None: - peer_advertises_v2 = self.options.v2transport - - if peer_advertises_v2: - from_connection.addnode(node=ip_port, command="onetry", v2transport=True) - else: - # skip the optional third argument (default false) for - # compatibility with older clients - from_connection.addnode(ip_port, "onetry") - - if not wait_for_connect: - return - - # poll until version handshake complete to avoid race conditions - # with transaction relaying - # See comments in net_processing: - # * Must have a version message before anything else - # * Must have a verack message before anything else - self.wait_until( - lambda: sum(peer["version"] != 0 for peer in from_connection.getpeerinfo()) - == from_num_peers - ) - self.wait_until( - lambda: sum(peer["version"] != 0 for peer in to_connection.getpeerinfo()) - == to_num_peers - ) - 
self.wait_until( - lambda: sum( - peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 - for peer in from_connection.getpeerinfo() - ) - == from_num_peers - ) - self.wait_until( - lambda: sum( - peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 - for peer in to_connection.getpeerinfo() - ) - == to_num_peers - ) - # The message bytes are counted before processing the message, so make - # sure it was fully processed by waiting for a ping. - self.wait_until( - lambda: sum( - peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 - for peer in from_connection.getpeerinfo() - ) - == from_num_peers - ) - self.wait_until( - lambda: sum( - peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 - for peer in to_connection.getpeerinfo() - ) - == to_num_peers - ) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index 7e30e9259..c1c490471 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -1,4 +1,3 @@ -import json import os import re from datetime import datetime @@ -9,9 +8,9 @@ from test_framework.messages import ser_uint256 from test_framework.p2p import MESSAGEMAP +from .k8s import get_mission from .process import run_command -from .k8s import get_mission @click.group(name="bitcoin") def bitcoin(): @@ -108,14 +107,14 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): @bitcoin.command() @click.argument("tank_a", type=str, required=True) @click.argument("tank_b", type=str, required=True) -@click.option("--network", default="regtest", show_default=True) -def messages(tank_a: str, tank_b: str, network: str): +@click.option("--chain", default="regtest", show_default=True) +def messages(tank_a: str, tank_b: str, chain: str): """ - Fetch messages sent between and in [network] + Fetch messages sent between and in [chain] """ try: # Get the messages - messages = get_messages(tank_a, tank_b, network) + messages = get_messages(tank_a, tank_b, chain) if not messages: print(f"No messages found between {tank_a} and {tank_b}") @@ -143,7 +142,14 
@@ def messages(tank_a: str, tank_b: str, network: str): print(f"Error fetching messages between nodes {tank_a} and {tank_b}: {e}") -def get_messages(tank_a: str, tank_b: str, network: str): +def get_messages(tank_a: str, tank_b: str, chain: str): + """ + Fetch messages from the message capture files + """ + subdir = "" if chain == "main" else f"{chain}/" + base_dir = f"/root/.bitcoin/{subdir}message_capture" + + # Get the IP of node_b cmd = f"kubectl get pod {tank_b} -o jsonpath='{{.status.podIP}}'" tank_b_ip = run_command(cmd).strip() @@ -161,8 +167,7 @@ def get_messages(tank_a: str, tank_b: str, network: str): for dir_name in dirs: if tank_b_ip in dir_name or tank_b_service_ip in dir_name: for file, outbound in [["msgs_recv.dat", False], ["msgs_sent.dat", True]]: - file_path = f"{base_dir}/{dir_name}_18444/{file}" - print(file_path) + file_path = f"{base_dir}/{dir_name}/{file}" # Fetch the file contents from the container cmd = f"kubectl exec {tank_a} -- cat {file_path}" import subprocess diff --git a/src/warnet/cli/graph.py b/src/warnet/cli/graph.py index 1e75862e5..d03c4d334 100644 --- a/src/warnet/cli/graph.py +++ b/src/warnet/cli/graph.py @@ -1,12 +1,8 @@ -import json -from io import BytesIO from pathlib import Path import click -import networkx as nx -from rich import print -from .util import DEFAULT_TAG, create_cycle_graph, validate_graph_schema +from .util import DEFAULT_TAG @click.group(name="graph") @@ -25,15 +21,7 @@ def create(number: int, outfile: Path, version: str, bitcoin_conf: Path, random: Create a cycle graph with nodes, and include 7 extra random outbounds per node. 
Returns XML file as string with or without --outfile option """ - graph = create_cycle_graph(number, version, bitcoin_conf, random) - - if outfile: - file_path = Path(outfile) - nx.write_graphml(graph, file_path, named_key_ids=True) - bio = BytesIO() - nx.write_graphml(graph, bio, named_key_ids=True) - xml_data = bio.getvalue() - print(xml_data.decode("utf-8")) + raise Exception("Not Implemented") @graph.command() @@ -48,81 +36,4 @@ def import_json(infile: Path, outfile: Path, cb: str, ln_image: str): channels and their policies as well. Returns XML file as string with or without --outfile option. """ - with open(infile) as f: - json_graph = json.loads(f.read()) - - # Start with a connected L1 graph with the right amount of tanks - graph = create_cycle_graph( - len(json_graph["nodes"]), version=DEFAULT_TAG, bitcoin_conf=None, random_version=False - ) - - # Initialize all the tanks with basic LN node configurations - for index, n in enumerate(graph.nodes()): - graph.nodes[n]["bitcoin_config"] = f"-uacomment=tank{index:06}" - graph.nodes[n]["ln"] = "lnd" - graph.nodes[n]["ln_config"] = "--protocol.wumbo-channels" - if cb: - graph.nodes[n]["ln_cb_image"] = cb - if ln_image: - graph.nodes[n]["ln_image"] = ln_image - - # Save a map of LN pubkey -> Tank index - ln_ids = {} - for index, node in enumerate(json_graph["nodes"]): - ln_ids[node["pub_key"]] = index - - # Offset for edge IDs - # Note create_cycle_graph() creates L1 edges all with the same id "0" - L1_edges = len(graph.edges) - - # Insert LN channels - # Ensure channels are in order by channel ID like lnd describegraph output - sorted_edges = sorted(json_graph["edges"], key=lambda chan: int(chan["channel_id"])) - for ln_index, channel in enumerate(sorted_edges): - src = ln_ids[channel["node1_pub"]] - tgt = ln_ids[channel["node2_pub"]] - cap = int(channel["capacity"]) - push = cap // 2 - openp = f"--local_amt={cap} --push_amt={push}" - srcp = "" - tgtp = "" - if channel["node1_policy"]: - srcp += f" 
--base_fee_msat={channel['node1_policy']['fee_base_msat']}" - srcp += f" --fee_rate_ppm={channel['node1_policy']['fee_rate_milli_msat']}" - srcp += f" --time_lock_delta={max(int(channel['node1_policy']['time_lock_delta']), 18)}" - srcp += f" --min_htlc_msat={max(int(channel['node1_policy']['min_htlc']), 1)}" - srcp += f" --max_htlc_msat={push * 1000}" - if channel["node2_policy"]: - tgtp += f" --base_fee_msat={channel['node2_policy']['fee_base_msat']}" - tgtp += f" --fee_rate_ppm={channel['node2_policy']['fee_rate_milli_msat']}" - tgtp += f" --time_lock_delta={max(int(channel['node2_policy']['time_lock_delta']), 18)}" - tgtp += f" --min_htlc_msat={max(int(channel['node2_policy']['min_htlc']), 1)}" - tgtp += f" --max_htlc_msat={push * 1000}" - - graph.add_edge( - src, - tgt, - key=ln_index + L1_edges, - channel_open=openp, - source_policy=srcp, - target_policy=tgtp, - ) - - if outfile: - file_path = Path(outfile) - nx.write_graphml(graph, file_path, named_key_ids=True) - bio = BytesIO() - nx.write_graphml(graph, bio, named_key_ids=True) - xml_data = bio.getvalue() - print(xml_data.decode("utf-8")) - - -@graph.command() -@click.argument("graph", type=click.Path()) -def validate(graph: Path): - """ - Validate a against the schema. 
- """ - with open(graph) as f: - graph = nx.parse_graphml(f.read(), node_type=int, force_multigraph=True) - return validate_graph_schema(graph) + raise Exception("Not Implemented") diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index cc2d35344..e90d851d8 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -10,10 +10,7 @@ from .bitcoin import _rpc from .k8s import ( - apply_kubernetes_yaml_obj, - create_kubernetes_object, delete_namespace, - get_edges, get_mission, ) from .process import stream_command diff --git a/src/warnet/cli/util.py b/src/warnet/cli/util.py index 156ccf85f..314a39eb8 100644 --- a/src/warnet/cli/util.py +++ b/src/warnet/cli/util.py @@ -1,11 +1,5 @@ -import json import logging -import random from importlib.resources import files -from pathlib import Path - -import networkx as nx -from jsonschema import validate logger = logging.getLogger("utils") @@ -18,102 +12,7 @@ def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_version: bool): - try: - # Use nx.MultiDiGraph() so we get directed edges (source->target) - # and still allow parallel edges (L1 p2p connections + LN channels) - graph = nx.generators.cycle_graph(n, nx.MultiDiGraph()) - except TypeError as e: - msg = f"Failed to create graph: {e}" - logging.error(msg) - return msg - - # Graph is a simply cycle graph with all nodes connected in a loop, including both ends. 
- # Ensure each node has at least 8 outbound connections by making 7 more outbound connections - for src_node in graph.nodes(): - logging.debug(f"Creating additional connections for node {src_node}") - for _ in range(8): - # Choose a random node to connect to - # Make sure it's not the same node and they aren't already connected in either direction - potential_nodes = [ - dst_node - for dst_node in range(n) - if dst_node != src_node - and not graph.has_edge(dst_node, src_node) - and not graph.has_edge(src_node, dst_node) - ] - if potential_nodes: - chosen_node = random.choice(potential_nodes) - graph.add_edge(src_node, chosen_node) - logging.debug(f"Added edge: {src_node}:{chosen_node}") - logging.debug(f"Node {src_node} edges: {graph.edges(src_node)}") - - # parse and process conf file - conf_contents = "" - if bitcoin_conf is not None: - conf = Path(bitcoin_conf) - if conf.is_file(): - with open(conf) as f: - # parse INI style conf then dump using for_graph - conf_dict = parse_bitcoin_conf(f.read()) - conf_contents = dump_bitcoin_conf(conf_dict, for_graph=True) - - # populate our custom fields - for i, node in enumerate(graph.nodes()): - if random_version: - graph.nodes[node]["version"] = random.choice(WEIGHTED_TAGS) - else: - # One node demoing the image tag - if i == 1: - graph.nodes[node]["image"] = f"bitcoindevproject/bitcoin:{version}" - else: - graph.nodes[node]["version"] = version - graph.nodes[node]["bitcoin_config"] = conf_contents - graph.nodes[node]["tc_netem"] = "" - graph.nodes[node]["build_args"] = "" - graph.nodes[node]["exporter"] = False - graph.nodes[node]["collect_logs"] = False - - convert_unsupported_attributes(graph) - return graph - - -def convert_unsupported_attributes(graph: nx.Graph): - # Sometimes networkx complains about invalid types when writing the graph - # (it just generated itself!). Try to convert them here just in case. 
- for _, node_data in graph.nodes(data=True): - for key, value in node_data.items(): - if isinstance(value, set): - node_data[key] = list(value) - elif isinstance(value, int | float | str): - continue - else: - node_data[key] = str(value) - - for _, _, edge_data in graph.edges(data=True): - for key, value in edge_data.items(): - if isinstance(value, set): - edge_data[key] = list(value) - elif isinstance(value, int | float | str): - continue - else: - edge_data[key] = str(value) - - -def load_schema(): - with open(SRC_DIR / "graph_schema.json") as schema_file: - return json.load(schema_file) - - -def validate_graph_schema(graph: nx.Graph): - """ - Validate a networkx.Graph against the node schema - """ - graph_schema = load_schema() - validate(instance=graph.graph, schema=graph_schema["graph"]) - for n in list(graph.nodes): - validate(instance=graph.nodes[n], schema=graph_schema["node"]) - for e in list(graph.edges): - validate(instance=graph.edges[e], schema=graph_schema["edge"]) + raise Exception("Not Implemented") def parse_bitcoin_conf(file_content): diff --git a/test/data/12_node_ring.graphml b/test/data/12_node_ring.graphml deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/data/services.graphml b/test/data/services.graphml index 25a9fa44e..e69de29bb 100644 --- a/test/data/services.graphml +++ b/test/data/services.graphml @@ -1,27 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - 27.0 - debug=validation - true - lnd - - - diff --git a/test/graph_test.py b/test/graph_test.py old mode 100755 new mode 100644 index 5ea73c5f0..e69de29bb --- a/test/graph_test.py +++ b/test/graph_test.py @@ -1,110 +0,0 @@ -#!/usr/bin/env python3 - -import os -import tempfile -import uuid -from pathlib import Path - -from test_base import TestBase - -# from warnet.lnd import LNDNode -from warnet.cli.util import DEFAULT_TAG - - -class GraphTest(TestBase): - def __init__(self): - super().__init__() - self.graph_file_path = Path(os.path.dirname(__file__)) / "data" / 
"services.graphml" - self.json_file_path = Path(os.path.dirname(__file__)) / "data" / "LN_10.json" - self.NUM_IMPORTED_NODES = 10 - self.test_dir = tempfile.TemporaryDirectory() - self.tf_create = f"{self.test_dir.name}/{str(uuid.uuid4())}.graphml" - self.tf_import = f"{self.test_dir.name}/{str(uuid.uuid4())}.graphml" - - def run_test(self): - self.test_graph_creation_and_import() - self.validate_graph_schema() - try: - # TODO: re-enable these when we add lightning back - # self.test_graph_with_optional_services() - self.test_created_graph() - # self.test_imported_graph() - finally: - self.stop_server() - - def test_graph_creation_and_import(self): - self.log.info(f"CLI tool creating test graph file: {self.tf_create}") - self.log.info( - self.warcli(f"graph create 10 --outfile={self.tf_create} --version={DEFAULT_TAG}") - ) - self.wait_for_predicate(lambda: Path(self.tf_create).exists()) - - self.log.info(f"CLI tool importing json and writing test graph file: {self.tf_import}") - self.log.info( - self.warcli( - f"graph import-json {self.json_file_path} --outfile={self.tf_import} --ln_image=carlakirkcohen/lnd:attackathon --cb=carlakirkcohen/circuitbreaker:attackathon-test", - ) - ) - self.wait_for_predicate(lambda: Path(self.tf_import).exists()) - - def validate_graph_schema(self): - self.log.info("Validating graph schema") - assert "invalid" not in self.warcli(f"graph validate {Path(self.tf_create)}") - assert "invalid" not in self.warcli(f"graph validate {Path(self.tf_import)}") - assert "invalid" not in self.warcli(f"graph validate {self.graph_file_path}") - - def test_graph_with_optional_services(self): - self.log.info("Testing graph with optional services...") - self.log.info(self.warcli(f"network start {self.graph_file_path}")) - self.wait_for_all_tanks_status(target="running") - self.wait_for_all_edges() - self.warcli("bitcoin rpc 0 getblockcount") - - self.log.info("Checking services...") - self.warcli("network down") - 
self.wait_for_all_tanks_status(target="stopped") - - def test_created_graph(self): - self.log.info("Testing created graph...") - self.log.info(self.warcli(f"network start {Path(self.tf_create)}")) - self.wait_for_all_tanks_status(target="running") - self.wait_for_all_edges() - self.warcli("bitcoin rpc 0 getblockcount") - self.warcli("network down") - self.wait_for_all_tanks_status(target="stopped") - - def test_imported_graph(self): - self.log.info("Testing imported graph...") - self.log.info(self.warcli(f"network start {Path(self.tf_import)}")) - self.wait_for_all_tanks_status(target="running") - self.wait_for_all_edges() - self.warcli("bitcoin rpc 0 getblockcount") - self.warcli("scenarios run ln_init") - self.wait_for_all_scenarios() - - self.verify_ln_channel_policies() - - def verify_ln_channel_policies(self): - self.log.info("Ensuring warnet LN channel policies match imported JSON description") - # with open(self.json_file_path) as file: - # actual = json.loads(self.warcli("ln rpc 0 describegraph"))["edges"] - # expected = json.loads(file.read())["edges"] - # expected = sorted(expected, key=lambda chan: int(chan["channel_id"])) - # for chan_index, actual_chan_json in enumerate(actual): - # expected_chan = LNDNode.lnchannel_from_json(expected[chan_index]) - # actual_chan = LNDNode.lnchannel_from_json(actual_chan_json) - # if not expected_chan.channel_match(actual_chan): - # self.log.info( - # f"Channel {chan_index} policy mismatch, testing flipped channel: {actual_chan.short_chan_id}" - # ) - # if not expected_chan.channel_match(actual_chan.flip()): - # raise Exception( - # f"Channel policy doesn't match source: {actual_chan.short_chan_id}\n" - # + f"Actual:\n{actual_chan}\n" - # + f"Expected:\n{expected_chan}\n" - # ) - - -if __name__ == "__main__": - test = GraphTest() - test.run_test() From 124a16af1d73ba1f65f292de23244bf39440d0c5 Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Thu, 22 Aug 2024 14:27:16 +0200 Subject: [PATCH 086/710] remove unusued k8s 
functions --- src/warnet/cli/k8s.py | 18 ------------------ src/warnet/cli/scenarios.py | 3 +-- 2 files changed, 1 insertion(+), 20 deletions(-) diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 98db9e60e..3b6def94d 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -56,10 +56,6 @@ def create_kubernetes_object( return obj -def create_namespace() -> dict: - return {"apiVersion": "v1", "kind": "Namespace", "metadata": {"name": "warnet"}} - - def set_kubectl_context(namespace: str) -> bool: """ Set the default kubectl context to the specified namespace. @@ -73,20 +69,6 @@ def set_kubectl_context(namespace: str) -> bool: return result -def deploy_base_configurations() -> bool: - base_configs = [ - "namespace.yaml", - "rbac-config.yaml", - ] - - for bconfig in base_configs: - command = f"kubectl apply -f {WAR_MANIFESTS}/{bconfig}" - if not stream_command(command): - print(f"Failed to apply {bconfig}") - return False - return True - - def apply_kubernetes_yaml(yaml_file: str) -> bool: command = f"kubectl apply -f {yaml_file}" return stream_command(command) diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index ea09f1b10..6d08963db 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -14,7 +14,7 @@ from warnet import scenarios as SCENARIOS -from .k8s import apply_kubernetes_yaml, create_namespace, get_mission +from .k8s import apply_kubernetes_yaml, get_mission @click.group(name="scenarios") @@ -104,7 +104,6 @@ def run_scenario(scenario_path: str, additional_args: tuple[str]): } for tank in tankpods ] - kubernetes_objects = [create_namespace()] kubernetes_objects.extend( [ { From ec8030f97bc2837b772054ac8fbb7f3d6242dc8b Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Thu, 22 Aug 2024 14:27:43 +0200 Subject: [PATCH 087/710] remove serviceaccount --- resources/charts/bitcoincore/templates/pod.yaml | 1 - .../bitcoincore/templates/serviceaccount.yaml | 13 ------------- 2 files changed, 14 
deletions(-) delete mode 100644 resources/charts/bitcoincore/templates/serviceaccount.yaml diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index 054c20e85..88f9fedd2 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -14,7 +14,6 @@ spec: imagePullSecrets: {{- toYaml . | nindent 4 }} {{- end }} - serviceAccountName: {{ include "bitcoincore.serviceAccountName" . }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 4 }} containers: diff --git a/resources/charts/bitcoincore/templates/serviceaccount.yaml b/resources/charts/bitcoincore/templates/serviceaccount.yaml deleted file mode 100644 index af2605409..000000000 --- a/resources/charts/bitcoincore/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "bitcoincore.serviceAccountName" . }} - labels: - {{- include "bitcoincore.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -automountServiceAccountToken: {{ .Values.serviceAccount.automount }} -{{- end }} From cec09f1fd2aa04a71daad029f4d0fbc8247be48e Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Thu, 22 Aug 2024 14:28:04 +0200 Subject: [PATCH 088/710] remove helm test connection --- .../templates/tests/test-connection.yaml | 15 --------------- resources/charts/bitcoincore/values.yaml | 12 +----------- 2 files changed, 1 insertion(+), 26 deletions(-) delete mode 100644 resources/charts/bitcoincore/templates/tests/test-connection.yaml diff --git a/resources/charts/bitcoincore/templates/tests/test-connection.yaml b/resources/charts/bitcoincore/templates/tests/test-connection.yaml deleted file mode 100644 index a0855da45..000000000 --- a/resources/charts/bitcoincore/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "bitcoincore.fullname" . }}-test-connection" - labels: - {{- include "bitcoincore.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "bitcoincore.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 9f7d15883..d22d3c005 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -1,6 +1,7 @@ # Default values for bitcoincore. # This is a YAML-formatted file. # Declare variables to be passed into your templates. +namespace: warnet image: repository: bitcoindevproject/bitcoin @@ -12,17 +13,6 @@ imagePullSecrets: [] nameOverride: "" fullnameOverride: "" -serviceAccount: - # Specifies whether a service account should be created - create: false - # Automatically mount a ServiceAccount's API credentials? 
- automount: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - podLabels: app: "warnet" mission: "tank" From 2bcd2d375220a8714d7d701c0e9b493a8125f53b Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Thu, 22 Aug 2024 14:29:23 +0200 Subject: [PATCH 089/710] warcli network start install into default ns --- src/warnet/cli/k8s.py | 10 +++++++++- src/warnet/cli/network.py | 9 ++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 3b6def94d..5f7d90edc 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -8,9 +8,10 @@ from kubernetes.client.models import CoreV1Event, V1PodList from kubernetes.dynamic import DynamicClient -from .process import stream_command +from .process import stream_command, run_command WAR_MANIFESTS = files("manifests") +DEFAULT_NAMESPACE = "warnet" def get_static_client() -> CoreV1Event: @@ -46,6 +47,7 @@ def get_edges() -> any: def create_kubernetes_object( kind: str, metadata: dict[str, any], spec: dict[str, any] = None ) -> dict[str, any]: + metadata["namespace"] = get_default_namespace() obj = { "apiVersion": "v1", "kind": kind, @@ -93,3 +95,9 @@ def delete_namespace(namespace: str) -> bool: def delete_pod(pod_name: str) -> bool: command = f"kubectl delete pod {pod_name}" return stream_command(command) + + +def get_default_namespace() -> str: + command = "kubectl config view --minify -o jsonpath='{..namespace}'" + kubectl_namespace = run_command(command) + return kubectl_namespace if kubectl_namespace else DEFAULT_NAMESPACE diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index e90d851d8..f48b840fb 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -12,6 +12,7 @@ from .k8s import ( delete_namespace, get_mission, + get_default_namespace, ) from .process import 
stream_command @@ -22,7 +23,6 @@ DEFAULTS_FILE = "defaults.yaml" HELM_COMMAND = "helm upgrade --install --create-namespace" BITCOIN_CHART_LOCATION = "./resources/charts/bitcoincore" -NAMESPACE = "warnet" @click.group(name="network") @@ -81,6 +81,8 @@ def start(network_name: str, logging: bool, network: str): with open(network_file_path) as f: network_file = yaml.safe_load(f) + namespace = get_default_namespace() + for node in network_file["nodes"]: print(f"Starting node: {node.get('name')}") try: @@ -89,7 +91,7 @@ def start(network_name: str, logging: bool, network: str): # all the keys apart from name node_config_override = {k: v for k, v in node.items() if k != "name"} - cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {NAMESPACE} -f {defaults_file_path}" + cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}" if node_config_override: with tempfile.NamedTemporaryFile( @@ -113,7 +115,8 @@ def start(network_name: str, logging: bool, network: str): @network.command() def down(): """Bring down a running warnet""" - if delete_namespace("warnet") and delete_namespace("warnet-logging"): + namespace = get_default_namespace() + if delete_namespace(namespace) and delete_namespace("warnet-logging"): print("Warnet network has been successfully brought down and the namespaces deleted.") else: print("Failed to bring down warnet network or delete the namespaces.") From 9d912d5d47f782aa915a2c6d7149108ce8c8885f Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 14:40:37 +0200 Subject: [PATCH 090/710] minor fix --- src/warnet/cli/k8s.py | 2 +- src/warnet/cli/network.py | 2 +- src/warnet/cli/scenarios.py | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index 5f7d90edc..e43a6a58c 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -8,7 +8,7 @@ from kubernetes.client.models import CoreV1Event, V1PodList from 
kubernetes.dynamic import DynamicClient -from .process import stream_command, run_command +from .process import run_command, stream_command WAR_MANIFESTS = files("manifests") DEFAULT_NAMESPACE = "warnet" diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index f48b840fb..139be46b5 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -11,8 +11,8 @@ from .bitcoin import _rpc from .k8s import ( delete_namespace, - get_mission, get_default_namespace, + get_mission, ) from .process import stream_command diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 6d08963db..b5212072a 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -104,6 +104,7 @@ def run_scenario(scenario_path: str, additional_args: tuple[str]): } for tank in tankpods ] + kubernetes_objects = [] kubernetes_objects.extend( [ { From ee3a6ee2f0df8a8d9e623eacb031ce2a744cece0 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 14:44:31 +0200 Subject: [PATCH 091/710] remove hard coded namespace from grep logs --- src/warnet/cli/bitcoin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/cli/bitcoin.py index c1c490471..9b9d2676c 100644 --- a/src/warnet/cli/bitcoin.py +++ b/src/warnet/cli/bitcoin.py @@ -72,7 +72,7 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): continue # Get logs from the specific container - command = f"kubectl logs {pod_name} -c {container_name} -n warnet --timestamps" + command = f"kubectl logs {pod_name} -c {container_name} --timestamps" logs = run_command(command) if logs is not False: From df6b38357b8ba2fac28c42f87a380c377cd54f0e Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 22 Aug 2024 14:42:31 +0200 Subject: [PATCH 092/710] build: tidy up pyproject.toml - Also include uv.lock file for max --- pyproject.toml | 17 +- requirements.in | 7 - requirements.txt | 84 ----- uv.lock | 899 
+++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 911 insertions(+), 96 deletions(-) delete mode 100644 requirements.in delete mode 100644 requirements.txt create mode 100644 uv.lock diff --git a/pyproject.toml b/pyproject.toml index 5485098a8..53866fd32 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,13 +3,22 @@ name = "warnet" version = "0.10.0" description = "Monitor and analyze the emergent behaviours of bitcoin networks" readme = "README.md" -requires-python = ">=3.10" +requires-python = ">=3.11" keywords = ["bitcoin", "warnet"] license = {text = "MIT"} classifiers = [ "Programming Language :: Python :: 3", ] -dynamic = ["dependencies"] + +dependencies = [ + "click==8.1.7", + "docker==7.1.0", + "flask==3.0.3", + "kubernetes==30.1.0", + "rich==13.7.1", + "tabulate==0.9.0", + "PyYAML==6.0.2", +] [project.scripts] warcli = "warnet.cli.main:cli" @@ -17,6 +26,7 @@ warcli = "warnet.cli.main:cli" [project.urls] Homepage = "https://warnet.dev" GitHub = "https://github.com/bitcoindevproject/warnet" +Pypi = "https://pypi.org/project/warnet/" [project.optional-dependencies] build = [ @@ -28,9 +38,6 @@ build = [ requires = ["setuptools>=64", "setuptools_scm>=8"] build-backend = "setuptools.build_meta" -[tool.setuptools.dynamic] -dependencies = {file = ["requirements.txt"]} - [tool.setuptools] include-package-data = true diff --git a/requirements.in b/requirements.in deleted file mode 100644 index c6d4fa1f3..000000000 --- a/requirements.in +++ /dev/null @@ -1,7 +0,0 @@ -click -docker -flask -kubernetes -rich -tabulate -PyYAML diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index abe91e0e2..000000000 --- a/requirements.txt +++ /dev/null @@ -1,84 +0,0 @@ -# This file was autogenerated by uv via the following command: -# uv pip compile requirements.in -o requirements.txt -blinker==1.7.0 - # via flask -cachetools==5.3.2 - # via google-auth -certifi==2023.11.17 - # 
via - # kubernetes - # requests -charset-normalizer==3.3.2 - # via requests -click==8.1.7 - # via - # -r requirements.in - # flask -docker==7.0.0 - # via -r requirements.in -flask==3.0.0 - # via -r requirements.in -google-auth==2.25.2 - # via kubernetes -idna==3.6 - # via requests -itsdangerous==2.1.2 - # via flask -jinja2==3.1.2 - # via flask -kubernetes==28.1.0 - # via -r requirements.in -markdown-it-py==3.0.0 - # via rich -markupsafe==2.1.3 - # via - # jinja2 - # werkzeug -mdurl==0.1.2 - # via markdown-it-py -oauthlib==3.2.2 - # via - # kubernetes - # requests-oauthlib -packaging==23.2 - # via docker -pyasn1==0.5.1 - # via - # pyasn1-modules - # rsa -pyasn1-modules==0.3.0 - # via google-auth -pygments==2.17.2 - # via rich -python-dateutil==2.8.2 - # via kubernetes -pyyaml==6.0.1 - # via - # -r requirements.in - # kubernetes -requests==2.29.0 - # via - # docker - # kubernetes - # requests-oauthlib -requests-oauthlib==1.3.1 - # via kubernetes -rich==13.7.0 - # via -r requirements.in -rsa==4.9 - # via google-auth -six==1.16.0 - # via - # kubernetes - # python-dateutil -tabulate==0.9.0 - # via -r requirements.in -urllib3==1.26.18 - # via - # docker - # kubernetes - # requests -websocket-client==1.7.0 - # via kubernetes -werkzeug==3.0.1 - # via flask diff --git a/uv.lock b/uv.lock new file mode 100644 index 000000000..3df666887 --- /dev/null +++ b/uv.lock @@ -0,0 +1,899 @@ +version = 1 +requires-python = ">=3.11" + +[[package]] +name = "attrs" +version = "24.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/0f/aafca9af9315aee06a89ffde799a10a582fe8de76c563ee80bbcdc08b3fb/attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346", size = 792678 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/6a/21/5b6702a7f963e95456c0de2d495f67bf5fd62840ac655dc451586d23d39a/attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2", size = 63001 }, +] + +[[package]] +name = "backports-tarfile" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/86/72/cd9b395f25e290e633655a100af28cb253e4393396264a98bd5f5951d50f/backports_tarfile-1.2.0.tar.gz", hash = "sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991", size = 86406 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/fa/123043af240e49752f1c4bd24da5053b6bd00cad78c2be53c0d1e8b975bc/backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34", size = 30181 }, +] + +[[package]] +name = "blinker" +version = "1.8.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/57/a6a1721eff09598fb01f3c7cda070c1b6a0f12d63c83236edf79a440abcc/blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83", size = 23161 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/2a/10164ed1f31196a2f7f3799368a821765c62851ead0e630ab52b8e14b4d0/blinker-1.8.2-py3-none-any.whl", hash = "sha256:1779309f71bf239144b9399d06ae925637cf6634cf6bd131104184531bf67c01", size = 9456 }, +] + +[[package]] +name = "build" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "os_name == 'nt'" }, + { name = "packaging" }, + { name = "pyproject-hooks" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ce/9e/2d725d2f7729c6e79ca62aeb926492abbc06e25910dd30139d60a68bcb19/build-1.2.1.tar.gz", hash = "sha256:526263f4870c26f26c433545579475377b2b7588b6f1eac76a001e873ae3e19d", size = 44781 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/03/f3c8ba0a6b6e30d7d18c40faab90807c9bb5e9a1e3b2fe2008af624a9c97/build-1.2.1-py3-none-any.whl", hash = "sha256:75e10f767a433d9a86e50d83f418e83efc18ede923ee5ff7df93b6cb0306c5d4", size = 21911 }, +] + +[[package]] +name = "cachetools" +version = "5.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/38/a0f315319737ecf45b4319a8cd1f3a908e29d9277b46942263292115eee7/cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a", size = 27661 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/07/14f8ad37f2d12a5ce41206c21820d8cb6561b728e51fad4530dff0552a67/cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292", size = 9524 }, +] + +[[package]] +name = "certifi" +version = "2024.7.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/02/a95f2b11e207f68bc64d7aae9666fed2e2b3f307748d5123dffb72a1bbea/certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", size = 164065 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/d5/c84e1a17bf61d4df64ca866a1c9a913874b4e9bdc131ec689a0ad013fb36/certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90", size = 162960 }, +] + +[[package]] +name = "cffi" +version = "1.17.0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1e/bf/82c351342972702867359cfeba5693927efe0a8dd568165490144f554b18/cffi-1.17.0.tar.gz", hash = "sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76", size = 516073 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/cc/9298fb6235522e00e47d78d6aa7f395332ef4e5f6fe124f9a03aa60600f7/cffi-1.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720", size = 181912 }, + { url = "https://files.pythonhosted.org/packages/e7/79/dc5334fbe60635d0846c56597a8d2af078a543ff22bc48d36551a0de62c2/cffi-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9", size = 178297 }, + { url = "https://files.pythonhosted.org/packages/39/d7/ef1b6b16b51ccbabaced90ff0d821c6c23567fc4b2e4a445aea25d3ceb92/cffi-1.17.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb", size = 444909 }, + { url = "https://files.pythonhosted.org/packages/29/b8/6e3c61885537d985c78ef7dd779b68109ba256263d74a2f615c40f44548d/cffi-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424", size = 468854 }, + { url = "https://files.pythonhosted.org/packages/0b/49/adad1228e19b931e523c2731e6984717d5f9e33a2f9971794ab42815b29b/cffi-1.17.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d", size = 476890 }, + { url = 
"https://files.pythonhosted.org/packages/76/54/c00f075c3e7fd14d9011713bcdb5b4f105ad044c5ad948db7b1a0a7e4e78/cffi-1.17.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8", size = 459374 }, + { url = "https://files.pythonhosted.org/packages/f3/b9/f163bb3fa4fbc636ee1f2a6a4598c096cdef279823ddfaa5734e556dd206/cffi-1.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6", size = 466891 }, + { url = "https://files.pythonhosted.org/packages/31/52/72bbc95f6d06ff2e88a6fa13786be4043e542cb24748e1351aba864cb0a7/cffi-1.17.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91", size = 477658 }, + { url = "https://files.pythonhosted.org/packages/67/20/d694811457eeae0c7663fa1a7ca201ce495533b646c1180d4ac25684c69c/cffi-1.17.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8", size = 453890 }, + { url = "https://files.pythonhosted.org/packages/dc/79/40cbf5739eb4f694833db5a27ce7f63e30a9b25b4a836c4f25fb7272aacc/cffi-1.17.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb", size = 478254 }, + { url = "https://files.pythonhosted.org/packages/e9/eb/2c384c385cca5cae67ca10ac4ef685277680b8c552b99aedecf4ea23ff7e/cffi-1.17.0-cp311-cp311-win32.whl", hash = "sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9", size = 171285 }, + { url = "https://files.pythonhosted.org/packages/ca/42/74cb1e0f1b79cb64672f3cb46245b506239c1297a20c0d9c3aeb3929cb0c/cffi-1.17.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0", size = 180842 }, + { url = "https://files.pythonhosted.org/packages/1a/1f/7862231350cc959a3138889d2c8d33da7042b22e923457dfd4cd487d772a/cffi-1.17.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc", size = 182826 }, + { url = "https://files.pythonhosted.org/packages/8b/8c/26119bf8b79e05a1c39812064e1ee7981e1f8a5372205ba5698ea4dd958d/cffi-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59", size = 178494 }, + { url = "https://files.pythonhosted.org/packages/61/94/4882c47d3ad396d91f0eda6ef16d45be3d752a332663b7361933039ed66a/cffi-1.17.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb", size = 454459 }, + { url = "https://files.pythonhosted.org/packages/0f/7c/a6beb119ad515058c5ee1829742d96b25b2b9204ff920746f6e13bf574eb/cffi-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195", size = 478502 }, + { url = "https://files.pythonhosted.org/packages/61/8a/2575cd01a90e1eca96a30aec4b1ac101a6fae06c49d490ac2704fa9bc8ba/cffi-1.17.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e", size = 485381 }, + { url = "https://files.pythonhosted.org/packages/cd/66/85899f5a9f152db49646e0c77427173e1b77a1046de0191ab3b0b9a5e6e3/cffi-1.17.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828", size = 470907 }, + { url = 
"https://files.pythonhosted.org/packages/00/13/150924609bf377140abe6e934ce0a57f3fc48f1fd956ec1f578ce97a4624/cffi-1.17.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150", size = 479074 }, + { url = "https://files.pythonhosted.org/packages/17/fd/7d73d7110155c036303b0a6462c56250e9bc2f4119d7591d27417329b4d1/cffi-1.17.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a", size = 484225 }, + { url = "https://files.pythonhosted.org/packages/fc/83/8353e5c9b01bb46332dac3dfb18e6c597a04ceb085c19c814c2f78a8c0d0/cffi-1.17.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885", size = 488388 }, + { url = "https://files.pythonhosted.org/packages/73/0c/f9d5ca9a095b1fc88ef77d1f8b85d11151c374144e4606da33874e17b65b/cffi-1.17.0-cp312-cp312-win32.whl", hash = "sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492", size = 172096 }, + { url = "https://files.pythonhosted.org/packages/72/21/8c5d285fe20a6e31d29325f1287bb0e55f7d93630a5a44cafdafb5922495/cffi-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2", size = 181478 }, + { url = "https://files.pythonhosted.org/packages/17/8f/581f2f3c3464d5f7cf87c2f7a5ba9acc6976253e02d73804240964243ec2/cffi-1.17.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118", size = 182638 }, + { url = "https://files.pythonhosted.org/packages/8d/1c/c9afa66684b7039f48018eb11b229b659dfb32b7a16b88251bac106dd1ff/cffi-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7", 
size = 178453 }, + { url = "https://files.pythonhosted.org/packages/cc/b6/1a134d479d3a5a1ff2fabbee551d1d3f1dd70f453e081b5f70d604aae4c0/cffi-1.17.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377", size = 454441 }, + { url = "https://files.pythonhosted.org/packages/b1/b4/e1569475d63aad8042b0935dbf62ae2a54d1e9142424e2b0e924d2d4a529/cffi-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb", size = 478543 }, + { url = "https://files.pythonhosted.org/packages/d2/40/a9ad03fbd64309dec5bb70bc803a9a6772602de0ee164d7b9a6ca5a89249/cffi-1.17.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555", size = 485463 }, + { url = "https://files.pythonhosted.org/packages/a6/1a/f10be60e006dd9242a24bcc2b1cd55c34c578380100f742d8c610f7a5d26/cffi-1.17.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204", size = 470854 }, + { url = "https://files.pythonhosted.org/packages/cc/b3/c035ed21aa3d39432bd749fe331ee90e4bc83ea2dbed1f71c4bc26c41084/cffi-1.17.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f", size = 479096 }, + { url = "https://files.pythonhosted.org/packages/00/cb/6f7edde01131de9382c89430b8e253b8c8754d66b63a62059663ceafeab2/cffi-1.17.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0", size = 484013 }, + { url = 
"https://files.pythonhosted.org/packages/b9/83/8e4e8c211ea940210d293e951bf06b1bfb90f2eeee590e9778e99b4a8676/cffi-1.17.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4", size = 488119 }, + { url = "https://files.pythonhosted.org/packages/5e/52/3f7cfbc4f444cb4f73ff17b28690d12436dde665f67d68f1e1687908ab6c/cffi-1.17.0-cp313-cp313-win32.whl", hash = "sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a", size = 172122 }, + { url = "https://files.pythonhosted.org/packages/94/19/cf5baa07ee0f0e55eab7382459fbddaba0fdb0ba45973dd92556ae0d02db/cffi-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7", size = 181504 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/09/c1bc53dab74b1816a00d8d030de5bf98f724c52c1635e07681d312f20be8/charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5", size = 104809 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/77/02839016f6fbbf808e8b38601df6e0e66c17bbab76dff4613f7511413597/charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db", size = 191647 }, + { url = "https://files.pythonhosted.org/packages/3e/33/21a875a61057165e92227466e54ee076b73af1e21fe1b31f1e292251aa1e/charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96", size = 121434 }, + { url = 
"https://files.pythonhosted.org/packages/dd/51/68b61b90b24ca35495956b718f35a9756ef7d3dd4b3c1508056fa98d1a1b/charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e", size = 118979 }, + { url = "https://files.pythonhosted.org/packages/e4/a6/7ee57823d46331ddc37dd00749c95b0edec2c79b15fc0d6e6efb532e89ac/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f", size = 136582 }, + { url = "https://files.pythonhosted.org/packages/74/f1/0d9fe69ac441467b737ba7f48c68241487df2f4522dd7246d9426e7c690e/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574", size = 146645 }, + { url = "https://files.pythonhosted.org/packages/05/31/e1f51c76db7be1d4aef220d29fbfa5dbb4a99165d9833dcbf166753b6dc0/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4", size = 139398 }, + { url = "https://files.pythonhosted.org/packages/40/26/f35951c45070edc957ba40a5b1db3cf60a9dbb1b350c2d5bef03e01e61de/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8", size = 140273 }, + { url = "https://files.pythonhosted.org/packages/07/07/7e554f2bbce3295e191f7e653ff15d55309a9ca40d0362fcdab36f01063c/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc", size = 142577 }, + { url = 
"https://files.pythonhosted.org/packages/d8/b5/eb705c313100defa57da79277d9207dc8d8e45931035862fa64b625bfead/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae", size = 137747 }, + { url = "https://files.pythonhosted.org/packages/19/28/573147271fd041d351b438a5665be8223f1dd92f273713cb882ddafe214c/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887", size = 143375 }, + { url = "https://files.pythonhosted.org/packages/cf/7c/f3b682fa053cc21373c9a839e6beba7705857075686a05c72e0f8c4980ca/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae", size = 148474 }, + { url = "https://files.pythonhosted.org/packages/1e/49/7ab74d4ac537ece3bc3334ee08645e231f39f7d6df6347b29a74b0537103/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce", size = 140232 }, + { url = "https://files.pythonhosted.org/packages/2d/dc/9dacba68c9ac0ae781d40e1a0c0058e26302ea0660e574ddf6797a0347f7/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f", size = 140859 }, + { url = "https://files.pythonhosted.org/packages/6c/c2/4a583f800c0708dd22096298e49f887b49d9746d0e78bfc1d7e29816614c/charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab", size = 92509 }, + { url = "https://files.pythonhosted.org/packages/57/ec/80c8d48ac8b1741d5b963797b7c0c869335619e13d4744ca2f67fc11c6fc/charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = 
"sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77", size = 99870 }, + { url = "https://files.pythonhosted.org/packages/d1/b2/fcedc8255ec42afee97f9e6f0145c734bbe104aac28300214593eb326f1d/charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8", size = 192892 }, + { url = "https://files.pythonhosted.org/packages/2e/7d/2259318c202f3d17f3fe6438149b3b9e706d1070fe3fcbb28049730bb25c/charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b", size = 122213 }, + { url = "https://files.pythonhosted.org/packages/3a/52/9f9d17c3b54dc238de384c4cb5a2ef0e27985b42a0e5cc8e8a31d918d48d/charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6", size = 119404 }, + { url = "https://files.pythonhosted.org/packages/99/b0/9c365f6d79a9f0f3c379ddb40a256a67aa69c59609608fe7feb6235896e1/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a", size = 137275 }, + { url = "https://files.pythonhosted.org/packages/91/33/749df346e93d7a30cdcb90cbfdd41a06026317bfbfb62cd68307c1a3c543/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389", size = 147518 }, + { url = "https://files.pythonhosted.org/packages/72/1a/641d5c9f59e6af4c7b53da463d07600a695b9824e20849cb6eea8a627761/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa", size = 140182 }, + { url = 
"https://files.pythonhosted.org/packages/ee/fb/14d30eb4956408ee3ae09ad34299131fb383c47df355ddb428a7331cfa1e/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b", size = 141869 }, + { url = "https://files.pythonhosted.org/packages/df/3e/a06b18788ca2eb6695c9b22325b6fde7dde0f1d1838b1792a0076f58fe9d/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed", size = 144042 }, + { url = "https://files.pythonhosted.org/packages/45/59/3d27019d3b447a88fe7e7d004a1e04be220227760264cc41b405e863891b/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26", size = 138275 }, + { url = "https://files.pythonhosted.org/packages/7b/ef/5eb105530b4da8ae37d506ccfa25057961b7b63d581def6f99165ea89c7e/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d", size = 144819 }, + { url = "https://files.pythonhosted.org/packages/a2/51/e5023f937d7f307c948ed3e5c29c4b7a3e42ed2ee0b8cdf8f3a706089bf0/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068", size = 149415 }, + { url = "https://files.pythonhosted.org/packages/24/9d/2e3ef673dfd5be0154b20363c5cdcc5606f35666544381bee15af3778239/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143", size = 141212 }, + { url = 
"https://files.pythonhosted.org/packages/5b/ae/ce2c12fcac59cb3860b2e2d76dc405253a4475436b1861d95fe75bdea520/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4", size = 142167 }, + { url = "https://files.pythonhosted.org/packages/ed/3a/a448bf035dce5da359daf9ae8a16b8a39623cc395a2ffb1620aa1bce62b0/charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7", size = 93041 }, + { url = "https://files.pythonhosted.org/packages/b6/7c/8debebb4f90174074b827c63242c23851bdf00a532489fba57fef3416e40/charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001", size = 100397 }, + { url = "https://files.pythonhosted.org/packages/28/76/e6222113b83e3622caa4bb41032d0b1bf785250607392e1b778aca0b8a7d/charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc", size = 48543 }, +] + +[[package]] +name = "click" +version = "8.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "platform_system == 'Windows'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 336121 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", size = 97941 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "cryptography" +version = "43.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/ec/9fb9dcf4f91f0e5e76de597256c43eedefd8423aa59be95c70c4c3db426a/cryptography-43.0.0.tar.gz", hash = "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e", size = 686873 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d3/46/dcd2eb6840b9452e7fbc52720f3dc54a85eb41e68414733379e8f98e3275/cryptography-43.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74", size = 6239718 }, + { url = "https://files.pythonhosted.org/packages/e8/23/b0713319edff1d8633775b354f8b34a476e4dd5f4cd4b91e488baec3361a/cryptography-43.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895", size = 3808466 }, + { url = "https://files.pythonhosted.org/packages/77/9d/0b98c73cebfd41e4fb0439fe9ce08022e8d059f51caa7afc8934fc1edcd9/cryptography-43.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22", size = 3998060 }, + { url = "https://files.pythonhosted.org/packages/ae/71/e073795d0d1624847f323481f7d84855f699172a632aa37646464b0e1712/cryptography-43.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47", size = 3792596 }, + { url = "https://files.pythonhosted.org/packages/83/25/439a8ddd8058e7f898b7d27c36f94b66c8c8a2d60e1855d725845f4be0bc/cryptography-43.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf", size = 4008355 }, + { url = "https://files.pythonhosted.org/packages/c7/a2/1607f1295eb2c30fcf2c07d7fd0c3772d21dcdb827de2b2730b02df0af51/cryptography-43.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55", size = 3899133 }, + { url = "https://files.pythonhosted.org/packages/5e/64/f41f42ddc9c583737c9df0093affb92c61de7d5b0d299bf644524afe31c1/cryptography-43.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431", size = 4096946 }, + { url = "https://files.pythonhosted.org/packages/cd/cd/d165adcf3e707d6a049d44ade6ca89973549bed0ab3686fa49efdeefea53/cryptography-43.0.0-cp37-abi3-win32.whl", hash = "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc", size = 2616826 }, + { url = "https://files.pythonhosted.org/packages/f9/b7/38924229e84c41b0e88d7a5eed8a29d05a44364f85fbb9ddb3984b746fd2/cryptography-43.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778", size = 3078700 }, + { url = 
"https://files.pythonhosted.org/packages/66/d7/397515233e6a861f921bd0365b162b38e0cc513fcf4f1bdd9cc7bc5a3384/cryptography-43.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66", size = 6242814 }, + { url = "https://files.pythonhosted.org/packages/58/aa/99b2c00a4f54c60d210d6d1759c720ecf28305aa32d6fb1bb1853f415be6/cryptography-43.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5", size = 3809467 }, + { url = "https://files.pythonhosted.org/packages/76/eb/ab783b47b3b9b55371b4361c7ec695144bde1a3343ff2b7a8c1d8fe617bb/cryptography-43.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e", size = 3998617 }, + { url = "https://files.pythonhosted.org/packages/a3/62/62770f34290ebb1b6542bd3f13b3b102875b90aed4804e296f8d2a5ac6d7/cryptography-43.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5", size = 3794003 }, + { url = "https://files.pythonhosted.org/packages/0f/6c/b42660b3075ff543065b2c1c5a3d9bedaadcff8ebce2ee981be2babc2934/cryptography-43.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f", size = 4008774 }, + { url = "https://files.pythonhosted.org/packages/f7/74/028cea86db9315ba3f991e307adabf9f0aa15067011137c38b2fb2aa16eb/cryptography-43.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0", size = 3900098 }, + { url = 
"https://files.pythonhosted.org/packages/bd/f6/e4387edb55563e2546028ba4c634522fe727693d3cdd9ec0ecacedc75411/cryptography-43.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b", size = 4096867 }, + { url = "https://files.pythonhosted.org/packages/ce/61/55560405e75432bdd9f6cf72fa516cab623b83a3f6d230791bc8fc4afeee/cryptography-43.0.0-cp39-abi3-win32.whl", hash = "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf", size = 2616481 }, + { url = "https://files.pythonhosted.org/packages/e6/3d/696e7a0f04555c58a2813d47aaa78cb5ba863c1f453c74a4f45ae772b054/cryptography-43.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709", size = 3081462 }, +] + +[[package]] +name = "docker" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774 }, +] + +[[package]] +name = "docutils" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = 
"sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 }, +] + +[[package]] +name = "flask" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "blinker" }, + { name = "click" }, + { name = "itsdangerous" }, + { name = "jinja2" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/e1/d104c83026f8d35dfd2c261df7d64738341067526406b40190bc063e829a/flask-3.0.3.tar.gz", hash = "sha256:ceb27b0af3823ea2737928a4d99d125a06175b8512c445cbd9a9ce200ef76842", size = 676315 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/80/ffe1da13ad9300f87c93af113edd0638c75138c42a0994becfacac078c06/flask-3.0.3-py3-none-any.whl", hash = "sha256:34e815dfaa43340d1d15a5c3a02b8476004037eb4840b34910c6e21679d288f3", size = 101735 }, +] + +[[package]] +name = "google-auth" +version = "2.34.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/ae/634dafb151366d91eb848a25846a780dbce4326906ef005d199723fbbca0/google_auth-2.34.0.tar.gz", hash = "sha256:8eb87396435c19b20d32abd2f984e31c191a15284af72eb922f10e5bde9c04cc", size = 257875 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/fb/9af9e3f2996677bdda72734482934fe85a3abde174e5f0783ac2f817ba98/google_auth-2.34.0-py2.py3-none-any.whl", hash = "sha256:72fd4733b80b6d777dcde515628a9eb4a577339437012874ea286bca7261ee65", size = 200870 }, +] + 
+[[package]] +name = "idna" +version = "3.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/ed/f86a79a07470cb07819390452f178b3bef1d375f2ec021ecfc709fc7cf07/idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", size = 189575 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/3e/741d8c82801c347547f8a2a06aa57dbb1992be9e948df2ea0eda2c8b79e8/idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0", size = 66836 }, +] + +[[package]] +name = "importlib-metadata" +version = "8.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/bd/fa8ce65b0a7d4b6d143ec23b0f5fd3f7ab80121078c465bc02baeaab22dc/importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5", size = 54320 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/14/362d31bf1076b21e1bcdcb0dc61944822ff263937b804a79231df2774d28/importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1", size = 26269 }, +] + +[[package]] +name = "itsdangerous" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9c/cb/8ac0172223afbccb63986cc25049b154ecfb5e85932587206f42317be31d/itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173", size = 54410 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/04/96/92447566d16df59b2a776c0fb82dbc4d9e07cd95062562af01e408583fc4/itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef", size = 16234 }, +] + +[[package]] +name = "jaraco-classes" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "more-itertools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/c0/ed4a27bc5571b99e3cff68f8a9fa5b56ff7df1c2251cc715a652ddd26402/jaraco.classes-3.4.0.tar.gz", hash = "sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd", size = 11780 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/66/b15ce62552d84bbfcec9a4873ab79d993a1dd4edb922cbfccae192bd5b5f/jaraco.classes-3.4.0-py3-none-any.whl", hash = "sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790", size = 6777 }, +] + +[[package]] +name = "jaraco-context" +version = "6.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "backports-tarfile", marker = "python_full_version < '3.12'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/ad/f3777b81bf0b6e7bc7514a1656d3e637b2e8e15fab2ce3235730b3e7a4e6/jaraco_context-6.0.1.tar.gz", hash = "sha256:9bae4ea555cf0b14938dc0aee7c9f32ed303aa20a3b73e7dc80111628792d1b3", size = 13912 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/db/0c52c4cf5e4bd9f5d7135ec7669a3a767af21b3a308e1ed3674881e52b62/jaraco.context-6.0.1-py3-none-any.whl", hash = "sha256:f797fc481b490edb305122c9181830a3a5b76d84ef6d1aef2fb9b47ab956f9e4", size = 6825 }, +] + +[[package]] +name = "jaraco-functools" +version = "4.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "more-itertools" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/03/b1/6ca3c2052e584e9908a2c146f00378939b3c51b839304ab8ef4de067f042/jaraco_functools-4.0.2.tar.gz", hash = "sha256:3460c74cd0d32bf82b9576bbb3527c4364d5b27a21f5158a62aed6c4b42e23f5", size = 18319 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/54/7623e24ffc63730c3a619101361b08860c6b7c7cfc1aef6edb66d80ed708/jaraco.functools-4.0.2-py3-none-any.whl", hash = "sha256:c9d16a3ed4ccb5a889ad8e0b7a343401ee5b2a71cee6ed192d3f68bc351e94e3", size = 9883 }, +] + +[[package]] +name = "jeepney" +version = "0.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/f4/154cf374c2daf2020e05c3c6a03c91348d59b23c5366e968feb198306fdf/jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806", size = 106005 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/72/2a1e2290f1ab1e06f71f3d0f1646c9e4634e70e1d37491535e19266e8dc9/jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755", size = 48435 }, +] + +[[package]] +name = "jinja2" +version = "3.1.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/55/39036716d19cab0747a5020fc7e907f362fbf48c984b14e62127f7e68e5d/jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", size = 240245 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/80/3a54838c3fb461f6fec263ebf3a3a41771bd05190238de3486aae8540c36/jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d", size = 133271 }, +] + +[[package]] +name = 
"jsonschema" +version = "4.23.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/2e/03362ee4034a4c917f697890ccd4aec0800ccf9ded7f511971c75451deec/jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4", size = 325778 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/4a/4f9dbeb84e8850557c02365a0eee0649abe5eb1d84af92a25731c6c0f922/jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566", size = 88462 }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b9/cc0cc592e7c195fb8a650c1d5990b10175cf13b4c97465c72ec841de9e4b/jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc", size = 13983 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/07/44bd408781594c4d0a027666ef27fab1e441b109dc3b76b4f836f8fd04fe/jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c", size = 18482 }, +] + +[[package]] +name = "keyring" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata", marker = "python_full_version < '3.12'" }, + { name = "jaraco-classes" }, + { name = "jaraco-context" }, + { name = "jaraco-functools" }, + { name = "jeepney", marker = "sys_platform == 'linux'" }, + { name = 
"pywin32-ctypes", marker = "sys_platform == 'win32'" }, + { name = "secretstorage", marker = "sys_platform == 'linux'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/32/30/bfdde7294ba6bb2f519950687471dc6a0996d4f77ab30d75c841fa4994ed/keyring-25.3.0.tar.gz", hash = "sha256:8d85a1ea5d6db8515b59e1c5d1d1678b03cf7fc8b8dcfb1651e8c4a524eb42ef", size = 61495 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/42/ea8c9726e5ee5ff0731978aaf7cd5fa16674cf549c46279b279d7167c2b4/keyring-25.3.0-py3-none-any.whl", hash = "sha256:8d963da00ccdf06e356acd9bf3b743208878751032d8599c6cc89eb51310ffae", size = 38742 }, +] + +[[package]] +name = "kubernetes" +version = "30.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "google-auth" }, + { name = "oauthlib" }, + { name = "python-dateutil" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "requests-oauthlib" }, + { name = "six" }, + { name = "urllib3" }, + { name = "websocket-client" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/82/3c/9f29f6cab7f35df8e54f019e5719465fa97b877be2454e99f989270b4f34/kubernetes-30.1.0.tar.gz", hash = "sha256:41e4c77af9f28e7a6c314e3bd06a8c6229ddd787cad684e0ab9f69b498e98ebc", size = 887810 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/2027ddede72d33be2effc087580aeba07e733a7360780ae87226f1f91bd8/kubernetes-30.1.0-py2.py3-none-any.whl", hash = "sha256:e212e8b7579031dd2e512168b617373bc1e03888d41ac4e04039240a292d478d", size = 1706042 }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 }, +] + +[[package]] +name = "markupsafe" +version = "2.1.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/5b/aae44c6655f3801e81aa3eef09dbbf012431987ba564d7231722f68df02d/MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", size = 19384 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/e7/291e55127bb2ae67c64d66cef01432b5933859dfb7d6949daa721b89d0b3/MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", size = 18219 }, + { url = "https://files.pythonhosted.org/packages/6b/cb/aed7a284c00dfa7c0682d14df85ad4955a350a21d2e3b06d8240497359bf/MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", size = 14098 }, + { url = "https://files.pythonhosted.org/packages/1c/cf/35fe557e53709e93feb65575c93927942087e9b97213eabc3fe9d5b25a55/MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", size = 29014 }, + { url = 
"https://files.pythonhosted.org/packages/97/18/c30da5e7a0e7f4603abfc6780574131221d9148f323752c2755d48abad30/MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", size = 28220 }, + { url = "https://files.pythonhosted.org/packages/0c/40/2e73e7d532d030b1e41180807a80d564eda53babaf04d65e15c1cf897e40/MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", size = 27756 }, + { url = "https://files.pythonhosted.org/packages/18/46/5dca760547e8c59c5311b332f70605d24c99d1303dd9a6e1fc3ed0d73561/MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", size = 33988 }, + { url = "https://files.pythonhosted.org/packages/6d/c5/27febe918ac36397919cd4a67d5579cbbfa8da027fa1238af6285bb368ea/MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", size = 32718 }, + { url = "https://files.pythonhosted.org/packages/f8/81/56e567126a2c2bc2684d6391332e357589a96a76cb9f8e5052d85cb0ead8/MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", size = 33317 }, + { url = "https://files.pythonhosted.org/packages/00/0b/23f4b2470accb53285c613a3ab9ec19dc944eaf53592cb6d9e2af8aa24cc/MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", size = 16670 }, + { url = "https://files.pythonhosted.org/packages/b7/a2/c78a06a9ec6d04b3445a949615c4c7ed86a0b2eb68e44e7541b9d57067cc/MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", 
hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", size = 17224 }, + { url = "https://files.pythonhosted.org/packages/53/bd/583bf3e4c8d6a321938c13f49d44024dbe5ed63e0a7ba127e454a66da974/MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", size = 18215 }, + { url = "https://files.pythonhosted.org/packages/48/d6/e7cd795fc710292c3af3a06d80868ce4b02bfbbf370b7cee11d282815a2a/MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", size = 14069 }, + { url = "https://files.pythonhosted.org/packages/51/b5/5d8ec796e2a08fc814a2c7d2584b55f889a55cf17dd1a90f2beb70744e5c/MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", size = 29452 }, + { url = "https://files.pythonhosted.org/packages/0a/0d/2454f072fae3b5a137c119abf15465d1771319dfe9e4acbb31722a0fff91/MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", size = 28462 }, + { url = "https://files.pythonhosted.org/packages/2d/75/fd6cb2e68780f72d47e6671840ca517bda5ef663d30ada7616b0462ad1e3/MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", size = 27869 }, + { url = "https://files.pythonhosted.org/packages/b0/81/147c477391c2750e8fc7705829f7351cf1cd3be64406edcf900dc633feb2/MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", size = 33906 }, + { url = 
"https://files.pythonhosted.org/packages/8b/ff/9a52b71839d7a256b563e85d11050e307121000dcebc97df120176b3ad93/MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", size = 32296 }, + { url = "https://files.pythonhosted.org/packages/88/07/2dc76aa51b481eb96a4c3198894f38b480490e834479611a4053fbf08623/MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", size = 33038 }, + { url = "https://files.pythonhosted.org/packages/96/0c/620c1fb3661858c0e37eb3cbffd8c6f732a67cd97296f725789679801b31/MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", size = 16572 }, + { url = "https://files.pythonhosted.org/packages/3f/14/c3554d512d5f9100a95e737502f4a2323a1959f6d0d01e0d0997b35f7b10/MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", size = 17127 }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 }, +] + +[[package]] +name = "more-itertools" +version = "10.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/92/0d/ad6a82320cb8eba710fd0dceb0f678d5a1b58d67d03ae5be14874baa39e0/more-itertools-10.4.0.tar.gz", hash = "sha256:fe0e63c4ab068eac62410ab05cccca2dc71ec44ba8ef29916a0090df061cf923", size = 120755 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d8/0b/6a51175e1395774449fca317fb8861379b7a2d59be411b8cce3d19d6ce78/more_itertools-10.4.0-py3-none-any.whl", hash = "sha256:0f7d9f83a0a8dcfa8a2694a770590d98a67ea943e3d9f5298309a484758c4e27", size = 60935 }, +] + +[[package]] +name = "nh3" +version = "0.2.18" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/62/73/10df50b42ddb547a907deeb2f3c9823022580a7a47281e8eae8e003a9639/nh3-0.2.18.tar.gz", hash = "sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4", size = 15028 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/89/1daff5d9ba5a95a157c092c7c5f39b8dd2b1ddb4559966f808d31cfb67e0/nh3-0.2.18-cp37-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86", size = 1374474 }, + { url = "https://files.pythonhosted.org/packages/2c/b6/42fc3c69cabf86b6b81e4c051a9b6e249c5ba9f8155590222c2622961f58/nh3-0.2.18-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811", size = 694573 }, + { url = "https://files.pythonhosted.org/packages/45/b9/833f385403abaf0023c6547389ec7a7acf141ddd9d1f21573723a6eab39a/nh3-0.2.18-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200", size = 844082 }, + { url = 
"https://files.pythonhosted.org/packages/05/2b/85977d9e11713b5747595ee61f381bc820749daf83f07b90b6c9964cf932/nh3-0.2.18-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164", size = 782460 }, + { url = "https://files.pythonhosted.org/packages/72/f2/5c894d5265ab80a97c68ca36f25c8f6f0308abac649aaf152b74e7e854a8/nh3-0.2.18-cp37-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189", size = 879827 }, + { url = "https://files.pythonhosted.org/packages/ab/a7/375afcc710dbe2d64cfbd69e31f82f3e423d43737258af01f6a56d844085/nh3-0.2.18-cp37-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad", size = 841080 }, + { url = "https://files.pythonhosted.org/packages/c2/a8/3bb02d0c60a03ad3a112b76c46971e9480efa98a8946677b5a59f60130ca/nh3-0.2.18-cp37-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b", size = 924144 }, + { url = "https://files.pythonhosted.org/packages/1b/63/6ab90d0e5225ab9780f6c9fb52254fa36b52bb7c188df9201d05b647e5e1/nh3-0.2.18-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307", size = 769192 }, + { url = "https://files.pythonhosted.org/packages/a4/17/59391c28580e2c32272761629893e761442fc7666da0b1cdb479f3b67b88/nh3-0.2.18-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f", size = 791042 }, + { url = 
"https://files.pythonhosted.org/packages/a3/da/0c4e282bc3cff4a0adf37005fa1fb42257673fbc1bbf7d1ff639ec3d255a/nh3-0.2.18-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe", size = 1010073 }, + { url = "https://files.pythonhosted.org/packages/de/81/c291231463d21da5f8bba82c8167a6d6893cc5419b0639801ee5d3aeb8a9/nh3-0.2.18-cp37-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a", size = 1029782 }, + { url = "https://files.pythonhosted.org/packages/63/1d/842fed85cf66c973be0aed8770093d6a04741f65e2c388ddd4c07fd3296e/nh3-0.2.18-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50", size = 942504 }, + { url = "https://files.pythonhosted.org/packages/eb/61/73a007c74c37895fdf66e0edcd881f5eaa17a348ff02f4bb4bc906d61085/nh3-0.2.18-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204", size = 941541 }, + { url = "https://files.pythonhosted.org/packages/78/48/54a788fc9428e481b2f58e0cd8564f6c74ffb6e9ef73d39e8acbeae8c629/nh3-0.2.18-cp37-abi3-win32.whl", hash = "sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be", size = 573750 }, + { url = "https://files.pythonhosted.org/packages/26/8d/53c5b19c4999bdc6ba95f246f4ef35ca83d7d7423e5e38be43ad66544e5d/nh3-0.2.18-cp37-abi3-win_amd64.whl", hash = "sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844", size = 579012 }, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/fa/fbf4001037904031639e6bfbfc02badfc7e12f137a8afa254df6c4c8a670/oauthlib-3.2.2.tar.gz", hash = 
"sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918", size = 177352 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/80/cab10959dc1faead58dc8384a781dfbf93cb4d33d50988f7a69f1b7c9bbe/oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", size = 151688 }, +] + +[[package]] +name = "packaging" +version = "24.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", size = 148788 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", size = 53985 }, +] + +[[package]] +name = "pkginfo" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/72/347ec5be4adc85c182ed2823d8d1c7b51e13b9a6b0c1aae59582eca652df/pkginfo-1.10.0.tar.gz", hash = "sha256:5df73835398d10db79f8eecd5cd86b1f6d29317589ea70796994d49399af6297", size = 378457 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/09/054aea9b7534a15ad38a363a2bd974c20646ab1582a387a95b8df1bfea1c/pkginfo-1.10.0-py3-none-any.whl", hash = "sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097", size = 30392 }, +] + +[[package]] +name = "pyasn1" +version = "0.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/4a/a3/d2157f333900747f20984553aca98008b6dc843eb62f3a36030140ccec0d/pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c", size = 148088 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/7e/5f50d07d5e70a2addbccd90ac2950f81d1edd0783630651d9268d7f1db49/pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473", size = 85313 }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/00/e7bd1dec10667e3f2be602686537969a7ac92b0a7c5165be2e5875dc3971/pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6", size = 307859 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/68/8906226b15ef38e71dc926c321d2fe99de8048e9098b5dfd38343011c886/pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b", size = 181220 }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, +] + +[[package]] +name = 
"pygments" +version = "2.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/62/8336eff65bcbc8e4cb5d05b55faf041285951b6e80f33e2bff2024788f31/pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", size = 4891905 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/3f/01c8b82017c199075f8f788d0d906b9ffbbc5a47dc9918a945e13d5a2bda/pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a", size = 1205513 }, +] + +[[package]] +name = "pyproject-hooks" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/07/6f63dda440d4abb191b91dc383b472dae3dd9f37e4c1e4a5c3db150531c6/pyproject_hooks-1.1.0.tar.gz", hash = "sha256:4b37730834edbd6bd37f26ece6b44802fb1c1ee2ece0e54ddff8bfc06db86965", size = 7838 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/f3/431b9d5fe7d14af7a32340792ef43b8a714e7726f1d7b69cc4e8e7a3f1d7/pyproject_hooks-1.1.0-py3-none-any.whl", hash = "sha256:7ceeefe9aec63a1064c18d939bdc3adf2d8aa1988a510afec15151578b232aa2", size = 9184 }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, +] + +[[package]] +name = "pywin32" +version = "306" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/1e/fc18ad83ca553e01b97aa8393ff10e33c1fb57801db05488b83282ee9913/pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407", size = 8507689 }, + { url = "https://files.pythonhosted.org/packages/7e/9e/ad6b1ae2a5ad1066dc509350e0fbf74d8d50251a51e420a2a8feaa0cecbd/pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e", size = 9227547 }, + { url = "https://files.pythonhosted.org/packages/91/20/f744bff1da8f43388498503634378dbbefbe493e65675f2cc52f7185c2c2/pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a", size = 10388324 }, + { url = "https://files.pythonhosted.org/packages/14/91/17e016d5923e178346aabda3dfec6629d1a26efe587d19667542105cf0a6/pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b", size = 8507705 }, + { url = "https://files.pythonhosted.org/packages/83/1c/25b79fc3ec99b19b0a0730cc47356f7e2959863bf9f3cd314332bddb4f68/pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e", size = 9227429 }, + { url = "https://files.pythonhosted.org/packages/1c/43/e3444dc9a12f8365d9603c2145d16bf0a2f8180f343cf87be47f5579e547/pywin32-306-cp312-cp312-win_arm64.whl", hash = 
"sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040", size = 10388145 }, +] + +[[package]] +name = "pywin32-ctypes" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/9f/01a1a99704853cb63f253eea009390c88e7131c67e66a0a02099a8c917cb/pywin32-ctypes-0.2.3.tar.gz", hash = "sha256:d162dc04946d704503b2edc4d55f3dba5c1d539ead017afa00142c38b9885755", size = 29471 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/3d/8161f7711c017e01ac9f008dfddd9410dff3674334c233bde66e7ba65bbf/pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8", size = 30756 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612 }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040 }, + { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829 }, + { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167 }, + { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952 }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301 }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638 }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850 }, + { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980 }, + { url = 
"https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = 
"https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "readme-renderer" +version = "44.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "nh3" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/a9/104ec9234c8448c4379768221ea6df01260cd6c2ce13182d4eac531c8342/readme_renderer-44.0.tar.gz", hash = "sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1", size = 32056 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310 }, +] + +[[package]] +name = "referencing" +version = "0.35.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/73ca1f8e72fff6fa52119dbd185f73a907b1989428917b24cff660129b6d/referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c", size = 62991 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/59/2056f61236782a2c86b33906c025d4f4a0b17be0161b63b70fd9e8775d36/referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de", size = 26684 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { 
name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179 }, +] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481 }, +] + +[[package]] +name = "rfc3986" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/40/1520d68bfa07ab5a6f065a186815fb6610c86fe957bc065754e47f7b0840/rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c", size = 49026 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/9a/9afaade874b2fa6c752c36f1548f718b5b83af81ed9b76628329dab81c1b/rfc3986-2.0.0-py2.py3-none-any.whl", hash = 
"sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd", size = 31326 }, +] + +[[package]] +name = "rich" +version = "13.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b3/01/c954e134dc440ab5f96952fe52b4fdc64225530320a910473c1fe270d9aa/rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432", size = 221248 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/67/a37f6214d0e9fe57f6ae54b2956d550ca8365857f42a1ce0392bb21d9410/rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222", size = 240681 }, +] + +[[package]] +name = "rpds-py" +version = "0.20.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/55/64/b693f262791b818880d17268f3f8181ef799b0d187f6f731b1772e05a29a/rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121", size = 25814 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/2a/191374c52d7be0b056cc2a04d718d2244c152f915d4a8d2db2aacc526189/rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489", size = 318369 }, + { url = "https://files.pythonhosted.org/packages/0e/6a/2c9fdcc6d235ac0d61ec4fd9981184689c3e682abd05e3caa49bccb9c298/rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318", size = 311303 }, + { url = 
"https://files.pythonhosted.org/packages/d2/b2/725487d29633f64ef8f9cbf4729111a0b61702c8f8e94db1653930f52cce/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db", size = 366424 }, + { url = "https://files.pythonhosted.org/packages/7a/8c/668195ab9226d01b7cf7cd9e59c1c0be1df05d602df7ec0cf46f857dcf59/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5", size = 368359 }, + { url = "https://files.pythonhosted.org/packages/52/28/356f6a39c1adeb02cf3e5dd526f5e8e54e17899bef045397abcfbf50dffa/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5", size = 394886 }, + { url = "https://files.pythonhosted.org/packages/a2/65/640fb1a89080a8fb6f4bebd3dafb65a2edba82e2e44c33e6eb0f3e7956f1/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6", size = 432416 }, + { url = "https://files.pythonhosted.org/packages/a7/e8/85835077b782555d6b3416874b702ea6ebd7db1f145283c9252968670dd5/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209", size = 354819 }, + { url = "https://files.pythonhosted.org/packages/4f/87/1ac631e923d65cbf36fbcfc6eaa702a169496de1311e54be142f178e53ee/rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3", size = 373282 }, + { url = 
"https://files.pythonhosted.org/packages/e4/ce/cb316f7970189e217b998191c7cf0da2ede3d5437932c86a7210dc1e9994/rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272", size = 541540 }, + { url = "https://files.pythonhosted.org/packages/90/d7/4112d7655ec8aff168ecc91d4ceb51c557336edde7e6ccf6463691a2f253/rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad", size = 547640 }, + { url = "https://files.pythonhosted.org/packages/ab/44/4f61d64dfed98cc71623f3a7fcb612df636a208b4b2c6611eaa985e130a9/rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58", size = 525555 }, + { url = "https://files.pythonhosted.org/packages/35/f2/a862d81eacb21f340d584cd1c749c289979f9a60e9229f78bffc0418a199/rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0", size = 199338 }, + { url = "https://files.pythonhosted.org/packages/cc/ec/77d0674f9af4872919f3738018558dd9d37ad3f7ad792d062eadd4af7cba/rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c", size = 213585 }, + { url = "https://files.pythonhosted.org/packages/89/b7/f9682c5cc37fcc035f4a0fc33c1fe92ec9cbfdee0cdfd071cf948f53e0df/rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6", size = 321468 }, + { url = "https://files.pythonhosted.org/packages/b8/ad/fc82be4eaceb8d444cb6fc1956ce972b3a0795104279de05e0e4131d0a47/rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b", size = 
313062 }, + { url = "https://files.pythonhosted.org/packages/0e/1c/6039e80b13a08569a304dc13476dc986352dca4598e909384db043b4e2bb/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739", size = 370168 }, + { url = "https://files.pythonhosted.org/packages/dc/c9/5b9aa35acfb58946b4b785bc8e700ac313669e02fb100f3efa6176a83e81/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c", size = 371376 }, + { url = "https://files.pythonhosted.org/packages/7b/dd/0e0dbeb70d8a5357d2814764d467ded98d81d90d3570de4fb05ec7224f6b/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee", size = 397200 }, + { url = "https://files.pythonhosted.org/packages/e4/da/a47d931eb688ccfd77a7389e45935c79c41e8098d984d87335004baccb1d/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96", size = 426824 }, + { url = "https://files.pythonhosted.org/packages/0f/f7/a59a673594e6c2ff2dbc44b00fd4ecdec2fc399bb6a7bd82d612699a0121/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4", size = 357967 }, + { url = "https://files.pythonhosted.org/packages/5f/61/3ba1905396b2cb7088f9503a460b87da33452da54d478cb9241f6ad16d00/rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef", size = 378905 }, + { url = 
"https://files.pythonhosted.org/packages/08/31/6d0df9356b4edb0a3a077f1ef714e25ad21f9f5382fc490c2383691885ea/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821", size = 546348 }, + { url = "https://files.pythonhosted.org/packages/ae/15/d33c021de5cb793101df9961c3c746dfc476953dbbf5db337d8010dffd4e/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940", size = 553152 }, + { url = "https://files.pythonhosted.org/packages/70/2d/5536d28c507a4679179ab15aa0049440e4d3dd6752050fa0843ed11e9354/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174", size = 528807 }, + { url = "https://files.pythonhosted.org/packages/e3/62/7ebe6ec0d3dd6130921f8cffb7e34afb7f71b3819aa0446a24c5e81245ec/rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139", size = 200993 }, + { url = "https://files.pythonhosted.org/packages/ec/2f/b938864d66b86a6e4acadefdc56de75ef56f7cafdfd568a6464605457bd5/rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585", size = 214458 }, + { url = "https://files.pythonhosted.org/packages/99/32/43b919a0a423c270a838ac2726b1c7168b946f2563fd99a51aaa9692d00f/rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29", size = 321465 }, + { url = "https://files.pythonhosted.org/packages/58/a9/c4d899cb28e9e47b0ff12462e8f827381f243176036f17bef9c1604667f2/rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91", size = 
312900 }, + { url = "https://files.pythonhosted.org/packages/8f/90/9e51670575b5dfaa8c823369ef7d943087bfb73d4f124a99ad6ef19a2b26/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24", size = 370973 }, + { url = "https://files.pythonhosted.org/packages/fc/c1/523f2a03f853fc0d4c1acbef161747e9ab7df0a8abf6236106e333540921/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7", size = 370890 }, + { url = "https://files.pythonhosted.org/packages/51/ca/2458a771f16b0931de4d384decbe43016710bc948036c8f4562d6e063437/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9", size = 397174 }, + { url = "https://files.pythonhosted.org/packages/00/7d/6e06807f6305ea2408b364efb0eef83a6e21b5e7b5267ad6b473b9a7e416/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8", size = 426449 }, + { url = "https://files.pythonhosted.org/packages/8c/d1/6c9e65260a819a1714510a7d69ac1d68aa23ee9ce8a2d9da12187263c8fc/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879", size = 357698 }, + { url = "https://files.pythonhosted.org/packages/5d/fb/ecea8b5286d2f03eec922be7173a03ed17278944f7c124348f535116db15/rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f", size = 378530 }, + { url = 
"https://files.pythonhosted.org/packages/e3/e3/ac72f858957f52a109c588589b73bd2fad4a0fc82387fb55fb34aeb0f9cd/rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c", size = 545753 }, + { url = "https://files.pythonhosted.org/packages/b2/a4/a27683b519d5fc98e4390a3b130117d80fd475c67aeda8aac83c0e8e326a/rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2", size = 552443 }, + { url = "https://files.pythonhosted.org/packages/a1/ed/c074d248409b4432b1ccb2056974175fa0af2d1bc1f9c21121f80a358fa3/rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57", size = 528380 }, + { url = "https://files.pythonhosted.org/packages/d5/bd/04caf938895d2d78201e89c0c8a94dfd9990c34a19ff52fb01d0912343e3/rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a", size = 200540 }, + { url = "https://files.pythonhosted.org/packages/95/cc/109eb8b9863680411ae703664abacaa035820c7755acc9686d5dd02cdd2e/rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2", size = 214111 }, +] + +[[package]] +name = "rsa" +version = "4.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/65/7d973b89c4d2351d7fb232c2e452547ddfa243e93131e7cfa766da627b52/rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21", size = 29711 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/49/97/fa78e3d2f65c02c8e1268b9aba606569fe97f6c8f7c2d74394553347c145/rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7", size = 34315 }, +] + +[[package]] +name = "secretstorage" +version = "3.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "jeepney" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/53/a4/f48c9d79cb507ed1373477dbceaba7401fd8a23af63b837fa61f1dcd3691/SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77", size = 19739 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/24/b4293291fa1dd830f353d2cb163295742fa87f179fcc8a20a306a81978b7/SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99", size = 15221 }, +] + +[[package]] +name = "six" +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", size = 34041 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254", size = 11053 }, +] + +[[package]] +name = "tabulate" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/fe/802052aecb21e3797b8f7902564ab6ea0d60ff8ca23952079064155d1ae1/tabulate-0.9.0.tar.gz", 
hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c", size = 81090 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f", size = 35252 }, +] + +[[package]] +name = "twine" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "keyring" }, + { name = "pkginfo" }, + { name = "readme-renderer" }, + { name = "requests" }, + { name = "requests-toolbelt" }, + { name = "rfc3986" }, + { name = "rich" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/77/68/bd982e5e949ef8334e6f7dcf76ae40922a8750aa2e347291ae1477a4782b/twine-5.1.1.tar.gz", hash = "sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db", size = 225531 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/ec/00f9d5fd040ae29867355e559a94e9a8429225a0284a3f5f091a3878bfc0/twine-5.1.1-py3-none-any.whl", hash = "sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997", size = 38650 }, +] + +[[package]] +name = "urllib3" +version = "2.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/6d/fa469ae21497ddc8bc93e5877702dca7cb8f911e337aca7452b5724f1bb6/urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168", size = 292266 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/1c/89ffc63a9605b583d5df2be791a27bc1a42b7c32bab68d3c8f2f73a98cd4/urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", size = 121444 }, 
+] + +[[package]] +name = "warnet" +version = "0.10.0" +source = { editable = "." } +dependencies = [ + { name = "click" }, + { name = "docker" }, + { name = "flask" }, + { name = "jsonschema" }, + { name = "kubernetes" }, + { name = "pyyaml" }, + { name = "rich" }, + { name = "tabulate" }, +] + +[package.optional-dependencies] +build = [ + { name = "build" }, + { name = "twine" }, +] + +[package.metadata] +requires-dist = [ + { name = "build", marker = "extra == 'build'" }, + { name = "click", specifier = "==8.1.7" }, + { name = "docker", specifier = "==7.1.0" }, + { name = "flask", specifier = "==3.0.3" }, + { name = "jsonschema", specifier = "==4.23.0" }, + { name = "kubernetes", specifier = "==30.1.0" }, + { name = "pyyaml", specifier = "==6.0.2" }, + { name = "rich", specifier = "==13.7.1" }, + { name = "tabulate", specifier = "==0.9.0" }, + { name = "twine", marker = "extra == 'build'" }, +] + +[[package]] +name = "websocket-client" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e6/30/fba0d96b4b5fbf5948ed3f4681f7da2f9f64512e1d303f94b4cc174c24a5/websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da", size = 54648 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/84/44687a29792a70e111c5c477230a72c4b957d88d16141199bf9acb7537a3/websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526", size = 58826 }, +] + +[[package]] +name = "werkzeug" +version = "3.0.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/e2/6dbcaab07560909ff8f654d3a2e5a60552d937c909455211b1b36d7101dc/werkzeug-3.0.4.tar.gz", hash = 
"sha256:34f2371506b250df4d4f84bfe7b0921e4762525762bbd936614909fe25cd7306", size = 803966 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/84/997bbf7c2bf2dc3f09565c6d0b4959fefe5355c18c4096cfd26d83e0785b/werkzeug-3.0.4-py3-none-any.whl", hash = "sha256:02c9eb92b7d6c06f31a782811505d2157837cea66aaede3e217c7c27c039476c", size = 227554 }, +] + +[[package]] +name = "zipp" +version = "3.20.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0e/af/9f2de5bd32549a1b705af7a7c054af3878816a1267cb389c03cc4f342a51/zipp-3.20.0.tar.gz", hash = "sha256:0145e43d89664cfe1a2e533adc75adafed82fe2da404b4bbb6b026c0157bdb31", size = 23244 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/cc/b9958af9f9c86b51f846d8487440af495ecf19b16e426fce1ed0b0796175/zipp-3.20.0-py3-none-any.whl", hash = "sha256:58da6168be89f0be59beb194da1250516fdaa062ccebd30127ac65d30045e10d", size = 9432 }, +] From c26c371fb50c19244b59edf5b7f08ad3f8eca6da Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 22 Aug 2024 14:50:33 +0200 Subject: [PATCH 093/710] ci: update packed actions --- .github/workflows/publish-dist.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/publish-dist.yml b/.github/workflows/publish-dist.yml index e78b04f93..0c59f8a37 100644 --- a/.github/workflows/publish-dist.yml +++ b/.github/workflows/publish-dist.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.x" + python-version: '3.12' - name: Install pypa/build run: >- python3 -m @@ -21,7 +21,7 @@ jobs: - name: Build a binary wheel and a source tarball run: python3 -m build - name: Store the distribution packages - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: python-package-distributions path: dist/ @@ -43,7 +43,7 @@ jobs: steps: - 
name: Download all the dists - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: python-package-distributions path: dist/ @@ -64,12 +64,12 @@ jobs: steps: - name: Download all the dists - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: python-package-distributions path: dist/ - name: Sign the dists with Sigstore - uses: sigstore/gh-action-sigstore-python@v2.1.1 + uses: sigstore/gh-action-sigstore-python@v3.0.0 with: inputs: >- ./dist/*.tar.gz From dde6adc057f3a61f3214496b4adbb2777915a52b Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 16:31:45 +0200 Subject: [PATCH 094/710] delete pods in default nmespace --- src/warnet/cli/network.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 139be46b5..01a308c36 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -13,6 +13,7 @@ delete_namespace, get_default_namespace, get_mission, + get_pods ) from .process import stream_command @@ -115,11 +116,14 @@ def start(network_name: str, logging: bool, network: str): @network.command() def down(): """Bring down a running warnet""" - namespace = get_default_namespace() - if delete_namespace(namespace) and delete_namespace("warnet-logging"): - print("Warnet network has been successfully brought down and the namespaces deleted.") + if delete_namespace("warnet-logging"): + print("Warnet logging deleted") else: - print("Failed to bring down warnet network or delete the namespaces.") + print("Warnet logging NOT deleted") + pods = get_pods() + for pod in pods.items: + cmd = f"helm uninstall {pod.metadata.name}" + stream_command(cmd) @network.command() From 120aeac12fccf5d5bfc4f65ff5c42904f5cb70c8 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 16:39:33 +0200 Subject: [PATCH 095/710] get pods from default namespace --- src/warnet/cli/k8s.py | 4 ++-- 
src/warnet/cli/network.py | 9 ++------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/warnet/cli/k8s.py b/src/warnet/cli/k8s.py index e43a6a58c..9b8f72ba3 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/cli/k8s.py @@ -26,7 +26,7 @@ def get_dynamic_client() -> DynamicClient: def get_pods() -> V1PodList: sclient = get_static_client() - return sclient.list_namespaced_pod("warnet") + return sclient.list_namespaced_pod(get_default_namespace()) def get_mission(mission: str) -> list[V1PodList]: @@ -89,7 +89,7 @@ def apply_kubernetes_yaml_obj(yaml_obj: str) -> None: def delete_namespace(namespace: str) -> bool: command = f"kubectl delete namespace {namespace} --ignore-not-found" - return stream_command(command) + return run_command(command) def delete_pod(pod_name: str) -> bool: diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 01a308c36..97bb3ead9 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -9,12 +9,7 @@ from rich import print from .bitcoin import _rpc -from .k8s import ( - delete_namespace, - get_default_namespace, - get_mission, - get_pods -) +from .k8s import delete_namespace, get_default_namespace, get_mission, get_pods from .process import stream_command WAR_MANIFESTS = files("manifests") @@ -170,7 +165,7 @@ def _status(): stats = [] for tank in tanks: status = { - "tank_index": tank.metadata.labels["app.kubernetes.io/instance"], + "tank": tank.metadata.name, "bitcoin_status": tank.status.phase.lower(), } stats.append(status) From 11c1c4605c7a59960052a3e6f6a0d3d269ec099c Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 16:44:36 +0200 Subject: [PATCH 096/710] remove graph docs --- .github/workflows/apidocs.yml | 9 ---- docs/graph.md | 87 ---------------------------------- resources/scripts/graphdocs.py | 52 -------------------- 3 files changed, 148 deletions(-) delete mode 100644 docs/graph.md delete mode 100755 resources/scripts/graphdocs.py diff --git 
a/.github/workflows/apidocs.yml b/.github/workflows/apidocs.yml index 78f07d76e..860f25e61 100644 --- a/.github/workflows/apidocs.yml +++ b/.github/workflows/apidocs.yml @@ -29,12 +29,3 @@ jobs: run: | source .venv/bin/activate python3 resources/scripts/apidocs.py - - - name: Run graphdocs script - run: | - source .venv/bin/activate - python3 resources/scripts/graphdocs.py - - - uses: stefanzweifel/git-auto-commit-action@v5 - with: - commit_message: Update apidocs and/or graphdocs diff --git a/docs/graph.md b/docs/graph.md deleted file mode 100644 index 3b3a05c46..000000000 --- a/docs/graph.md +++ /dev/null @@ -1,87 +0,0 @@ -# Warnet Network Topology - -Warnet creates a Bitcoin network using a network topology from a [graphml](https://graphml.graphdrawing.org/specification.html) file. - -Before any scenarios or RPC commands can be executed, a Warnet network must be started from a graph. -See [warcli.md](warcli.md) for more details on these commands. - -To start a network called `"warnet"` from the [default graph file](../graphs/default.graphml): -``` -warcli network start -``` - -To start a network with custom configurations: -``` -warcli network start --network="network_name" -``` - -## Creating graphs automatically - -Graphs can be created via the graph menu: - -```bash -# show graph commands -warcli graph --help - -# Create a cycle graph of 12 nodes using default Bitcoin Core version (v26.0) -warcli graph create 12 --outfile=./12_x_v26.0.graphml - -# Start network with default name "warnet" -warcli network start ./12_x_v26.0.graphml -``` - -## Warnet graph nodes and edges - -Nodes in a Warnet graph MUST have either a `"version"` key or an `"image"` key. -These dictate what version of Bitcoin Core to deploy in a fiven tank. - -Edges without additional properties are interpreted as Bitcoin p2p connections. 
-If an edge has additional key-value properties, it will be interpreted as a -lightning network channel (see [lightning.md](lightning.md)). - -## GraphML file specification - -### GraphML file format and headers -```xml - - - - - - - - - - - - - - - - - - - - - - -``` - -| key | for | type | default | explanation | -|----------------|-------|---------|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| services | graph | string | | A space-separated list of extra service containers to deploy in the network. See [docs/services.md](services.md) for complete list of available services | -| version | node | string | | Bitcoin Core version with an available Warnet tank image on Dockerhub. May also be a GitHub repository with format user/repository:branch to build from source code | -| image | node | string | | Bitcoin Core Warnet tank image on Dockerhub with the format repository/image:tag | -| bitcoin_config | node | string | | A string of Bitcoin Core options in command-line format, e.g. '-debug=net -blocksonly' | -| tc_netem | node | string | | A tc-netem command as a string beginning with 'tc qdisc add dev eth0 root netem' | -| exporter | node | boolean | False | Whether to attach a Prometheus data exporter to the tank | -| metrics | node | string | | A space-separated string of RPC queries to scrape by Prometheus | -| collect_logs | node | boolean | False | Whether to collect Bitcoin Core debug logs with Promtail | -| build_args | node | string | | A string of configure options used when building Bitcoin Core from source code, e.g. 
'--without-gui --disable-tests' | -| ln | node | string | | Attach a lightning network node of this implementation (currently only supports 'lnd' or 'cln') | -| ln_image | node | string | | Specify a lightning network node image from Dockerhub with the format repository/image:tag | -| ln_cb_image | node | string | | Specify a lnd Circuit Breaker image from Dockerhub with the format repository/image:tag | -| ln_config | node | string | | A string of arguments for the lightning network node in command-line format, e.g. '--protocol.wumbo-channels --bitcoin.timelockdelta=80' | -| channel_open | edge | string | | Indicate that this edge is a lightning channel with these arguments passed to lnd openchannel | -| source_policy | edge | string | | Update the channel originator policy by passing these arguments passed to lnd updatechanpolicy | -| target_policy | edge | string | | Update the channel partner policy by passing these arguments passed to lnd updatechanpolicy | diff --git a/resources/scripts/graphdocs.py b/resources/scripts/graphdocs.py deleted file mode 100755 index 631d7df45..000000000 --- a/resources/scripts/graphdocs.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python3 - -import os -import re -from pathlib import Path - -from tabulate import tabulate - -from warnet.cli.util import load_schema - -graph_schema = load_schema() - -file_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / ".." 
/ "docs" / "graph.md" - -doc = "" - -doc += "### GraphML file format and headers\n" -doc += "```xml\n" -doc += '\n' - -sections = ["graph", "node", "edge"] - -for section in sections: - for name, details in graph_schema[section]["properties"].items(): - if "comment" not in details: - continue - vname = f'"{name}"' - vtype = f'"{details["type"]}"' - doc += f' \n' -doc += ' \n \n \n \n\n' -doc += "```\n\n" - -headers = ["key", "for", "type", "default", "explanation"] -data = [] -for section in sections: - data += [ - [name, section, p["type"], p.get("default", ""), p["comment"]] - for name, p in graph_schema[section]["properties"].items() - if "comment" in p - ] - -doc += tabulate(data, headers=headers, tablefmt="github") - - -with open(file_path) as file: - text = file.read() - -pattern = r"(## GraphML file specification\n)(.*\n)*?\Z" -updated_text = re.sub(pattern, rf"\1\n{doc}\n", text) - -with open(file_path, "w") as file: - file.write(updated_text) From b019ca581d8c4dd8873133f74fb606e750ece753 Mon Sep 17 00:00:00 2001 From: josibake Date: Thu, 22 Aug 2024 13:48:11 +0200 Subject: [PATCH 097/710] fix up network / namespace create commands this allows networks and namespaces to be sourced from outside of our directory (i.e., after pip install). also some helper create and init commands for users. 
also adds an admin group, since only admins can create namespaces --- resources/charts/__init__.py | 0 resources/namespaces/__init__.py | 0 .../namespace-defaults.yaml | 0 .../two_namespaces_two_users/namespaces.yaml | 0 .../networks}/6_node_bitcoin/network.yaml | 0 .../6_node_bitcoin/node-defaults.yaml | 0 resources/networks/__init__.py | 0 src/warnet/cli/admin.py | 46 +++++++++++++++++++ src/warnet/cli/main.py | 39 +++++++++++++--- src/warnet/cli/namespaces.py | 37 +++++++++------ src/warnet/cli/network.py | 38 +++++++++------ 11 files changed, 126 insertions(+), 34 deletions(-) create mode 100644 resources/charts/__init__.py create mode 100644 resources/namespaces/__init__.py rename namespaces/two_namespaces_two_users/defaults.yaml => resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml (100%) rename {namespaces => resources/namespaces}/two_namespaces_two_users/namespaces.yaml (100%) rename {networks => resources/networks}/6_node_bitcoin/network.yaml (100%) rename networks/6_node_bitcoin/defaults.yaml => resources/networks/6_node_bitcoin/node-defaults.yaml (100%) create mode 100644 resources/networks/__init__.py create mode 100644 src/warnet/cli/admin.py diff --git a/resources/charts/__init__.py b/resources/charts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/resources/namespaces/__init__.py b/resources/namespaces/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/namespaces/two_namespaces_two_users/defaults.yaml b/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml similarity index 100% rename from namespaces/two_namespaces_two_users/defaults.yaml rename to resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml diff --git a/namespaces/two_namespaces_two_users/namespaces.yaml b/resources/namespaces/two_namespaces_two_users/namespaces.yaml similarity index 100% rename from namespaces/two_namespaces_two_users/namespaces.yaml rename to 
resources/namespaces/two_namespaces_two_users/namespaces.yaml diff --git a/networks/6_node_bitcoin/network.yaml b/resources/networks/6_node_bitcoin/network.yaml similarity index 100% rename from networks/6_node_bitcoin/network.yaml rename to resources/networks/6_node_bitcoin/network.yaml diff --git a/networks/6_node_bitcoin/defaults.yaml b/resources/networks/6_node_bitcoin/node-defaults.yaml similarity index 100% rename from networks/6_node_bitcoin/defaults.yaml rename to resources/networks/6_node_bitcoin/node-defaults.yaml diff --git a/resources/networks/__init__.py b/resources/networks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/warnet/cli/admin.py b/src/warnet/cli/admin.py new file mode 100644 index 000000000..224e17b43 --- /dev/null +++ b/src/warnet/cli/admin.py @@ -0,0 +1,46 @@ +import os +from pathlib import Path +import click +from rich import print as richprint + +from .namespaces import copy_namespaces_defaults +from .network import copy_network_defaults + + +@click.group(name="admin", hidden=True) +def admin(): + """Admin commands for warnet project management""" + pass + + +@admin.command() +@click.argument("directory", type=Path) +def create(directory): + """Create a new warnet project in the specified directory""" + if os.path.exists(directory): + richprint(f"[red]Error: Directory {directory} already exists[/red]") + return + + copy_network_defaults(directory) + copy_namespaces_defaults(directory) + richprint( + f"[green]Copied network and namespace example files to {directory / 'networks'}[/green]" + ) + richprint(f"[green]Created warnet project structure in {directory}[/green]") + + +@admin.command() +def init(): + """Initialize a warnet project in the current directory""" + current_dir = os.getcwd() + if os.listdir(current_dir): + richprint("[yellow]Warning: Current directory is not empty[/yellow]") + if not click.confirm("Do you want to continue?", default=True): + return + + copy_network_defaults(Path(current_dir)) 
+ copy_namespaces_defaults(Path(current_dir)) + richprint( + f"[green]Copied network and namespace example files to {Path(current_dir) / 'networks'}[/green]" + ) + richprint(f"[green]Created warnet project structure in {current_dir}[/green]") diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index b09738675..780202f3d 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -1,17 +1,15 @@ import os import subprocess from importlib.resources import files - +from pathlib import Path import click from rich import print as richprint - +from .admin import admin from .bitcoin import bitcoin from .graph import graph from .image import image - -# from .ln import ln from .namespaces import namespaces -from .network import network +from .network import network, copy_network_defaults from .scenarios import scenarios QUICK_START_PATH = files("scripts").joinpath("quick_start.sh") @@ -25,10 +23,10 @@ def cli(): cli.add_command(bitcoin) cli.add_command(graph) cli.add_command(image) -# cli.add_command(ln) cli.add_command(namespaces) cli.add_command(network) cli.add_command(scenarios) +cli.add_command(admin) @cli.command(name="help") @@ -96,5 +94,34 @@ def setup(): return False +@cli.command() +@click.argument("directory", type=Path) +def create(directory: Path): + """Create a new warnet project in the specified directory""" + full_path = Path() + full_path = directory if directory.is_absolute() else directory.resolve() + if os.path.exists(directory): + richprint(f"[red]Error: Directory {full_path} already exists[/red]") + return + + copy_network_defaults(full_path) + richprint(f"[green]Copied network example files to {full_path / 'networks'}[/green]") + richprint(f"[green]Created warnet project structure in {full_path}[/green]") + + +@cli.command() +def init(): + """Initialize a warnet project in the current directory""" + current_dir = os.getcwd() + if os.listdir(current_dir): + richprint("[yellow]Warning: Current directory is not empty[/yellow]") + if not 
click.confirm("Do you want to continue?", default=True): + return + + copy_network_defaults(current_dir) + richprint(f"[green]Copied network example files to {Path(current_dir) / 'networks'}[/green]") + richprint(f"[green]Created warnet project structure in {current_dir}[/green]") + + if __name__ == "__main__": cli() diff --git a/src/warnet/cli/namespaces.py b/src/warnet/cli/namespaces.py index 7c208cb94..f1fc31429 100644 --- a/src/warnet/cli/namespaces.py +++ b/src/warnet/cli/namespaces.py @@ -1,35 +1,43 @@ -import os import tempfile from pathlib import Path +import shutil +from importlib.resources import files import click import yaml from .process import run_command, stream_command +WARNET_NAMESPACES_DIR = files("namespaces") NAMESPACES_DIR = Path("namespaces") -DEFAULT_NAMESPACES = "two_namespaces_two_users" +DEFAULT_NAMESPACES = Path("two_namespaces_two_users") NAMESPACES_FILE = "namespaces.yaml" -DEFAULTS_FILE = "defaults.yaml" +DEFAULTS_FILE = "namespace-defaults.yaml" HELM_COMMAND = "helm upgrade --install" -BITCOIN_CHART_LOCATION = "./resources/charts/namespaces" +BITCOIN_CHART_LOCATION = Path(str(files("charts").joinpath("namespaces"))) +def copy_namespaces_defaults(directory: Path): + """Create the project structure for a warnet project""" + (directory / NAMESPACES_DIR / DEFAULT_NAMESPACES).mkdir(parents=True, exist_ok=True) + target_namespaces_defaults = directory / NAMESPACES_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE + target_namespaces_example = directory / NAMESPACES_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE + shutil.copy2(WARNET_NAMESPACES_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE, target_namespaces_defaults) + shutil.copy2(WARNET_NAMESPACES_DIR / DEFAULT_NAMESPACES/ NAMESPACES_FILE, target_namespaces_example) + @click.group(name="namespaces") def namespaces(): """Namespaces commands""" @namespaces.command() -@click.argument("namespaces", default=DEFAULT_NAMESPACES) -def deploy(namespaces: str): +@click.argument("namespaces_dir", 
type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), default=NAMESPACES_DIR / DEFAULT_NAMESPACES) +def deploy(namespaces_dir: Path): """Deploy namespaces with users from a """ - full_path = os.path.join(NAMESPACES_DIR, namespaces) - namespaces_file_path = os.path.join(full_path, NAMESPACES_FILE) - defaults_file_path = os.path.join(full_path, DEFAULTS_FILE) + namespaces_file_path = namespaces_dir / NAMESPACES_FILE + defaults_file_path = namespaces_dir / DEFAULTS_FILE - namespaces_file = {} - with open(namespaces_file_path) as f: + with namespaces_file_path.open() as f: namespaces_file = yaml.safe_load(f) # validate names before deploying @@ -44,7 +52,7 @@ def deploy(namespaces: str): for namespace in namespaces_file["namespaces"]: print(f"Deploying namespace: {namespace.get('name')}") try: - temp_override_file_path = "" + temp_override_file_path = Path() namespace_name = namespace.get("name") # all the keys apart from name namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} @@ -58,7 +66,7 @@ def deploy(namespaces: str): mode="w", suffix=".yaml", delete=False ) as temp_file: yaml.dump(namespace_config_override, temp_file) - temp_override_file_path = temp_file.name + temp_override_file_path = Path(temp_file.name) cmd = f"{cmd} -f {temp_override_file_path}" if not stream_command(cmd): @@ -67,6 +75,9 @@ def deploy(namespaces: str): except Exception as e: print(f"Error: {e}") return + finally: + if temp_override_file_path.exists(): + temp_override_file_path.unlink() @namespaces.command() diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 97bb3ead9..b4ff05642 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -3,6 +3,7 @@ import tempfile from importlib.resources import files from pathlib import Path +import shutil import click import yaml @@ -13,12 +14,13 @@ from .process import stream_command WAR_MANIFESTS = files("manifests") +WARNET_NETWORK_DIR = files("networks") 
NETWORK_DIR = Path("networks") -DEFAULT_NETWORK = "6_node_bitcoin" +DEFAULT_NETWORK = Path("6_node_bitcoin") NETWORK_FILE = "network.yaml" -DEFAULTS_FILE = "defaults.yaml" +DEFAULTS_FILE = "node-defaults.yaml" HELM_COMMAND = "helm upgrade --install --create-namespace" -BITCOIN_CHART_LOCATION = "./resources/charts/bitcoincore" +BITCOIN_CHART_LOCATION = str(files("charts").joinpath("bitcoincore")) @click.group(name="network") @@ -63,24 +65,30 @@ def setup_logging_helm() -> bool: return True +def copy_network_defaults(directory: Path): + """Create the project structure for a warnet project""" + (directory / NETWORK_DIR / DEFAULT_NETWORK).mkdir(parents=True, exist_ok=True) + target_network_defaults = directory / NETWORK_DIR / DEFAULT_NETWORK / DEFAULTS_FILE + target_network_example = directory / NETWORK_DIR / DEFAULT_NETWORK / NETWORK_FILE + shutil.copy2(WARNET_NETWORK_DIR / DEFAULT_NETWORK / DEFAULTS_FILE, target_network_defaults) + shutil.copy2(WARNET_NETWORK_DIR / DEFAULT_NETWORK / NETWORK_FILE, target_network_example) + + @network.command() -@click.argument("network_name", default=DEFAULT_NETWORK) -@click.option("--network", default="warnet", show_default=True) +@click.argument("network_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), default=Path(NETWORK_DIR) / DEFAULT_NETWORK) @click.option("--logging/--no-logging", default=False) -def start(network_name: str, logging: bool, network: str): - """Start a warnet with topology loaded from into [network]""" - full_path = os.path.join(NETWORK_DIR, network_name) - network_file_path = os.path.join(full_path, NETWORK_FILE) - defaults_file_path = os.path.join(full_path, DEFAULTS_FILE) - - network_file = {} - with open(network_file_path) as f: +def deploy(network_dir: Path, logging: bool): + """Deploy a warnet with topology loaded from """ + network_file_path = network_dir / NETWORK_FILE + defaults_file_path = network_dir / DEFAULTS_FILE + + with network_file_path.open() as f: network_file 
= yaml.safe_load(f) namespace = get_default_namespace() for node in network_file["nodes"]: - print(f"Starting node: {node.get('name')}") + print(f"Deploying node: {node.get('name')}") try: temp_override_file_path = "" node_name = node.get("name") @@ -94,7 +102,7 @@ def start(network_name: str, logging: bool, network: str): mode="w", suffix=".yaml", delete=False ) as temp_file: yaml.dump(node_config_override, temp_file) - temp_override_file_path = temp_file.name + temp_override_file_path = Path(temp_file.name) cmd = f"{cmd} -f {temp_override_file_path}" if not stream_command(cmd): From 2167a25dc7c69897758653566dc5fcf79a1f8b4c Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 18:30:05 +0200 Subject: [PATCH 098/710] scenarios test --- src/.DS_Store | Bin 6148 -> 0 bytes src/warnet/cli/scenarios.py | 36 +++++++++++++++++++++--------- src/warnet/scenarios/commander.py | 8 +++++-- test/scenarios_test.py | 8 +++---- 4 files changed, 35 insertions(+), 17 deletions(-) delete mode 100644 src/.DS_Store diff --git a/src/.DS_Store b/src/.DS_Store deleted file mode 100644 index de7b43e8b97cfee2d79f475b5fdeb8cabe270ba2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5Z-NTyQK&@sCW!`E!g58;w8lT0!H+pQWFw17_%i!&7l->)EDwmd>&_Z zH-}*GC}L+|_nV!c-OLBsAI2DW7vY#Oi!mmkA#zk21kH`EmI+4WYL1BIK{1~Ni3}Ph z`imy~_AV=!&oY*>`uBeXvm~D9qtPdCwL2T#p6H2zxc8o9;pKk5n7e*7(vM0O`L|ZaOENm=PK9J4$&9=skO6Qo*W$y?eS`8Ywh&# zV72Ot?Y;f8%jt9SlFBztBL~KnY#OZK9h6!{uU?iWGJOPll~ctM5(C5lF+dEgBLn6% z5QB9ro@yorh=Cs&!2Q97hUge9HL9%xI=nukzk`SZI=&?kZG(=%QX_akxK0JssoXp< zxK0PVZQ>k*rAD33xLO(JF)LS(7p_(ZyRE_*cQjH@3=ji#2HJXP;rV|7zf9vJzn(%P zVt^R= start + target_blocks From 22c1df24afc9ab5910e960ceb7f64cbccd5b537a Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 18:32:32 +0200 Subject: [PATCH 099/710] start -> deploy --- test/rpc_test.py | 2 +- test/scenarios_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/test/rpc_test.py b/test/rpc_test.py index ae08a57bb..5700b6647 100755 --- a/test/rpc_test.py +++ b/test/rpc_test.py @@ -24,7 +24,7 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network start {self.network_dir}")) + self.log.info(self.warcli(f"network deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 1b380c5a9..ed5b70cb1 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -24,7 +24,7 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network start {self.network_dir}")) + self.log.info(self.warcli(f"network deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() From 600e9ebf50c3325448684185d039a35c331a9883 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 18:37:43 +0200 Subject: [PATCH 100/710] defaults -> node-defaults --- test/data/12_node_ring/{defaults.yaml => node-defaults.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test/data/12_node_ring/{defaults.yaml => node-defaults.yaml} (100%) diff --git a/test/data/12_node_ring/defaults.yaml b/test/data/12_node_ring/node-defaults.yaml similarity index 100% rename from test/data/12_node_ring/defaults.yaml rename to test/data/12_node_ring/node-defaults.yaml From 2ce12240fe7e20542460cdaad00dce3fe44737b6 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 18:41:00 +0200 Subject: [PATCH 101/710] ruff forammting --- src/warnet/cli/admin.py | 1 + src/warnet/cli/main.py | 4 +++- src/warnet/cli/namespaces.py | 19 ++++++++++++++----- src/warnet/cli/network.py | 9 ++++++--- src/warnet/cli/scenarios.py | 2 +- src/warnet/scenarios/commander.py | 6 +++--- 6 files changed, 28 insertions(+), 13 deletions(-) diff --git a/src/warnet/cli/admin.py b/src/warnet/cli/admin.py 
index 224e17b43..332dd77eb 100644 --- a/src/warnet/cli/admin.py +++ b/src/warnet/cli/admin.py @@ -1,5 +1,6 @@ import os from pathlib import Path + import click from rich import print as richprint diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index 780202f3d..d60fa987d 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -2,14 +2,16 @@ import subprocess from importlib.resources import files from pathlib import Path + import click from rich import print as richprint + from .admin import admin from .bitcoin import bitcoin from .graph import graph from .image import image from .namespaces import namespaces -from .network import network, copy_network_defaults +from .network import copy_network_defaults, network from .scenarios import scenarios QUICK_START_PATH = files("scripts").joinpath("quick_start.sh") diff --git a/src/warnet/cli/namespaces.py b/src/warnet/cli/namespaces.py index f1fc31429..ca825e1b2 100644 --- a/src/warnet/cli/namespaces.py +++ b/src/warnet/cli/namespaces.py @@ -1,7 +1,7 @@ -import tempfile -from pathlib import Path import shutil +import tempfile from importlib.resources import files +from pathlib import Path import click import yaml @@ -22,8 +22,13 @@ def copy_namespaces_defaults(directory: Path): (directory / NAMESPACES_DIR / DEFAULT_NAMESPACES).mkdir(parents=True, exist_ok=True) target_namespaces_defaults = directory / NAMESPACES_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE target_namespaces_example = directory / NAMESPACES_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE - shutil.copy2(WARNET_NAMESPACES_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE, target_namespaces_defaults) - shutil.copy2(WARNET_NAMESPACES_DIR / DEFAULT_NAMESPACES/ NAMESPACES_FILE, target_namespaces_example) + shutil.copy2( + WARNET_NAMESPACES_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE, target_namespaces_defaults + ) + shutil.copy2( + WARNET_NAMESPACES_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE, target_namespaces_example + ) + @click.group(name="namespaces") def 
namespaces(): @@ -31,7 +36,11 @@ def namespaces(): @namespaces.command() -@click.argument("namespaces_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), default=NAMESPACES_DIR / DEFAULT_NAMESPACES) +@click.argument( + "namespaces_dir", + type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), + default=NAMESPACES_DIR / DEFAULT_NAMESPACES, +) def deploy(namespaces_dir: Path): """Deploy namespaces with users from a """ namespaces_file_path = namespaces_dir / NAMESPACES_FILE diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index b4ff05642..74542c648 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -1,9 +1,8 @@ import json -import os +import shutil import tempfile from importlib.resources import files from pathlib import Path -import shutil import click import yaml @@ -75,7 +74,11 @@ def copy_network_defaults(directory: Path): @network.command() -@click.argument("network_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), default=Path(NETWORK_DIR) / DEFAULT_NETWORK) +@click.argument( + "network_dir", + type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), + default=Path(NETWORK_DIR) / DEFAULT_NETWORK, +) @click.option("--logging/--no-logging", default=False) def deploy(network_dir: Path, logging: bool): """Deploy a warnet with topology loaded from """ diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index 9f6dc37f3..370b93ce9 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -14,7 +14,7 @@ from warnet import scenarios as SCENARIOS -from .k8s import apply_kubernetes_yaml, get_mission, get_default_namespace +from .k8s import apply_kubernetes_yaml, get_default_namespace, get_mission @click.group(name="scenarios") diff --git a/src/warnet/scenarios/commander.py b/src/warnet/scenarios/commander.py index c0dd5097c..2c2adf882 100644 --- a/src/warnet/scenarios/commander.py +++ 
b/src/warnet/scenarios/commander.py @@ -22,12 +22,12 @@ from test_framework.util import PortSeed, get_rpc_proxy WARNET_FILE = Path(os.path.dirname(__file__)) / "warnet.json" -WARNET = [] + try: with open(WARNET_FILE) as file: WARNET = json.load(file) -except: - pass +except Exception: + WARNET = [] # Ensure that all RPC calls are made with brand new http connections From b705623abe41b5eacdaf465fc77dde0e45d589a9 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 18:41:30 +0200 Subject: [PATCH 102/710] run scenarios test --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7d714ecce..d021cb045 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,7 +30,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [ rpc_test.py ] + test: [scenarios_test.py, rpc_test.py ] steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 From cb9da6532b634d57c6dccd4b19d8cf1688c618e7 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 20:03:09 +0200 Subject: [PATCH 103/710] more logs --- test/scenarios_test.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/test/scenarios_test.py b/test/scenarios_test.py index ed5b70cb1..7f126e21b 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -8,7 +8,7 @@ from warnet.cli.k8s import delete_pod from warnet.cli.scenarios import _active as scenarios_active from warnet.cli.scenarios import _available as scenarios_available - +from warnet.cli.process import run_command class ScenariosTest(TestBase): def __init__(self): @@ -79,6 +79,17 @@ def run_and_check_miner_scenario_from_file(self): def check_blocks(self, target_blocks, start: int = 0): count = int(self.warcli("bitcoin rpc tank-0000 getblockcount")) self.log.debug(f"Current block count: 
{count}, target: {start + target_blocks}") + + try: + active = scenarios_active() + commander = active[0]["commander"] + command = f"kubectl logs {commander}" + print("\ncommander output:") + print(run_command(command)) + print("\n") + except Exception: + pass + return count >= start + target_blocks def stop_scenario(self): From 2f12e7e55219f30c114c66e4188c52b557c46040 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 20:15:12 +0200 Subject: [PATCH 104/710] ruff lint --- src/warnet/cli/network.py | 16 +++++++--------- test/scenarios_test.py | 3 ++- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 74542c648..6a2fee53b 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -126,20 +126,18 @@ def down(): print("Warnet logging deleted") else: print("Warnet logging NOT deleted") + tanks = get_mission("tank") + for tank in tanks: + cmd = f"helm uninstall {tank.metadata.name}" + stream_command(cmd) + # Clean up scenarios and other pods + # TODO: scenarios should be helm-ified as well pods = get_pods() for pod in pods.items: - cmd = f"helm uninstall {pod.metadata.name}" + cmd = f"kubectl delete pod {pod.metadata.name}" stream_command(cmd) -@network.command() -@click.option("--follow", "-f", is_flag=True, help="Follow logs") -def logs(follow: bool): - """Get Kubernetes logs from the RPC server""" - command = f"kubectl logs rpc-0{' --follow' if follow else ''}" - stream_command(command) - - @network.command() def connected(): """Determine if all p2p connections defined in graph are established""" diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 7f126e21b..b5b940338 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -6,9 +6,10 @@ from test_base import TestBase from warnet.cli.k8s import delete_pod +from warnet.cli.process import run_command from warnet.cli.scenarios import _active as scenarios_active from warnet.cli.scenarios 
import _available as scenarios_available -from warnet.cli.process import run_command + class ScenariosTest(TestBase): def __init__(self): From ba3c15e20d9fda39f2b87e3fa75927ddf9890e72 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 21:23:43 +0200 Subject: [PATCH 105/710] no default files --- src/warnet/cli/namespaces.py | 4 +--- src/warnet/cli/network.py | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/warnet/cli/namespaces.py b/src/warnet/cli/namespaces.py index ca825e1b2..83c433bc9 100644 --- a/src/warnet/cli/namespaces.py +++ b/src/warnet/cli/namespaces.py @@ -37,9 +37,7 @@ def namespaces(): @namespaces.command() @click.argument( - "namespaces_dir", - type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), - default=NAMESPACES_DIR / DEFAULT_NAMESPACES, + "namespaces_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path) ) def deploy(namespaces_dir: Path): """Deploy namespaces with users from a """ diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 6a2fee53b..8c99a2f98 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -77,7 +77,6 @@ def copy_network_defaults(directory: Path): @click.argument( "network_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), - default=Path(NETWORK_DIR) / DEFAULT_NETWORK, ) @click.option("--logging/--no-logging", default=False) def deploy(network_dir: Path, logging: bool): From 5e590853ec5e59e05bacb03af3c0959a49473a7a Mon Sep 17 00:00:00 2001 From: josibake Date: Thu, 22 Aug 2024 22:17:23 +0200 Subject: [PATCH 106/710] move namespaces out of warcli and into warcli admin --- src/warnet/cli/admin.py | 4 ++-- src/warnet/cli/main.py | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/warnet/cli/admin.py b/src/warnet/cli/admin.py index 332dd77eb..e843bb030 100644 --- a/src/warnet/cli/admin.py +++ b/src/warnet/cli/admin.py @@ -4,7 +4,7 @@ import click 
from rich import print as richprint -from .namespaces import copy_namespaces_defaults +from .namespaces import namespaces, copy_namespaces_defaults from .network import copy_network_defaults @@ -13,7 +13,7 @@ def admin(): """Admin commands for warnet project management""" pass - +admin.add_command(namespaces) @admin.command() @click.argument("directory", type=Path) def create(directory): diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index d60fa987d..a87068aac 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -25,7 +25,6 @@ def cli(): cli.add_command(bitcoin) cli.add_command(graph) cli.add_command(image) -cli.add_command(namespaces) cli.add_command(network) cli.add_command(scenarios) cli.add_command(admin) From e1f74d181938a07e7c1143cb4bafab823801cf92 Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 07:03:41 -0500 Subject: [PATCH 107/710] add ten semi unconnected nodes yaml --- test/data/ten_semi_unconnected/defaults.yaml | 4 +++ test/data/ten_semi_unconnected/network.yaml | 31 ++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 test/data/ten_semi_unconnected/defaults.yaml create mode 100644 test/data/ten_semi_unconnected/network.yaml diff --git a/test/data/ten_semi_unconnected/defaults.yaml b/test/data/ten_semi_unconnected/defaults.yaml new file mode 100644 index 000000000..7e021cad1 --- /dev/null +++ b/test/data/ten_semi_unconnected/defaults.yaml @@ -0,0 +1,4 @@ +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: "27.0" diff --git a/test/data/ten_semi_unconnected/network.yaml b/test/data/ten_semi_unconnected/network.yaml new file mode 100644 index 000000000..5071de9c4 --- /dev/null +++ b/test/data/ten_semi_unconnected/network.yaml @@ -0,0 +1,31 @@ +nodes: + - name: tank-0000 + config: | + debug=rpc + debug=validation + - name: tank-0001 + config: | + debug=net + debug=validation + - name: tank-0002 + config: | + debug=validation + - name: tank-0003 + config: | + 
debug=validation + - name: tank-0004 + - name: tank-0005 + config: | + debug=validation + - name: tank-0006 + - name: tank-0007 + config: | + debug=validation + - name: tank-0008 + connect: + - tank-0009 + config: | + debug=validation + - name: tank-0009 + config: | + debug=validation From 798f57e77396bdd691c49aa02ed5c1404a9b8d81 Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 07:03:55 -0500 Subject: [PATCH 108/710] update ten semi unconnected path --- test/dag_connection_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/dag_connection_test.py b/test/dag_connection_test.py index a034a1818..ab2188cf0 100755 --- a/test/dag_connection_test.py +++ b/test/dag_connection_test.py @@ -9,8 +9,8 @@ class DAGConnectionTest(TestBase): def __init__(self): super().__init__() - self.graph_file_path = ( - Path(os.path.dirname(__file__)) / "data" / "ten_semi_unconnected.graphml" + self.network_dir = ( + Path(os.path.dirname(__file__)) / "data" / "ten_semi_unconnected" ) def run_test(self): @@ -22,7 +22,7 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network start {self.graph_file_path}")) + self.log.info(self.warcli(f"network start {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() From 144484b0d2539a0cfc54bdafea95a11e3f87a8bb Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 09:39:25 -0500 Subject: [PATCH 109/710] get rid of old dag graphml --- test/data/ten_semi_unconnected.graphml | 101 ------------------------- 1 file changed, 101 deletions(-) delete mode 100644 test/data/ten_semi_unconnected.graphml diff --git a/test/data/ten_semi_unconnected.graphml b/test/data/ten_semi_unconnected.graphml deleted file mode 100644 index c2277407c..000000000 --- a/test/data/ten_semi_unconnected.graphml +++ /dev/null @@ -1,101 +0,0 @@ - - - - - - - - - - - - - - - - - - - - 26.0 - - - - False - False - - - 
bitcoindevproject/bitcoin:26.0 - - - - False - False - - - 26.0 - - - - False - False - - - 26.0 - - - - False - False - - - 26.0 - - - - False - False - - - 26.0 - - - - False - False - - - 26.0 - - - - False - False - - - 26.0 - - - - False - False - - - 26.0 - - - - False - False - - - 26.0 - - - - False - False - - - - From 3ab8516af37c915aced30d8b1e913d6a9895760f Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 10:28:19 -0500 Subject: [PATCH 110/710] add placeholder assertion --- test/data/scenario_connect_dag.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/data/scenario_connect_dag.py b/test/data/scenario_connect_dag.py index 47f1ac247..039f161c3 100644 --- a/test/data/scenario_connect_dag.py +++ b/test/data/scenario_connect_dag.py @@ -93,6 +93,9 @@ def run_test(self): self.assert_connection(eight_peers, 9, ConnectionType.DNS) self.assert_connection(nine_peers, 8, ConnectionType.IP) + # TODO: This needs to cause the test to fail + # assert False + self.log.info( f"Successfully ran the connect_dag.py scenario using a temporary file: " f"{os.path.basename(__file__)} " From d680ef2a63290df54276443df78b034fa8865a69 Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 13:01:42 -0500 Subject: [PATCH 111/710] make ruff format happy --- test/dag_connection_test.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/dag_connection_test.py b/test/dag_connection_test.py index ab2188cf0..0a8480a42 100755 --- a/test/dag_connection_test.py +++ b/test/dag_connection_test.py @@ -9,9 +9,7 @@ class DAGConnectionTest(TestBase): def __init__(self): super().__init__() - self.network_dir = ( - Path(os.path.dirname(__file__)) / "data" / "ten_semi_unconnected" - ) + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "ten_semi_unconnected" def run_test(self): try: From 2b4fda44985b85d5605944049d6d34abb73d179e Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 13:06:41 -0500 Subject: [PATCH 112/710] rename to 
node-default.yaml get rid of previous defaults file --- .../ten_semi_unconnected/{defaults.yaml => node-defaults.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test/data/ten_semi_unconnected/{defaults.yaml => node-defaults.yaml} (100%) diff --git a/test/data/ten_semi_unconnected/defaults.yaml b/test/data/ten_semi_unconnected/node-defaults.yaml similarity index 100% rename from test/data/ten_semi_unconnected/defaults.yaml rename to test/data/ten_semi_unconnected/node-defaults.yaml From 67f694ac2a949488d26a9ed3e87c5dfa6fdf959b Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 13:06:47 -0500 Subject: [PATCH 113/710] change to `network deploy` --- test/dag_connection_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/dag_connection_test.py b/test/dag_connection_test.py index 0a8480a42..4982de0ca 100755 --- a/test/dag_connection_test.py +++ b/test/dag_connection_test.py @@ -20,7 +20,7 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network start {self.network_dir}")) + self.log.info(self.warcli(f"network deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() From 48d89918f2584c678a08f1457307262b46c71ab4 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 22 Aug 2024 22:33:47 +0200 Subject: [PATCH 114/710] dag connection test passing --- .github/workflows/test.yml | 2 +- src/warnet/cli/admin.py | 5 ++- src/warnet/cli/main.py | 1 - src/warnet/scenarios/commander.py | 64 +++++++++++-------------------- 4 files changed, 27 insertions(+), 45 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d021cb045..f14e54192 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,7 +30,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [scenarios_test.py, 
rpc_test.py ] + test: [scenarios_test.py, rpc_test.py, dag_connection_test.py ] steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 diff --git a/src/warnet/cli/admin.py b/src/warnet/cli/admin.py index e843bb030..89bd6252a 100644 --- a/src/warnet/cli/admin.py +++ b/src/warnet/cli/admin.py @@ -4,7 +4,7 @@ import click from rich import print as richprint -from .namespaces import namespaces, copy_namespaces_defaults +from .namespaces import copy_namespaces_defaults, namespaces from .network import copy_network_defaults @@ -13,7 +13,10 @@ def admin(): """Admin commands for warnet project management""" pass + admin.add_command(namespaces) + + @admin.command() @click.argument("directory", type=Path) def create(directory): diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index a87068aac..e43c22002 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -10,7 +10,6 @@ from .bitcoin import bitcoin from .graph import graph from .image import image -from .namespaces import namespaces from .network import copy_network_defaults, network from .scenarios import scenarios diff --git a/src/warnet/scenarios/commander.py b/src/warnet/scenarios/commander.py index 2c2adf882..e6fdde71d 100644 --- a/src/warnet/scenarios/commander.py +++ b/src/warnet/scenarios/commander.py @@ -1,6 +1,5 @@ import argparse import configparser -import ipaddress import json import logging import os @@ -97,6 +96,7 @@ def setup(self): cwd=self.options.tmpdir, coverage_dir=self.options.coveragedir, ) + node.tank = tank["tank"] node.rpc = get_rpc_proxy( f"http://{tank['rpc_user']}:{tank['rpc_password']}@{tank['rpc_host']}:{tank['rpc_port']}", i, @@ -306,83 +306,63 @@ def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool """ from_connection = self.nodes[a] to_connection = self.nodes[b] - - to_ip_port = self.warnet.tanks[b].get_dns_addr() - from_ip_port = self.warnet.tanks[a].get_ip_addr() + from_num_peers = 1 + len(from_connection.getpeerinfo()) + 
to_num_peers = 1 + len(to_connection.getpeerinfo()) + ip_port = self.nodes[b].rpchost + ":18444" if peer_advertises_v2 is None: peer_advertises_v2 = self.options.v2transport if peer_advertises_v2: - from_connection.addnode(node=to_ip_port, command="onetry", v2transport=True) + from_connection.addnode(node=ip_port, command="onetry", v2transport=True) else: # skip the optional third argument (default false) for # compatibility with older clients - from_connection.addnode(to_ip_port, "onetry") + from_connection.addnode(ip_port, "onetry") if not wait_for_connect: return - def get_peer_ip(peer): - try: # we encounter a regular ip address - ip_addr = str(ipaddress.ip_address(peer["addr"].split(":")[0])) - return ip_addr - except ValueError as err: # or we encounter a service name - try: - # NETWORK-tank-TANK_INDEX-service - # NETWORK-test-TEST-tank-TANK_INDEX-service - tank_index = int(peer["addr"].split("-")[-2]) - except (ValueError, IndexError) as inner_err: - raise ValueError( - "could not derive tank index from service name: {} {}".format( - peer["addr"], inner_err - ) - ) from err - - ip_addr = self.warnet.tanks[tank_index].get_ip_addr() - return ip_addr - # poll until version handshake complete to avoid race conditions # with transaction relaying # See comments in net_processing: # * Must have a version message before anything else # * Must have a verack message before anything else self.wait_until( - lambda: any( - peer["addr"] == to_ip_port and peer["version"] != 0 - for peer in from_connection.getpeerinfo() - ) + lambda: sum(peer["version"] != 0 for peer in from_connection.getpeerinfo()) + == from_num_peers ) self.wait_until( - lambda: any( - get_peer_ip(peer) == from_ip_port and peer["version"] != 0 - for peer in to_connection.getpeerinfo() - ) + lambda: sum(peer["version"] != 0 for peer in to_connection.getpeerinfo()) + == to_num_peers ) self.wait_until( - lambda: any( - peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 + lambda: 
sum( + peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 for peer in from_connection.getpeerinfo() ) + == from_num_peers ) self.wait_until( - lambda: any( - get_peer_ip(peer) == from_ip_port - and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 + lambda: sum( + peer["bytesrecv_per_msg"].pop("verack", 0) >= 21 for peer in to_connection.getpeerinfo() ) + == to_num_peers ) # The message bytes are counted before processing the message, so make # sure it was fully processed by waiting for a ping. self.wait_until( - lambda: any( - peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 + lambda: sum( + peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in from_connection.getpeerinfo() ) + == from_num_peers ) self.wait_until( - lambda: any( - get_peer_ip(peer) == from_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 + lambda: sum( + peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in to_connection.getpeerinfo() ) + == to_num_peers ) From 846fc9d72fedbea32c562bdfd5f15c3dd444b68d Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Thu, 22 Aug 2024 18:43:41 +0200 Subject: [PATCH 115/710] added prometheus monitoring into bitcoincore chart --- .../charts/bitcoincore/templates/pod.yaml | 23 +++++++++++++++++++ .../charts/bitcoincore/templates/service.yaml | 4 ++++ .../bitcoincore/templates/servicemonitor.yaml | 15 ++++++++++++ resources/charts/bitcoincore/values.yaml | 4 ++++ .../networks/6_node_bitcoin/network.yaml | 1 - src/warnet/cli/network.py | 3 +++ test/logging_test.py | 2 +- 7 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 resources/charts/bitcoincore/templates/servicemonitor.yaml diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index 88f9fedd2..c31854cd2 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -7,6 +7,7 @@ metadata: {{- with .Values.podLabels }} {{- toYaml . 
| nindent 4 }} {{- end }} + tank: {{ include "bitcoincore.fullname" . }} annotations: init_peers: "{{ .Values.connect | len }}" spec: @@ -48,6 +49,28 @@ spec: - mountPath: /root/.bitcoin/bitcoin.conf name: config subPath: bitcoin.conf + {{- if .Values.metricsExport }} + - name: prometheus + image: bitcoindevproject/bitcoin-exporter:latest + imagePullPolicy: IfNotPresent + ports: + - name: prom-metrics + containerPort: {{ .Values.prometheusMetricsPort }} + protocol: TCP + env: + - name: BITCOIN_RPC_HOST + value: "127.0.0.1" + - name: BITCOIN_RPC_PORT + value: "{{ .Values.regtest.RPCPort }}" + - name: BITCOIN_RPC_USER + value: user + - name: BITCOIN_RPC_PASSWORD + value: password + {{- if .Values.metrics }} + - name: METRICS + value: {{ .Values.metrics }} + {{- end }} + {{- end}} volumes: {{- with .Values.volumes }} {{- toYaml . | nindent 4 }} diff --git a/resources/charts/bitcoincore/templates/service.yaml b/resources/charts/bitcoincore/templates/service.yaml index f2bb4fdf2..9b2fd3f4d 100644 --- a/resources/charts/bitcoincore/templates/service.yaml +++ b/resources/charts/bitcoincore/templates/service.yaml @@ -23,5 +23,9 @@ spec: targetPort: zmq-block protocol: TCP name: zmq-block + - port: {{ .Values.prometheusMetricsPort }} + targetPort: prom-metrics + protocol: TCP + name: prometheus-metrics selector: {{- include "bitcoincore.selectorLabels" . | nindent 4 }} diff --git a/resources/charts/bitcoincore/templates/servicemonitor.yaml b/resources/charts/bitcoincore/templates/servicemonitor.yaml new file mode 100644 index 000000000..a3969e76b --- /dev/null +++ b/resources/charts/bitcoincore/templates/servicemonitor.yaml @@ -0,0 +1,15 @@ +{{- if .Values.metricsExport }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "bitcoincore.fullname" . 
}} + labels: + app.kubernetes.io/name: bitcoind-metrics + release: prometheus +spec: + endpoints: + - port: prometheus-metrics + selector: + matchLabels: + tank: {{ include "bitcoincore.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index d22d3c005..aac305ec6 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -106,6 +106,10 @@ affinity: {} chain: regtest +collectLogs: false +metricsExport: false +prometheusMetricsPort: 9332 + regtestConfig: | regtest=1 diff --git a/resources/networks/6_node_bitcoin/network.yaml b/resources/networks/6_node_bitcoin/network.yaml index 5269c6a5a..192f56a4e 100644 --- a/resources/networks/6_node_bitcoin/network.yaml +++ b/resources/networks/6_node_bitcoin/network.yaml @@ -27,5 +27,4 @@ nodes: - name: tank-0005 connect: - tank-0006 - - tank-0007 - name: tank-0006 \ No newline at end of file diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 8c99a2f98..6adab1320 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -154,6 +154,9 @@ def _connected(): manuals += 1 # Even if more edges are specifed, bitcoind only allows # 8 manual outbound connections + + print("manual " + str(manuals)) + print(tank.metadata.annotations["init_peers"]) if min(8, int(tank.metadata.annotations["init_peers"])) > manuals: print("Network not connected") return False diff --git a/test/logging_test.py b/test/logging_test.py index b24826072..bbf9ef0bc 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -54,7 +54,7 @@ def start_logging(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network start {self.graph_file_path}")) + self.log.info(self.warcli(f"network deploy")) self.wait_for_all_tanks_status(target="running", timeout=10 * 60) self.wait_for_all_edges() From 927e44221503b343c5fe76f29e055c974809e0c4 Mon 
Sep 17 00:00:00 2001 From: Max Edwards Date: Thu, 22 Aug 2024 23:04:18 +0200 Subject: [PATCH 116/710] switched logging test to debug rpc --- resources/charts/bitcoincore/templates/pod.yaml | 5 ++++- resources/networks/6_node_bitcoin/node-defaults.yaml | 3 ++- test/logging_test.py | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index c31854cd2..d878388f8 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -7,7 +7,10 @@ metadata: {{- with .Values.podLabels }} {{- toYaml . | nindent 4 }} {{- end }} - tank: {{ include "bitcoincore.fullname" . }} + tank: {{ include "bitcoincore.fullname" . }} + {{- if .Values.collectLogs }} + collect_logs: "true" + {{- end }} annotations: init_peers: "{{ .Values.connect | len }}" spec: diff --git a/resources/networks/6_node_bitcoin/node-defaults.yaml b/resources/networks/6_node_bitcoin/node-defaults.yaml index 2ff2e554f..a314fda7d 100644 --- a/resources/networks/6_node_bitcoin/node-defaults.yaml +++ b/resources/networks/6_node_bitcoin/node-defaults.yaml @@ -22,4 +22,5 @@ image: tag: "27.0" config: | - dns=1 \ No newline at end of file + dns=1 + debug=rpc \ No newline at end of file diff --git a/test/logging_test.py b/test/logging_test.py index bbf9ef0bc..4419747ae 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -54,7 +54,7 @@ def start_logging(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network deploy")) + self.log.info(self.warcli(f"network deploy ./resources/networks/6_node_bitcoin")) self.wait_for_all_tanks_status(target="running", timeout=10 * 60) self.wait_for_all_edges() From 43d5d7d5e03fd312f1abf905caf2f13666d3910b Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Thu, 22 Aug 2024 23:52:02 +0200 Subject: [PATCH 117/710] metrics are coming through --- 
resources/charts/bitcoincore/templates/pod.yaml | 2 +- resources/charts/bitcoincore/templates/service.yaml | 1 + resources/charts/bitcoincore/templates/servicemonitor.yaml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index d878388f8..082ae7961 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -7,7 +7,7 @@ metadata: {{- with .Values.podLabels }} {{- toYaml . | nindent 4 }} {{- end }} - tank: {{ include "bitcoincore.fullname" . }} + app: {{ include "bitcoincore.fullname" . }} {{- if .Values.collectLogs }} collect_logs: "true" {{- end }} diff --git a/resources/charts/bitcoincore/templates/service.yaml b/resources/charts/bitcoincore/templates/service.yaml index 9b2fd3f4d..728cfb5b4 100644 --- a/resources/charts/bitcoincore/templates/service.yaml +++ b/resources/charts/bitcoincore/templates/service.yaml @@ -4,6 +4,7 @@ metadata: name: {{ include "bitcoincore.fullname" . }} labels: {{- include "bitcoincore.labels" . | nindent 4 }} + app: {{ include "bitcoincore.fullname" . }} spec: type: {{ .Values.service.type }} ports: diff --git a/resources/charts/bitcoincore/templates/servicemonitor.yaml b/resources/charts/bitcoincore/templates/servicemonitor.yaml index a3969e76b..46c7136e0 100644 --- a/resources/charts/bitcoincore/templates/servicemonitor.yaml +++ b/resources/charts/bitcoincore/templates/servicemonitor.yaml @@ -11,5 +11,5 @@ spec: - port: prometheus-metrics selector: matchLabels: - tank: {{ include "bitcoincore.fullname" . }} + app: {{ include "bitcoincore.fullname" . 
}} {{- end }} \ No newline at end of file From c85403f3fbe1870d44755cc05d457e0a970af8a0 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 22 Aug 2024 23:59:04 +0200 Subject: [PATCH 118/710] temp: disable logging stack --- resources/networks/6_node_bitcoin/node-defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/networks/6_node_bitcoin/node-defaults.yaml b/resources/networks/6_node_bitcoin/node-defaults.yaml index a314fda7d..332541819 100644 --- a/resources/networks/6_node_bitcoin/node-defaults.yaml +++ b/resources/networks/6_node_bitcoin/node-defaults.yaml @@ -1,7 +1,7 @@ chain: regtest collectLogs: true -metricsExport: true +metricsExport: false resources: {} # We usually recommend not to specify default resources and to leave this as a conscious From 6ae3fc3408bfb4adee3215805b0d7d63dfb291a7 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 23 Aug 2024 00:04:44 +0200 Subject: [PATCH 119/710] logging test passing --- src/warnet/cli/network.py | 2 +- test/data/logging.graphml | 35 ---------------------------- test/data/logging/network.yaml | 13 +++++++++++ test/data/logging/node-defaults.yaml | 4 ++++ test/logging_test.py | 4 ++-- 5 files changed, 20 insertions(+), 38 deletions(-) delete mode 100644 test/data/logging.graphml create mode 100644 test/data/logging/network.yaml create mode 100644 test/data/logging/node-defaults.yaml diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py index 6adab1320..1b4b1752d 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/cli/network.py @@ -154,7 +154,7 @@ def _connected(): manuals += 1 # Even if more edges are specifed, bitcoind only allows # 8 manual outbound connections - + print("manual " + str(manuals)) print(tank.metadata.annotations["init_peers"]) if min(8, int(tank.metadata.annotations["init_peers"])) > manuals: diff --git a/test/data/logging.graphml b/test/data/logging.graphml deleted file mode 100644 index 54b3b73cb..000000000 --- 
a/test/data/logging.graphml +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - - - - - - - - - - - - - - 27.0 - true - - - 27.0 - true - txrate=getchaintxstats(10)["txrate"] - - - 27.0 - - - - - - diff --git a/test/data/logging/network.yaml b/test/data/logging/network.yaml new file mode 100644 index 000000000..59de12158 --- /dev/null +++ b/test/data/logging/network.yaml @@ -0,0 +1,13 @@ +nodes: + - name: tank-0000 + connect: + - tank-0002 + metricsExport: true + - name: tank-0001 + connect: + - tank-0002 + metricsExport: true + metrics: txrate=getchaintxstats(10)["txrate"] + - name: tank-0002 + connect: + - tank-0000 \ No newline at end of file diff --git a/test/data/logging/node-defaults.yaml b/test/data/logging/node-defaults.yaml new file mode 100644 index 000000000..7e021cad1 --- /dev/null +++ b/test/data/logging/node-defaults.yaml @@ -0,0 +1,4 @@ +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: "27.0" diff --git a/test/logging_test.py b/test/logging_test.py index 4419747ae..c19fc888f 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -14,7 +14,7 @@ class LoggingTest(TestBase): def __init__(self): super().__init__() - self.graph_file_path = Path(os.path.dirname(__file__)) / "data" / "logging.graphml" + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "logging" self.scripts_dir = Path(os.path.dirname(__file__)) / ".." 
/ "resources" / "scripts" self.connect_logging_process = None self.connect_logging_thread = None @@ -54,7 +54,7 @@ def start_logging(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network deploy ./resources/networks/6_node_bitcoin")) + self.log.info(self.warcli(f"network deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running", timeout=10 * 60) self.wait_for_all_edges() From 6bafb2c2cee463f40b115f52cbbcb8a6bf9c2187 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 23 Aug 2024 00:06:14 +0200 Subject: [PATCH 120/710] ci run logging tes --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f14e54192..b82474a17 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,7 +30,7 @@ jobs: strategy: matrix: # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [scenarios_test.py, rpc_test.py, dag_connection_test.py ] + test: [scenarios_test.py, rpc_test.py, dag_connection_test.py, logging_test.py] steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 From 36083f9bff284bed5a652dd1207097bc61b91e3a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 23 Aug 2024 00:45:44 +0200 Subject: [PATCH 121/710] refactor out from cli dir --- pyproject.toml | 8 ++- .../cli => resources/scenarios}/__init__.py | 0 .../scenarios/commander.py | 0 .../warnet => resources}/scenarios/ln_init.py | 5 +- .../scenarios/miner_std.py | 5 +- .../scenarios/sens_relay.py | 5 +- .../scenarios/tx_flood.py | 5 +- src/warnet/{cli => }/admin.py | 0 src/warnet/{cli => }/bitcoin.py | 0 src/warnet/{cli => }/graph.py | 0 src/warnet/{cli => }/image.py | 0 src/warnet/{cli => }/image_build.py | 2 +- src/warnet/{cli => }/k8s.py | 2 - src/warnet/{cli => }/ln.py | 0 src/warnet/{cli => }/main.py | 2 +- src/warnet/{cli => }/namespaces.py | 4 +- 
src/warnet/{cli => }/network.py | 8 +-- src/warnet/{cli => }/process.py | 0 src/warnet/{cli => }/scenarios.py | 52 +++++++++++-------- src/warnet/scenarios/__init__.py | 0 src/warnet/{cli => }/util.py | 0 test/data/scenario_p2p_interface.py | 6 ++- test/scenarios_test.py | 10 ++-- test/test_base.py | 6 +-- 24 files changed, 72 insertions(+), 48 deletions(-) rename {src/warnet/cli => resources/scenarios}/__init__.py (100%) rename {src/warnet => resources}/scenarios/commander.py (100%) rename {src/warnet => resources}/scenarios/ln_init.py (98%) rename {src/warnet => resources}/scenarios/miner_std.py (95%) rename {src/warnet => resources}/scenarios/sens_relay.py (91%) rename {src/warnet => resources}/scenarios/tx_flood.py (95%) rename src/warnet/{cli => }/admin.py (100%) rename src/warnet/{cli => }/bitcoin.py (100%) rename src/warnet/{cli => }/graph.py (100%) rename src/warnet/{cli => }/image.py (100%) rename src/warnet/{cli => }/image_build.py (97%) rename src/warnet/{cli => }/k8s.py (97%) rename src/warnet/{cli => }/ln.py (100%) rename src/warnet/{cli => }/main.py (98%) rename src/warnet/{cli => }/namespaces.py (97%) rename src/warnet/{cli => }/network.py (96%) rename src/warnet/{cli => }/process.py (100%) rename src/warnet/{cli => }/scenarios.py (86%) delete mode 100644 src/warnet/scenarios/__init__.py rename src/warnet/{cli => }/util.py (100%) diff --git a/pyproject.toml b/pyproject.toml index 53866fd32..69e2e2b93 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,7 @@ dependencies = [ ] [project.scripts] -warcli = "warnet.cli.main:cli" +warcli = "warnet.main:cli" [project.urls] Homepage = "https://warnet.dev" @@ -42,4 +42,8 @@ build-backend = "setuptools.build_meta" include-package-data = true [tool.setuptools.packages.find] -where = ["src", "resources"] +where = ["src", "."] +include = ["warnet*", "test_framework*", "resources*"] + +[tool.setuptools.package-data] +"resources" = ["**/*"] diff --git a/src/warnet/cli/__init__.py 
b/resources/scenarios/__init__.py similarity index 100% rename from src/warnet/cli/__init__.py rename to resources/scenarios/__init__.py diff --git a/src/warnet/scenarios/commander.py b/resources/scenarios/commander.py similarity index 100% rename from src/warnet/scenarios/commander.py rename to resources/scenarios/commander.py diff --git a/src/warnet/scenarios/ln_init.py b/resources/scenarios/ln_init.py similarity index 98% rename from src/warnet/scenarios/ln_init.py rename to resources/scenarios/ln_init.py index 4894e1dc0..e915ee721 100644 --- a/src/warnet/scenarios/ln_init.py +++ b/resources/scenarios/ln_init.py @@ -3,7 +3,10 @@ from time import sleep # The base class exists inside the commander container -from commander import Commander +try: + from commander import Commander +except ImportError: + from resources.scenarios.commander import Commander def cli_help(): diff --git a/src/warnet/scenarios/miner_std.py b/resources/scenarios/miner_std.py similarity index 95% rename from src/warnet/scenarios/miner_std.py rename to resources/scenarios/miner_std.py index 1f54f86f9..fcfea9841 100755 --- a/src/warnet/scenarios/miner_std.py +++ b/resources/scenarios/miner_std.py @@ -3,7 +3,10 @@ from time import sleep # The base class exists inside the commander container -from commander import Commander +try: + from commander import Commander +except ImportError: + from resources.scenarios.commander import Commander def cli_help(): diff --git a/src/warnet/scenarios/sens_relay.py b/resources/scenarios/sens_relay.py similarity index 91% rename from src/warnet/scenarios/sens_relay.py rename to resources/scenarios/sens_relay.py index 0fa4ed55a..210b43760 100644 --- a/src/warnet/scenarios/sens_relay.py +++ b/resources/scenarios/sens_relay.py @@ -1,7 +1,10 @@ #!/usr/bin/env python3 # The base class exists inside the commander container -from commander import Commander +try: + from commander import Commander +except ImportError: + from resources.scenarios.commander import Commander 
def cli_help(): diff --git a/src/warnet/scenarios/tx_flood.py b/resources/scenarios/tx_flood.py similarity index 95% rename from src/warnet/scenarios/tx_flood.py rename to resources/scenarios/tx_flood.py index 69b00b460..5da5f8b53 100755 --- a/src/warnet/scenarios/tx_flood.py +++ b/resources/scenarios/tx_flood.py @@ -4,7 +4,10 @@ from time import sleep # The base class exists inside the commander container -from commander import Commander +try: + from commander import Commander +except ImportError: + from resources.scenarios.commander import Commander def cli_help(): diff --git a/src/warnet/cli/admin.py b/src/warnet/admin.py similarity index 100% rename from src/warnet/cli/admin.py rename to src/warnet/admin.py diff --git a/src/warnet/cli/bitcoin.py b/src/warnet/bitcoin.py similarity index 100% rename from src/warnet/cli/bitcoin.py rename to src/warnet/bitcoin.py diff --git a/src/warnet/cli/graph.py b/src/warnet/graph.py similarity index 100% rename from src/warnet/cli/graph.py rename to src/warnet/graph.py diff --git a/src/warnet/cli/image.py b/src/warnet/image.py similarity index 100% rename from src/warnet/cli/image.py rename to src/warnet/image.py diff --git a/src/warnet/cli/image_build.py b/src/warnet/image_build.py similarity index 97% rename from src/warnet/cli/image_build.py rename to src/warnet/image_build.py index 1cc7864f8..98f502e23 100644 --- a/src/warnet/cli/image_build.py +++ b/src/warnet/image_build.py @@ -3,7 +3,7 @@ ARCHES = ["amd64", "arm64", "armhf"] -dockerfile_path = files("images.bitcoin").joinpath("Dockerfile") +dockerfile_path = files("resources.images.bitcoin").joinpath("Dockerfile") def run_command(command): diff --git a/src/warnet/cli/k8s.py b/src/warnet/k8s.py similarity index 97% rename from src/warnet/cli/k8s.py rename to src/warnet/k8s.py index 9b8f72ba3..22adb96e0 100644 --- a/src/warnet/cli/k8s.py +++ b/src/warnet/k8s.py @@ -1,6 +1,5 @@ import json import tempfile -from importlib.resources import files from pathlib import Path 
import yaml @@ -10,7 +9,6 @@ from .process import run_command, stream_command -WAR_MANIFESTS = files("manifests") DEFAULT_NAMESPACE = "warnet" diff --git a/src/warnet/cli/ln.py b/src/warnet/ln.py similarity index 100% rename from src/warnet/cli/ln.py rename to src/warnet/ln.py diff --git a/src/warnet/cli/main.py b/src/warnet/main.py similarity index 98% rename from src/warnet/cli/main.py rename to src/warnet/main.py index e43c22002..a79356baa 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/main.py @@ -13,7 +13,7 @@ from .network import copy_network_defaults, network from .scenarios import scenarios -QUICK_START_PATH = files("scripts").joinpath("quick_start.sh") +QUICK_START_PATH = files("resources.scripts").joinpath("quick_start.sh") @click.group() diff --git a/src/warnet/cli/namespaces.py b/src/warnet/namespaces.py similarity index 97% rename from src/warnet/cli/namespaces.py rename to src/warnet/namespaces.py index 83c433bc9..0b14de4dd 100644 --- a/src/warnet/cli/namespaces.py +++ b/src/warnet/namespaces.py @@ -8,13 +8,13 @@ from .process import run_command, stream_command -WARNET_NAMESPACES_DIR = files("namespaces") +WARNET_NAMESPACES_DIR = files("resources").joinpath("namespaces") NAMESPACES_DIR = Path("namespaces") DEFAULT_NAMESPACES = Path("two_namespaces_two_users") NAMESPACES_FILE = "namespaces.yaml" DEFAULTS_FILE = "namespace-defaults.yaml" HELM_COMMAND = "helm upgrade --install" -BITCOIN_CHART_LOCATION = Path(str(files("charts").joinpath("namespaces"))) +BITCOIN_CHART_LOCATION = Path(str(files("resources.charts").joinpath("namespaces"))) def copy_namespaces_defaults(directory: Path): diff --git a/src/warnet/cli/network.py b/src/warnet/network.py similarity index 96% rename from src/warnet/cli/network.py rename to src/warnet/network.py index 1b4b1752d..d160587e3 100644 --- a/src/warnet/cli/network.py +++ b/src/warnet/network.py @@ -12,14 +12,14 @@ from .k8s import delete_namespace, get_default_namespace, get_mission, get_pods from .process import 
stream_command -WAR_MANIFESTS = files("manifests") -WARNET_NETWORK_DIR = files("networks") -NETWORK_DIR = Path("networks") +WAR_MANIFESTS = files("resources.manifests") +WARNET_NETWORK_DIR = files("resources.networks") +NETWORK_DIR = Path("resources.networks") DEFAULT_NETWORK = Path("6_node_bitcoin") NETWORK_FILE = "network.yaml" DEFAULTS_FILE = "node-defaults.yaml" HELM_COMMAND = "helm upgrade --install --create-namespace" -BITCOIN_CHART_LOCATION = str(files("charts").joinpath("bitcoincore")) +BITCOIN_CHART_LOCATION = str(files("resources.charts").joinpath("bitcoincore")) @click.group(name="network") diff --git a/src/warnet/cli/process.py b/src/warnet/process.py similarity index 100% rename from src/warnet/cli/process.py rename to src/warnet/process.py diff --git a/src/warnet/cli/scenarios.py b/src/warnet/scenarios.py similarity index 86% rename from src/warnet/cli/scenarios.py rename to src/warnet/scenarios.py index 370b93ce9..8ce115444 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/scenarios.py @@ -5,6 +5,7 @@ import sys import tempfile import time +from importlib.resources import files import click import yaml @@ -12,14 +13,12 @@ from rich.console import Console from rich.table import Table -from warnet import scenarios as SCENARIOS - from .k8s import apply_kubernetes_yaml, get_default_namespace, get_mission @click.group(name="scenarios") def scenarios(): - """Manage scenarios on a running network""" + """Manage scenarios on a network""" @scenarios.command() @@ -41,24 +40,22 @@ def available(): def _available(): - # This ugly hack temporarily allows us to import the scenario modules - # in the context in which they run: as __main__ from - # the root directory of the commander container. 
- scenarios_path = SCENARIOS.__path__ - sys.path.insert(0, scenarios_path[0]) - - scenario_list = [] - for s in pkgutil.iter_modules(scenarios_path): - module_name = f"warnet.scenarios.{s.name}" - try: - m = importlib.import_module(module_name) - if hasattr(m, "cli_help"): - scenario_list.append((s.name, m.cli_help())) - except Exception as e: - print(f"Ignoring module: {module_name} because {e}") - - # Clean up that ugly hack - sys.path.pop(0) + scenarios_dir = files("resources.scenarios") + sys.path.append(scenarios_dir) + + try: + scenario_list = [] + package_name = "resources.scenarios" + for _, name, _ in pkgutil.iter_modules([scenarios_dir]): + module_name = f"{package_name}.{name}" + try: + m = importlib.import_module(module_name) + if hasattr(m, "cli_help"): + scenario_list.append((name, m.cli_help())) + except Exception as e: + print(f"Error importing module {module_name}: {e}") + finally: + sys.path.remove(scenarios_dir) return scenario_list @@ -72,7 +69,7 @@ def run(scenario: str, additional_args: tuple[str]): """ # Use importlib.resources to get the scenario path - scenario_package = "warnet.scenarios" + scenario_package = "resources.scenarios" scenario_filename = f"{scenario}.py" # Ensure the scenario file exists within the package @@ -86,7 +83,7 @@ def run(scenario: str, additional_args: tuple[str]): @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) def run_file(scenario_path: str, additional_args: tuple[str]): """ - Run from the Warnet Test Framework with optional arguments + Start with optional arguments """ if not scenario_path.endswith(".py"): print("Error. 
Currently only python scenarios are supported") @@ -206,3 +203,12 @@ def active(): def _active() -> list[str]: commanders = get_mission("commander") return [{"commander": c.metadata.name, "status": c.status.phase.lower()} for c in commanders] + + +@scenarios.command() +@click.argument("pid", type=int) +def stop(pid: int): + """ + Stop scenario + """ + pass diff --git a/src/warnet/scenarios/__init__.py b/src/warnet/scenarios/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/warnet/cli/util.py b/src/warnet/util.py similarity index 100% rename from src/warnet/cli/util.py rename to src/warnet/util.py diff --git a/test/data/scenario_p2p_interface.py b/test/data/scenario_p2p_interface.py index 54c044c7c..27508681f 100644 --- a/test/data/scenario_p2p_interface.py +++ b/test/data/scenario_p2p_interface.py @@ -2,7 +2,11 @@ from collections import defaultdict # The base class exists inside the commander container -from commander import Commander +try: + from commander import Commander +except Exception: + from resources.scenarios.commander import Commander + from test_framework.messages import CInv, msg_getdata from test_framework.p2p import P2PInterface diff --git a/test/scenarios_test.py b/test/scenarios_test.py index b5b940338..a56714ecd 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -5,10 +5,10 @@ from test_base import TestBase -from warnet.cli.k8s import delete_pod -from warnet.cli.process import run_command -from warnet.cli.scenarios import _active as scenarios_active -from warnet.cli.scenarios import _available as scenarios_available +from warnet.k8s import delete_pod +from warnet.process import run_command +from warnet.scenarios import _active as scenarios_active +from warnet.scenarios import _available as scenarios_available class ScenariosTest(TestBase): @@ -69,7 +69,7 @@ def run_and_check_miner_scenario(self): self.stop_scenario() def run_and_check_miner_scenario_from_file(self): - scenario_file = 
"src/warnet/scenarios/miner_std.py" + scenario_file = "resources/scenarios/miner_std.py" self.log.info(f"Running scenario from file: {scenario_file}") self.warcli(f"scenarios run-file {scenario_file} --allnodes --interval=1") start = int(self.warcli("bitcoin rpc tank-0000 getblockcount")) diff --git a/test/test_base.py b/test/test_base.py index 5e7c6953b..4753cf2d7 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -11,9 +11,9 @@ from time import sleep from warnet import SRC_DIR -from warnet.cli.network import _connected as network_connected -from warnet.cli.network import _status as network_status -from warnet.cli.scenarios import _active as scenarios_active +from warnet.network import _connected as network_connected +from warnet.network import _status as network_status +from warnet.scenarios import _active as scenarios_active class TestBase: From 8bc888df181fd6c1dc78be802bff6a7a1fb64eb7 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 23 Aug 2024 10:09:58 +0200 Subject: [PATCH 122/710] remove dup help command `--help` exists natively via Click, use it. --- src/warnet/main.py | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index a79356baa..0ab3f75e4 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -29,42 +29,6 @@ def cli(): cli.add_command(admin) -@cli.command(name="help") -@click.argument("commands", required=False, nargs=-1) -@click.pass_context -def help_command(ctx, commands): - """ - Display help information for the given [command] (and sub-command). - If no command is given, display help for the main CLI. 
- """ - if not commands: - # Display help for the main CLI - richprint(ctx.parent.get_help()) - return - - # Recurse down the subcommands, fetching the command object for each - cmd_obj = cli - for command in commands: - cmd_obj = cmd_obj.get_command(ctx, command) - if cmd_obj is None: - richprint(f'Unknown command "{command}" in {commands}') - return - ctx = click.Context(cmd_obj, info_name=command, parent=ctx) - - if cmd_obj is None: - richprint(f"Unknown command: {commands}") - return - - # Get the help info - help_info = cmd_obj.get_help(ctx).strip() - # Get rid of the duplication - help_info = help_info.replace("Usage: warcli help [COMMANDS]...", "Usage: warcli", 1) - richprint(help_info) - - -cli.add_command(help_command) - - @cli.command() def setup(): """Check Warnet requirements are installed""" From 0f92fc1eba896765a1b411b80b246650ceb4b7f1 Mon Sep 17 00:00:00 2001 From: Grant Date: Thu, 22 Aug 2024 16:44:21 -0500 Subject: [PATCH 123/710] add auth command --- src/warnet/main.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/src/warnet/main.py b/src/warnet/main.py index 0ab3f75e4..deb1463cd 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -1,9 +1,11 @@ import os import subprocess +import sys from importlib.resources import files from pathlib import Path import click +import yaml from rich import print as richprint from .admin import admin @@ -87,5 +89,42 @@ def init(): richprint(f"[green]Created warnet project structure in {current_dir}[/green]") +@cli.command() +@click.argument("kube_config", type=str) +def auth(kube_config: str) -> None: + """ + Authorize access to a warnet cluster using a kube config file + """ + try: + current_kubeconfig = os.environ.get("KUBECONFIG", os.path.expanduser("~/.kube/config")) + combined_kubeconfig = ( + f"{current_kubeconfig}:{kube_config}" if current_kubeconfig else kube_config + ) + os.environ["KUBECONFIG"] = combined_kubeconfig + command = "kubectl config view 
--flatten" + result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True) + except subprocess.CalledProcessError as e: + print("Error occurred while executing kubectl config view --flatten:") + print(e.stderr) + sys.exit(1) + + if result.returncode == 0: + with open(current_kubeconfig, "w") as file: + file.write(result.stdout) + print(f"Authorization file written to: {current_kubeconfig}") + else: + print("Could not create authorization file") + print(result.stderr) + sys.exit(result.returncode) + + with open(current_kubeconfig) as file: + contents = yaml.safe_load(file) + print("\nUse the following command to switch to a new user:") + print(" kubectl config use context [user]\n") + print("Available users:") + for context in contents["contexts"]: + print(f" {context['name']}") + + if __name__ == "__main__": cli() From 85b08dbc96b758db3efef613fd624878258b3c8c Mon Sep 17 00:00:00 2001 From: Grant Date: Fri, 23 Aug 2024 03:56:01 -0500 Subject: [PATCH 124/710] fix docs --- src/warnet/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index deb1463cd..332d6a91b 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -120,7 +120,7 @@ def auth(kube_config: str) -> None: with open(current_kubeconfig) as file: contents = yaml.safe_load(file) print("\nUse the following command to switch to a new user:") - print(" kubectl config use context [user]\n") + print(" kubectl config use-context [user]\n") print("Available users:") for context in contents["contexts"]: print(f" {context['name']}") From 37101e870c8fffc7d4d90378729804738e00c663 Mon Sep 17 00:00:00 2001 From: Grant Date: Fri, 23 Aug 2024 04:01:28 -0500 Subject: [PATCH 125/710] fix api import --- resources/scripts/apidocs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/scripts/apidocs.py b/resources/scripts/apidocs.py index e02cd9939..42607de66 100755 --- a/resources/scripts/apidocs.py +++ 
b/resources/scripts/apidocs.py @@ -7,7 +7,7 @@ from click import Context from tabulate import tabulate -from warnet.cli.main import cli +from warnet.main import cli file_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / ".." / "docs" / "warcli.md" From ad085eab17b8e4f982392721ba81b24a4f24bcc3 Mon Sep 17 00:00:00 2001 From: josibake Date: Fri, 23 Aug 2024 12:06:00 +0200 Subject: [PATCH 126/710] rough cli re-write --- src/warnet/control.py | 239 ++++++++++++++++++++++++++++++++++++++++++ src/warnet/deploy.py | 106 +++++++++++++++++++ src/warnet/main.py | 12 ++- src/warnet/status.py | 63 +++++++++++ 4 files changed, 416 insertions(+), 4 deletions(-) create mode 100644 src/warnet/control.py create mode 100644 src/warnet/deploy.py create mode 100644 src/warnet/status.py diff --git a/src/warnet/control.py b/src/warnet/control.py new file mode 100644 index 000000000..45af40819 --- /dev/null +++ b/src/warnet/control.py @@ -0,0 +1,239 @@ +import click +from rich import print +from rich.console import Console +from rich.table import Table +import json +import os +import tempfile +import time +import yaml + +from warnet import scenarios as SCENARIOS +from .k8s import get_mission, delete_namespace, get_default_namespace, apply_kubernetes_yaml, get_pods +from .process import run_command, stream_command +from rich.prompt import Prompt, Confirm + + +console = Console() + +def get_active_scenarios(): + """Get list of active scenarios""" + commanders = get_mission("commander") + return [c.metadata.name for c in commanders] + +@click.command() +@click.argument('scenario_name', required=False) +def stop(scenario_name): + """Stop a running scenario or all scenarios""" + active_scenarios = get_active_scenarios() + + if not active_scenarios: + console.print("[bold red]No active scenarios found.[/bold red]") + return + + if not scenario_name: + table = Table(title="Active Scenarios", show_header=True, header_style="bold magenta") + table.add_column("Number", style="cyan", 
justify="right") + table.add_column("Scenario Name", style="green") + + for idx, name in enumerate(active_scenarios, 1): + table.add_row(str(idx), name) + + console.print(table) + + choices = [str(i) for i in range(1, len(active_scenarios) + 1)] + ['a', 'q'] + choice = Prompt.ask( + "[bold yellow]Enter the number of the scenario to stop, 'a' to stop all, or 'q' to quit[/bold yellow]", + choices=choices, + show_choices=False + ) + + if choice == 'q': + console.print("[bold blue]Operation cancelled.[/bold blue]") + return + elif choice == 'a': + if Confirm.ask("[bold red]Are you sure you want to stop all scenarios?[/bold red]"): + stop_all_scenarios(active_scenarios) + else: + console.print("[bold blue]Operation cancelled.[/bold blue]") + return + + scenario_name = active_scenarios[int(choice) - 1] + + if scenario_name not in active_scenarios: + console.print(f"[bold red]No active scenario found with name: {scenario_name}[/bold red]") + return + + stop_scenario(scenario_name) + +def stop_scenario(scenario_name): + """Stop a single scenario""" + cmd = f"kubectl delete pod {scenario_name}" + if stream_command(cmd): + console.print(f"[bold green]Successfully stopped scenario: {scenario_name}[/bold green]") + else: + console.print(f"[bold red]Failed to stop scenario: {scenario_name}[/bold red]") + +def stop_all_scenarios(scenarios): + """Stop all active scenarios""" + with console.status("[bold yellow]Stopping all scenarios...[/bold yellow]"): + for scenario in scenarios: + stop_scenario(scenario) + console.print("[bold green]All scenarios have been stopped.[/bold green]") + +def list_active_scenarios(): + """List all active scenarios""" + commanders = get_mission("commander") + if not commanders: + print("No active scenarios found.") + return + + console = Console() + table = Table(title="Active Scenarios", show_header=True, header_style="bold magenta") + table.add_column("Name", style="cyan") + table.add_column("Status", style="green") + + for commander in commanders: 
+ table.add_row(commander.metadata.name, commander.status.phase.lower()) + + console.print(table) + + +@click.command() +def down(): + """Bring down a running warnet""" + console.print("[bold yellow]Bringing down the warnet...[/bold yellow]") + + # Delete warnet-logging namespace + if delete_namespace("warnet-logging"): + console.print("[green]Warnet logging deleted[/green]") + else: + console.print("[red]Warnet logging NOT deleted[/red]") + + # Uninstall tanks + tanks = get_mission("tank") + with console.status("[yellow]Uninstalling tanks...[/yellow]"): + for tank in tanks: + cmd = f"helm uninstall {tank.metadata.name}" + if stream_command(cmd): + console.print(f"[green]Uninstalled tank: {tank.metadata.name}[/green]") + else: + console.print(f"[red]Failed to uninstall tank: {tank.metadata.name}[/red]") + + # Clean up scenarios and other pods + pods = get_pods() + with console.status("[yellow]Cleaning up remaining pods...[/yellow]"): + for pod in pods.items: + cmd = f"kubectl delete pod {pod.metadata.name}" + if stream_command(cmd): + console.print(f"[green]Deleted pod: {pod.metadata.name}[/green]") + else: + console.print(f"[red]Failed to delete pod: {pod.metadata.name}[/red]") + + console.print("[bold green]Warnet has been brought down.[/bold green]") + +def get_active_network(namespace): + """Get the name of the active network (Helm release) in the given namespace""" + cmd = f"helm list --namespace {namespace} --output json" + result = run_command(cmd) + if result: + import json + releases = json.loads(result) + if releases: + # Assuming the first release is the active network + return releases[0]['name'] + return None + +@click.command() +@click.argument('scenario_file', type=click.Path(exists=True, file_okay=True, dir_okay=False)) +@click.argument('additional_args', nargs=-1, type=click.UNPROCESSED) +def run(scenario_file: str, additional_args: tuple[str]): + """Run a scenario from a file""" + scenario_path = os.path.abspath(scenario_file) + scenario_name = 
os.path.splitext(os.path.basename(scenario_path))[0] + + with open(scenario_path) as file: + scenario_text = file.read() + + name = f"commander-{scenario_name.replace('_', '')}-{int(time.time())}" + namespace = get_default_namespace() + tankpods = get_mission("tank") + tanks = [ + { + "tank": tank.metadata.name, + "chain": "regtest", + "rpc_host": tank.status.pod_ip, + "rpc_port": 18443, + "rpc_user": "user", + "rpc_password": "password", + "init_peers": [], + } + for tank in tankpods + ] + kubernetes_objects = [ + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "warnetjson", + "namespace": namespace, + }, + "data": {"warnet.json": json.dumps(tanks)}, + }, + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "scenariopy", + "namespace": namespace, + }, + "data": {"scenario.py": scenario_text}, + }, + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": name, + "namespace": namespace, + "labels": {"mission": "commander"}, + }, + "spec": { + "restartPolicy": "Never", + "containers": [ + { + "name": name, + "image": "bitcoindevproject/warnet-commander:latest", + "args": additional_args, + "volumeMounts": [ + { + "name": "warnetjson", + "mountPath": "warnet.json", + "subPath": "warnet.json", + }, + { + "name": "scenariopy", + "mountPath": "scenario.py", + "subPath": "scenario.py", + }, + ], + } + ], + "volumes": [ + {"name": "warnetjson", "configMap": {"name": "warnetjson"}}, + {"name": "scenariopy", "configMap": {"name": "scenariopy"}}, + ], + }, + }, + ] + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: + yaml.dump_all(kubernetes_objects, temp_file) + temp_file_path = temp_file.name + + if apply_kubernetes_yaml(temp_file_path): + print(f"Successfully started scenario: {scenario_name}") + print(f"Commander pod name: {name}") + else: + print(f"Failed to start scenario: {scenario_name}") + + os.unlink(temp_file_path) + diff --git a/src/warnet/deploy.py 
b/src/warnet/deploy.py new file mode 100644 index 000000000..1c7e5dd7c --- /dev/null +++ b/src/warnet/deploy.py @@ -0,0 +1,106 @@ +import click +from pathlib import Path +import yaml +import tempfile +from .process import stream_command +from .k8s import get_default_namespace + +# Import necessary functions and variables from network.py and namespaces.py +from .network import NETWORK_FILE, DEFAULTS_FILE as NETWORK_DEFAULTS_FILE, BITCOIN_CHART_LOCATION as NETWORK_CHART_LOCATION +from .namespaces import NAMESPACES_FILE, DEFAULTS_FILE as NAMESPACES_DEFAULTS_FILE, BITCOIN_CHART_LOCATION as NAMESPACES_CHART_LOCATION + +HELM_COMMAND = "helm upgrade --install --create-namespace" + +def validate_directory(ctx, param, value): + directory = Path(value) + if not directory.is_dir(): + raise click.BadParameter(f"'{value}' is not a valid directory.") + if not (directory / NETWORK_FILE).exists() and not (directory / NAMESPACES_FILE).exists(): + raise click.BadParameter(f"'{value}' does not contain a valid network.yaml or namespaces.yaml file.") + return directory + +@click.command() +@click.argument('directory', type=click.Path(exists=True, file_okay=False, dir_okay=True), + callback=validate_directory) +def deploy(directory): + """Deploy a warnet with topology loaded from """ + directory = Path(directory) + + if (directory / NETWORK_FILE).exists(): + deploy_network(directory) + elif (directory / NAMESPACES_FILE).exists(): + deploy_namespaces(directory) + else: + click.echo("Error: Neither network.yaml nor namespaces.yaml found in the specified directory.") + +def deploy_network(directory: Path): + network_file_path = directory / NETWORK_FILE + defaults_file_path = directory / NETWORK_DEFAULTS_FILE + + with network_file_path.open() as f: + network_file = yaml.safe_load(f) + + namespace = get_default_namespace() + + for node in network_file["nodes"]: + click.echo(f"Deploying node: {node.get('name')}") + try: + temp_override_file_path = "" + node_name = node.get('name') + 
node_config_override = {k: v for k, v in node.items() if k != 'name'} + + cmd = f"{HELM_COMMAND} {node_name} {NETWORK_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}" + + if node_config_override: + with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as temp_file: + yaml.dump(node_config_override, temp_file) + temp_override_file_path = Path(temp_file.name) + cmd = f"{cmd} -f {temp_override_file_path}" + + if not stream_command(cmd): + click.echo(f"Failed to run Helm command: {cmd}") + return + except Exception as e: + click.echo(f"Error: {e}") + return + finally: + if temp_override_file_path: + Path(temp_override_file_path).unlink() + +def deploy_namespaces(directory: Path): + namespaces_file_path = directory / NAMESPACES_FILE + defaults_file_path = directory / NAMESPACES_DEFAULTS_FILE + + with namespaces_file_path.open() as f: + namespaces_file = yaml.safe_load(f) + + names = [n.get("name") for n in namespaces_file["namespaces"]] + for n in names: + if not n.startswith("warnet-"): + click.echo(f"Failed to create namespace: {n}. 
Namespaces must start with a 'warnet-' prefix.") + return + + for namespace in namespaces_file["namespaces"]: + click.echo(f"Deploying namespace: {namespace.get('name')}") + try: + temp_override_file_path = Path() + namespace_name = namespace.get('name') + namespace_config_override = {k: v for k, v in namespace.items() if k != 'name'} + + cmd = f"{HELM_COMMAND} {namespace_name} {NAMESPACES_CHART_LOCATION} -f {defaults_file_path}" + + if namespace_config_override: + with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as temp_file: + yaml.dump(namespace_config_override, temp_file) + temp_override_file_path = Path(temp_file.name) + cmd = f"{cmd} -f {temp_override_file_path}" + + if not stream_command(cmd): + click.echo(f"Failed to run Helm command: {cmd}") + return + except Exception as e: + click.echo(f"Error: {e}") + return + finally: + if temp_override_file_path.exists(): + temp_override_file_path.unlink() diff --git a/src/warnet/main.py b/src/warnet/main.py index 332d6a91b..a2705c838 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -10,10 +10,11 @@ from .admin import admin from .bitcoin import bitcoin +from .deploy import deploy as deploy_command from .graph import graph from .image import image -from .network import copy_network_defaults, network -from .scenarios import scenarios +from .status import status as status_command +from .control import stop, down, run QUICK_START_PATH = files("resources.scripts").joinpath("quick_start.sh") @@ -24,11 +25,14 @@ def cli(): cli.add_command(bitcoin) +cli.add_command(deploy_command) cli.add_command(graph) cli.add_command(image) -cli.add_command(network) -cli.add_command(scenarios) +cli.add_command(status_command) cli.add_command(admin) +cli.add_command(stop) +cli.add_command(down) +cli.add_command(run) @cli.command() diff --git a/src/warnet/status.py b/src/warnet/status.py new file mode 100644 index 000000000..d7ead8190 --- /dev/null +++ b/src/warnet/status.py @@ -0,0 +1,63 @@ +import click 
+from rich import print +from rich.console import Console +from rich.table import Table +from rich.panel import Panel +from rich.text import Text + +from .k8s import get_mission + +@click.command() +def status(): + """Display the unified status of the Warnet network and active scenarios""" + console = Console() + + tanks = _get_tank_status() + scenarios = _get_active_scenarios() + + # Create a unified table + table = Table(title="Warnet Status", show_header=True, header_style="bold magenta") + table.add_column("Component", style="cyan") + table.add_column("Name", style="green") + table.add_column("Status", style="yellow") + + # Add tanks to the table + for tank in tanks: + table.add_row("Tank", tank["name"], tank["status"]) + + # Add a separator if there are both tanks and scenarios + if tanks and scenarios: + table.add_row("", "", "") + + # Add scenarios to the table + if scenarios: + for scenario in scenarios: + table.add_row("Scenario", scenario["name"], scenario["status"]) + else: + table.add_row("Scenario", "No active scenarios", "") + + # Create a panel to wrap the table + panel = Panel( + table, + title="Warnet Overview", + expand=False, + border_style="blue", + padding=(1, 1), + ) + + # Print the panel + console.print(panel) + + # Print summary + summary = Text() + summary.append(f"\nTotal Tanks: {len(tanks)}", style="bold cyan") + summary.append(f" | Active Scenarios: {len(scenarios)}", style="bold green") + console.print(summary) + +def _get_tank_status(): + tanks = get_mission("tank") + return [{"name": tank.metadata.name, "status": tank.status.phase.lower()} for tank in tanks] + +def _get_active_scenarios(): + commanders = get_mission("commander") + return [{"name": c.metadata.name, "status": c.status.phase.lower()} for c in commanders] From 6125348536a4804b96a6ac601bfc1e18746e678e Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 22 Aug 2024 16:13:32 +0200 Subject: [PATCH 127/710] graph: generate graphs --- src/warnet/project.py | 92 
+++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 src/warnet/project.py diff --git a/src/warnet/project.py b/src/warnet/project.py new file mode 100644 index 000000000..4bba1208d --- /dev/null +++ b/src/warnet/project.py @@ -0,0 +1,92 @@ +import os +import random + +import click +import yaml + +from .util import DEFAULT_TAG + + +@click.group(name="project") +def project(): + """Manage a new warnet project""" + + +@project.command() +@click.option("--project_name", prompt="Enter the project name", type=str) +@click.option("--num_nodes", prompt="How many nodes?", type=int) +@click.option("--num_connections", prompt="How many connections should each node have?", type=int) +def new(project_name, num_nodes, num_connections): + """ + Create a new project with a graph + """ + + # Create project directory + os.makedirs(project_name, exist_ok=True) + + # Generate network.yaml + nodes = [] + + for i in range(num_nodes): + node = {"name": f"tank-{i:04d}", "connect": []} + + # Add round-robin connection + next_node = (i + 1) % num_nodes + node["connect"].append(f"tank-{next_node:04d}") + + # Add random connections + available_nodes = list(range(num_nodes)) + available_nodes.remove(i) + if next_node in available_nodes: + available_nodes.remove(next_node) + + for _ in range(min(num_connections - 1, len(available_nodes))): + random_node = random.choice(available_nodes) + node["connect"].append(f"tank-{random_node:04d}") + available_nodes.remove(random_node) + + nodes.append(node) + + # Add image tag to the first node + nodes[0]["image"] = {"tag": "v0.20.0"} + + network_yaml_data = {"nodes": nodes} + + with open(os.path.join(project_name, "network.yaml"), "w") as f: + yaml.dump(network_yaml_data, f, default_flow_style=False) + + # Generate defaults.yaml + defaults_yaml_content = """ +chain: regtest + +collectLogs: true +metricsExport: true + +resources: {} + # We usually recommend not to specify default resources and to leave this as a 
conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "27.0" + +config: | + dns=1 +""" + + with open(os.path.join(project_name, "defaults.yaml"), "w") as f: + f.write(defaults_yaml_content.strip()) + + click.echo( + f"Project '{project_name}' has been created with 'network.yaml' and 'defaults.yaml'." + ) From 7a202bda9c8515f700c35f1cb2aecbbc67b31f46 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 23 Aug 2024 12:33:30 +0200 Subject: [PATCH 128/710] update quickstart --- src/warnet/main.py | 54 +++++++++++++++++++++++++++++-------------- src/warnet/network.py | 47 +++++++++++++++++++++++++++++-------- 2 files changed, 74 insertions(+), 27 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index a2705c838..88b524ba6 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -10,11 +10,12 @@ from .admin import admin from .bitcoin import bitcoin +from .control import down, run, stop from .deploy import deploy as deploy_command from .graph import graph from .image import image +from .network import copy_network_defaults, copy_scenario_defaults from .status import status as status_command -from .control import stop, down, run QUICK_START_PATH = files("resources.scripts").joinpath("quick_start.sh") @@ -36,47 +37,66 @@ def cli(): @cli.command() -def setup(): - """Check Warnet requirements are installed""" +def quickstart(): + """Setup warnet""" try: process = subprocess.Popen( ["/bin/bash", str(QUICK_START_PATH)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, - # 
This preserves colours from grant's lovely script! env=dict(os.environ, TERM="xterm-256color"), ) - for line in iter(process.stdout.readline, ""): - print(line, end="", flush=True) - + click.echo(line, nl=False) process.stdout.close() return_code = process.wait() - if return_code != 0: - print(f"Quick start script failed with return code {return_code}") + click.echo(f"Quick start script failed with return code {return_code}") + click.echo("Install missing requirements before proceeding") return False + + create_project = click.confirm("Do you want to create a new project?", default=True) + + if create_project: + default_path = os.path.abspath(os.getcwd()) + project_path = click.prompt( + "Enter the project directory path", + default=default_path, + type=click.Path(file_okay=False, dir_okay=True, resolve_path=True), + ) + + _create(project_path) + + click.echo("Setup completed successfully!") return True except Exception as e: - print(f"An error occurred while running the quick start script: {e}") + print(f"An error occurred while running the quick start script:\n\n{e}\n\n") + print("Please report this to https://github.com/bitcoin-dev-project/warnet/issues") return False @cli.command() -@click.argument("directory", type=Path) +@click.argument("directory", type=click.Path(file_okay=False, dir_okay=True, resolve_path=True)) def create(directory: Path): """Create a new warnet project in the specified directory""" - full_path = Path() - full_path = directory if directory.is_absolute() else directory.resolve() - if os.path.exists(directory): + _create(directory) + + +def _create(directory: Path): + full_path = Path(directory) + if full_path.exists(): richprint(f"[red]Error: Directory {full_path} already exists[/red]") return - copy_network_defaults(full_path) - richprint(f"[green]Copied network example files to {full_path / 'networks'}[/green]") - richprint(f"[green]Created warnet project structure in {full_path}[/green]") + try: + 
copy_network_defaults(full_path) + copy_scenario_defaults(full_path) + richprint(f"[green]Copied network example files to {full_path / 'networks'}[/green]") + richprint(f"[green]Created warnet project structure in {full_path}[/green]") + except Exception as e: + richprint(f"[red]Error creating project: {e}[/red]") @cli.command() diff --git a/src/warnet/network.py b/src/warnet/network.py index d160587e3..307ecffaa 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -12,9 +12,13 @@ from .k8s import delete_namespace, get_default_namespace, get_mission, get_pods from .process import stream_command -WAR_MANIFESTS = files("resources.manifests") -WARNET_NETWORK_DIR = files("resources.networks") -NETWORK_DIR = Path("resources.networks") +WAR_MANIFESTS_FILES = files("resources.manifests") +WAR_NETWORK_FILES = files("resources.networks") +WAR_SCENARIOS_FILES = files("resources.scenarios") + +WAR_NETWORK_DIR = WAR_NETWORK_FILES.name +WAR_SCENARIOS_DIR = WAR_SCENARIOS_FILES.name + DEFAULT_NETWORK = Path("6_node_bitcoin") NETWORK_FILE = "network.yaml" DEFAULTS_FILE = "node-defaults.yaml" @@ -51,10 +55,10 @@ def setup_logging_helm() -> bool: "helm repo add grafana https://grafana.github.io/helm-charts", "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", "helm repo update", - f"helm upgrade --install --namespace warnet-logging --create-namespace --values {WAR_MANIFESTS}/loki_values.yaml loki grafana/loki --version 5.47.2", + f"helm upgrade --install --namespace warnet-logging --create-namespace --values {WAR_MANIFESTS_FILES}/loki_values.yaml loki grafana/loki --version 5.47.2", "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", - f"helm upgrade --install --namespace warnet-logging loki-grafana 
grafana/grafana --values {WAR_MANIFESTS}/grafana_values.yaml", + f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {WAR_MANIFESTS_FILES}/grafana_values.yaml", ] for command in helm_commands: @@ -66,11 +70,34 @@ def setup_logging_helm() -> bool: def copy_network_defaults(directory: Path): """Create the project structure for a warnet project""" - (directory / NETWORK_DIR / DEFAULT_NETWORK).mkdir(parents=True, exist_ok=True) - target_network_defaults = directory / NETWORK_DIR / DEFAULT_NETWORK / DEFAULTS_FILE - target_network_example = directory / NETWORK_DIR / DEFAULT_NETWORK / NETWORK_FILE - shutil.copy2(WARNET_NETWORK_DIR / DEFAULT_NETWORK / DEFAULTS_FILE, target_network_defaults) - shutil.copy2(WARNET_NETWORK_DIR / DEFAULT_NETWORK / NETWORK_FILE, target_network_example) + (directory / WAR_NETWORK_DIR / DEFAULT_NETWORK).mkdir(parents=True, exist_ok=True) + target_network_defaults = directory / WAR_NETWORK_DIR / DEFAULT_NETWORK / DEFAULTS_FILE + target_network_example = directory / WAR_NETWORK_DIR / DEFAULT_NETWORK / NETWORK_FILE + shutil.copy2(WAR_NETWORK_FILES / DEFAULT_NETWORK / DEFAULTS_FILE, target_network_defaults) + shutil.copy2(WAR_NETWORK_FILES / DEFAULT_NETWORK / NETWORK_FILE, target_network_example) + + +def copy_scenario_defaults(directory: Path): + """Create the project structure for a warnet project""" + target_dir = directory / WAR_SCENARIOS_DIR + target_dir.mkdir(parents=True, exist_ok=True) + print(f"Creating scenarios directory: {target_dir}") + + scenarios_path = WAR_SCENARIOS_FILES.joinpath() + + def should_copy(item: Path) -> bool: + return item.name not in ["__init__.py", "__pycache__", "commander.py"] + + for item in scenarios_path.iterdir(): + if should_copy(item): + if item.is_file(): + shutil.copy2(item, target_dir) + print(f"Copied file: {item.name}") + elif item.is_dir(): + shutil.copytree(item, target_dir / item.name, dirs_exist_ok=True) + print(f"Copied directory: {item.name}") + + 
print(f"Finished copying scenario files to {target_dir}") @network.command() From bac0546c96375c3ccfc56701d6750a4368e5e609 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 23 Aug 2024 13:24:25 +0200 Subject: [PATCH 129/710] fix tests --- src/warnet/control.py | 58 +++++++++++++++++++------------- src/warnet/deploy.py | 67 ++++++++++++++++++++++++++++--------- src/warnet/project.py | 2 -- src/warnet/status.py | 6 ++-- test/dag_connection_test.py | 4 +-- test/logging_test.py | 9 +++-- test/rpc_test.py | 2 +- test/scenarios_test.py | 24 ++----------- test/test_base.py | 6 ++-- 9 files changed, 105 insertions(+), 73 deletions(-) diff --git a/src/warnet/control.py b/src/warnet/control.py index 45af40819..720ef008c 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -1,32 +1,39 @@ -import click -from rich import print -from rich.console import Console -from rich.table import Table import json import os import tempfile import time + +import click import yaml +from rich import print +from rich.console import Console +from rich.prompt import Confirm, Prompt +from rich.table import Table -from warnet import scenarios as SCENARIOS -from .k8s import get_mission, delete_namespace, get_default_namespace, apply_kubernetes_yaml, get_pods +from .k8s import ( + apply_kubernetes_yaml, + delete_namespace, + get_default_namespace, + get_mission, + get_pods, +) from .process import run_command, stream_command -from rich.prompt import Prompt, Confirm - console = Console() + def get_active_scenarios(): """Get list of active scenarios""" commanders = get_mission("commander") return [c.metadata.name for c in commanders] + @click.command() -@click.argument('scenario_name', required=False) +@click.argument("scenario_name", required=False) def stop(scenario_name): """Stop a running scenario or all scenarios""" active_scenarios = get_active_scenarios() - + if not active_scenarios: console.print("[bold red]No active scenarios found.[/bold red]") return @@ -40,32 +47,33 @@ def 
stop(scenario_name): table.add_row(str(idx), name) console.print(table) - - choices = [str(i) for i in range(1, len(active_scenarios) + 1)] + ['a', 'q'] + + choices = [str(i) for i in range(1, len(active_scenarios) + 1)] + ["a", "q"] choice = Prompt.ask( "[bold yellow]Enter the number of the scenario to stop, 'a' to stop all, or 'q' to quit[/bold yellow]", choices=choices, - show_choices=False + show_choices=False, ) - - if choice == 'q': + + if choice == "q": console.print("[bold blue]Operation cancelled.[/bold blue]") return - elif choice == 'a': + elif choice == "a": if Confirm.ask("[bold red]Are you sure you want to stop all scenarios?[/bold red]"): stop_all_scenarios(active_scenarios) else: console.print("[bold blue]Operation cancelled.[/bold blue]") return - + scenario_name = active_scenarios[int(choice) - 1] - + if scenario_name not in active_scenarios: console.print(f"[bold red]No active scenario found with name: {scenario_name}[/bold red]") return stop_scenario(scenario_name) + def stop_scenario(scenario_name): """Stop a single scenario""" cmd = f"kubectl delete pod {scenario_name}" @@ -74,6 +82,7 @@ def stop_scenario(scenario_name): else: console.print(f"[bold red]Failed to stop scenario: {scenario_name}[/bold red]") + def stop_all_scenarios(scenarios): """Stop all active scenarios""" with console.status("[bold yellow]Stopping all scenarios...[/bold yellow]"): @@ -81,6 +90,7 @@ def stop_all_scenarios(scenarios): stop_scenario(scenario) console.print("[bold green]All scenarios have been stopped.[/bold green]") + def list_active_scenarios(): """List all active scenarios""" commanders = get_mission("commander") @@ -132,21 +142,24 @@ def down(): console.print("[bold green]Warnet has been brought down.[/bold green]") + def get_active_network(namespace): """Get the name of the active network (Helm release) in the given namespace""" cmd = f"helm list --namespace {namespace} --output json" result = run_command(cmd) if result: import json + releases = 
json.loads(result) if releases: # Assuming the first release is the active network - return releases[0]['name'] + return releases[0]["name"] return None -@click.command() -@click.argument('scenario_file', type=click.Path(exists=True, file_okay=True, dir_okay=False)) -@click.argument('additional_args', nargs=-1, type=click.UNPROCESSED) + +@click.command(context_settings={"ignore_unknown_options": True}) +@click.argument("scenario_file", type=click.Path(exists=True, file_okay=True, dir_okay=False)) +@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) def run(scenario_file: str, additional_args: tuple[str]): """Run a scenario from a file""" scenario_path = os.path.abspath(scenario_file) @@ -236,4 +249,3 @@ def run(scenario_file: str, additional_args: tuple[str]): print(f"Failed to start scenario: {scenario_name}") os.unlink(temp_file_path) - diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 1c7e5dd7c..0c1ccb611 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -1,27 +1,52 @@ -import click +import tempfile from pathlib import Path + +import click import yaml -import tempfile -from .process import stream_command + from .k8s import get_default_namespace +from .namespaces import ( + BITCOIN_CHART_LOCATION as NAMESPACES_CHART_LOCATION, +) +from .namespaces import ( + DEFAULTS_FILE as NAMESPACES_DEFAULTS_FILE, +) +from .namespaces import ( + NAMESPACES_FILE, +) +from .network import ( + BITCOIN_CHART_LOCATION as NETWORK_CHART_LOCATION, +) +from .network import ( + DEFAULTS_FILE as NETWORK_DEFAULTS_FILE, +) # Import necessary functions and variables from network.py and namespaces.py -from .network import NETWORK_FILE, DEFAULTS_FILE as NETWORK_DEFAULTS_FILE, BITCOIN_CHART_LOCATION as NETWORK_CHART_LOCATION -from .namespaces import NAMESPACES_FILE, DEFAULTS_FILE as NAMESPACES_DEFAULTS_FILE, BITCOIN_CHART_LOCATION as NAMESPACES_CHART_LOCATION +from .network import ( + NETWORK_FILE, +) +from .process import stream_command HELM_COMMAND 
= "helm upgrade --install --create-namespace" + def validate_directory(ctx, param, value): directory = Path(value) if not directory.is_dir(): raise click.BadParameter(f"'{value}' is not a valid directory.") if not (directory / NETWORK_FILE).exists() and not (directory / NAMESPACES_FILE).exists(): - raise click.BadParameter(f"'{value}' does not contain a valid network.yaml or namespaces.yaml file.") + raise click.BadParameter( + f"'{value}' does not contain a valid network.yaml or namespaces.yaml file." + ) return directory + @click.command() -@click.argument('directory', type=click.Path(exists=True, file_okay=False, dir_okay=True), - callback=validate_directory) +@click.argument( + "directory", + type=click.Path(exists=True, file_okay=False, dir_okay=True), + callback=validate_directory, +) def deploy(directory): """Deploy a warnet with topology loaded from """ directory = Path(directory) @@ -31,7 +56,10 @@ def deploy(directory): elif (directory / NAMESPACES_FILE).exists(): deploy_namespaces(directory) else: - click.echo("Error: Neither network.yaml nor namespaces.yaml found in the specified directory.") + click.echo( + "Error: Neither network.yaml nor namespaces.yaml found in the specified directory." 
+ ) + def deploy_network(directory: Path): network_file_path = directory / NETWORK_FILE @@ -46,13 +74,15 @@ def deploy_network(directory: Path): click.echo(f"Deploying node: {node.get('name')}") try: temp_override_file_path = "" - node_name = node.get('name') - node_config_override = {k: v for k, v in node.items() if k != 'name'} + node_name = node.get("name") + node_config_override = {k: v for k, v in node.items() if k != "name"} cmd = f"{HELM_COMMAND} {node_name} {NETWORK_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}" if node_config_override: - with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as temp_file: + with tempfile.NamedTemporaryFile( + mode="w", suffix=".yaml", delete=False + ) as temp_file: yaml.dump(node_config_override, temp_file) temp_override_file_path = Path(temp_file.name) cmd = f"{cmd} -f {temp_override_file_path}" @@ -67,6 +97,7 @@ def deploy_network(directory: Path): if temp_override_file_path: Path(temp_override_file_path).unlink() + def deploy_namespaces(directory: Path): namespaces_file_path = directory / NAMESPACES_FILE defaults_file_path = directory / NAMESPACES_DEFAULTS_FILE @@ -77,20 +108,24 @@ def deploy_namespaces(directory: Path): names = [n.get("name") for n in namespaces_file["namespaces"]] for n in names: if not n.startswith("warnet-"): - click.echo(f"Failed to create namespace: {n}. Namespaces must start with a 'warnet-' prefix.") + click.echo( + f"Failed to create namespace: {n}. Namespaces must start with a 'warnet-' prefix." 
+ ) return for namespace in namespaces_file["namespaces"]: click.echo(f"Deploying namespace: {namespace.get('name')}") try: temp_override_file_path = Path() - namespace_name = namespace.get('name') - namespace_config_override = {k: v for k, v in namespace.items() if k != 'name'} + namespace_name = namespace.get("name") + namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} cmd = f"{HELM_COMMAND} {namespace_name} {NAMESPACES_CHART_LOCATION} -f {defaults_file_path}" if namespace_config_override: - with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as temp_file: + with tempfile.NamedTemporaryFile( + mode="w", suffix=".yaml", delete=False + ) as temp_file: yaml.dump(namespace_config_override, temp_file) temp_override_file_path = Path(temp_file.name) cmd = f"{cmd} -f {temp_override_file_path}" diff --git a/src/warnet/project.py b/src/warnet/project.py index 4bba1208d..004924cd3 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -4,8 +4,6 @@ import click import yaml -from .util import DEFAULT_TAG - @click.group(name="project") def project(): diff --git a/src/warnet/status.py b/src/warnet/status.py index d7ead8190..8aa5c95b0 100644 --- a/src/warnet/status.py +++ b/src/warnet/status.py @@ -1,12 +1,12 @@ import click -from rich import print from rich.console import Console -from rich.table import Table from rich.panel import Panel +from rich.table import Table from rich.text import Text from .k8s import get_mission + @click.command() def status(): """Display the unified status of the Warnet network and active scenarios""" @@ -54,10 +54,12 @@ def status(): summary.append(f" | Active Scenarios: {len(scenarios)}", style="bold green") console.print(summary) + def _get_tank_status(): tanks = get_mission("tank") return [{"name": tank.metadata.name, "status": tank.status.phase.lower()} for tank in tanks] + def _get_active_scenarios(): commanders = get_mission("commander") return [{"name": c.metadata.name, "status": 
c.status.phase.lower()} for c in commanders] diff --git a/test/dag_connection_test.py b/test/dag_connection_test.py index 4982de0ca..1827b5e7a 100755 --- a/test/dag_connection_test.py +++ b/test/dag_connection_test.py @@ -20,13 +20,13 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network deploy {self.network_dir}")) + self.log.info(self.warcli(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() def run_connect_dag_scenario(self): self.log.info("Running connect_dag scenario") - self.warcli("scenarios run-file test/data/scenario_connect_dag.py") + self.warcli("run test/data/scenario_connect_dag.py") self.wait_for_all_scenarios() diff --git a/test/logging_test.py b/test/logging_test.py index c19fc888f..b9604222d 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -54,7 +54,7 @@ def start_logging(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network deploy {self.network_dir}")) + self.log.info(self.warcli(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running", timeout=10 * 60) self.wait_for_all_edges() @@ -73,8 +73,11 @@ def make_grafana_api_request(self, ds_uid, start, metric): def test_prometheus_and_grafana(self): self.log.info("Starting network activity scenarios") - self.warcli("scenarios run miner_std --allnodes --interval=5 --mature") - self.warcli("scenarios run tx_flood --interval=1") + + miner_file = "resources/scenarios/miner_std.py" + tx_flood_file = "resources/scenarios/tx_flood.py" + self.warcli(f"run {miner_file} --allnodes --interval=5 --mature") + self.warcli(f"run {tx_flood_file} --interval=1") prometheus_ds = requests.get("http://localhost:3000/api/datasources/name/Prometheus") assert prometheus_ds.status_code == 200 diff --git a/test/rpc_test.py b/test/rpc_test.py index 5700b6647..2a2c4d7fb 100755 --- a/test/rpc_test.py 
+++ b/test/rpc_test.py @@ -24,7 +24,7 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network deploy {self.network_dir}")) + self.log.info(self.warcli(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() diff --git a/test/scenarios_test.py b/test/scenarios_test.py index a56714ecd..bebf4b3da 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -8,7 +8,6 @@ from warnet.k8s import delete_pod from warnet.process import run_command from warnet.scenarios import _active as scenarios_active -from warnet.scenarios import _available as scenarios_available class ScenariosTest(TestBase): @@ -25,23 +24,14 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network deploy {self.network_dir}")) + self.log.info(self.warcli(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() def test_scenarios(self): - self.check_available_scenarios() - self.run_and_check_miner_scenario() self.run_and_check_miner_scenario_from_file() self.run_and_check_scenario_from_file() - def check_available_scenarios(self): - self.log.info("Checking available scenarios") - # Use rpc instead of warcli so we get raw JSON object - scenarios = scenarios_available() - assert len(scenarios) == 4, f"Expected 4 available scenarios, got {len(scenarios)}" - self.log.info(f"Found {len(scenarios)} available scenarios") - def scenario_running(self, scenario_name: str): """Check that we are only running a single scenario of the correct name""" active = scenarios_active() @@ -57,21 +47,13 @@ def check_scenario_clean_exit(): return active[0]["status"] == "succeeded" self.log.info(f"Running scenario from: {scenario_file}") - self.warcli(f"scenarios run-file {scenario_file}") + self.warcli(f"run {scenario_file}") self.wait_for_predicate(lambda: check_scenario_clean_exit()) - def 
run_and_check_miner_scenario(self): - sc = "miner_std" - self.log.info(f"Running scenario {sc}") - self.warcli(f"scenarios run {sc} --allnodes --interval=1") - self.wait_for_predicate(lambda: self.scenario_running("commander-minerstd")) - self.wait_for_predicate(lambda: self.check_blocks(30)) - self.stop_scenario() - def run_and_check_miner_scenario_from_file(self): scenario_file = "resources/scenarios/miner_std.py" self.log.info(f"Running scenario from file: {scenario_file}") - self.warcli(f"scenarios run-file {scenario_file} --allnodes --interval=1") + self.warcli(f"run {scenario_file} --allnodes --interval=1") start = int(self.warcli("bitcoin rpc tank-0000 getblockcount")) self.wait_for_predicate(lambda: self.scenario_running("commander-minerstd")) self.wait_for_predicate(lambda: self.check_blocks(2, start=start)) diff --git a/test/test_base.py b/test/test_base.py index 4753cf2d7..15849eac5 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -11,9 +11,9 @@ from time import sleep from warnet import SRC_DIR +from warnet.control import get_active_scenarios from warnet.network import _connected as network_connected from warnet.network import _status as network_status -from warnet.scenarios import _active as scenarios_active class TestBase: @@ -45,7 +45,7 @@ def cleanup(self, signum=None, frame=None): try: self.log.info("Stopping network") if self.network: - self.warcli("network down") + self.warcli("down") self.wait_for_all_tanks_status(target="stopped", timeout=60, interval=1) except Exception as e: self.log.error(f"Error bringing network down: {e}") @@ -129,7 +129,7 @@ def wait_for_all_edges(self, timeout=20 * 60, interval=5): def wait_for_all_scenarios(self): def check_scenarios(): - scns = scenarios_active() + scns = get_active_scenarios() if len(scns) == 0: return True return all(s["status"] == "succeeded" for s in scns) From a6ac73fe2ca1deea9778fc26aa61165e0ecb493c Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 23 Aug 2024 13:44:08 +0200 Subject: 
[PATCH 130/710] add pod exit status helper and use in test_base --- src/warnet/k8s.py | 13 +++++++++++++ test/test_base.py | 8 +++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/warnet/k8s.py b/src/warnet/k8s.py index 22adb96e0..e963d7c93 100644 --- a/src/warnet/k8s.py +++ b/src/warnet/k8s.py @@ -36,6 +36,19 @@ def get_mission(mission: str) -> list[V1PodList]: return crew +def get_pod_exit_status(pod_name): + try: + sclient = get_static_client() + pod = sclient.read_namespaced_pod(name=pod_name, namespace=get_default_namespace()) + for container_status in pod.status.container_statuses: + if container_status.state.terminated: + return container_status.state.terminated.exit_code + return None + except client.ApiException as e: + print(f"Exception when calling CoreV1Api->read_namespaced_pod: {e}") + return None + + def get_edges() -> any: sclient = get_static_client() configmap = sclient.read_namespaced_config_map(name="edges", namespace="warnet") diff --git a/test/test_base.py b/test/test_base.py index 15849eac5..80d384d4e 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -12,6 +12,7 @@ from warnet import SRC_DIR from warnet.control import get_active_scenarios +from warnet.k8s import get_pod_exit_status from warnet.network import _connected as network_connected from warnet.network import _status as network_status @@ -132,7 +133,12 @@ def check_scenarios(): scns = get_active_scenarios() if len(scns) == 0: return True - return all(s["status"] == "succeeded" for s in scns) + for s in scns: + exit_status = get_pod_exit_status(s) + self.log.debug(f"Scenario {s} exited with code {exit_status}") + if exit_status != 0: + return False + return True self.wait_for_predicate(check_scenarios) From d042f3a2f8f036f53b96469efd402e1c8a778b56 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 23 Aug 2024 14:26:46 +0200 Subject: [PATCH 131/710] fix init and refactor --- src/warnet/main.py | 47 ++++++++++++++++++++-----------------
src/warnet/network.py | 35 +++++++++++++++----------------- 2 files changed, 38 insertions(+), 44 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 88b524ba6..69be14649 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -77,40 +77,37 @@ def quickstart(): return False -@cli.command() -@click.argument("directory", type=click.Path(file_okay=False, dir_okay=True, resolve_path=True)) -def create(directory: Path): - """Create a new warnet project in the specified directory""" - _create(directory) - - -def _create(directory: Path): - full_path = Path(directory) - if full_path.exists(): - richprint(f"[red]Error: Directory {full_path} already exists[/red]") - return +def create_warnet_project(directory: Path, check_empty: bool = False): + """Common function to create a warnet project""" + if check_empty and any(directory.iterdir()): + richprint("[yellow]Warning: Directory is not empty[/yellow]") + if not click.confirm("Do you want to continue?", default=True): + return try: - copy_network_defaults(full_path) - copy_scenario_defaults(full_path) - richprint(f"[green]Copied network example files to {full_path / 'networks'}[/green]") - richprint(f"[green]Created warnet project structure in {full_path}[/green]") + copy_network_defaults(directory) + copy_scenario_defaults(directory) + richprint(f"[green]Copied network example files to {directory / 'networks'}[/green]") + richprint(f"[green]Created warnet project structure in {directory}[/green]") except Exception as e: richprint(f"[red]Error creating project: {e}[/red]") +@cli.command() +@click.argument( + "directory", type=click.Path(file_okay=False, dir_okay=True, resolve_path=True, path_type=Path) +) +def create(directory: Path): + """Create a new warnet project in the specified directory""" + if directory.exists(): + richprint(f"[red]Error: Directory {directory} already exists[/red]") + return + create_warnet_project(directory) @cli.command() def init(): """Initialize a warnet project in the 
current directory""" - current_dir = os.getcwd() - if os.listdir(current_dir): - richprint("[yellow]Warning: Current directory is not empty[/yellow]") - if not click.confirm("Do you want to continue?", default=True): - return - - copy_network_defaults(current_dir) - richprint(f"[green]Copied network example files to {Path(current_dir) / 'networks'}[/green]") - richprint(f"[green]Created warnet project structure in {current_dir}[/green]") + current_dir = Path.cwd() + create_warnet_project(current_dir, check_empty=True) @cli.command() diff --git a/src/warnet/network.py b/src/warnet/network.py index 307ecffaa..0e3d5c692 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -68,27 +68,16 @@ def setup_logging_helm() -> bool: return True -def copy_network_defaults(directory: Path): - """Create the project structure for a warnet project""" - (directory / WAR_NETWORK_DIR / DEFAULT_NETWORK).mkdir(parents=True, exist_ok=True) - target_network_defaults = directory / WAR_NETWORK_DIR / DEFAULT_NETWORK / DEFAULTS_FILE - target_network_example = directory / WAR_NETWORK_DIR / DEFAULT_NETWORK / NETWORK_FILE - shutil.copy2(WAR_NETWORK_FILES / DEFAULT_NETWORK / DEFAULTS_FILE, target_network_defaults) - shutil.copy2(WAR_NETWORK_FILES / DEFAULT_NETWORK / NETWORK_FILE, target_network_example) - - -def copy_scenario_defaults(directory: Path): - """Create the project structure for a warnet project""" - target_dir = directory / WAR_SCENARIOS_DIR +def copy_defaults(directory: Path, target_subdir: str, source_path: Path, exclude_list: list[str]): + """Generic function to copy default files and directories""" + target_dir = directory / target_subdir target_dir.mkdir(parents=True, exist_ok=True) - print(f"Creating scenarios directory: {target_dir}") - - scenarios_path = WAR_SCENARIOS_FILES.joinpath() + print(f"Creating directory: {target_dir}") def should_copy(item: Path) -> bool: - return item.name not in ["__init__.py", "__pycache__", "commander.py"] + return item.name not in 
exclude_list - for item in scenarios_path.iterdir(): + for item in source_path.iterdir(): if should_copy(item): if item.is_file(): shutil.copy2(item, target_dir) @@ -97,7 +86,15 @@ def should_copy(item: Path) -> bool: shutil.copytree(item, target_dir / item.name, dirs_exist_ok=True) print(f"Copied directory: {item.name}") - print(f"Finished copying scenario files to {target_dir}") + print(f"Finished copying files to {target_dir}") + +def copy_network_defaults(directory: Path): + """Create the project structure for a warnet project's network""" + copy_defaults(directory, WAR_NETWORK_DIR, WAR_NETWORK_FILES.joinpath(), []) + +def copy_scenario_defaults(directory: Path): + """Create the project structure for a warnet project's scenarios""" + copy_defaults(directory, WAR_SCENARIOS_DIR, WAR_SCENARIOS_FILES.joinpath(), ["__init__.py", "__pycache__", "commander.py"]) @network.command() @@ -179,7 +176,7 @@ def _connected(): for peer in peerinfo: if peer["connection_type"] == "manual": manuals += 1 - # Even if more edges are specifed, bitcoind only allows + # Even if more edges are specified, bitcoind only allows # 8 manual outbound connections print("manual " + str(manuals)) From 992d0a00a6fe80864edef9e07a8151e1c8fb3c96 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 23 Aug 2024 14:33:47 +0200 Subject: [PATCH 132/710] fixup quickstart --- src/warnet/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 69be14649..a1127f24f 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -66,7 +66,7 @@ def quickstart(): type=click.Path(file_okay=False, dir_okay=True, resolve_path=True), ) - _create(project_path) + create_warnet_project(Path(project_path)) click.echo("Setup completed successfully!") return True From 6850b4741b4b6059439f997bfa5b4517e897033d Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 23 Aug 2024 16:11:33 +0200 Subject: [PATCH 133/710] update quickstart --- src/warnet/main.py | 110 
++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 100 insertions(+), 10 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index a1127f24f..7f182ac2f 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -1,4 +1,5 @@ import os +import random import subprocess import sys from importlib.resources import files @@ -16,6 +17,7 @@ from .image import image from .network import copy_network_defaults, copy_scenario_defaults from .status import status as status_command +from .util import SUPPORTED_TAGS QUICK_START_PATH = files("resources.scripts").joinpath("quick_start.sh") @@ -57,19 +59,36 @@ def quickstart(): return False create_project = click.confirm("Do you want to create a new project?", default=True) + if not create_project: + click.echo("Setup completed successfully!") + return True + + default_path = os.path.abspath(os.getcwd()) + project_path = click.prompt( + "Enter the project directory path", + default=default_path, + type=click.Path(file_okay=False, dir_okay=True, resolve_path=True), + ) - if create_project: - default_path = os.path.abspath(os.getcwd()) - project_path = click.prompt( - "Enter the project directory path", - default=default_path, - type=click.Path(file_okay=False, dir_okay=True, resolve_path=True), - ) - + custom_network = click.confirm("Do you want to create a custom network?", default=True) + if not custom_network: create_warnet_project(Path(project_path)) + click.echo("Setup completed successfully!") + return True + + network_name = click.prompt( + "Enter the network name", + type=str, + ) + nodes = click.prompt("How many nodes would you like?", type=int) + connections = click.prompt("How many connects would you like each node to have?", type=int) + version = click.prompt( + "Which version would you like nodes to have by default?", + type=click.Choice(SUPPORTED_TAGS, case_sensitive=False), + ) - click.echo("Setup completed successfully!") - return True + create_warnet_project(Path(project_path)) + 
custom_graph(nodes, connections, version, Path(project_path) / "networks" / network_name) except Exception as e: print(f"An error occurred while running the quick start script:\n\n{e}\n\n") @@ -91,6 +110,8 @@ def create_warnet_project(directory: Path, check_empty: bool = False): richprint(f"[green]Created warnet project structure in {directory}[/green]") except Exception as e: richprint(f"[red]Error creating project: {e}[/red]") + raise e + @cli.command() @click.argument( @@ -103,6 +124,7 @@ def create(directory: Path): return create_warnet_project(directory) + @cli.command() def init(): """Initialize a warnet project in the current directory""" @@ -149,3 +171,71 @@ def auth(kube_config: str) -> None: if __name__ == "__main__": cli() + + +def custom_graph(num_nodes: int, num_connections: int, version: str, datadir: Path): + datadir.mkdir(parents=False, exist_ok=False) + # Generate network.yaml + nodes = [] + + for i in range(num_nodes): + node = {"name": f"tank-{i:04d}", "connect": []} + + # Add round-robin connection + next_node = (i + 1) % num_nodes + node["connect"].append(f"tank-{next_node:04d}") + + # Add random connections + available_nodes = list(range(num_nodes)) + available_nodes.remove(i) + if next_node in available_nodes: + available_nodes.remove(next_node) + + for _ in range(min(num_connections - 1, len(available_nodes))): + random_node = random.choice(available_nodes) + node["connect"].append(f"tank-{random_node:04d}") + available_nodes.remove(random_node) + + nodes.append(node) + + # Add image tag to the first node + nodes[0]["image"] = {"tag": "v0.20.0"} + + network_yaml_data = {"nodes": nodes} + + with open(os.path.join(datadir, "network.yaml"), "w") as f: + yaml.dump(network_yaml_data, f, default_flow_style=False) + + # Generate defaults.yaml + defaults_yaml_content = """ +chain: regtest + +collectLogs: true +metricsExport: true + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice 
for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "27.0" + +config: | + dns=1 +""" + + with open(os.path.join(datadir, "defaults.yaml"), "w") as f: + f.write(defaults_yaml_content.strip()) + + click.echo(f"Project '{datadir}' has been created with 'network.yaml' and 'defaults.yaml'.") From 939687def61abcb8282267e8ecb59c0a6efb76e0 Mon Sep 17 00:00:00 2001 From: Grant Date: Fri, 23 Aug 2024 10:35:38 -0500 Subject: [PATCH 134/710] update auth command --- src/warnet/main.py | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 7f182ac2f..6b7b52f05 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -136,7 +136,7 @@ def init(): @click.argument("kube_config", type=str) def auth(kube_config: str) -> None: """ - Authorize access to a warnet cluster using a kube config file + Authenticate with a warnet cluster using a kube config file """ try: current_kubeconfig = os.environ.get("KUBECONFIG", os.path.expanduser("~/.kube/config")) @@ -144,29 +144,50 @@ def auth(kube_config: str) -> None: f"{current_kubeconfig}:{kube_config}" if current_kubeconfig else kube_config ) os.environ["KUBECONFIG"] = combined_kubeconfig - command = "kubectl config view --flatten" - result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True) + with open(kube_config) as file: + content = yaml.safe_load(file) + for elem in content: + print(elem) + cluster = content["clusters"][0] + user = content["users"][0] + user_name = 
user["name"] + user_token = user["user"]["token"] + context = content["contexts"][0] + flatten_cmd = "kubectl config view --flatten" + result_flatten = subprocess.run(flatten_cmd, shell=True, check=True, capture_output=True, text=True) except subprocess.CalledProcessError as e: print("Error occurred while executing kubectl config view --flatten:") print(e.stderr) sys.exit(1) - if result.returncode == 0: + if result_flatten.returncode == 0: with open(current_kubeconfig, "w") as file: - file.write(result.stdout) + file.write(result_flatten.stdout) print(f"Authorization file written to: {current_kubeconfig}") else: print("Could not create authorization file") - print(result.stderr) - sys.exit(result.returncode) + print(result_flatten.stderr) + sys.exit(result_flatten.returncode) + + try: + update_cmd = f"kubectl config set-credentials {user_name} --token {user_token}" + result_update = subprocess.run(update_cmd, shell=True, check=True, capture_output=True, text=True) + if result_update.returncode != 0: + print("Could not update authorization file") + print(result_flatten.stderr) + sys.exit(result_flatten.returncode) + except subprocess.CalledProcessError as e: + print("Error occurred while executing kubectl config view --flatten:") + print(e.stderr) + sys.exit(1) with open(current_kubeconfig) as file: contents = yaml.safe_load(file) print("\nUse the following command to switch to a new user:") print(" kubectl config use-context [user]\n") print("Available users:") - for context in contents["contexts"]: - print(f" {context['name']}") + for c in contents["contexts"]: + print(f" {c['name']}") if __name__ == "__main__": From c3c9eb22032948b7ffe338042693148a23c8518b Mon Sep 17 00:00:00 2001 From: Grant Date: Fri, 23 Aug 2024 10:38:26 -0500 Subject: [PATCH 135/710] make ruff happy --- src/warnet/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 6b7b52f05..4d27992a8 100644 --- a/src/warnet/main.py +++ 
b/src/warnet/main.py @@ -148,11 +148,11 @@ def auth(kube_config: str) -> None: content = yaml.safe_load(file) for elem in content: print(elem) - cluster = content["clusters"][0] + content["clusters"][0] user = content["users"][0] user_name = user["name"] user_token = user["user"]["token"] - context = content["contexts"][0] + content["contexts"][0] flatten_cmd = "kubectl config view --flatten" result_flatten = subprocess.run(flatten_cmd, shell=True, check=True, capture_output=True, text=True) except subprocess.CalledProcessError as e: From a6fc4e14ae269345457bc840dc233f878735a3d0 Mon Sep 17 00:00:00 2001 From: Grant Date: Fri, 23 Aug 2024 10:39:32 -0500 Subject: [PATCH 136/710] make ruff happy --- src/warnet/main.py | 8 ++++++-- src/warnet/network.py | 9 ++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 4d27992a8..f1f1e0c8a 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -154,7 +154,9 @@ def auth(kube_config: str) -> None: user_token = user["user"]["token"] content["contexts"][0] flatten_cmd = "kubectl config view --flatten" - result_flatten = subprocess.run(flatten_cmd, shell=True, check=True, capture_output=True, text=True) + result_flatten = subprocess.run( + flatten_cmd, shell=True, check=True, capture_output=True, text=True + ) except subprocess.CalledProcessError as e: print("Error occurred while executing kubectl config view --flatten:") print(e.stderr) @@ -171,7 +173,9 @@ def auth(kube_config: str) -> None: try: update_cmd = f"kubectl config set-credentials {user_name} --token {user_token}" - result_update = subprocess.run(update_cmd, shell=True, check=True, capture_output=True, text=True) + result_update = subprocess.run( + update_cmd, shell=True, check=True, capture_output=True, text=True + ) if result_update.returncode != 0: print("Could not update authorization file") print(result_flatten.stderr) diff --git a/src/warnet/network.py b/src/warnet/network.py index 
0e3d5c692..f3735c6de 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -88,13 +88,20 @@ def should_copy(item: Path) -> bool: print(f"Finished copying files to {target_dir}") + def copy_network_defaults(directory: Path): """Create the project structure for a warnet project's network""" copy_defaults(directory, WAR_NETWORK_DIR, WAR_NETWORK_FILES.joinpath(), []) + def copy_scenario_defaults(directory: Path): """Create the project structure for a warnet project's scenarios""" - copy_defaults(directory, WAR_SCENARIOS_DIR, WAR_SCENARIOS_FILES.joinpath(), ["__init__.py", "__pycache__", "commander.py"]) + copy_defaults( + directory, + WAR_SCENARIOS_DIR, + WAR_SCENARIOS_FILES.joinpath(), + ["__init__.py", "__pycache__", "commander.py"], + ) @network.command() From 3acb613ba8182818159d3f0b9d72259325574d17 Mon Sep 17 00:00:00 2001 From: Grant Date: Fri, 23 Aug 2024 11:13:18 -0500 Subject: [PATCH 137/710] auth: auto-switch current-context and clean up errant items --- src/warnet/main.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index f1f1e0c8a..18bb084e4 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -146,13 +146,10 @@ def auth(kube_config: str) -> None: os.environ["KUBECONFIG"] = combined_kubeconfig with open(kube_config) as file: content = yaml.safe_load(file) - for elem in content: - print(elem) - content["clusters"][0] user = content["users"][0] user_name = user["name"] user_token = user["user"]["token"] - content["contexts"][0] + current_context = content["current-context"] flatten_cmd = "kubectl config view --flatten" result_flatten = subprocess.run( flatten_cmd, shell=True, check=True, capture_output=True, text=True @@ -187,11 +184,14 @@ def auth(kube_config: str) -> None: with open(current_kubeconfig) as file: contents = yaml.safe_load(file) - print("\nUse the following command to switch to a new user:") - print(" kubectl config use-context [user]\n") - 
print("Available users:") - for c in contents["contexts"]: - print(f" {c['name']}") + + with open(current_kubeconfig, "w") as file: + contents["current-context"] = current_context + yaml.safe_dump(contents, file) + + with open(current_kubeconfig) as file: + contents = yaml.safe_load(file) + print(f"\nWarcli's current context is now set to: {contents['current-context']}") if __name__ == "__main__": From ca5b046e7c665fdcbbe1a005206ce37c8fe61015 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 14:56:22 +0100 Subject: [PATCH 138/710] remove hardcoded test from quickstart --- src/warnet/main.py | 47 ++++++---------------- src/warnet/project.py | 90 ------------------------------------------- 2 files changed, 11 insertions(+), 126 deletions(-) delete mode 100644 src/warnet/project.py diff --git a/src/warnet/main.py b/src/warnet/main.py index 18bb084e4..aad48f2c1 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -204,7 +204,7 @@ def custom_graph(num_nodes: int, num_connections: int, version: str, datadir: Pa nodes = [] for i in range(num_nodes): - node = {"name": f"tank-{i:04d}", "connect": []} + node = {"name": f"tank-{i:04d}", "connect": [], "image": {"tag": version}} # Add round-robin connection next_node = (i + 1) % num_nodes @@ -223,44 +223,19 @@ def custom_graph(num_nodes: int, num_connections: int, version: str, datadir: Pa nodes.append(node) - # Add image tag to the first node - nodes[0]["image"] = {"tag": "v0.20.0"} - network_yaml_data = {"nodes": nodes} with open(os.path.join(datadir, "network.yaml"), "w") as f: yaml.dump(network_yaml_data, f, default_flow_style=False) # Generate defaults.yaml - defaults_yaml_content = """ -chain: regtest - -collectLogs: true -metricsExport: true - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. 
If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -image: - repository: bitcoindevproject/bitcoin - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "27.0" - -config: | - dns=1 -""" - - with open(os.path.join(datadir, "defaults.yaml"), "w") as f: - f.write(defaults_yaml_content.strip()) - - click.echo(f"Project '{datadir}' has been created with 'network.yaml' and 'defaults.yaml'.") + default_yaml_path = files("resources.networks").joinpath("6_node_bitcoin/node-defaults.yaml") + with open(str(default_yaml_path)) as f: + defaults_yaml_content = f.read() + + with open(os.path.join(datadir, "node-defaults.yaml"), "w") as f: + f.write(defaults_yaml_content) + + click.echo( + f"Project '{datadir}' has been created with 'network.yaml' and 'node-defaults.yaml'." 
+ ) diff --git a/src/warnet/project.py b/src/warnet/project.py deleted file mode 100644 index 004924cd3..000000000 --- a/src/warnet/project.py +++ /dev/null @@ -1,90 +0,0 @@ -import os -import random - -import click -import yaml - - -@click.group(name="project") -def project(): - """Manage a new warnet project""" - - -@project.command() -@click.option("--project_name", prompt="Enter the project name", type=str) -@click.option("--num_nodes", prompt="How many nodes?", type=int) -@click.option("--num_connections", prompt="How many connections should each node have?", type=int) -def new(project_name, num_nodes, num_connections): - """ - Create a new project with a graph - """ - - # Create project directory - os.makedirs(project_name, exist_ok=True) - - # Generate network.yaml - nodes = [] - - for i in range(num_nodes): - node = {"name": f"tank-{i:04d}", "connect": []} - - # Add round-robin connection - next_node = (i + 1) % num_nodes - node["connect"].append(f"tank-{next_node:04d}") - - # Add random connections - available_nodes = list(range(num_nodes)) - available_nodes.remove(i) - if next_node in available_nodes: - available_nodes.remove(next_node) - - for _ in range(min(num_connections - 1, len(available_nodes))): - random_node = random.choice(available_nodes) - node["connect"].append(f"tank-{random_node:04d}") - available_nodes.remove(random_node) - - nodes.append(node) - - # Add image tag to the first node - nodes[0]["image"] = {"tag": "v0.20.0"} - - network_yaml_data = {"nodes": nodes} - - with open(os.path.join(project_name, "network.yaml"), "w") as f: - yaml.dump(network_yaml_data, f, default_flow_style=False) - - # Generate defaults.yaml - defaults_yaml_content = """ -chain: regtest - -collectLogs: true -metricsExport: true - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. 
If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -image: - repository: bitcoindevproject/bitcoin - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "27.0" - -config: | - dns=1 -""" - - with open(os.path.join(project_name, "defaults.yaml"), "w") as f: - f.write(defaults_yaml_content.strip()) - - click.echo( - f"Project '{project_name}' has been created with 'network.yaml' and 'defaults.yaml'." - ) From 734fbb358b61d495b5687f42bcc009cbfe3fe679 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 15:22:25 +0100 Subject: [PATCH 139/710] improve quickstart colours, add defaults --- src/warnet/main.py | 95 ++++++++++++++++++++++++++++++---------------- 1 file changed, 63 insertions(+), 32 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index aad48f2c1..3a4df53f9 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -7,7 +7,6 @@ import click import yaml -from rich import print as richprint from .admin import admin from .bitcoin import bitcoin @@ -17,7 +16,7 @@ from .image import image from .network import copy_network_defaults, copy_scenario_defaults from .status import status as status_command -from .util import SUPPORTED_TAGS +from .util import DEFAULT_TAG, SUPPORTED_TAGS QUICK_START_PATH = files("resources.scripts").joinpath("quick_start.sh") @@ -54,62 +53,92 @@ def quickstart(): process.stdout.close() return_code = process.wait() if return_code != 0: - click.echo(f"Quick start script failed with return code {return_code}") - click.echo("Install missing requirements before proceeding") + click.secho( + f"Quick start script failed with return code {return_code}", fg="red", bold=True + ) + click.secho("Install missing requirements before proceeding", fg="yellow") return False - create_project = 
click.confirm("Do you want to create a new project?", default=True) + create_project = click.confirm( + click.style("\nDo you want to create a new project?", fg="blue", bold=True), + default=True, + ) if not create_project: - click.echo("Setup completed successfully!") + click.secho("\nSetup completed successfully!", fg="green", bold=True) return True default_path = os.path.abspath(os.getcwd()) project_path = click.prompt( - "Enter the project directory path", + click.style("\nEnter the project directory path", fg="blue", bold=True), default=default_path, type=click.Path(file_okay=False, dir_okay=True, resolve_path=True), ) - custom_network = click.confirm("Do you want to create a custom network?", default=True) + custom_network = click.confirm( + click.style("\nDo you want to create a custom network?", fg="blue", bold=True), + default=True, + ) if not custom_network: create_warnet_project(Path(project_path)) - click.echo("Setup completed successfully!") + click.secho("\nSetup completed successfully!", fg="green", bold=True) return True network_name = click.prompt( - "Enter the network name", + click.style("\nEnter the network name", fg="blue", bold=True), type=str, ) - nodes = click.prompt("How many nodes would you like?", type=int) - connections = click.prompt("How many connects would you like each node to have?", type=int) + + nodes = click.prompt( + click.style("\nHow many nodes would you like?", fg="blue", bold=True), + type=int, + default=15, + ) + connections = click.prompt( + click.style( + "\nHow many connections would you like each node to have?", fg="blue", bold=True + ), + type=int, + default=8, + ) version = click.prompt( - "Which version would you like nodes to have by default?", + click.style( + "\nWhich version would you like nodes to be by default?", fg="blue", bold=True + ), type=click.Choice(SUPPORTED_TAGS, case_sensitive=False), + default=DEFAULT_TAG, ) + click.secho("\nCreating project structure...", fg="yellow", bold=True) 
create_warnet_project(Path(project_path)) - custom_graph(nodes, connections, version, Path(project_path) / "networks" / network_name) - + click.secho("\nGenerating custom network...", fg="yellow", bold=True) + custom_network_path = Path(project_path) / "networks" / network_name + custom_graph(nodes, connections, version, custom_network_path) + click.secho("\nSetup completed successfully!", fg="green", bold=True) + click.echo("\nRun the following command to deploy this network:") + click.echo(f"warcli deploy {custom_network_path}") except Exception as e: - print(f"An error occurred while running the quick start script:\n\n{e}\n\n") - print("Please report this to https://github.com/bitcoin-dev-project/warnet/issues") + click.secho(f"An error occurred while running the quick start script:\n\n{e}\n\n", fg="red") + click.secho( + "Please report this to https://github.com/bitcoin-dev-project/warnet/issues", + fg="yellow", + ) return False def create_warnet_project(directory: Path, check_empty: bool = False): """Common function to create a warnet project""" if check_empty and any(directory.iterdir()): - richprint("[yellow]Warning: Directory is not empty[/yellow]") + click.secho("Warning: Directory is not empty", fg="yellow") if not click.confirm("Do you want to continue?", default=True): return try: copy_network_defaults(directory) copy_scenario_defaults(directory) - richprint(f"[green]Copied network example files to {directory / 'networks'}[/green]") - richprint(f"[green]Created warnet project structure in {directory}[/green]") + click.echo(f"Copied network example files to {directory}/networks") + click.echo(f"Created warnet project structure in {directory}") except Exception as e: - richprint(f"[red]Error creating project: {e}[/red]") + click.secho(f"Error creating project: {e}", fg="red") raise e @@ -120,7 +149,7 @@ def create_warnet_project(directory: Path, check_empty: bool = False): def create(directory: Path): """Create 
a new warnet project in the specified directory""" if directory.exists(): - richprint(f"[red]Error: Directory {directory} already exists[/red]") + click.secho(f"Error: Directory {directory} already exists", fg="red") return create_warnet_project(directory) @@ -155,17 +184,17 @@ def auth(kube_config: str) -> None: flatten_cmd, shell=True, check=True, capture_output=True, text=True ) except subprocess.CalledProcessError as e: - print("Error occurred while executing kubectl config view --flatten:") - print(e.stderr) + click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") + click.secho(e.stderr, fg="red") sys.exit(1) if result_flatten.returncode == 0: with open(current_kubeconfig, "w") as file: file.write(result_flatten.stdout) - print(f"Authorization file written to: {current_kubeconfig}") + click.secho(f"Authorization file written to: {current_kubeconfig}", fg="green") else: - print("Could not create authorization file") - print(result_flatten.stderr) + click.secho("Could not create authorization file", fg="red") + click.secho(result_flatten.stderr, fg="red") sys.exit(result_flatten.returncode) try: @@ -174,12 +203,12 @@ def auth(kube_config: str) -> None: update_cmd, shell=True, check=True, capture_output=True, text=True ) if result_update.returncode != 0: - print("Could not update authorization file") - print(result_flatten.stderr) + click.secho("Could not update authorization file", fg="red") + click.secho(result_flatten.stderr, fg="red") sys.exit(result_flatten.returncode) except subprocess.CalledProcessError as e: - print("Error occurred while executing kubectl config view --flatten:") - print(e.stderr) + click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") + click.secho(e.stderr, fg="red") sys.exit(1) with open(current_kubeconfig) as file: @@ -191,7 +220,9 @@ def auth(kube_config: str) -> None: with open(current_kubeconfig) as file: contents = yaml.safe_load(file) - print(f"\nWarcli's current 
context is now set to: {contents['current-context']}") + click.secho( + f"\nWarcli's current context is now set to: {contents['current-context']}", fg="green" + ) if __name__ == "__main__": From b3f7eeb75c64e77e22dba4c228143d4f75ef91c8 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 20:32:20 +0100 Subject: [PATCH 140/710] add liveness check to test_logging.py --- test/logging_test.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/test/logging_test.py b/test/logging_test.py index b9604222d..7a8810544 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -57,6 +57,20 @@ def setup_network(self): self.log.info(self.warcli(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running", timeout=10 * 60) self.wait_for_all_edges() + self.wait_for_endpoint_ready() + + def wait_for_endpoint_ready(self): + self.log.info("Waiting for Grafana to be ready to receive API calls...") + + def check_endpoint(): + try: + response = requests.get("http://localhost:3000/login") + return response.status_code == 200 + except requests.RequestException: + return False + + self.wait_for_predicate(check_endpoint, timeout=120) + self.log.info("Grafana login endpoint returned status code 200") def make_grafana_api_request(self, ds_uid, start, metric): self.log.info("Making Grafana request...") From dcc0b0da2920a7b63f1b9eb9563fe1751c09ac1e Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 20:57:54 +0100 Subject: [PATCH 141/710] give minikube more power, add better logging on failures --- .github/workflows/test.yml | 7 ++++++- test/logging_test.py | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b82474a17..5dca23ea5 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -34,7 +34,12 @@ jobs: steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 - - uses: 
medyagh/setup-minikube@master + - name: start minikube + uses: medyagh/setup-minikube@latest + id: minikube + with: + cpus: max + memory: 4000m - name: Install uv run: | curl -LsSf https://astral.sh/uv/install.sh | sh diff --git a/test/logging_test.py b/test/logging_test.py index 7a8810544..9a4f5bc3f 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -80,7 +80,10 @@ def make_grafana_api_request(self, ds_uid, start, metric): "to": "now", } reply = requests.post("http://localhost:3000/api/ds/query", json=data) - assert reply.status_code == 200 + if reply.status_code != 200: + self.log.error(f"Grafana API request failed with status code {reply.status_code}") + self.log.error(f"Response content: {reply.text}") + return None # Default ref ID is "A", only inspecting one "frame" return reply.json()["results"]["A"]["frames"][0]["data"]["values"] @@ -102,6 +105,9 @@ def test_prometheus_and_grafana(self): def get_five_values_for_metric(metric): data = self.make_grafana_api_request(prometheus_uid, start, metric) + if data is None: + self.log.info(f"Failed to get Grafana data for {metric}") + return False if len(data) < 1: self.log.info(f"No Grafana data yet for {metric}") return False From cea6b6eaa43752d5ed62b198c189c99499a2cd56 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 20:58:22 +0100 Subject: [PATCH 142/710] use uv action with caching --- .github/workflows/test.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5dca23ea5..9bfc35fe1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -15,14 +15,14 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - run: curl -LsSf https://astral.sh/uv/install.sh | sh + - uses: eifinger/setup-uv@v1 - run: uvx ruff check . 
ruff-format: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - run: curl -LsSf https://astral.sh/uv/install.sh | sh + - uses: eifinger/setup-uv@v1 - run: uvx ruff format . --check test: @@ -40,9 +40,7 @@ jobs: with: cpus: max memory: 4000m - - name: Install uv - run: | - curl -LsSf https://astral.sh/uv/install.sh | sh + - uses: eifinger/setup-uv@v1 - name: Install project run: uv sync --all-extras --dev - name: Run tests From b995157950a3b403c15dc47c77e4a41936ef92c7 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 21:44:09 +0100 Subject: [PATCH 143/710] Don't error on bringing down a pod which is not found --- src/warnet/control.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/control.py b/src/warnet/control.py index 720ef008c..8e94c9cc5 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -134,7 +134,7 @@ def down(): pods = get_pods() with console.status("[yellow]Cleaning up remaining pods...[/yellow]"): for pod in pods.items: - cmd = f"kubectl delete pod {pod.metadata.name}" + cmd = f"kubectl delete pod --ignore-not-found=true {pod.metadata.name}" if stream_command(cmd): console.print(f"[green]Deleted pod: {pod.metadata.name}[/green]") else: From 8d310267ef0b2bc1800555973cf74333045397d7 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 21:48:41 +0100 Subject: [PATCH 144/710] hide unimplemented graph command --- src/warnet/graph.py | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/src/warnet/graph.py b/src/warnet/graph.py index d03c4d334..1c91e1db2 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -2,33 +2,13 @@ import click -from .util import DEFAULT_TAG - -@click.group(name="graph") +@click.group(name="graph", hidden=True) def graph(): """Create and validate network graphs""" @graph.command() -@click.argument("number", type=int) -@click.option("--outfile", type=click.Path()) 
-@click.option("--version", type=str, default=DEFAULT_TAG) -@click.option("--bitcoin_conf", type=click.Path()) -@click.option("--random", is_flag=True) -def create(number: int, outfile: Path, version: str, bitcoin_conf: Path, random: bool = False): - """ - Create a cycle graph with nodes, and include 7 extra random outbounds per node. - Returns XML file as string with or without --outfile option - """ - raise Exception("Not Implemented") - - -@graph.command() -@click.argument("infile", type=click.Path()) -@click.option("--outfile", type=click.Path()) -@click.option("--cb", type=str) -@click.option("--ln_image", type=str) def import_json(infile: Path, outfile: Path, cb: str, ln_image: str): """ Create a cycle graph with nodes imported from lnd `describegraph` JSON file, From 1eb2c763b2ec8248b24c6957f3ab2ad94f2e6e33 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 21:48:50 +0100 Subject: [PATCH 145/710] run apidocs --- docs/warcli.md | 201 +++++++++++++++++++------------------------------ 1 file changed, 76 insertions(+), 125 deletions(-) diff --git a/docs/warcli.md b/docs/warcli.md index 915c15e8e..ef9ea9460 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -17,17 +17,79 @@ parameters in . ## API Commands -### `warcli help` -Display help information for the given [command] (and sub-command). - If no command is given, display help for the main CLI. 
+### `warcli auth` +Authenticate with a warnet cluster using a kube config file options: -| name | type | required | default | -|----------|--------|------------|-----------| -| commands | String | | | +| name | type | required | default | +|-------------|--------|------------|-----------| +| kube_config | String | yes | | + +### `warcli create` +Create a new warnet project in the specified directory + +options: +| name | type | required | default | +|-----------|--------|------------|-----------| +| directory | Path | yes | | + +### `warcli deploy` +Deploy a warnet with topology loaded from \ + +options: +| name | type | required | default | +|-----------|--------|------------|-----------| +| directory | Path | yes | | + +### `warcli down` +Bring down a running warnet + + +### `warcli init` +Initialize a warnet project in the current directory + + +### `warcli quickstart` +Setup warnet + + +### `warcli run` +Run a scenario from a file + +options: +| name | type | required | default | +|-----------------|--------|------------|-----------| +| scenario_file | Path | yes | | +| additional_args | String | | | + +### `warcli status` +Display the unified status of the Warnet network and active scenarios -### `warcli setup` -Check Warnet requirements are installed + +### `warcli stop` +Stop a running scenario or all scenarios + +options: +| name | type | required | default | +|---------------|--------|------------|-----------| +| scenario_name | String | | | + +## Admin + +### `warcli admin create` +Create a new warnet project in the specified directory + +options: +| name | type | required | default | +|-----------|--------|------------|-----------| +| directory | Func | yes | | + +### `warcli admin init` +Initialize a warnet project in the current directory + + +### `warcli admin namespaces` +Namespaces commands ## Bitcoin @@ -51,14 +113,14 @@ options: | no_sort | Bool | | False | ### `warcli bitcoin messages` -Fetch messages sent between \ and \ in [network] +Fetch 
messages sent between \ and \ in [chain] options: -| name | type | required | default | -|---------|--------|------------|-----------| -| tank_a | String | yes | | -| tank_b | String | yes | | -| network | String | | "regtest" | +| name | type | required | default | +|--------|--------|------------|-----------| +| tank_a | String | yes | | +| tank_b | String | yes | | +| chain | String | | "regtest" | ### `warcli bitcoin rpc` Call bitcoin-cli \ [params] on \ @@ -72,40 +134,12 @@ options: ## Graph -### `warcli graph create` -Create a cycle graph with \ nodes, and include 7 extra random outbounds per node. - Returns XML file as string with or without --outfile option - -options: -| name | type | required | default | -|--------------|--------|------------|-----------| -| number | Int | yes | | -| outfile | Path | | | -| version | String | | "27.0" | -| bitcoin_conf | Path | | | -| random | Bool | | False | - ### `warcli graph import-json` Create a cycle graph with nodes imported from lnd `describegraph` JSON file, and additionally include 7 extra random outbounds per node. Include lightning channels and their policies as well. Returns XML file as string with or without --outfile option. -options: -| name | type | required | default | -|----------|--------|------------|-----------| -| infile | Path | yes | | -| outfile | Path | | | -| cb | String | | | -| ln_image | String | | | - -### `warcli graph validate` -Validate a \ against the schema. 
- -options: -| name | type | required | default | -|--------|--------|------------|-----------| -| graph | Path | yes | | ## Image @@ -124,87 +158,4 @@ options: | arches | String | | | | action | String | | "load" | -## Namespaces - -### `warcli namespaces deploy` -Deploy namespaces with users from a \ - -options: -| name | type | required | default | -|------------|--------|------------|----------------------------| -| namespaces | String | | "two_namespaces_two_users" | - -### `warcli namespaces destroy` -Destroy a specific namespace or all warnet- prefixed namespaces - -options: -| name | type | required | default | -|-------------|--------|------------|-----------| -| destroy_all | Bool | | False | -| namespace | String | | | - -### `warcli namespaces list` -List all namespaces with 'warnet-' prefix - - -## Network - -### `warcli network connected` -Determine if all p2p connections defined in graph are established - - -### `warcli network down` -Bring down a running warnet - - -### `warcli network logs` -Get Kubernetes logs from the RPC server - -options: -| name | type | required | default | -|--------|--------|------------|-----------| -| follow | Bool | | False | - -### `warcli network start` -Start a warnet with topology loaded from \ into [network] - -options: -| name | type | required | default | -|--------------|--------|------------|------------------| -| network_name | String | | "6_node_bitcoin" | -| network | String | | "warnet" | -| logging | Bool | | False | - -### `warcli network status` -Return pod status - - -## Scenarios - -### `warcli scenarios active` -List running scenarios "name": "pid" pairs - - -### `warcli scenarios available` -List available scenarios in the Warnet Test Framework - - -### `warcli scenarios run` -Run \ from the Warnet Test Framework with optional arguments - -options: -| name | type | required | default | -|-----------------|--------|------------|-----------| -| scenario | String | yes | | -| additional_args | String | | 
| - -### `warcli scenarios run-file` -Run \ from the Warnet Test Framework with optional arguments - -options: -| name | type | required | default | -|-----------------|--------|------------|-----------| -| scenario_path | String | yes | | -| additional_args | String | | | - From 8ba9370905996b3ccd3e653142fadf767c095c43 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 21:51:03 +0100 Subject: [PATCH 146/710] fix apidocs autocommit --- .github/workflows/apidocs.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/apidocs.yml b/.github/workflows/apidocs.yml index 860f25e61..64625f0bc 100644 --- a/.github/workflows/apidocs.yml +++ b/.github/workflows/apidocs.yml @@ -29,3 +29,7 @@ jobs: run: | source .venv/bin/activate python3 resources/scripts/apidocs.py + + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: Update apidocs on ${{ github.head_ref }} From 52cdb28011eee2be183b7e226bd546a179f7da0e Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 21:44:39 +0100 Subject: [PATCH 147/710] make entire down pretty with console.status --- src/warnet/control.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/warnet/control.py b/src/warnet/control.py index 8e94c9cc5..95fc56124 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -112,13 +112,12 @@ def list_active_scenarios(): @click.command() def down(): """Bring down a running warnet""" - console.print("[bold yellow]Bringing down the warnet...[/bold yellow]") - - # Delete warnet-logging namespace - if delete_namespace("warnet-logging"): - console.print("[green]Warnet logging deleted[/green]") - else: - console.print("[red]Warnet logging NOT deleted[/red]") + with console.status("[bold yellow]Bringing down the warnet...[/bold yellow]"): + # Delete warnet-logging namespace + if delete_namespace("warnet-logging"): + console.print("[green]Warnet logging deleted[/green]") + else: + console.print("[red]Warnet 
logging NOT deleted[/red]") # Uninstall tanks tanks = get_mission("tank") From e577442cca4a23067604ab0aa8757e2997cc4f7f Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 21:45:01 +0100 Subject: [PATCH 148/710] have stream_command raise exception --- src/warnet/process.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/warnet/process.py b/src/warnet/process.py index 2cd4129f5..fd5b2a04b 100644 --- a/src/warnet/process.py +++ b/src/warnet/process.py @@ -10,7 +10,7 @@ def run_command(command: str) -> str: return result.stdout -def stream_command(command: str, env=None) -> bool: +def stream_command(command: str) -> bool: process = subprocess.Popen( ["/bin/bash", "-c", command], stdout=subprocess.PIPE, @@ -27,6 +27,5 @@ def stream_command(command: str, env=None) -> bool: return_code = process.wait() if return_code != 0: - print(f"Command failed with return code {return_code}") - return False + raise Exception(process.stderr) return True From b4e8693e1a15a7edef46be35db1f1be458a06203 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Sun, 25 Aug 2024 18:24:33 -0400 Subject: [PATCH 149/710] clean up tests --- .github/workflows/test.yml | 7 ++- test/build_branch_test.py | 57 -------------------- test/data/build_v24_test.graphml | 28 ---------- test/data/permutations.graphml | 93 -------------------------------- test/data/services.graphml | 0 test/graph_test.py | 0 test/onion_test.py | 71 ------------------------ 7 files changed, 5 insertions(+), 251 deletions(-) delete mode 100755 test/build_branch_test.py delete mode 100644 test/data/build_v24_test.graphml delete mode 100644 test/data/permutations.graphml delete mode 100644 test/data/services.graphml delete mode 100644 test/graph_test.py delete mode 100755 test/onion_test.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9bfc35fe1..f47d94b49 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -29,8 +29,11 @@ jobs: runs-on: 
ubuntu-latest strategy: matrix: - # test: [scenarios_test.py, rpc_test.py, graph_test.py, ln_test.py, dag_connection_test.py, logging_test.py] - test: [scenarios_test.py, rpc_test.py, dag_connection_test.py, logging_test.py] + test: + - dag_connection_test.py + - logging_test.py + - rpc_test.py + - scenarios_test.py steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 diff --git a/test/build_branch_test.py b/test/build_branch_test.py deleted file mode 100755 index bbce564ce..000000000 --- a/test/build_branch_test.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 - -import json -import os -from pathlib import Path - -from test_base import TestBase - - -class BuildBranchTest(TestBase): - def __init__(self): - super().__init__() - self.graph_file_path = Path(os.path.dirname(__file__)) / "data" / "build_v24_test.graphml" - - def run_test(self): - self.start_server() - try: - self.setup_network() - self.wait_for_p2p_connections() - self.check_build_flags() - finally: - self.stop_server() - - def setup_network(self): - self.log.info("Setting up network") - self.log.info(self.warcli(f"network start {self.graph_file_path}")) - self.wait_for_all_tanks_status(target="running", timeout=10 * 60) - self.wait_for_all_edges() - - def wait_for_p2p_connections(self): - self.log.info("Waiting for P2P connections") - self.wait_for_predicate(self.check_peers, timeout=5 * 60) - - def check_peers(self): - info0 = json.loads(self.warcli("bitcoin rpc 0 getpeerinfo")) - info1 = json.loads(self.warcli("bitcoin rpc 1 getpeerinfo")) - self.log.debug( - f"Waiting for both nodes to get one peer: node0: {len(info0)}, node1: {len(info1)}" - ) - return len(info0) == 1 and len(info1) == 1 - - def check_build_flags(self): - self.log.info("Checking build flags") - release_help = self.get_tank(0).exec("bitcoind -h") - build_help = self.get_tank(1).exec("bitcoind -h") - - assert "zmqpubhashblock" in release_help, "zmqpubhashblock not found in release help" - assert ( - 
"zmqpubhashblock" not in build_help - ), "zmqpubhashblock found in build help, but it shouldn't be" - - self.log.info("Build flags check passed") - - -if __name__ == "__main__": - test = BuildBranchTest() - test.run_test() diff --git a/test/data/build_v24_test.graphml b/test/data/build_v24_test.graphml deleted file mode 100644 index d55c3611c..000000000 --- a/test/data/build_v24_test.graphml +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - - - - - - - - - - - - - - - 27.0 - - - bitcoin/bitcoin#24.x - --disable-zmq - - - - diff --git a/test/data/permutations.graphml b/test/data/permutations.graphml deleted file mode 100644 index 0c4686f61..000000000 --- a/test/data/permutations.graphml +++ /dev/null @@ -1,93 +0,0 @@ - - - - - - - - - - - - - - - - - - - - 27.0 - - - - False - False - - - bitcoindevproject/bitcoin:26.0 - - - - False - False - - - 27.0 - - - - False - False - - - 27.0 - - - - False - False - - - 27.0 - - - - False - False - - - 27.0 - - - - False - False - - - 27.0 - - - - False - False - - - 27.0 - - - - False - False - - - - - - - - - - - - \ No newline at end of file diff --git a/test/data/services.graphml b/test/data/services.graphml deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/graph_test.py b/test/graph_test.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/onion_test.py b/test/onion_test.py deleted file mode 100755 index 7f7454b60..000000000 --- a/test/onion_test.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 - -import json -import os -from pathlib import Path - -from test_base import TestBase - - -class OnionTest(TestBase): - def __init__(self): - super().__init__() - self.graph_file_path = Path(os.path.dirname(__file__)) / "data" / "12_node_ring.graphml" - self.onion_addr = None - - def run_test(self): - self.start_server() - try: - self.setup_network() - self.test_reachability() - self.test_onion_peer_connection() - finally: - self.stop_server() - - def setup_network(self): - 
self.log.info("Setting up network") - self.log.info(self.warcli(f"network start {self.graph_file_path}")) - self.wait_for_all_tanks_status(target="running") - self.wait_for_all_edges() - - def test_reachability(self): - self.log.info("Checking IPv4 and onion reachability") - self.wait_for_predicate(self.check_reachability, timeout=10 * 60) - - def check_reachability(self): - try: - info = json.loads(self.warcli("bitcoin rpc 0 getnetworkinfo")) - for net in info["networks"]: - if net["name"] == "ipv4" and not net["reachable"]: - return False - if net["name"] == "onion" and not net["reachable"]: - return False - if len(info["localaddresses"]) != 2: - return False - for addr in info["localaddresses"]: - assert "100." in addr["address"] or ".onion" in addr["address"] - if ".onion" in addr["address"]: - self.onion_addr = addr["address"] - return True - except Exception as e: - self.log.error(f"Error checking reachability: {e}") - return False - - def test_onion_peer_connection(self): - self.log.info("Attempting addnode to onion peer") - self.warcli(f"bitcoin rpc 1 addnode {self.onion_addr} add") - # Might take up to 10 minutes - self.wait_for_predicate(self.check_onion_peer, timeout=10 * 60) - - def check_onion_peer(self): - peers = json.loads(self.warcli("bitcoin rpc 0 getpeerinfo")) - for peer in peers: - self.log.debug(f"Checking peer: {peer['network']} {peer['addr']}") - if peer["network"] == "onion": - return True - return False - - -if __name__ == "__main__": - test = OnionTest() - test.run_test() From c0224db10b967cc51432bae81177267fb294a95b Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Sun, 25 Aug 2024 19:55:50 -0400 Subject: [PATCH 150/710] charts: only use conf file sections prior to v0.17 --- .github/workflows/test.yml | 1 + .../charts/bitcoincore/templates/_helpers.tpl | 12 ++++ .../bitcoincore/templates/configmap.yaml | 3 + resources/charts/bitcoincore/values.yaml | 3 - test/conf_test.py | 58 +++++++++++++++++ test/data/bitcoin_conf/network.yaml | 64 
+++++++++++++++++++ test/data/bitcoin_conf/node-defaults.yaml | 4 ++ 7 files changed, 142 insertions(+), 3 deletions(-) create mode 100755 test/conf_test.py create mode 100644 test/data/bitcoin_conf/network.yaml create mode 100644 test/data/bitcoin_conf/node-defaults.yaml diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f47d94b49..a84bfbab2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,6 +30,7 @@ jobs: strategy: matrix: test: + - conf_test.py - dag_connection_test.py - logging_test.py - rpc_test.py diff --git a/resources/charts/bitcoincore/templates/_helpers.tpl b/resources/charts/bitcoincore/templates/_helpers.tpl index 25622c4e8..f22ca65e4 100644 --- a/resources/charts/bitcoincore/templates/_helpers.tpl +++ b/resources/charts/bitcoincore/templates/_helpers.tpl @@ -55,3 +55,15 @@ Create the name of the service account to use {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }} + + +{{/* +Add network section heading in bitcoin.conf after v0.17.0 +*/}} +{{- define "bitcoincore.check_semver.regtest" -}} +{{- $tag := .Values.image.tag | trimPrefix "v" -}} +{{- $version := semverCompare ">=0.17.0" $tag -}} +{{- if $version -}} +[regtest] +{{- end -}} +{{- end -}} diff --git a/resources/charts/bitcoincore/templates/configmap.yaml b/resources/charts/bitcoincore/templates/configmap.yaml index ea21616f0..37952ff48 100644 --- a/resources/charts/bitcoincore/templates/configmap.yaml +++ b/resources/charts/bitcoincore/templates/configmap.yaml @@ -7,6 +7,9 @@ metadata: data: bitcoin.conf: | {{- if eq .Values.chain "regtest" }} + regtest=1 + + {{ template "bitcoincore.check_semver.regtest" . }} {{- tpl .Values.regtestConfig . 
| nindent 4 }} {{- end }} {{- .Values.baseConfig | nindent 4 }} diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index aac305ec6..275e11cd3 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -111,9 +111,6 @@ metricsExport: false prometheusMetricsPort: 9332 regtestConfig: | - regtest=1 - - [regtest] rpcuser=user rpcpassword=password rpcport=18443 diff --git a/test/conf_test.py b/test/conf_test.py new file mode 100755 index 000000000..97b9b0fcc --- /dev/null +++ b/test/conf_test.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 + +import json +import os +import re +from pathlib import Path + +from test_base import TestBase + +from warnet.k8s import get_mission + + +class ConfTest(TestBase): + def __init__(self): + super().__init__() + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "bitcoin_conf" + + def run_test(self): + try: + self.setup_network() + self.check_uacomment() + finally: + self.stop_server() + + def setup_network(self): + self.log.info("Setting up network") + self.log.info(self.warcli(f"deploy {self.network_dir}")) + self.wait_for_all_tanks_status(target="running") + + def check_uacomment(self): + tanks = get_mission("tank") + + def get_uacomment(): + for tank in tanks[::-1]: + try: + name = tank.metadata.name + info = json.loads(self.warcli(f"bitcoin rpc {name} getnetworkinfo")) + subver = info["subversion"] + + # Regex pattern to match the uacomment inside parentheses + # e.g. 
/Satoshi:27.0.0(tank-0027)/ + pattern = r"\(([^)]+)\)" + match = re.search(pattern, subver) + if match: + uacomment = match.group(1) + assert uacomment == name + else: + return False + except Exception: + return False + return True + + self.wait_for_predicate(get_uacomment) + + +if __name__ == "__main__": + test = ConfTest() + test.run_test() diff --git a/test/data/bitcoin_conf/network.yaml b/test/data/bitcoin_conf/network.yaml new file mode 100644 index 000000000..0ceb8b059 --- /dev/null +++ b/test/data/bitcoin_conf/network.yaml @@ -0,0 +1,64 @@ +nodes: + - name: tank-0016 + image: + tag: "v0.16.1" + connect: + - tank-0017 + config: + uacomment=tank-0016 + - name: tank-0017 + image: + tag: "v0.17.0" + connect: + - tank-0019 + config: + uacomment=tank-0017 + - name: tank-0019 + image: + tag: "v0.19.2" + connect: + - tank-0020 + config: + uacomment=tank-0019 + - name: tank-0020 + image: + tag: "v0.20.0" + connect: + - tank-0021 + config: + uacomment=tank-0020 + - name: tank-0021 + image: + tag: "v0.21.1" + connect: + - tank-0024 + config: + uacomment=tank-0021 + - name: tank-0024 + image: + tag: "24.2" + connect: + - tank-0025 + config: + uacomment=tank-0024 + - name: tank-0025 + image: + tag: "25.1" + connect: + - tank-0026 + config: + uacomment=tank-0025 + - name: tank-0026 + image: + tag: "26.0" + connect: + - tank-0027 + config: + uacomment=tank-0026 + - name: tank-0027 + image: + tag: "27.0" + connect: + - tank-0016 + config: + uacomment=tank-0027 \ No newline at end of file diff --git a/test/data/bitcoin_conf/node-defaults.yaml b/test/data/bitcoin_conf/node-defaults.yaml new file mode 100644 index 000000000..7e021cad1 --- /dev/null +++ b/test/data/bitcoin_conf/node-defaults.yaml @@ -0,0 +1,4 @@ +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: "27.0" From 0ef609d3022dcc1d1f8c896d374b5ea692759fc4 Mon Sep 17 00:00:00 2001 From: Grant Date: Sat, 24 Aug 2024 11:09:46 -0500 Subject: [PATCH 151/710] do try..except in grep-logs Adding 
try..excepts to prevent the user from seeing stack traces --- src/warnet/bitcoin.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/warnet/bitcoin.py b/src/warnet/bitcoin.py index 9b9d2676c..80b1fa005 100644 --- a/src/warnet/bitcoin.py +++ b/src/warnet/bitcoin.py @@ -1,9 +1,11 @@ import os import re +import sys from datetime import datetime from io import BytesIO import click +from urllib3.exceptions import MaxRetryError from test_framework.messages import ser_uint256 from test_framework.p2p import MESSAGEMAP @@ -55,7 +57,11 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): Grep combined bitcoind logs using regex """ - tanks = get_mission("tank") + try: + tanks = get_mission("tank") + except MaxRetryError as e: + print(f"{e}") + sys.exit(1) matching_logs = [] From 4b81ae2e9643a07ddb8531852d12833fcb819a23 Mon Sep 17 00:00:00 2001 From: Grant Date: Sat, 24 Aug 2024 11:10:15 -0500 Subject: [PATCH 152/710] do try..except on rpc --- src/warnet/bitcoin.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/warnet/bitcoin.py b/src/warnet/bitcoin.py index 80b1fa005..a82d04f56 100644 --- a/src/warnet/bitcoin.py +++ b/src/warnet/bitcoin.py @@ -27,7 +27,12 @@ def rpc(tank: str, method: str, params: str): """ Call bitcoin-cli [params] on """ - print(_rpc(tank, method, params)) + try: + result = _rpc(tank, method, params) + except Exception as e: + print(f"{e}") + sys.exit(1) + print(result) def _rpc(tank: str, method: str, params: str): From b6f1619199359249248d85f6809816d14f67aacc Mon Sep 17 00:00:00 2001 From: Grant Date: Sat, 24 Aug 2024 11:10:27 -0500 Subject: [PATCH 153/710] do try..except on debug_log --- src/warnet/bitcoin.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/warnet/bitcoin.py b/src/warnet/bitcoin.py index a82d04f56..d466eef1d 100644 --- a/src/warnet/bitcoin.py +++ b/src/warnet/bitcoin.py @@ -50,7 +50,10 @@ def debug_log(tank: str): Fetch the Bitcoin Core 
debug log from """ cmd = f"kubectl logs {tank}" - print(run_command(cmd)) + try: + print(run_command(cmd)) + except Exception as e: + print(f"{e}") @bitcoin.command() From c67154a7b1f46cf2b2e743affc489ad97597b132 Mon Sep 17 00:00:00 2001 From: Grant Date: Sat, 24 Aug 2024 11:10:51 -0500 Subject: [PATCH 154/710] do try..except on get_pods --- src/warnet/k8s.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/warnet/k8s.py b/src/warnet/k8s.py index e963d7c93..421ed77cd 100644 --- a/src/warnet/k8s.py +++ b/src/warnet/k8s.py @@ -24,7 +24,11 @@ def get_dynamic_client() -> DynamicClient: def get_pods() -> V1PodList: sclient = get_static_client() - return sclient.list_namespaced_pod(get_default_namespace()) + try: + pod_list: V1PodList = sclient.list_namespaced_pod(get_default_namespace()) + except Exception as e: + raise e + return pod_list def get_mission(mission: str) -> list[V1PodList]: From 87ea1ffb826435fd8888cbbaa19c2655f426a0cc Mon Sep 17 00:00:00 2001 From: josibake Date: Mon, 19 Aug 2024 15:50:35 +0200 Subject: [PATCH 155/710] tune gcc to be less resource hungry fix OOM on docker desktop when building custom images fixes #441 --- resources/images/bitcoin/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/images/bitcoin/Dockerfile b/resources/images/bitcoin/Dockerfile index 2e4a7d1e2..6efba5335 100644 --- a/resources/images/bitcoin/Dockerfile +++ b/resources/images/bitcoin/Dockerfile @@ -43,7 +43,7 @@ RUN set -ex \ && ./autogen.sh \ && ./configure \ LDFLAGS=-L`ls -d /opt/db*`/lib/ \ - CPPFLAGS=-I`ls -d /opt/db*`/include/ \ + CPPFLAGS="-g0 -I`ls -d /opt/db*`/include/ --param ggc-min-expand=1 --param ggc-min-heapsize=32768" \ --prefix=${BITCOIN_PREFIX} \ ${BUILD_ARGS} \ && make -j$(nproc) \ From 8e8d7d1510e3464ea5ab00420f68208a1a68ee27 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 25 Aug 2024 21:12:42 +0100 Subject: [PATCH 156/710] Don't restart bitcoin pods by default --- 
resources/charts/bitcoincore/templates/pod.yaml | 1 + resources/charts/bitcoincore/values.yaml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index 082ae7961..7135dd893 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -14,6 +14,7 @@ metadata: annotations: init_peers: "{{ .Values.connect | len }}" spec: + restartPolicy: "{{ .Values.restartPolicy }}" {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 4 }} diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index aac305ec6..92e1692d5 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -3,6 +3,8 @@ # Declare variables to be passed into your templates. namespace: warnet +restartPolicy: Never + image: repository: bitcoindevproject/bitcoin pullPolicy: IfNotPresent From 7c15e840ce1bf7c914598969412abdafdf085e2a Mon Sep 17 00:00:00 2001 From: Grant Date: Mon, 26 Aug 2024 12:07:41 -0500 Subject: [PATCH 157/710] add inquirer --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 69e2e2b93..f194ee359 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,6 +14,7 @@ dependencies = [ "click==8.1.7", "docker==7.1.0", "flask==3.0.3", + "inquirer==3.4.0", "kubernetes==30.1.0", "rich==13.7.1", "tabulate==0.9.0", From 332413b094719534feb2e3386b39aec21b510739 Mon Sep 17 00:00:00 2001 From: Grant Date: Tue, 27 Aug 2024 15:24:26 -0500 Subject: [PATCH 158/710] fix `warcli down` namespace --- src/warnet/control.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/control.py b/src/warnet/control.py index 95fc56124..c9a9b9295 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -123,7 +123,7 @@ def down(): tanks = get_mission("tank") with 
console.status("[yellow]Uninstalling tanks...[/yellow]"): for tank in tanks: - cmd = f"helm uninstall {tank.metadata.name}" + cmd = f"helm uninstall {tank.metadata.name} --namespace {get_default_namespace()}" if stream_command(cmd): console.print(f"[green]Uninstalled tank: {tank.metadata.name}[/green]") else: From 900920e0f0b4a46e599a7d877cd9e12da88491d4 Mon Sep 17 00:00:00 2001 From: Grant Date: Tue, 27 Aug 2024 15:40:47 -0500 Subject: [PATCH 159/710] add namespace to scenario deletion --- src/warnet/control.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/control.py b/src/warnet/control.py index c9a9b9295..61ac1e534 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -133,7 +133,7 @@ def down(): pods = get_pods() with console.status("[yellow]Cleaning up remaining pods...[/yellow]"): for pod in pods.items: - cmd = f"kubectl delete pod --ignore-not-found=true {pod.metadata.name}" + cmd = f"kubectl delete pod --ignore-not-found=true {pod.metadata.name} -n {get_default_namespace()}" if stream_command(cmd): console.print(f"[green]Deleted pod: {pod.metadata.name}[/green]") else: From 35fc9ed8cf12c78bd724ee2cacfab98c05167c8a Mon Sep 17 00:00:00 2001 From: Grant Date: Mon, 26 Aug 2024 12:07:59 -0500 Subject: [PATCH 160/710] add `logs` to warcli The idea is to lift kubectl's `logs` command up into warcli. Since we will have a many hard-to-type pod names, we can use inquirer to check with the user about which pod they would like to query. 
ensure pod_name has values handle zero pods condition add --follow flag add try..except to `warcli logs` add log logic --- src/warnet/main.py | 53 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/warnet/main.py b/src/warnet/main.py index 3a4df53f9..b8b44c534 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -1,3 +1,4 @@ +import json import os import random import subprocess @@ -6,7 +7,12 @@ from pathlib import Path import click +import inquirer import yaml +from inquirer.themes import GreenPassion + +from warnet.k8s import get_default_namespace +from warnet.process import run_command, stream_command from .admin import admin from .bitcoin import bitcoin @@ -225,6 +231,53 @@ def auth(kube_config: str) -> None: ) +@cli.command() +@click.argument("pod_name", type=str, default="") +@click.option("--follow", "-f", is_flag=True, default=False, help="Follow logs") +def logs(pod_name: str, follow: bool): + """Show the logs of a pod""" + follow_flag = "--follow" if follow else "" + namespace = get_default_namespace() + + if pod_name: + try: + command = f"kubectl logs pod/{pod_name} -n {namespace} {follow_flag}" + stream_command(command) + return + except Exception as e: + print(f"Could not find the pod {pod_name}: {e}") + + try: + pods = run_command(f"kubectl get pods -n {namespace} -o json") + pods = json.loads(pods) + pod_list = [item["metadata"]["name"] for item in pods["items"]] + except Exception as e: + print(f"Could not fetch any pods in namespace {namespace}: {e}") + return + + if not pod_list: + print(f"Could not fetch any pods in namespace {namespace}") + return + + q = [ + inquirer.List( + name="pod", + message="Please choose a pod", + choices=pod_list, + ) + ] + selected = inquirer.prompt(q, theme=GreenPassion()) + if selected: + pod_name = selected["pod"] + try: + command = f"kubectl logs pod/{pod_name} -n {namespace} {follow_flag}" + stream_command(command) + except Exception as e: + print(f"Please 
consider waiting for the pod to become available. Encountered: {e}") + else: + pass # cancelled by user + + if __name__ == "__main__": cli() From 576f67f9dffce2ce882dd8e64139ef63b7eea4b8 Mon Sep 17 00:00:00 2001 From: Grant Date: Mon, 26 Aug 2024 20:25:03 -0500 Subject: [PATCH 161/710] perform `uv lock` --- uv.lock | 177 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 91 insertions(+), 86 deletions(-) diff --git a/uv.lock b/uv.lock index 3df666887..586994544 100644 --- a/uv.lock +++ b/uv.lock @@ -2,12 +2,12 @@ version = 1 requires-python = ">=3.11" [[package]] -name = "attrs" -version = "24.2.0" +name = "ansicon" +version = "1.89.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fc/0f/aafca9af9315aee06a89ffde799a10a582fe8de76c563ee80bbcdc08b3fb/attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346", size = 792678 } +sdist = { url = "https://files.pythonhosted.org/packages/b6/e2/1c866404ddbd280efedff4a9f15abfe943cb83cde6e895022370f3a61f85/ansicon-1.89.0.tar.gz", hash = "sha256:e4d039def5768a47e4afec8e89e83ec3ae5a26bf00ad851f914d1240b444d2b1", size = 67312 } wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/21/5b6702a7f963e95456c0de2d495f67bf5fd62840ac655dc451586d23d39a/attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2", size = 63001 }, + { url = "https://files.pythonhosted.org/packages/75/f9/f1c10e223c7b56a38109a3f2eb4e7fe9a757ea3ed3a166754fb30f65e466/ansicon-1.89.0-py2.py3-none-any.whl", hash = "sha256:f1def52d17f65c2c9682cf8370c03f541f410c1752d6a14029f97318e4b9dfec", size = 63675 }, ] [[package]] @@ -19,6 +19,20 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/b9/fa/123043af240e49752f1c4bd24da5053b6bd00cad78c2be53c0d1e8b975bc/backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34", size = 30181 }, ] +[[package]] +name = "blessed" +version = "1.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jinxed", marker = "platform_system == 'Windows'" }, + { name = "six" }, + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/25/ae/92e9968ad23205389ec6bd82e2d4fca3817f1cdef34e10aa8d529ef8b1d7/blessed-1.20.0.tar.gz", hash = "sha256:2cdd67f8746e048f00df47a2880f4d6acbcdb399031b604e34ba8f71d5787680", size = 6655612 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/98/584f211c3a4bb38f2871fa937ee0cc83c130de50c955d6c7e2334dbf4acb/blessed-1.20.0-py2.py3-none-any.whl", hash = "sha256:0c542922586a265e699188e52d5f5ac5ec0dd517e5a1041d90d2bbf23f906058", size = 58372 }, +] + [[package]] name = "blinker" version = "1.8.2" @@ -217,6 +231,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 }, ] +[[package]] +name = "editor" +version = "1.6.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "runs" }, + { name = "xmod" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2a/92/734a4ab345914259cb6146fd36512608ea42be16195375c379046f33283d/editor-1.6.6.tar.gz", hash = "sha256:bb6989e872638cd119db9a4fce284cd8e13c553886a1c044c6b8d8a160c871f8", size = 3197 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1b/c2/4bc8cd09b14e28ce3f406a8b05761bed0d785d1ca8c2a5c6684d884c66a2/editor-1.6.6-py3-none-any.whl", hash = "sha256:e818e6913f26c2a81eadef503a2741d7cca7f235d20e217274a009ecd5a74abf", size = 4017 }, +] + [[package]] name = "flask" version = "3.0.3" @@ -268,6 +295,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c0/14/362d31bf1076b21e1bcdcb0dc61944822ff263937b804a79231df2774d28/importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1", size = 26269 }, ] +[[package]] +name = "inquirer" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "blessed" }, + { name = "editor" }, + { name = "readchar" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/06/ef91eb8f3feafb736aa33dcb278fc9555d17861aa571b684715d095db24d/inquirer-3.4.0.tar.gz", hash = "sha256:8edc99c076386ee2d2204e5e3653c2488244e82cb197b2d498b3c1b5ffb25d0b", size = 14472 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/b2/be907c8c0f8303bc4b10089f5470014c3bf3521e9b8d3decf3037fd94725/inquirer-3.4.0-py3-none-any.whl", hash = "sha256:bb0ec93c833e4ce7b51b98b1644b0a4d2bb39755c39787f6a504e4fee7a11b60", size = 18077 }, +] + [[package]] name = "itsdangerous" version = "2.2.0" @@ -335,30 +376,15 @@ wheels = [ ] [[package]] -name = "jsonschema" -version = "4.23.0" +name = "jinxed" +version = "1.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "attrs" }, - { name = "jsonschema-specifications" }, - { name = "referencing" }, - { name = "rpds-py" }, + { name = "ansicon", marker = "platform_system == 'Windows'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/38/2e/03362ee4034a4c917f697890ccd4aec0800ccf9ded7f511971c75451deec/jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4", size = 325778 } +sdist = { url = "https://files.pythonhosted.org/packages/20/d0/59b2b80e7a52d255f9e0ad040d2e826342d05580c4b1d7d7747cfb8db731/jinxed-1.3.0.tar.gz", hash = "sha256:1593124b18a41b7a3da3b078471442e51dbad3d77b4d4f2b0c26ab6f7d660dbf", size = 80981 } wheels = [ - { url = "https://files.pythonhosted.org/packages/69/4a/4f9dbeb84e8850557c02365a0eee0649abe5eb1d84af92a25731c6c0f922/jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566", size = 88462 }, -] - -[[package]] -name = "jsonschema-specifications" -version = "2023.12.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "referencing" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f8/b9/cc0cc592e7c195fb8a650c1d5990b10175cf13b4c97465c72ec841de9e4b/jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc", size = 13983 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ee/07/44bd408781594c4d0a027666ef27fab1e441b109dc3b76b4f836f8fd04fe/jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c", size = 18482 }, + { url = "https://files.pythonhosted.org/packages/27/e3/0e0014d6ab159d48189e92044ace13b1e1fe9aa3024ba9f4e8cf172aa7c2/jinxed-1.3.0-py2.py3-none-any.whl", hash = "sha256:b993189f39dc2d7504d802152671535b06d380b26d78070559551cbf92df4fc5", size = 33085 }, ] [[package]] @@ -625,6 +651,15 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, ] +[[package]] +name = "readchar" +version = "4.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/31/2934981710c63afa9c58947d2e676093ce4bb6c7ce60aac2fcc4be7d98d0/readchar-4.2.0.tar.gz", hash = "sha256:44807cbbe377b72079fea6cba8aa91c809982d7d727b2f0dbb2d1a8084914faa", size = 9691 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/6f/ca076ad4d18b3d33c31c304fb7e68dd9ce2bfdb49fb8874611ad7c55e969/readchar-4.2.0-py3-none-any.whl", hash = "sha256:2a587a27c981e6d25a518730ad4c88c429c315439baa6fda55d7a8b3ac4cb62a", size = 9349 }, +] + [[package]] name = "readme-renderer" version = "44.0" @@ -639,19 +674,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310 }, ] -[[package]] -name = "referencing" -version = "0.35.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "attrs" }, - { name = "rpds-py" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/99/5b/73ca1f8e72fff6fa52119dbd185f73a907b1989428917b24cff660129b6d/referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c", size = 62991 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/59/2056f61236782a2c86b33906c025d4f4a0b17be0161b63b70fd9e8775d36/referencing-0.35.1-py3-none-any.whl", hash = 
"sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de", size = 26684 }, -] - [[package]] name = "requests" version = "2.32.3" @@ -714,53 +736,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/87/67/a37f6214d0e9fe57f6ae54b2956d550ca8365857f42a1ce0392bb21d9410/rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222", size = 240681 }, ] -[[package]] -name = "rpds-py" -version = "0.20.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/55/64/b693f262791b818880d17268f3f8181ef799b0d187f6f731b1772e05a29a/rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121", size = 25814 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ab/2a/191374c52d7be0b056cc2a04d718d2244c152f915d4a8d2db2aacc526189/rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489", size = 318369 }, - { url = "https://files.pythonhosted.org/packages/0e/6a/2c9fdcc6d235ac0d61ec4fd9981184689c3e682abd05e3caa49bccb9c298/rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318", size = 311303 }, - { url = "https://files.pythonhosted.org/packages/d2/b2/725487d29633f64ef8f9cbf4729111a0b61702c8f8e94db1653930f52cce/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db", size = 366424 }, - { url = "https://files.pythonhosted.org/packages/7a/8c/668195ab9226d01b7cf7cd9e59c1c0be1df05d602df7ec0cf46f857dcf59/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5", size = 368359 }, - { url = "https://files.pythonhosted.org/packages/52/28/356f6a39c1adeb02cf3e5dd526f5e8e54e17899bef045397abcfbf50dffa/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5", size = 394886 }, - { url = "https://files.pythonhosted.org/packages/a2/65/640fb1a89080a8fb6f4bebd3dafb65a2edba82e2e44c33e6eb0f3e7956f1/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6", size = 432416 }, - { url = "https://files.pythonhosted.org/packages/a7/e8/85835077b782555d6b3416874b702ea6ebd7db1f145283c9252968670dd5/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209", size = 354819 }, - { url = "https://files.pythonhosted.org/packages/4f/87/1ac631e923d65cbf36fbcfc6eaa702a169496de1311e54be142f178e53ee/rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3", size = 373282 }, - { url = "https://files.pythonhosted.org/packages/e4/ce/cb316f7970189e217b998191c7cf0da2ede3d5437932c86a7210dc1e9994/rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272", size = 541540 }, - { url = "https://files.pythonhosted.org/packages/90/d7/4112d7655ec8aff168ecc91d4ceb51c557336edde7e6ccf6463691a2f253/rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad", size = 547640 }, - { url = 
"https://files.pythonhosted.org/packages/ab/44/4f61d64dfed98cc71623f3a7fcb612df636a208b4b2c6611eaa985e130a9/rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58", size = 525555 }, - { url = "https://files.pythonhosted.org/packages/35/f2/a862d81eacb21f340d584cd1c749c289979f9a60e9229f78bffc0418a199/rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0", size = 199338 }, - { url = "https://files.pythonhosted.org/packages/cc/ec/77d0674f9af4872919f3738018558dd9d37ad3f7ad792d062eadd4af7cba/rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c", size = 213585 }, - { url = "https://files.pythonhosted.org/packages/89/b7/f9682c5cc37fcc035f4a0fc33c1fe92ec9cbfdee0cdfd071cf948f53e0df/rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6", size = 321468 }, - { url = "https://files.pythonhosted.org/packages/b8/ad/fc82be4eaceb8d444cb6fc1956ce972b3a0795104279de05e0e4131d0a47/rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b", size = 313062 }, - { url = "https://files.pythonhosted.org/packages/0e/1c/6039e80b13a08569a304dc13476dc986352dca4598e909384db043b4e2bb/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739", size = 370168 }, - { url = "https://files.pythonhosted.org/packages/dc/c9/5b9aa35acfb58946b4b785bc8e700ac313669e02fb100f3efa6176a83e81/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c", size = 371376 }, - { url = "https://files.pythonhosted.org/packages/7b/dd/0e0dbeb70d8a5357d2814764d467ded98d81d90d3570de4fb05ec7224f6b/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee", size = 397200 }, - { url = "https://files.pythonhosted.org/packages/e4/da/a47d931eb688ccfd77a7389e45935c79c41e8098d984d87335004baccb1d/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96", size = 426824 }, - { url = "https://files.pythonhosted.org/packages/0f/f7/a59a673594e6c2ff2dbc44b00fd4ecdec2fc399bb6a7bd82d612699a0121/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4", size = 357967 }, - { url = "https://files.pythonhosted.org/packages/5f/61/3ba1905396b2cb7088f9503a460b87da33452da54d478cb9241f6ad16d00/rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef", size = 378905 }, - { url = "https://files.pythonhosted.org/packages/08/31/6d0df9356b4edb0a3a077f1ef714e25ad21f9f5382fc490c2383691885ea/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821", size = 546348 }, - { url = "https://files.pythonhosted.org/packages/ae/15/d33c021de5cb793101df9961c3c746dfc476953dbbf5db337d8010dffd4e/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940", size = 553152 }, - { url = 
"https://files.pythonhosted.org/packages/70/2d/5536d28c507a4679179ab15aa0049440e4d3dd6752050fa0843ed11e9354/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174", size = 528807 }, - { url = "https://files.pythonhosted.org/packages/e3/62/7ebe6ec0d3dd6130921f8cffb7e34afb7f71b3819aa0446a24c5e81245ec/rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139", size = 200993 }, - { url = "https://files.pythonhosted.org/packages/ec/2f/b938864d66b86a6e4acadefdc56de75ef56f7cafdfd568a6464605457bd5/rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585", size = 214458 }, - { url = "https://files.pythonhosted.org/packages/99/32/43b919a0a423c270a838ac2726b1c7168b946f2563fd99a51aaa9692d00f/rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29", size = 321465 }, - { url = "https://files.pythonhosted.org/packages/58/a9/c4d899cb28e9e47b0ff12462e8f827381f243176036f17bef9c1604667f2/rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91", size = 312900 }, - { url = "https://files.pythonhosted.org/packages/8f/90/9e51670575b5dfaa8c823369ef7d943087bfb73d4f124a99ad6ef19a2b26/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24", size = 370973 }, - { url = "https://files.pythonhosted.org/packages/fc/c1/523f2a03f853fc0d4c1acbef161747e9ab7df0a8abf6236106e333540921/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7", size = 370890 }, - { url = "https://files.pythonhosted.org/packages/51/ca/2458a771f16b0931de4d384decbe43016710bc948036c8f4562d6e063437/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9", size = 397174 }, - { url = "https://files.pythonhosted.org/packages/00/7d/6e06807f6305ea2408b364efb0eef83a6e21b5e7b5267ad6b473b9a7e416/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8", size = 426449 }, - { url = "https://files.pythonhosted.org/packages/8c/d1/6c9e65260a819a1714510a7d69ac1d68aa23ee9ce8a2d9da12187263c8fc/rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879", size = 357698 }, - { url = "https://files.pythonhosted.org/packages/5d/fb/ecea8b5286d2f03eec922be7173a03ed17278944f7c124348f535116db15/rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f", size = 378530 }, - { url = "https://files.pythonhosted.org/packages/e3/e3/ac72f858957f52a109c588589b73bd2fad4a0fc82387fb55fb34aeb0f9cd/rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c", size = 545753 }, - { url = "https://files.pythonhosted.org/packages/b2/a4/a27683b519d5fc98e4390a3b130117d80fd475c67aeda8aac83c0e8e326a/rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2", size = 552443 }, - { url = 
"https://files.pythonhosted.org/packages/a1/ed/c074d248409b4432b1ccb2056974175fa0af2d1bc1f9c21121f80a358fa3/rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57", size = 528380 }, - { url = "https://files.pythonhosted.org/packages/d5/bd/04caf938895d2d78201e89c0c8a94dfd9990c34a19ff52fb01d0912343e3/rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a", size = 200540 }, - { url = "https://files.pythonhosted.org/packages/95/cc/109eb8b9863680411ae703664abacaa035820c7755acc9686d5dd02cdd2e/rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2", size = 214111 }, -] - [[package]] name = "rsa" version = "4.9" @@ -773,6 +748,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/49/97/fa78e3d2f65c02c8e1268b9aba606569fe97f6c8f7c2d74394553347c145/rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7", size = 34315 }, ] +[[package]] +name = "runs" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "xmod" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/6d/b9aace390f62db5d7d2c77eafce3d42774f27f1829d24fa9b6f598b3ef71/runs-1.2.2.tar.gz", hash = "sha256:9dc1815e2895cfb3a48317b173b9f1eac9ba5549b36a847b5cc60c3bf82ecef1", size = 5474 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/d6/17caf2e4af1dec288477a0cbbe4a96fbc9b8a28457dce3f1f452630ce216/runs-1.2.2-py3-none-any.whl", hash = "sha256:0980dcbc25aba1505f307ac4f0e9e92cbd0be2a15a1e983ee86c24c87b839dfd", size = 7033 }, +] + [[package]] name = "secretstorage" version = "3.3.3" @@ -841,7 +828,7 @@ dependencies = [ { name = "click" 
}, { name = "docker" }, { name = "flask" }, - { name = "jsonschema" }, + { name = "inquirer" }, { name = "kubernetes" }, { name = "pyyaml" }, { name = "rich" }, @@ -860,7 +847,7 @@ requires-dist = [ { name = "click", specifier = "==8.1.7" }, { name = "docker", specifier = "==7.1.0" }, { name = "flask", specifier = "==3.0.3" }, - { name = "jsonschema", specifier = "==4.23.0" }, + { name = "inquirer", specifier = "==3.4.0" }, { name = "kubernetes", specifier = "==30.1.0" }, { name = "pyyaml", specifier = "==6.0.2" }, { name = "rich", specifier = "==13.7.1" }, @@ -868,6 +855,15 @@ requires-dist = [ { name = "twine", marker = "extra == 'build'" }, ] +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, +] + [[package]] name = "websocket-client" version = "1.8.0" @@ -889,6 +885,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/4b/84/997bbf7c2bf2dc3f09565c6d0b4959fefe5355c18c4096cfd26d83e0785b/werkzeug-3.0.4-py3-none-any.whl", hash = "sha256:02c9eb92b7d6c06f31a782811505d2157837cea66aaede3e217c7c27c039476c", size = 227554 }, ] +[[package]] +name = "xmod" +version = "1.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/b2/e3edc608823348e628a919e1d7129e641997afadd946febdd704aecc5881/xmod-1.8.1.tar.gz", hash = 
"sha256:38c76486b9d672c546d57d8035df0beb7f4a9b088bc3fb2de5431ae821444377", size = 3988 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/6b/0dc75b64a764ea1cb8e4c32d1fb273c147304d4e5483cd58be482dc62e45/xmod-1.8.1-py3-none-any.whl", hash = "sha256:a24e9458a4853489042522bdca9e50ee2eac5ab75c809a91150a8a7f40670d48", size = 4610 }, +] + [[package]] name = "zipp" version = "3.20.0" From 05da76daf3a74d51676fe46c744b18061a73a7a6 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 26 Aug 2024 22:23:37 +0100 Subject: [PATCH 162/710] rename warcli cmd to warnet --- docs/connecting-local-nodes.md | 10 ++-- docs/lightning.md | 12 ++--- docs/logging_monitoring.md | 10 ++-- docs/quickrun.md | 14 +++--- docs/running.md | 10 ++-- docs/scenarios.md | 16 +++---- docs/warcli.md | 46 +++++++++---------- pyproject.toml | 2 +- resources/charts/namespaces/values.yaml | 2 +- .../namespace-defaults.yaml | 2 +- resources/scripts/apidocs.py | 2 +- resources/scripts/setup_minikube.sh | 4 +- src/warnet/main.py | 4 +- test/conf_test.py | 4 +- test/dag_connection_test.py | 4 +- test/ln_test.py | 32 ++++++------- test/logging_test.py | 6 +-- test/rpc_test.py | 22 ++++----- test/scenarios_test.py | 10 ++-- test/test_base.py | 8 ++-- 20 files changed, 110 insertions(+), 110 deletions(-) diff --git a/docs/connecting-local-nodes.md b/docs/connecting-local-nodes.md index 50455e704..96407b3f2 100644 --- a/docs/connecting-local-nodes.md +++ b/docs/connecting-local-nodes.md @@ -7,8 +7,8 @@ ### Run Warnet network ```shell -warcli cluster deploy -warcli network start +warnet cluster deploy +warnet network start ``` ### Install Telepresence @@ -71,9 +71,9 @@ telepresence intercept local-bitcoind --port 18444 -- bitcoind --regtest --datad ### Connect to local bitcoind from cluster ```shell -warcli bitcoin rpc 0 addnode "local-bitcoind:18444" "onetry" +warnet bitcoin rpc 0 addnode "local-bitcoind:18444" "onetry" # Check that the local node was added -warcli bitcoin rpc 0 
getpeerinfo +warnet bitcoin rpc 0 getpeerinfo ``` ### Disconnect and remove Telepresence @@ -85,4 +85,4 @@ telepresence quit -s telepresent helm uninstall # Remove Telepresence from your computer sudo rm /usr/local/bin/telepresence -``` \ No newline at end of file +``` diff --git a/docs/lightning.md b/docs/lightning.md index 2ae634851..72404bc4c 100644 --- a/docs/lightning.md +++ b/docs/lightning.md @@ -42,22 +42,22 @@ data directory: [ln.graphml](../test/data/ln.graphml) ## Running the Lightning network -When warnet is started with `warcli network start` the bitcoin containers will +When warnet is started with `warnet network start` the bitcoin containers will be started first followed by the lightning node containers. It may require a few automatic restarts before the lightning nodes start up and connect to their -corresponding bitcoin nodes. Use `warcli network status` to monitor container status +corresponding bitcoin nodes. Use `warnet network status` to monitor container status and wait for all containers to be `running`. To create the lightning channels specified in the graph file, run the included scenario: -`warcli scenarios run ln_init` +`warnet scenarios run ln_init` This [scenario](../src/scenarios/ln_init.py) will generate blocks, fund the wallets in the bitcoin nodes, and open the channels from the graph. Each of these steps requires some waiting as transactions are confirmed in the warnet blockchain and lightning nodes gossip their channel announcements to each other. -Use `warcli scenarios active` to monitor the status of the scenario. When it is +Use `warnet scenarios active` to monitor the status of the scenario. When it is complete the subprocess will exit and it will indicate `Active: False`. At that point, the lightning network is ready for activity. @@ -66,7 +66,7 @@ point, the lightning network is ready for activity. 
Warnet can export data required to run [sim-ln](https://github.com/bitcoin-dev-project/sim-ln) with a warnet network. -With a network running, execute: `warcli network export` with optional argument +With a network running, execute: `warnet network export` with optional argument `--network=` (default is "warnet"). This will copy all lightning node credentials like SSL certificates and macaroons into a local directory as well as generate a JSON file required by sim-ln. @@ -74,7 +74,7 @@ well as generate a JSON file required by sim-ln. Example (see sim-ln docs for exact API): ``` -$ warcli network export +$ warnet network export /Users/bitcoin-dev-project/.warnet/warnet/warnet/simln $ ls /Users/bitcoin-dev-project/.warnet/warnet/warnet/simln diff --git a/docs/logging_monitoring.md b/docs/logging_monitoring.md index c205806aa..a0142c642 100644 --- a/docs/logging_monitoring.md +++ b/docs/logging_monitoring.md @@ -14,7 +14,7 @@ Examples of information provided: - what scenarios are running - warnet RPC requests -Commands: `warcli network logs` or `warcli network logs --follow`. +Commands: `warnet network logs` or `warnet network logs --follow`. See more details in [warcli](/docs/warcli.md#warcli-network-logs) @@ -25,7 +25,7 @@ These are tank level or pod level log output from a Bitcoin Core node, useful fo Example: ```sh -$ warcli bitcoin debug-log 0 +$ warnet bitcoin debug-log 0 2023-10-11T17:54:39.616974Z Bitcoin Core version v25.0.0 (release build) @@ -38,12 +38,12 @@ For logs of lightning nodes, kubectl is required. ### Aggregated logs from all nodes -Aggregated logs can be searched using `warcli bitcoin grep-logs` with regex patterns. +Aggregated logs can be searched using `warnet bitcoin grep-logs` with regex patterns. 
Example: ```sh -$ warcli bitcoin grep-logs 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d +$ warnet bitcoin grep-logs 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d warnet_test_uhynisdj_tank_000001: 2023-10-11T17:44:48.716582Z [miner] AddToWallet 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d newupdate warnet_test_uhynisdj_tank_000001: 2023-10-11T17:44:48.717787Z [miner] Submitting wtx 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d to mempool for relay @@ -77,7 +77,7 @@ It might take a couple minutes to get the pod running. If you see `error: unable The Grafana dashboard (and API) will be accessible without requiring authentication at `http://localhost:3000`. -The `install_logging` script will need to be installed before starting the network in order to collect the information for monitoring and metrics. Restart the network with `warcli network down && warcli network up` if necessary. +The `install_logging` script will need to be installed before starting the network in order to collect the information for monitoring and metrics. Restart the network with `warnet network down && warnet network up` if necessary. ### Prometheus diff --git a/docs/quickrun.md b/docs/quickrun.md index 321e7f92b..2bb4197f5 100644 --- a/docs/quickrun.md +++ b/docs/quickrun.md @@ -29,36 +29,36 @@ pip install -e . ## Running > [!TIP] -> When developing locally add the `--dev` flag to `warcli cluster deploy` to enable dev mode with hot-reloading server. +> When developing locally add the `--dev` flag to `warnet cluster deploy` to enable dev mode with hot-reloading server. ### Using minikube To run a local cluster using minikube: ```bash -warcli cluster setup-minikube +warnet cluster setup-minikube -warcli cluster deploy +warnet cluster deploy ``` ### Other cluster types -If not using minikube (e.g. 
using Docker Desktop or a managed cluster), `warcli` commands will operate natively on the current Kubernetes context, so you can simply run: +If not using minikube (e.g. using Docker Desktop or a managed cluster), `warnet` commands will operate natively on the current Kubernetes context, so you can simply run: ```bash -warcli cluster deploy +warnet cluster deploy ``` ...to deploy warnet to your cluster. -`warcli cluster deploy` also automatically configures port forwarding to the Server in the cluster. +`warnet cluster deploy` also automatically configures port forwarding to the Server in the cluster. ## Stopping To tear down the cluster: ```bash -warcli cluster teardown +warnet cluster teardown ``` ## Log location diff --git a/docs/running.md b/docs/running.md index 04339a329..9d947fe01 100644 --- a/docs/running.md +++ b/docs/running.md @@ -8,31 +8,31 @@ See more details in [warcli](/docs/warcli.md), examples: To start the server run: ```bash -warcli cluster deploy +warnet cluster deploy ``` Start a network from a graph file: ```bash -warcli network start resources/graphs/default.graphml +warnet network start resources/graphs/default.graphml ``` Make sure all tanks are running with: ```bash -warcli network status +warnet network status ``` Check if the edges of the graph (bitcoin p2p connections) are complete: ```bash -warcli network connected +warnet network connected ``` _Optional_ Check out the logs with: ```bash -warcli network logs -f +warnet network logs -f ``` If that looks all good, give [scenarios](/docs/scenarios.md) a try. 
diff --git a/docs/scenarios.md b/docs/scenarios.md index acdc470b2..1cd57c2c3 100644 --- a/docs/scenarios.md +++ b/docs/scenarios.md @@ -10,16 +10,16 @@ See [`src/warnet/scenarios`](../src/warnet/scenarios) for examples of how these To see available scenarios (loaded from the default directory): ```bash -warcli scenarios available +warnet scenarios available ``` -Once a scenario is selected it can be run with `warcli scenarios run [--network=warnet] [scenario_params]`. +Once a scenario is selected it can be run with `warnet scenarios run [--network=warnet] [scenario_params]`. The [`miner_std`](../src/warnet/scenarios/miner_std.py) scenario is a good one to start with as it automates block generation: ```bash # Have all nodes generate a block 5 seconds apart in a round-robin -warcli scenarios run miner_std --allnodes --interval=5 +warnet scenarios run miner_std --allnodes --interval=5 ``` This will run the scenario in a background thread on the server until it exits or is stopped by the user. @@ -27,20 +27,20 @@ This will run the scenario in a background thread on the server until it exits o Active scenarios can be listed and terminated by PID: ```bash -$ warcli scenarios available +$ warnet scenarios available miner_std Generate blocks over time. Options: [--allnodes | --interval= | --mature] sens_relay Send a transaction using sensitive relay tx_flood Generate 100 blocks with 100 TXs each -$ warcli scenarios run tx_flood +$ warnet scenarios run tx_flood Running scenario tx_flood with PID 14683 in the background... -$ warcli scenarios active +$ warnet scenarios active ┃ Active ┃ Cmd ┃ Network ┃ Pid ┃ Return_code ┃ ┡━━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━━━┩ │ True │ tx_flood │ warnet │ 14683 │ None ┃ -$ warcli scenarios stop 14683 +$ warnet scenarios stop 14683 Stopped scenario with PID 14683. 
``` @@ -50,5 +50,5 @@ You can write your own scenario file locally and upload it to the server with the [run-file](/docs/warcli.md#warcli-scenarios-run-file) command (example): ```bash -warcli scenarios run-file /home/me/bitcoin_attack.py +warnet scenarios run-file /home/me/bitcoin_attack.py ``` diff --git a/docs/warcli.md b/docs/warcli.md index ef9ea9460..eed82fdf3 100644 --- a/docs/warcli.md +++ b/docs/warcli.md @@ -1,23 +1,23 @@ -# `warcli` +# `warnet` The command-line interface tool for Warnet. -Once `warnet` is running it can be interacted with using the cli tool `warcli`. +Once `warnet` is running it can be interacted with using the cli tool `warnet`. -Most `warcli` commands accept a `--network` option, which allows you to specify +Most `warnet` commands accept a `--network` option, which allows you to specify the network you want to control. This is set by default to `--network="warnet"` to simplify default operation. -Execute `warcli --help` or `warcli help` to see a list of command categories. +Execute `warnet --help` or `warnet help` to see a list of command categories. Help text is provided, with optional parameters in [square brackets] and required parameters in . -`warcli` commands are organized in a hierarchy of categories and subcommands. +`warnet` commands are organized in a hierarchy of categories and subcommands. 
## API Commands -### `warcli auth` +### `warnet auth` Authenticate with a warnet cluster using a kube config file options: @@ -25,7 +25,7 @@ options: |-------------|--------|------------|-----------| | kube_config | String | yes | | -### `warcli create` +### `warnet create` Create a new warnet project in the specified directory options: @@ -33,7 +33,7 @@ options: |-----------|--------|------------|-----------| | directory | Path | yes | | -### `warcli deploy` +### `warnet deploy` Deploy a warnet with topology loaded from \ options: @@ -41,19 +41,19 @@ options: |-----------|--------|------------|-----------| | directory | Path | yes | | -### `warcli down` +### `warnet down` Bring down a running warnet -### `warcli init` +### `warnet init` Initialize a warnet project in the current directory -### `warcli quickstart` +### `warnet quickstart` Setup warnet -### `warcli run` +### `warnet run` Run a scenario from a file options: @@ -62,11 +62,11 @@ options: | scenario_file | Path | yes | | | additional_args | String | | | -### `warcli status` +### `warnet status` Display the unified status of the Warnet network and active scenarios -### `warcli stop` +### `warnet stop` Stop a running scenario or all scenarios options: @@ -76,7 +76,7 @@ options: ## Admin -### `warcli admin create` +### `warnet admin create` Create a new warnet project in the specified directory options: @@ -84,17 +84,17 @@ options: |-----------|--------|------------|-----------| | directory | Func | yes | | -### `warcli admin init` +### `warnet admin init` Initialize a warnet project in the current directory -### `warcli admin namespaces` +### `warnet admin namespaces` Namespaces commands ## Bitcoin -### `warcli bitcoin debug-log` +### `warnet bitcoin debug-log` Fetch the Bitcoin Core debug log from \ options: @@ -102,7 +102,7 @@ options: |--------|--------|------------|-----------| | tank | String | yes | | -### `warcli bitcoin grep-logs` +### `warnet bitcoin grep-logs` Grep combined bitcoind logs using 
regex \ options: @@ -112,7 +112,7 @@ options: | show_k8s_timestamps | Bool | | False | | no_sort | Bool | | False | -### `warcli bitcoin messages` +### `warnet bitcoin messages` Fetch messages sent between \ and \ in [chain] options: @@ -122,7 +122,7 @@ options: | tank_b | String | yes | | | chain | String | | "regtest" | -### `warcli bitcoin rpc` +### `warnet bitcoin rpc` Call bitcoin-cli \ [params] on \ options: @@ -134,7 +134,7 @@ options: ## Graph -### `warcli graph import-json` +### `warnet graph import-json` Create a cycle graph with nodes imported from lnd `describegraph` JSON file, and additionally include 7 extra random outbounds per node. Include lightning channels and their policies as well. @@ -143,7 +143,7 @@ Create a cycle graph with nodes imported from lnd `describegraph` JSON file, ## Image -### `warcli image build` +### `warnet image build` Build bitcoind and bitcoin-cli from \ at \ as \:\. Optionally deploy to remote registry using --action=push, otherwise image is loaded to local registry. 
diff --git a/pyproject.toml b/pyproject.toml index f194ee359..ef8390fbe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ dependencies = [ ] [project.scripts] -warcli = "warnet.main:cli" +warnet = "warnet.main:cli" [project.urls] Homepage = "https://warnet.dev" diff --git a/resources/charts/namespaces/values.yaml b/resources/charts/namespaces/values.yaml index a09f671dd..c28d2d0df 100644 --- a/resources/charts/namespaces/values.yaml +++ b/resources/charts/namespaces/values.yaml @@ -1,5 +1,5 @@ users: - - name: warcli-user + - name: warnet-user roles: - pod-viewer - pod-manager diff --git a/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml b/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml index a09f671dd..c28d2d0df 100644 --- a/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml +++ b/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml @@ -1,5 +1,5 @@ users: - - name: warcli-user + - name: warnet-user roles: - pod-viewer - pod-manager diff --git a/resources/scripts/apidocs.py b/resources/scripts/apidocs.py index 42607de66..ef7e00902 100755 --- a/resources/scripts/apidocs.py +++ b/resources/scripts/apidocs.py @@ -16,7 +16,7 @@ def print_cmd(cmd, super=""): global doc - doc += f"### `warcli{super} {cmd['name']}`" + "\n" + doc += f"### `warnet{super} {cmd['name']}`" + "\n" doc += cmd["help"].strip().replace("<", "\\<") + "\n" if len(cmd["params"]) > 1: doc += "\noptions:\n" diff --git a/resources/scripts/setup_minikube.sh b/resources/scripts/setup_minikube.sh index 7f1cd234e..f835eed7c 100755 --- a/resources/scripts/setup_minikube.sh +++ b/resources/scripts/setup_minikube.sh @@ -116,6 +116,6 @@ eval "$MINIKUBE_CMD" echo print_message "" "Next, run the following command to deploy warnet" "" -print_message "" " warcli cluster deploy" "$BOLD" -print_partial_message " After that, run " "warcli network start" " to start the network." 
"$BOLD" +print_message "" " warnet cluster deploy" "$BOLD" +print_partial_message " After that, run " "warnet network start" " to start the network." "$BOLD" diff --git a/src/warnet/main.py b/src/warnet/main.py index b8b44c534..02e6f3930 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -121,7 +121,7 @@ def quickstart(): custom_graph(nodes, connections, version, custom_network_path) click.secho("\nSetup completed successfully!", fg="green", bold=True) click.echo("\nRun the following command to deploy this network:") - click.echo(f"warcli deploy {custom_network_path}") + click.echo(f"warnet deploy {custom_network_path}") except Exception as e: click.secho(f"An error occurred while running the quick start script:\n\n{e}\n\n", fg="red") click.secho( @@ -227,7 +227,7 @@ def auth(kube_config: str) -> None: with open(current_kubeconfig) as file: contents = yaml.safe_load(file) click.secho( - f"\nWarcli's current context is now set to: {contents['current-context']}", fg="green" + f"\nwarnet's current context is now set to: {contents['current-context']}", fg="green" ) diff --git a/test/conf_test.py b/test/conf_test.py index 97b9b0fcc..c6495ada6 100755 --- a/test/conf_test.py +++ b/test/conf_test.py @@ -24,7 +24,7 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"deploy {self.network_dir}")) + self.log.info(self.warnet(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") def check_uacomment(self): @@ -34,7 +34,7 @@ def get_uacomment(): for tank in tanks[::-1]: try: name = tank.metadata.name - info = json.loads(self.warcli(f"bitcoin rpc {name} getnetworkinfo")) + info = json.loads(self.warnet(f"bitcoin rpc {name} getnetworkinfo")) subver = info["subversion"] # Regex pattern to match the uacomment inside parentheses diff --git a/test/dag_connection_test.py b/test/dag_connection_test.py index 1827b5e7a..195ae0e7e 100755 --- a/test/dag_connection_test.py +++ 
b/test/dag_connection_test.py @@ -20,13 +20,13 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"deploy {self.network_dir}")) + self.log.info(self.warnet(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() def run_connect_dag_scenario(self): self.log.info("Running connect_dag scenario") - self.warcli("run test/data/scenario_connect_dag.py") + self.warnet("run test/data/scenario_connect_dag.py") self.wait_for_all_scenarios() diff --git a/test/ln_test.py b/test/ln_test.py index 3534dab27..22a86d1ba 100755 --- a/test/ln_test.py +++ b/test/ln_test.py @@ -28,7 +28,7 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"network start {self.graph_file_path}")) + self.log.info(self.warnet(f"network start {self.graph_file_path}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() @@ -41,8 +41,8 @@ def get_cb_forwards(self, index): def run_ln_init_scenario(self): self.log.info("Running LN Init scenario") - self.warcli("bitcoin rpc 0 getblockcount") - self.warcli("scenarios run ln_init") + self.warnet("bitcoin rpc 0 getblockcount") + self.warnet("scenarios run ln_init") self.wait_for_all_scenarios() scenario_return_code = self.get_scenario_return_code("ln_init") if scenario_return_code != 0: @@ -50,9 +50,9 @@ def run_ln_init_scenario(self): def test_channel_policies(self): self.log.info("Ensuring node-level channel policy settings") - node2pub, node2host = json.loads(self.warcli("ln rpc 2 getinfo"))["uris"][0].split("@") - chan_id = json.loads(self.warcli("ln rpc 2 listchannels"))["channels"][0]["chan_id"] - chan = json.loads(self.warcli(f"ln rpc 2 getchaninfo {chan_id}")) + node2pub, node2host = json.loads(self.warnet("ln rpc 2 getinfo"))["uris"][0].split("@") + chan_id = json.loads(self.warnet("ln rpc 2 listchannels"))["channels"][0]["chan_id"] + chan = 
json.loads(self.warnet(f"ln rpc 2 getchaninfo {chan_id}")) # node_1 or node_2 is tank 2 with its non-default --bitcoin.timelockdelta=33 if chan["node1_policy"]["time_lock_delta"] != 33: @@ -65,15 +65,15 @@ def test_channel_policies(self): def test_ln_payment_0_to_2(self): self.log.info("Test LN payment from 0 -> 2") - inv = json.loads(self.warcli("ln rpc 2 addinvoice --amt=2000"))["payment_request"] + inv = json.loads(self.warnet("ln rpc 2 addinvoice --amt=2000"))["payment_request"] self.log.info(f"Got invoice from node 2: {inv}") self.log.info("Paying invoice from node 0...") - self.log.info(self.warcli(f"ln rpc 0 payinvoice -f {inv}")) + self.log.info(self.warnet(f"ln rpc 0 payinvoice -f {inv}")) self.wait_for_predicate(self.check_invoice_settled) self.log.info("Ensuring channel-level channel policy settings: source") - payment = json.loads(self.warcli("ln rpc 0 listpayments"))["payments"][0] + payment = json.loads(self.warnet("ln rpc 0 listpayments"))["payments"][0] assert ( payment["fee_msat"] == "5506" ), f"Expected fee_msat to be 5506, got {payment['fee_msat']}" @@ -83,26 +83,26 @@ def test_ln_payment_0_to_2(self): def test_ln_payment_2_to_0(self): self.log.info("Test LN payment from 2 -> 0") - inv = json.loads(self.warcli("ln rpc 0 addinvoice --amt=1000"))["payment_request"] + inv = json.loads(self.warnet("ln rpc 0 addinvoice --amt=1000"))["payment_request"] self.log.info(f"Got invoice from node 0: {inv}") self.log.info("Paying invoice from node 2...") - self.log.info(self.warcli(f"ln rpc 2 payinvoice -f {inv}")) + self.log.info(self.warnet(f"ln rpc 2 payinvoice -f {inv}")) self.wait_for_predicate(lambda: self.check_invoices(0) == 1) self.log.info("Ensuring channel-level channel policy settings: target") - payment = json.loads(self.warcli("ln rpc 2 listpayments"))["payments"][0] + payment = json.loads(self.warnet("ln rpc 2 listpayments"))["payments"][0] assert ( payment["fee_msat"] == "2213" ), f"Expected fee_msat to be 2213, got {payment['fee_msat']}" def 
test_simln(self): self.log.info("Engaging simln") - node2pub, _ = json.loads(self.warcli("ln rpc 2 getinfo"))["uris"][0].split("@") + node2pub, _ = json.loads(self.warnet("ln rpc 2 getinfo"))["uris"][0].split("@") activity = [ {"source": "ln-0", "destination": node2pub, "interval_secs": 1, "amount_msat": 2000} ] - self.warcli( + self.warnet( f"network export --exclude=[1] --activity={json.dumps(activity).replace(' ', '')}" ) self.wait_for_predicate(lambda: self.check_invoices(2) > 1) @@ -110,14 +110,14 @@ def test_simln(self): assert self.check_invoices(1) == 0, "Expected no invoices for node 1" def check_invoice_settled(self): - invs = json.loads(self.warcli("ln rpc 2 listinvoices"))["invoices"] + invs = json.loads(self.warnet("ln rpc 2 listinvoices"))["invoices"] if len(invs) > 0 and invs[0]["state"] == "SETTLED": self.log.info("Invoice settled") return True return False def check_invoices(self, index): - invs = json.loads(self.warcli(f"ln rpc {index} listinvoices"))["invoices"] + invs = json.loads(self.warnet(f"ln rpc {index} listinvoices"))["invoices"] settled = sum(1 for inv in invs if inv["state"] == "SETTLED") self.log.debug(f"Node {index} has {settled} settled invoices") return settled diff --git a/test/logging_test.py b/test/logging_test.py index 9a4f5bc3f..5f547fb53 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -54,7 +54,7 @@ def start_logging(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"deploy {self.network_dir}")) + self.log.info(self.warnet(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running", timeout=10 * 60) self.wait_for_all_edges() self.wait_for_endpoint_ready() @@ -93,8 +93,8 @@ def test_prometheus_and_grafana(self): miner_file = "resources/scenarios/miner_std.py" tx_flood_file = "resources/scenarios/tx_flood.py" - self.warcli(f"run {miner_file} --allnodes --interval=5 --mature") - self.warcli(f"run {tx_flood_file} --interval=1") + self.warnet(f"run 
{miner_file} --allnodes --interval=5 --mature") + self.warnet(f"run {tx_flood_file} --interval=1") prometheus_ds = requests.get("http://localhost:3000/api/datasources/name/Prometheus") assert prometheus_ds.status_code == 200 diff --git a/test/rpc_test.py b/test/rpc_test.py index 2a2c4d7fb..9cc0ee336 100755 --- a/test/rpc_test.py +++ b/test/rpc_test.py @@ -24,40 +24,40 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"deploy {self.network_dir}")) + self.log.info(self.warnet(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() def test_rpc_commands(self): self.log.info("Testing basic RPC commands") - self.warcli("bitcoin rpc tank-0000 getblockcount") - self.warcli("bitcoin rpc tank-0001 createwallet miner") - self.warcli("bitcoin rpc tank-0001 -generate 101") - self.wait_for_predicate(lambda: "101" in self.warcli("bitcoin rpc tank-0000 getblockcount")) + self.warnet("bitcoin rpc tank-0000 getblockcount") + self.warnet("bitcoin rpc tank-0001 createwallet miner") + self.warnet("bitcoin rpc tank-0001 -generate 101") + self.wait_for_predicate(lambda: "101" in self.warnet("bitcoin rpc tank-0000 getblockcount")) def test_transaction_propagation(self): self.log.info("Testing transaction propagation") address = "bcrt1qthmht0k2qnh3wy7336z05lu2km7emzfpm3wg46" - txid = self.warcli(f"bitcoin rpc tank-0001 sendtoaddress {address} 0.1") - self.wait_for_predicate(lambda: txid in self.warcli("bitcoin rpc tank-0000 getrawmempool")) + txid = self.warnet(f"bitcoin rpc tank-0001 sendtoaddress {address} 0.1") + self.wait_for_predicate(lambda: txid in self.warnet("bitcoin rpc tank-0000 getrawmempool")) - node_log = self.warcli("bitcoin debug-log tank-0001") + node_log = self.warnet("bitcoin debug-log tank-0001") assert txid in node_log, "Transaction ID not found in node log" - all_logs = self.warcli(f"bitcoin grep-logs {txid}") + all_logs = 
self.warnet(f"bitcoin grep-logs {txid}") count = all_logs.count("Enqueuing TransactionAddedToMempool") assert count > 1, f"Transaction not propagated to enough nodes (count: {count})" def test_message_exchange(self): self.log.info("Testing message exchange between nodes") - msgs = self.warcli("bitcoin messages tank-0000 tank-0001") + msgs = self.warnet("bitcoin messages tank-0000 tank-0001") assert "verack" in msgs, "VERACK message not found in exchange" def test_address_manager(self): self.log.info("Testing address manager") def got_addrs(): - addrman = json.loads(self.warcli("bitcoin rpc tank-0000 getrawaddrman")) + addrman = json.loads(self.warnet("bitcoin rpc tank-0000 getrawaddrman")) for key in ["tried", "new"]: obj = addrman[key] keys = list(obj.keys()) diff --git a/test/scenarios_test.py b/test/scenarios_test.py index bebf4b3da..30578f9b1 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -24,7 +24,7 @@ def run_test(self): def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warcli(f"deploy {self.network_dir}")) + self.log.info(self.warnet(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() @@ -47,20 +47,20 @@ def check_scenario_clean_exit(): return active[0]["status"] == "succeeded" self.log.info(f"Running scenario from: {scenario_file}") - self.warcli(f"run {scenario_file}") + self.warnet(f"run {scenario_file}") self.wait_for_predicate(lambda: check_scenario_clean_exit()) def run_and_check_miner_scenario_from_file(self): scenario_file = "resources/scenarios/miner_std.py" self.log.info(f"Running scenario from file: {scenario_file}") - self.warcli(f"run {scenario_file} --allnodes --interval=1") - start = int(self.warcli("bitcoin rpc tank-0000 getblockcount")) + self.warnet(f"run {scenario_file} --allnodes --interval=1") + start = int(self.warnet("bitcoin rpc tank-0000 getblockcount")) self.wait_for_predicate(lambda: 
self.scenario_running("commander-minerstd")) self.wait_for_predicate(lambda: self.check_blocks(2, start=start)) self.stop_scenario() def check_blocks(self, target_blocks, start: int = 0): - count = int(self.warcli("bitcoin rpc tank-0000 getblockcount")) + count = int(self.warnet("bitcoin rpc tank-0000 getblockcount")) self.log.debug(f"Current block count: {count}, target: {start + target_blocks}") try: diff --git a/test/test_base.py b/test/test_base.py index 80d384d4e..9326cc9a7 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -46,7 +46,7 @@ def cleanup(self, signum=None, frame=None): try: self.log.info("Stopping network") if self.network: - self.warcli("down") + self.warnet("down") self.wait_for_all_tanks_status(target="stopped", timeout=60, interval=1) except Exception as e: self.log.error(f"Error bringing network down: {e}") @@ -66,9 +66,9 @@ def assert_log_msgs(self): ), f"Log assertion failed. Expected message not found: {self.log_expected_msgs}" self.log_msg_assertions_passed = False - def warcli(self, cmd): - self.log.debug(f"Executing warcli command: {cmd}") - command = ["warcli"] + cmd.split() + def warnet(self, cmd): + self.log.debug(f"Executing warnet command: {cmd}") + command = ["warnet"] + cmd.split() proc = run(command, capture_output=True) if proc.stderr: raise Exception(proc.stderr.decode().strip()) From 322ed31d932824f05e36e3af018b66d58fd5332b Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 26 Aug 2024 22:53:22 +0100 Subject: [PATCH 163/710] update docs to reflect new usage --- README.md | 2 +- docs/developer-notes.md | 21 ++------ docs/install.md | 5 ++ docs/logging_monitoring.md | 4 +- docs/quickrun.md | 35 ++----------- docs/running.md | 2 +- docs/scaling.md | 3 ++ docs/scenarios.md | 95 +++++++++++++++++++++-------------- docs/{warcli.md => warnet.md} | 6 +-- resources/scripts/apidocs.py | 2 +- 10 files changed, 77 insertions(+), 98 deletions(-) rename docs/{warcli.md => warnet.md} (94%) diff --git a/README.md b/README.md index 
ff5b7ebb6..07955f494 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. - [Quick Run](/docs/quickrun.md) - [Running Warnet](/docs/running.md) - [Network Topology](/docs/graph.md) -- [CLI Commands](/docs/warcli.md) +- [CLI Commands](/docs/warnet.md) - [Scenarios](/docs/scenarios.md) - [Monitoring](/docs/logging_monitoring.md) - [Lightning Network](/docs/lightning.md) diff --git a/docs/developer-notes.md b/docs/developer-notes.md index 47e22e812..0c51bc8b3 100644 --- a/docs/developer-notes.md +++ b/docs/developer-notes.md @@ -1,21 +1,8 @@ # Developer notes -## Kubernetes +This project primarily uses the `uv` python packaging tool: https://docs.astral.sh/uv/ along with the sister formatter/linter `ruff` https://docs.astral.sh/ruff/ -Kubernetes is running the RPC server as a `statefulSet` which is pulled from a -container image on a registry. This means that (local) changes to the RPC -server are **not** reflected on the RPC server when running in Kubernetes, -unless you **also** push an updated image to a registry and update the -Kubernetes config files. +With `uv` installed you can add/remove dependencies using `uv add ` or `uv remove . +This will update the [`uv.lock`](https://docs.astral.sh/uv/guides/projects/#uvlock) file automatically. -To help with this a helper script is provided: [build-k8s-rpc.sh](../scripts/build-k8s-rpc.sh). - -This script can be run in the following way: - -```bash -DOCKER_REGISTRY=bitcoindevproject/warnet-rpc TAG=0.1 ./scripts/build-k8s-rpc.sh Dockerfile_prod -``` - -You can optionally specify `LATEST=1` to also include the `latest` tag on docker hub. - -Once a new image has been pushed, it should be referenced in manifests/warnet-rpc-statefulset.yaml in the `image` field. 
+`uv` can also run tools (like `ruff`) without external installation, simply run `uvx ruff check .` or `uvx ruff format .` to use a uv-managed format/lint on the project. diff --git a/docs/install.md b/docs/install.md index e04d58267..e6c4bcdfd 100644 --- a/docs/install.md +++ b/docs/install.md @@ -46,6 +46,11 @@ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin If you have never used Docker before you may need to take a few more steps to run the Docker daemon on your system. The Docker daemon MUST be running before stating Warnet. +#### Managing Kubernetes cluster + +The use of a k8s cluster management tool is highly recommended. +We like to use `k9s`: https://k9scli.io/ + ##### Linux - [Check Docker user/group permissions](https://stackoverflow.com/a/48957722/1653320) diff --git a/docs/logging_monitoring.md b/docs/logging_monitoring.md index a0142c642..07699469f 100644 --- a/docs/logging_monitoring.md +++ b/docs/logging_monitoring.md @@ -16,7 +16,7 @@ Examples of information provided: Commands: `warnet network logs` or `warnet network logs --follow`. -See more details in [warcli](/docs/warcli.md#warcli-network-logs) +See more details in [warnet](/docs/warnet.md#warnet-network-logs) ### Bitcoin Core logs @@ -54,7 +54,7 @@ warnet_test_uhynisdj_tank_000007: 2023-10-11T17:44:52.173199Z [validation] Enque ... (etc) ``` -See more details in [warcli](/docs/warcli.md#warcli-bitcoin-grep-logs) +See more details in [warnet](/docs/warnet.md#warnet-bitcoin-grep-logs) ## Monitoring and Metrics diff --git a/docs/quickrun.md b/docs/quickrun.md index 2bb4197f5..5168563f4 100644 --- a/docs/quickrun.md +++ b/docs/quickrun.md @@ -28,39 +28,10 @@ pip install -e . ## Running -> [!TIP] -> When developing locally add the `--dev` flag to `warnet cluster deploy` to enable dev mode with hot-reloading server. 
- -### Using minikube - -To run a local cluster using minikube: - -```bash -warnet cluster setup-minikube - -warnet cluster deploy -``` - -### Other cluster types - -If not using minikube (e.g. using Docker Desktop or a managed cluster), `warnet` commands will operate natively on the current Kubernetes context, so you can simply run: +To get started simply run: ```bash -warnet cluster deploy +warnet quickstart ``` -...to deploy warnet to your cluster. - -`warnet cluster deploy` also automatically configures port forwarding to the Server in the cluster. - -## Stopping - -To tear down the cluster: - -```bash -warnet cluster teardown -``` - -## Log location - -If the `$XDG_STATE_HOME` environment variable is set, the server will log to a file `$XDG_STATE_HOME/warnet/warnet.log`, otherwise it will use `$HOME/.warnet/warnet.log`. +This will check you have the required dependencies and guide you through setting up and deploying your first network. diff --git a/docs/running.md b/docs/running.md index 9d947fe01..42b48c79c 100644 --- a/docs/running.md +++ b/docs/running.md @@ -3,7 +3,7 @@ Warnet runs a server which can be used to manage multiple networks. On Kubernetes this runs as a `statefulSet` in the cluster. -See more details in [warcli](/docs/warcli.md), examples: +See more details in [warnet](/docs/warnet.md), examples: To start the server run: diff --git a/docs/scaling.md b/docs/scaling.md index 066827122..3ae91886b 100644 --- a/docs/scaling.md +++ b/docs/scaling.md @@ -1,5 +1,8 @@ # Running large networks +> [NOTE] +> These changes are not required on multi-host/managed clusters + When running a large number of containers on a single host machine, the system may run out of various resources. 
We recommend setting the following values in /etc/sysctl.conf: diff --git a/docs/scenarios.md b/docs/scenarios.md index 1cd57c2c3..8b113473e 100644 --- a/docs/scenarios.md +++ b/docs/scenarios.md @@ -5,50 +5,67 @@ with some modifications: most notably that `self.nodes[]` represents an array of containerized `bitcoind` nodes ("tanks"). Scenario files are run with a python interpreter inside the server and can control many nodes in the network simultaneously. -See [`src/warnet/scenarios`](../src/warnet/scenarios) for examples of how these can be written. +See [`resources/scenarios/`](../resources/scenarios/) for examples of how these can be written. +When creating a new network default scenarios will be copied into your project directory for convenience. -To see available scenarios (loaded from the default directory): +A scenario can be run with `warnet run [optional_params]`. -```bash -warnet scenarios available -``` - -Once a scenario is selected it can be run with `warnet scenarios run [--network=warnet] [scenario_params]`. - -The [`miner_std`](../src/warnet/scenarios/miner_std.py) scenario is a good one to start with as it automates block generation: +The [`miner_std`](../resources/scenarios/miner_std.py) scenario is a good one to start with as it automates block generation: ```bash -# Have all nodes generate a block 5 seconds apart in a round-robin -warnet scenarios run miner_std --allnodes --interval=5 -``` - -This will run the scenario in a background thread on the server until it exits or is stopped by the user. - -Active scenarios can be listed and terminated by PID: - -```bash -$ warnet scenarios available -miner_std Generate blocks over time. Options: [--allnodes | --interval= | --mature] -sens_relay Send a transaction using sensitive relay -tx_flood Generate 100 blocks with 100 TXs each - -$ warnet scenarios run tx_flood -Running scenario tx_flood with PID 14683 in the background... 
- -$ warnet scenarios active - ┃ Active ┃ Cmd ┃ Network ┃ Pid ┃ Return_code ┃ - ┡━━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━━━┩ - │ True │ tx_flood │ warnet │ 14683 │ None ┃ - -$ warnet scenarios stop 14683 -Stopped scenario with PID 14683. +₿ warnet run build55/scenarios/miner_std.py --allnodes --interval=10 +configmap/warnetjson configured +configmap/scenariopy configured +pod/commander-minerstd-1724708498 created +Successfully started scenario: miner_std +Commander pod name: commander-minerstd-1724708498 + +₿ warnet status +╭──────────────────── Warnet Overview ────────────────────╮ +│ │ +│ Warnet Status │ +│ ┏━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┓ │ +│ ┃ Component ┃ Name ┃ Status ┃ │ +│ ┡━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━┩ │ +│ │ Tank │ tank-0001 │ running │ │ +│ │ Tank │ tank-0002 │ running │ │ +│ │ Tank │ tank-0003 │ running │ │ +│ │ Tank │ tank-0004 │ running │ │ +│ │ Tank │ tank-0005 │ running │ │ +│ │ Tank │ tank-0006 │ running │ │ +│ │ │ │ │ │ +│ │ Scenario │ commander-minerstd-1724708498 │ pending │ │ +│ └───────────┴───────────────────────────────┴─────────┘ │ +│ │ +╰─────────────────────────────────────────────────────────╯ + +Total Tanks: 6 | Active Scenarios: 1 + +₿ warnet stop commander-minerstd-1724708498 +pod "commander-minerstd-1724708498" deleted +Successfully stopped scenario: commander-minerstd-1724708498 + +₿ warnet status +╭─────────────── Warnet Overview ───────────────╮ +│ │ +│ Warnet Status │ +│ ┏━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┓ │ +│ ┃ Component ┃ Name ┃ Status ┃ │ +│ ┡━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━┩ │ +│ │ Tank │ tank-0001 │ running │ │ +│ │ Tank │ tank-0002 │ running │ │ +│ │ Tank │ tank-0003 │ running │ │ +│ │ Tank │ tank-0004 │ running │ │ +│ │ Tank │ tank-0005 │ running │ │ +│ │ Tank │ tank-0006 │ running │ │ +│ │ Scenario │ No active scenarios │ │ │ +│ └───────────┴─────────────────────┴─────────┘ │ +│ │ +╰───────────────────────────────────────────────╯ + +Total 
Tanks: 6 | Active Scenarios: 0 ``` ## Running a custom scenario -You can write your own scenario file locally and upload it to the server with -the [run-file](/docs/warcli.md#warcli-scenarios-run-file) command (example): - -```bash -warnet scenarios run-file /home/me/bitcoin_attack.py -``` +You can write your own scenario file and run it in the same way. diff --git a/docs/warcli.md b/docs/warnet.md similarity index 94% rename from docs/warcli.md rename to docs/warnet.md index eed82fdf3..fc485b26c 100644 --- a/docs/warcli.md +++ b/docs/warnet.md @@ -4,11 +4,7 @@ The command-line interface tool for Warnet. Once `warnet` is running it can be interacted with using the cli tool `warnet`. -Most `warnet` commands accept a `--network` option, which allows you to specify -the network you want to control. This is set by default to `--network="warnet"` -to simplify default operation. - -Execute `warnet --help` or `warnet help` to see a list of command categories. +Execute `warnet --help` to see a list of command categories. Help text is provided, with optional parameters in [square brackets] and required parameters in . diff --git a/resources/scripts/apidocs.py b/resources/scripts/apidocs.py index ef7e00902..79d664e85 100755 --- a/resources/scripts/apidocs.py +++ b/resources/scripts/apidocs.py @@ -9,7 +9,7 @@ from warnet.main import cli -file_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / ".." / "docs" / "warcli.md" +file_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / ".." 
/ "docs" / "warnet.md" doc = "" From e2926996e63e220bca7ea2c79c98f02bb04d229f Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 26 Aug 2024 22:56:32 +0100 Subject: [PATCH 164/710] update running docs to quickstart --- README.md | 3 +-- docs/{quickrun.md => quickstart.md} | 0 docs/running.md | 38 ----------------------------- 3 files changed, 1 insertion(+), 40 deletions(-) rename docs/{quickrun.md => quickstart.md} (100%) delete mode 100644 docs/running.md diff --git a/README.md b/README.md index 07955f494..2d8735ea3 100644 --- a/README.md +++ b/README.md @@ -16,8 +16,7 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. ## Documentation - [Installation](/docs/install.md) -- [Quick Run](/docs/quickrun.md) -- [Running Warnet](/docs/running.md) +- [Quick Start](/docs/quickstart.md) - [Network Topology](/docs/graph.md) - [CLI Commands](/docs/warnet.md) - [Scenarios](/docs/scenarios.md) diff --git a/docs/quickrun.md b/docs/quickstart.md similarity index 100% rename from docs/quickrun.md rename to docs/quickstart.md diff --git a/docs/running.md b/docs/running.md deleted file mode 100644 index 42b48c79c..000000000 --- a/docs/running.md +++ /dev/null @@ -1,38 +0,0 @@ -# Running Warnet - -Warnet runs a server which can be used to manage multiple networks. On Kubernetes -this runs as a `statefulSet` in the cluster. - -See more details in [warnet](/docs/warnet.md), examples: - -To start the server run: - -```bash -warnet cluster deploy -``` - -Start a network from a graph file: - -```bash -warnet network start resources/graphs/default.graphml -``` - -Make sure all tanks are running with: - -```bash -warnet network status -``` - -Check if the edges of the graph (bitcoin p2p connections) are complete: - -```bash -warnet network connected -``` - -_Optional_ Check out the logs with: - -```bash -warnet network logs -f -``` - -If that looks all good, give [scenarios](/docs/scenarios.md) a try. 
From 417a7985e5e80272b196cc97f1e37d353e37a026 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 26 Aug 2024 22:57:14 +0100 Subject: [PATCH 165/710] remove graph.md --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 2d8735ea3..c97e8450a 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,6 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. - [Installation](/docs/install.md) - [Quick Start](/docs/quickstart.md) -- [Network Topology](/docs/graph.md) - [CLI Commands](/docs/warnet.md) - [Scenarios](/docs/scenarios.md) - [Monitoring](/docs/logging_monitoring.md) From 1623c97a21b31744af98fd49c5a8edb28c77e8ef Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 27 Aug 2024 08:21:46 +0100 Subject: [PATCH 166/710] add warcli script entry point alias This is useful for entrenched legacy users :P --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index ef8390fbe..7cc76308e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ dependencies = [ [project.scripts] warnet = "warnet.main:cli" +warcli = "warnet.main:cli" [project.urls] Homepage = "https://warnet.dev" From 948bc6fce92d2d4eb910520e0873bddb06729467 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 2 Sep 2024 16:37:49 +0100 Subject: [PATCH 167/710] version 1.0.0 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 7cc76308e..cb9f39384 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "warnet" -version = "0.10.0" +version = "1.0.0" description = "Monitor and analyze the emergent behaviours of bitcoin networks" readme = "README.md" requires-python = ">=3.11" From 4abfbfa19aca50caac3dcae7be0e95497b88716a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 2 Sep 2024 16:46:52 +0100 Subject: [PATCH 168/710] revert dist publish disable --- .github/workflows/publish-dist.yml | 4 
+--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/publish-dist.yml b/.github/workflows/publish-dist.yml index 0c59f8a37..b0dad4cf7 100644 --- a/.github/workflows/publish-dist.yml +++ b/.github/workflows/publish-dist.yml @@ -29,9 +29,7 @@ jobs: publish-to-pypi: name: >- Publish Python 🐍 distribution 📦 to PyPI - # if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes - # DISABLE FOR REWRITE - if: false + if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes needs: - build runs-on: ubuntu-latest From 5fbcd63ebbc5b02ace4a4aa1730a74bf3efad768 Mon Sep 17 00:00:00 2001 From: Will Clark Date: Tue, 3 Sep 2024 17:29:34 +0100 Subject: [PATCH 169/710] add visual explainer for config value propagation (#513) --- README.md | 1 + docs/config.md | 51 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) create mode 100644 docs/config.md diff --git a/README.md b/README.md index c97e8450a..6e050e7c5 100644 --- a/README.md +++ b/README.md @@ -23,5 +23,6 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. - [Lightning Network](/docs/lightning.md) - [Scaling](/docs/scaling.md) - [Connecting to local nodes](https://github.com/bitcoin-dev-project/warnet/blob/main/docs/) +- [Understanding network configuration](/docs/config.md) ![warnet-art](https://raw.githubusercontent.com/bitcoin-dev-project/warnet/main/docs/machines.webp) diff --git a/docs/config.md b/docs/config.md new file mode 100644 index 000000000..8ef3af9ec --- /dev/null +++ b/docs/config.md @@ -0,0 +1,51 @@ +# Configuration value propagation + +This flowchart illustrates the process of how values for the Bitcoin Core module are handled and deployed using Helm in a Kubernetes environment. + +The process is similar for other modules (e.g. fork-observer), but may differ slightly in filenames. 
+ +- The process starts with the `values.yaml` file, which contains default values for the Helm chart. +- There's a decision point to check if user-provided values are available. + These are found in the following files: + - For config applied to all nodes: `/node-defaults.yaml` + - For network and per-node config: `/network.yaml` + +> [!TIP] +> `values.yaml` can be overridden by `node-defaults.yaml` which can be overridden in turn by `network.yaml`. + +- If user-provided values exist, they override the defaults from `values.yaml`. If not, the default values are used. +- The resulting set of values (either default or overridden) becomes the final set of values used for deployment. +- These final values are then passed to the Helm templates. +- The templates (`configmap.yaml`, `service.yaml`, `servicemonitor.yaml`, and `pod.yaml`) use these values to generate the Kubernetes resource definitions. +- Helm renders these templates, substituting the values into the appropriate places. +- The rendering process produces the final Kubernetes manifest files. +- Helm then applies these rendered manifests to the Kubernetes cluster. +- Kubernetes processes these manifests and creates or updates the corresponding resources in the cluster. +- The process ends with the resources being deployed or updated in the Kubernetes cluster. 
+ +```mermaid + graph TD + A[Start] --> B[values.yaml] + subgraph User Configuration [user config: bottom overrides top] + C[node-defaults.yaml] + D[network.yaml] + end + B --> C + C --> D + D --> F[Final values] + F --> I[Templates] + I --> J[configmap.yaml] + I --> K[service.yaml] + I --> L[servicemonitor.yaml] + I --> M[pod.yaml] + J --> N[Helm renders templates] + K --> N + L --> N + M --> N + N --> O[Rendered Kubernetes manifests] + O --> P[Helm applies manifests to Kubernetes] + P --> Q[Kubernetes creates/updates resources] + Q --> R[Resources deployed/updated in cluster] +``` + +Users should only concern themselves therefore with setting configuration in the `/[network|node-defaults].yaml` files. From ba62009a99bde4a62e10ff0ac70fc0b67e1a318b Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 27 Aug 2024 10:45:26 +0100 Subject: [PATCH 170/710] use inquirer for quickstart --- src/warnet/main.py | 168 ++++++++++++++++++++++++++++++++------------- 1 file changed, 121 insertions(+), 47 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 02e6f3930..8b19cf8a1 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -47,6 +47,7 @@ def cli(): def quickstart(): """Setup warnet""" try: + # Requirements checks process = subprocess.Popen( ["/bin/bash", str(QUICK_START_PATH)], stdout=subprocess.PIPE, @@ -54,9 +55,10 @@ def quickstart(): universal_newlines=True, env=dict(os.environ, TERM="xterm-256color"), ) - for line in iter(process.stdout.readline, ""): - click.echo(line, nl=False) - process.stdout.close() + if process.stdout: + for line in iter(process.stdout.readline, ""): + click.echo(line, nl=False) + process.stdout.close() return_code = process.wait() if return_code != 0: click.secho( @@ -65,67 +67,139 @@ def quickstart(): click.secho("Install missing requirements before proceeding", fg="yellow") return False - create_project = click.confirm( - click.style("\nDo you want to create a new project?", fg="blue", bold=True), - default=True, - ) 
- if not create_project: + # New project setup + questions = [ + inquirer.Confirm( + "create_project", + message=click.style("Do you want to create a new project?", fg="blue", bold=True), + default=True, + ), + ] + answers = inquirer.prompt(questions) + if answers is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + if not answers["create_project"]: click.secho("\nSetup completed successfully!", fg="green", bold=True) return True - default_path = os.path.abspath(os.getcwd()) - project_path = click.prompt( - click.style("\nEnter the project directory path", fg="blue", bold=True), - default=default_path, - type=click.Path(file_okay=False, dir_okay=True, resolve_path=True), - ) - - custom_network = click.confirm( - click.style("\nDo you want to create a custom network?", fg="blue", bold=True), - default=True, - ) - if not custom_network: - create_warnet_project(Path(project_path)) + # Custom project setup + questions = [ + inquirer.Path( + "project_path", + message=click.style("Enter the project directory path", fg="blue", bold=True), + path_type=inquirer.Path.DIRECTORY, + exists=False, + ), + inquirer.Confirm( + "custom_network", + message=click.style( + "Do you want to create a custom network?", fg="blue", bold=True + ), + default=True, + ), + ] + proj_answers = inquirer.prompt(questions) + if proj_answers is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + if not proj_answers["custom_network"]: + create_warnet_project(Path(proj_answers["project_path"])) click.secho("\nSetup completed successfully!", fg="green", bold=True) + click.echo( + "\nRun the following command to deploy this network using the default demo network:" + ) + click.echo(f"warcli deploy {proj_answers['project_path']}/networks/6_node_bitcoin") return True + answers.update(proj_answers) - network_name = click.prompt( - click.style("\nEnter the network name", fg="blue", bold=True), - type=str, - ) - - nodes = click.prompt( - click.style("\nHow 
many nodes would you like?", fg="blue", bold=True), - type=int, - default=15, - ) - connections = click.prompt( - click.style( - "\nHow many connections would you like each node to have?", fg="blue", bold=True + # Custom network configuration + questions = [ + inquirer.Text( + "network_name", message=click.style("Enter your network name", fg="blue", bold=True) ), - type=int, - default=8, - ) - version = click.prompt( - click.style( - "\nWhich version would you like nodes to be by default?", fg="blue", bold=True + inquirer.List( + "nodes", + message=click.style("How many nodes would you like?", fg="blue", bold=True), + choices=["8", "12", "20", "50", "other"], + default="12", ), - type=click.Choice(SUPPORTED_TAGS, case_sensitive=False), - default=DEFAULT_TAG, - ) + inquirer.List( + "connections", + message=click.style( + "How many addnode connections would you like each node to have?", + fg="blue", + bold=True, + ), + choices=["0", "1", "2", "8", "12", "other"], + default="8", + ), + inquirer.List( + "version", + message=click.style( + "Which version would you like nodes to be by default?", fg="blue", bold=True + ), + choices=SUPPORTED_TAGS, + default=DEFAULT_TAG, + ), + ] + + net_answers = inquirer.prompt(questions) + if net_answers is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + + if net_answers["nodes"] == "other": + custom_nodes = inquirer.prompt( + [ + inquirer.Text( + "nodes", + message=click.style("Enter the number of nodes", fg="blue", bold=True), + validate=lambda _, x: int(x) > 0, + ) + ] + ) + if custom_nodes is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + net_answers["nodes"] = custom_nodes["nodes"] + + if net_answers["connections"] == "other": + custom_connections = inquirer.prompt( + [ + inquirer.Text( + "connections", + message=click.style( + "Enter the number of connections", fg="blue", bold=True + ), + validate=lambda _, x: int(x) >= 0, + ) + ] + ) + if custom_connections is None: + 
click.secho("Setup cancelled by user.", fg="yellow") + return False + net_answers["connections"] = custom_connections["connections"] + answers.update(net_answers) click.secho("\nCreating project structure...", fg="yellow", bold=True) - create_warnet_project(Path(project_path)) + create_warnet_project(Path(answers["project_path"])) click.secho("\nGenerating custom network...", fg="yellow", bold=True) - custom_network_path = Path(project_path) / "networks" / network_name - custom_graph(nodes, connections, version, custom_network_path) + custom_network_path = Path(answers["project_path"]) / "networks" / answers["network_name"] + custom_graph( + int(answers["nodes"]), + int(answers["connections"]), + answers["version"], + custom_network_path, + ) click.secho("\nSetup completed successfully!", fg="green", bold=True) click.echo("\nRun the following command to deploy this network:") click.echo(f"warnet deploy {custom_network_path}") except Exception as e: + click.echo(f"{e}\n\n") click.secho(f"An error occurred while running the quick start script:\n\n{e}\n\n", fg="red") click.secho( - "Please report this to https://github.com/bitcoin-dev-project/warnet/issues", + "Please report the above context to https://github.com/bitcoin-dev-project/warnet/issues", fg="yellow", ) return False From c8020cfaa8c8a314bccba0136b7d5213283d7621 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 3 Sep 2024 16:03:26 +0100 Subject: [PATCH 171/710] quickstart: don't permit A -> B -> A connections --- src/warnet/main.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 8b19cf8a1..03c3c1bbb 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -127,7 +127,7 @@ def quickstart(): inquirer.List( "connections", message=click.style( - "How many addnode connections would you like each node to have?", + "How many connections would you like each node to have?", fg="blue", 
bold=True, ), @@ -137,7 +137,7 @@ def quickstart(): inquirer.List( "version", message=click.style( - "Which version would you like nodes to be by default?", fg="blue", bold=True + "Which version would you like nodes to run by default?", fg="blue", bold=True ), choices=SUPPORTED_TAGS, default=DEFAULT_TAG, @@ -360,6 +360,7 @@ def custom_graph(num_nodes: int, num_connections: int, version: str, datadir: Pa datadir.mkdir(parents=False, exist_ok=False) # Generate network.yaml nodes = [] + connections = set() for i in range(num_nodes): node = {"name": f"tank-{i:04d}", "connect": [], "image": {"tag": version}} @@ -367,6 +368,7 @@ def custom_graph(num_nodes: int, num_connections: int, version: str, datadir: Pa # Add round-robin connection next_node = (i + 1) % num_nodes node["connect"].append(f"tank-{next_node:04d}") + connections.add((i, next_node)) # Add random connections available_nodes = list(range(num_nodes)) @@ -376,8 +378,11 @@ def custom_graph(num_nodes: int, num_connections: int, version: str, datadir: Pa for _ in range(min(num_connections - 1, len(available_nodes))): random_node = random.choice(available_nodes) - node["connect"].append(f"tank-{random_node:04d}") - available_nodes.remove(random_node) + # Avoid circular loops of A -> B -> A + if (random_node, i) not in connections: + node["connect"].append(f"tank-{random_node:04d}") + connections.add((i, random_node)) + available_nodes.remove(random_node) nodes.append(node) From b228f5a1bc28ba73fede009460cbf049e8f7d311 Mon Sep 17 00:00:00 2001 From: Grant Date: Tue, 3 Sep 2024 09:34:56 -0500 Subject: [PATCH 172/710] use `with` for subprocess --- src/warnet/main.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 03c3c1bbb..9de04edaa 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -48,24 +48,23 @@ def quickstart(): """Setup warnet""" try: # Requirements checks - process = subprocess.Popen( + with subprocess.Popen( 
["/bin/bash", str(QUICK_START_PATH)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env=dict(os.environ, TERM="xterm-256color"), - ) - if process.stdout: - for line in iter(process.stdout.readline, ""): - click.echo(line, nl=False) - process.stdout.close() - return_code = process.wait() - if return_code != 0: - click.secho( - f"Quick start script failed with return code {return_code}", fg="red", bold=True - ) - click.secho("Install missing requirements before proceeding", fg="yellow") - return False + ) as process: + if process.stdout: + for line in iter(process.stdout.readline, ""): + click.echo(line, nl=False) + return_code = process.wait() + if return_code != 0: + click.secho( + f"Quick start script failed with return code {return_code}", fg="red", bold=True + ) + click.secho("Install missing requirements before proceeding", fg="yellow") + return False # New project setup questions = [ From 2a9af4ff0b770c4fa807fa504f21b92d302a4b1a Mon Sep 17 00:00:00 2001 From: Grant Date: Tue, 3 Sep 2024 10:29:45 -0500 Subject: [PATCH 173/710] validate network name --- src/warnet/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 9de04edaa..75d194968 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -115,7 +115,8 @@ def quickstart(): # Custom network configuration questions = [ inquirer.Text( - "network_name", message=click.style("Enter your network name", fg="blue", bold=True) + "network_name", message=click.style("Enter your network name", fg="blue", bold=True), + validate=lambda _, x: len(x) > 0 ), inquirer.List( "nodes", From ef8e708247a7940c0488dffbe9f69c1870e0ac1b Mon Sep 17 00:00:00 2001 From: Grant Date: Tue, 3 Sep 2024 10:30:02 -0500 Subject: [PATCH 174/710] expand project path This means if a user does ~/mydir, it won't explicitly create a directory called `~` --- src/warnet/main.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git 
a/src/warnet/main.py b/src/warnet/main.py index 75d194968..f605d9046 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -103,7 +103,8 @@ def quickstart(): click.secho("Setup cancelled by user.", fg="yellow") return False if not proj_answers["custom_network"]: - create_warnet_project(Path(proj_answers["project_path"])) + project_path = Path(os.path.expanduser(proj_answers["project_path"])) + create_warnet_project(project_path) click.secho("\nSetup completed successfully!", fg="green", bold=True) click.echo( "\nRun the following command to deploy this network using the default demo network:" @@ -183,9 +184,10 @@ def quickstart(): answers.update(net_answers) click.secho("\nCreating project structure...", fg="yellow", bold=True) - create_warnet_project(Path(answers["project_path"])) + project_path = Path(os.path.expanduser(proj_answers["project_path"])) + create_warnet_project(project_path) click.secho("\nGenerating custom network...", fg="yellow", bold=True) - custom_network_path = Path(answers["project_path"]) / "networks" / answers["network_name"] + custom_network_path = project_path / "networks" / answers["network_name"] custom_graph( int(answers["nodes"]), int(answers["connections"]), From aab9443d729b34326da7391c10f8206865f9f125 Mon Sep 17 00:00:00 2001 From: Grant Date: Tue, 3 Sep 2024 10:45:03 -0500 Subject: [PATCH 175/710] fix ruff format --- src/warnet/main.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index f605d9046..f0bfd74d7 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -116,8 +116,9 @@ def quickstart(): # Custom network configuration questions = [ inquirer.Text( - "network_name", message=click.style("Enter your network name", fg="blue", bold=True), - validate=lambda _, x: len(x) > 0 + "network_name", + message=click.style("Enter your network name", fg="blue", bold=True), + validate=lambda _, x: len(x) > 0, ), inquirer.List( "nodes", From 
6ed9090314600021d5faee5c87b50a996ba4e970 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 3 Sep 2024 14:02:54 -0400 Subject: [PATCH 176/710] commander: remove lie --- resources/images/commander/Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/resources/images/commander/Dockerfile b/resources/images/commander/Dockerfile index ed93d4103..715248294 100644 --- a/resources/images/commander/Dockerfile +++ b/resources/images/commander/Dockerfile @@ -4,7 +4,6 @@ FROM python:3.12-slim # Python dependencies #RUN pip install --no-cache-dir prometheus_client -# Prometheus exporter script for bitcoind COPY src/warnet/scenarios/commander.py / COPY src/test_framework /test_framework From 06cef5a380562b4e1fc0df42ea6557953c3b9dc7 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 10:30:24 +0200 Subject: [PATCH 177/710] remove unused scenarios.py --- src/warnet/scenarios.py | 214 ---------------------------------------- 1 file changed, 214 deletions(-) delete mode 100644 src/warnet/scenarios.py diff --git a/src/warnet/scenarios.py b/src/warnet/scenarios.py deleted file mode 100644 index 8ce115444..000000000 --- a/src/warnet/scenarios.py +++ /dev/null @@ -1,214 +0,0 @@ -import importlib -import json -import os -import pkgutil -import sys -import tempfile -import time -from importlib.resources import files - -import click -import yaml -from rich import print -from rich.console import Console -from rich.table import Table - -from .k8s import apply_kubernetes_yaml, get_default_namespace, get_mission - - -@click.group(name="scenarios") -def scenarios(): - """Manage scenarios on a network""" - - -@scenarios.command() -def available(): - """ - List available scenarios in the Warnet Test Framework - """ - console = Console() - scenario_list = _available() - - # Create the table - table = Table(show_header=True, header_style="bold") - table.add_column("Name") - table.add_column("Description") - - for scenario in scenario_list: - table.add_row(*scenario) - 
console.print(table) - - -def _available(): - scenarios_dir = files("resources.scenarios") - sys.path.append(scenarios_dir) - - try: - scenario_list = [] - package_name = "resources.scenarios" - for _, name, _ in pkgutil.iter_modules([scenarios_dir]): - module_name = f"{package_name}.{name}" - try: - m = importlib.import_module(module_name) - if hasattr(m, "cli_help"): - scenario_list.append((name, m.cli_help())) - except Exception as e: - print(f"Error importing module {module_name}: {e}") - finally: - sys.path.remove(scenarios_dir) - - return scenario_list - - -@scenarios.command(context_settings={"ignore_unknown_options": True}) -@click.argument("scenario", type=str) -@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) -def run(scenario: str, additional_args: tuple[str]): - """ - Run from the Warnet Test Framework with optional arguments - """ - - # Use importlib.resources to get the scenario path - scenario_package = "resources.scenarios" - scenario_filename = f"{scenario}.py" - - # Ensure the scenario file exists within the package - with importlib.resources.path(scenario_package, scenario_filename) as scenario_path: - scenario_path = str(scenario_path) # Convert Path object to string - return run_scenario(scenario_path, additional_args) - - -@scenarios.command(context_settings={"ignore_unknown_options": True}) -@click.argument("scenario_path", type=str) -@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) -def run_file(scenario_path: str, additional_args: tuple[str]): - """ - Start with optional arguments - """ - if not scenario_path.endswith(".py"): - print("Error. 
Currently only python scenarios are supported") - sys.exit(1) - return run_scenario(scenario_path, additional_args) - - -def run_scenario(scenario_path: str, additional_args: tuple[str]): - if not os.path.exists(scenario_path): - raise Exception(f"Scenario file not found at {scenario_path}.") - - with open(scenario_path) as file: - scenario_text = file.read() - - scenario_name = os.path.splitext(os.path.basename(scenario_path))[0] - - name = f"commander-{scenario_name.replace('_', '')}-{int(time.time())}" - namespace = get_default_namespace() - tankpods = get_mission("tank") - tanks = [ - { - "tank": tank.metadata.name, - "chain": "regtest", - "rpc_host": tank.status.pod_ip, - "rpc_port": 18443, - "rpc_user": "user", - "rpc_password": "password", - "init_peers": [], - } - for tank in tankpods - ] - kubernetes_objects = [] - kubernetes_objects.extend( - [ - { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "warnetjson", - "namespace": namespace, - }, - "data": {"warnet.json": json.dumps(tanks)}, - }, - { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "scenariopy", - "namespace": namespace, - }, - "data": {"scenario.py": scenario_text}, - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": name, - "namespace": namespace, - "labels": {"mission": "commander"}, - }, - "spec": { - "restartPolicy": "Never", - "containers": [ - { - "name": name, - "image": "bitcoindevproject/warnet-commander:latest", - "args": additional_args, - "volumeMounts": [ - { - "name": "warnetjson", - "mountPath": "warnet.json", - "subPath": "warnet.json", - }, - { - "name": "scenariopy", - "mountPath": "scenario.py", - "subPath": "scenario.py", - }, - ], - } - ], - "volumes": [ - {"name": "warnetjson", "configMap": {"name": "warnetjson"}}, - {"name": "scenariopy", "configMap": {"name": "scenariopy"}}, - ], - }, - }, - ] - ) - with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: - 
yaml.dump_all(kubernetes_objects, temp_file) - temp_file_path = temp_file.name - apply_kubernetes_yaml(temp_file_path) - - -@scenarios.command() -def active(): - """ - List running scenarios "name": "pid" pairs - """ - commanders = _active() - if len(commanders) == 0: - print("No scenarios running") - return - - table = Table(show_header=True, header_style="bold") - table.add_column("Commander") - table.add_column("Status") - - for commander in commanders: - table.add_row(commander["commander"], commander["status"]) - - console = Console() - console.print(table) - - -def _active() -> list[str]: - commanders = get_mission("commander") - return [{"commander": c.metadata.name, "status": c.status.phase.lower()} for c in commanders] - - -@scenarios.command() -@click.argument("pid", type=int) -def stop(pid: int): - """ - Stop scenario - """ - pass From f70dca2a52e2fc84480cbc136af08c13cd236c73 Mon Sep 17 00:00:00 2001 From: Will Clark Date: Wed, 4 Sep 2024 11:49:25 +0100 Subject: [PATCH 178/710] scenarios: only print 'outer' help (#522) Close #512 --- resources/scenarios/commander.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/resources/scenarios/commander.py b/resources/scenarios/commander.py index e6fdde71d..b478c13ba 100644 --- a/resources/scenarios/commander.py +++ b/resources/scenarios/commander.py @@ -131,6 +131,14 @@ def setup(self): self.success = TestStatus.PASSED def parse_args(self): + # Only print "outer" args from parent class when using --help + help_parser = argparse.ArgumentParser(usage="%(prog)s [options]") + self.add_options(help_parser) + help_args, _ = help_parser.parse_known_args() + if help_args.help: + help_parser.print_help() + sys.exit(0) + previous_releases_path = "" parser = argparse.ArgumentParser(usage="%(prog)s [options]") parser.add_argument( From 087b2b576429bf04975e744f7c37450b48ff3950 Mon Sep 17 00:00:00 2001 From: Will Clark Date: Wed, 4 Sep 2024 11:50:54 +0100 Subject: [PATCH 179/710] use built commander image in CI (#523) 
--- .github/workflows/test.yml | 38 +++++++++++++++++++++++++++ resources/images/commander/Dockerfile | 2 +- 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a84bfbab2..f3c05cac7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -25,7 +25,34 @@ jobs: - uses: eifinger/setup-uv@v1 - run: uvx ruff format . --check + build-image: + needs: [ruff, ruff-format] + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and export + uses: docker/build-push-action@v5 + with: + file: resources/images/commander/Dockerfile + context: . + tags: bitcoindevproject/warnet-commander:latest + cache-from: type=gha + cache-to: type=gha,mode=max + outputs: type=docker,dest=/tmp/commander.tar + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: commander + path: /tmp/commander.tar + test: + needs: [build-image] runs-on: ubuntu-latest strategy: matrix: @@ -44,9 +71,20 @@ jobs: with: cpus: max memory: 4000m + - name: Download commander artifact + uses: actions/download-artifact@v4 + with: + name: commander + path: /tmp - uses: eifinger/setup-uv@v1 - name: Install project run: uv sync --all-extras --dev + - name: Install commander image + run: | + echo loading commander image into minikube docker + eval $(minikube -p minikube docker-env) + docker load --input /tmp/commander.tar + docker image ls -a - name: Run tests run: | source .venv/bin/activate diff --git a/resources/images/commander/Dockerfile b/resources/images/commander/Dockerfile index 715248294..3a8314c21 100644 --- a/resources/images/commander/Dockerfile +++ b/resources/images/commander/Dockerfile @@ -4,7 +4,7 @@ FROM python:3.12-slim # Python dependencies #RUN pip install --no-cache-dir prometheus_client -COPY src/warnet/scenarios/commander.py / +COPY resources/scenarios/commander.py / COPY 
src/test_framework /test_framework # -u: force the stdout and stderr streams to be unbuffered From fbf0bc1d648ccce2a94388ee821754a5a6d26dae Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 2 Sep 2024 15:19:02 +0100 Subject: [PATCH 180/710] add forkobserver Adds an optional fork-observer module. --- resources/charts/bitcoincore/values.yaml | 3 +- resources/charts/fork-observer/.helmignore | 23 ++++ resources/charts/fork-observer/Chart.yaml | 24 ++++ .../charts/fork-observer/templates/NOTES.txt | 1 + .../fork-observer/templates/_helpers.tpl | 57 +++++++++ .../fork-observer/templates/configmap.yaml | 11 ++ .../charts/fork-observer/templates/pod.yaml | 48 ++++++++ .../fork-observer/templates/service.yaml | 16 +++ resources/charts/fork-observer/values.yaml | 113 ++++++++++++++++++ .../networks/6_node_bitcoin/network.yaml | 3 +- .../6_node_bitcoin/node-defaults.yaml | 2 +- resources/networks/node-defaults.yaml | 29 +++++ src/warnet/deploy.py | 52 ++++++++ src/warnet/main.py | 17 ++- src/warnet/network.py | 1 + 15 files changed, 394 insertions(+), 6 deletions(-) create mode 100644 resources/charts/fork-observer/.helmignore create mode 100644 resources/charts/fork-observer/Chart.yaml create mode 100644 resources/charts/fork-observer/templates/NOTES.txt create mode 100644 resources/charts/fork-observer/templates/_helpers.tpl create mode 100644 resources/charts/fork-observer/templates/configmap.yaml create mode 100644 resources/charts/fork-observer/templates/pod.yaml create mode 100644 resources/charts/fork-observer/templates/service.yaml create mode 100644 resources/charts/fork-observer/values.yaml create mode 100644 resources/networks/node-defaults.yaml diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 1a3fafe6a..00c639eb5 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -118,6 +118,7 @@ regtestConfig: | rpcport=18443 rpcallowip=0.0.0.0/0 rpcbind=0.0.0.0 + rest=1 
baseConfig: | checkmempool=0 @@ -134,4 +135,4 @@ baseConfig: | config: "" -connect: [] \ No newline at end of file +connect: [] diff --git a/resources/charts/fork-observer/.helmignore b/resources/charts/fork-observer/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/resources/charts/fork-observer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/resources/charts/fork-observer/Chart.yaml b/resources/charts/fork-observer/Chart.yaml new file mode 100644 index 000000000..a8676c16f --- /dev/null +++ b/resources/charts/fork-observer/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: fork-observer +description: A Helm chart for fork-observer + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. 
Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: 0.1.0 diff --git a/resources/charts/fork-observer/templates/NOTES.txt b/resources/charts/fork-observer/templates/NOTES.txt new file mode 100644 index 000000000..80e50940d --- /dev/null +++ b/resources/charts/fork-observer/templates/NOTES.txt @@ -0,0 +1 @@ +Fork-observer is watching you diff --git a/resources/charts/fork-observer/templates/_helpers.tpl b/resources/charts/fork-observer/templates/_helpers.tpl new file mode 100644 index 000000000..8ff2e9aed --- /dev/null +++ b/resources/charts/fork-observer/templates/_helpers.tpl @@ -0,0 +1,57 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "fork-observer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fork-observer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fork-observer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "fork-observer.labels" -}} +helm.sh/chart: {{ include "fork-observer.chart" . }} +{{ include "fork-observer.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "fork-observer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "fork-observer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "fork-observer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "fork-observer.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/resources/charts/fork-observer/templates/configmap.yaml b/resources/charts/fork-observer/templates/configmap.yaml new file mode 100644 index 000000000..8fb4dcad6 --- /dev/null +++ b/resources/charts/fork-observer/templates/configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "fork-observer.fullname" . }} + labels: + {{- include "fork-observer.labels" . | nindent 4 }} +data: + config.toml: | + {{- .Values.configQueryInterval | nindent 4 }} + {{- .Values.baseConfig | nindent 4 }} + {{- .Values.config | nindent 8 }} diff --git a/resources/charts/fork-observer/templates/pod.yaml b/resources/charts/fork-observer/templates/pod.yaml new file mode 100644 index 000000000..908543806 --- /dev/null +++ b/resources/charts/fork-observer/templates/pod.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "fork-observer.fullname" . }} + labels: + {{- include "fork-observer.labels" . | nindent 4 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + app: {{ include "fork-observer.fullname" . }} +spec: + restartPolicy: "{{ .Values.restartPolicy }}" + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 4 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 4 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 8 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: web + containerPort: {{ .Values.port }} + protocol: TCP + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 8 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 8 }} + resources: + {{- toYaml .Values.resources | nindent 8 }} + volumeMounts: + {{- with .Values.volumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} + - mountPath: /app/config.toml + name: config + subPath: config.toml + volumes: + {{- with .Values.volumes }} + {{- toYaml . | nindent 4 }} + {{- end }} + - configMap: + name: {{ include "fork-observer.fullname" . }} + name: config diff --git a/resources/charts/fork-observer/templates/service.yaml b/resources/charts/fork-observer/templates/service.yaml new file mode 100644 index 000000000..91615e15c --- /dev/null +++ b/resources/charts/fork-observer/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "fork-observer.fullname" . }} + labels: + {{- include "fork-observer.labels" . | nindent 4 }} + app: {{ include "fork-observer.fullname" . }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.port }} + targetPort: web + protocol: TCP + name: rpc + selector: + {{- include "fork-observer.selectorLabels" . | nindent 4 }} diff --git a/resources/charts/fork-observer/values.yaml b/resources/charts/fork-observer/values.yaml new file mode 100644 index 000000000..513c97744 --- /dev/null +++ b/resources/charts/fork-observer/values.yaml @@ -0,0 +1,113 @@ +# Default values for fork-observer. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+namespace: warnet + +restartPolicy: Always + +image: + repository: b10c/fork-observer + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +podLabels: + app: "warnet" + mission: "observer" + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +livenessProbe: + exec: + command: + - pidof + - fork-observer + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 +readinessProbe: + failureThreshold: 1 + periodSeconds: 1 + successThreshold: 1 + tcpSocket: + port: 2323 + timeoutSeconds: 1 + +# Additional volumes on the output Deployment definition. +volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +# Additional volumeMounts on the output Deployment definition. +volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +port: 2323 + +configQueryInterval: | + # Interval for checking for new blocks + query_interval = 20 + +baseConfig: | + # Database path of the key value store. Will be created if non-existing. + database_path = "db" + + # path to the location of the static www files + www_path = "./www" + + # Webserver listen address + address = "0.0.0.0:2323" + + # Custom footer for the site. + footer_html = """ +
+
+ Warnet fork-observer +
+
+ """ + + [[networks]] + id = 0xDEADBE + name = "Warnet" + description = "A Warnet" + min_fork_height = 0 + max_interesting_heights = 100 + [pool_identification] + enable = false + +config: "" diff --git a/resources/networks/6_node_bitcoin/network.yaml b/resources/networks/6_node_bitcoin/network.yaml index 192f56a4e..14203fe2f 100644 --- a/resources/networks/6_node_bitcoin/network.yaml +++ b/resources/networks/6_node_bitcoin/network.yaml @@ -27,4 +27,5 @@ nodes: - name: tank-0005 connect: - tank-0006 - - name: tank-0006 \ No newline at end of file + - name: tank-0006 +fork_observer: true diff --git a/resources/networks/6_node_bitcoin/node-defaults.yaml b/resources/networks/6_node_bitcoin/node-defaults.yaml index 332541819..8ecb0c79f 100644 --- a/resources/networks/6_node_bitcoin/node-defaults.yaml +++ b/resources/networks/6_node_bitcoin/node-defaults.yaml @@ -23,4 +23,4 @@ image: config: | dns=1 - debug=rpc \ No newline at end of file + debug=rpc diff --git a/resources/networks/node-defaults.yaml b/resources/networks/node-defaults.yaml new file mode 100644 index 000000000..fed5a0043 --- /dev/null +++ b/resources/networks/node-defaults.yaml @@ -0,0 +1,29 @@ +chain: regtest + +collectLogs: true +metricsExport: false + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "27.0" + +config: | + dns=1 + debug=rpc + rpcauth=forkobserver:ef3d61b7ffecd81ffaaa7ae43091543d$a10bc25b5ca8910ebbee9b1538d38242b09d0a165e220f9a47535eccc4c089a4 + rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo + rpcwhitelistdefault=0 diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 0c1ccb611..1008374f4 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -23,6 +23,7 @@ # Import necessary functions and variables from network.py and namespaces.py from .network import ( + FORK_OBSERVER_CHART, NETWORK_FILE, ) from .process import stream_command @@ -53,6 +54,7 @@ def deploy(directory): if (directory / NETWORK_FILE).exists(): deploy_network(directory) + deploy_fork_observer(directory) elif (directory / NAMESPACES_FILE).exists(): deploy_namespaces(directory) else: @@ -61,6 +63,56 @@ def deploy(directory): ) +def deploy_fork_observer(directory: Path): + network_file_path = directory / NETWORK_FILE + with network_file_path.open() as f: + network_file = yaml.safe_load(f) + + # Only start if configured in the network file + if not network_file.get("fork_observer", False): + return + + namespace = get_default_namespace() + cmd = f"{HELM_COMMAND} 'fork-observer' {FORK_OBSERVER_CHART} --namespace {namespace}" + + temp_override_file_path = "" + override_string = "" + + # Add an entry for each node in the graph + # TODO: should this be moved into a chart, and only have substituted name and rpc_host values + for i, node in enumerate(network_file["nodes"]): + node_name = node.get("name") + node_config = f""" +[[networks.nodes]] +id = {i} +name = "{node_name}" +description = "A node. Just A node." 
+rpc_host = "{node_name}" +rpc_port = 18443 +rpc_user = "forkobserver" +rpc_password = "tabconf2024" +""" + + override_string += node_config + # End loop + + # Create yaml string using multi-line string format + override_string = override_string.strip() + v = {"config": override_string} + yaml_string = yaml.dump(v, default_style="|", default_flow_style=False) + + # Dump to yaml tempfile + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: + temp_file.write(yaml_string) + temp_override_file_path = Path(temp_file.name) + + cmd = f"{cmd} -f {temp_override_file_path}" + + if not stream_command(cmd): + click.echo(f"Failed to run Helm command: {cmd}") + return + + def deploy_network(directory: Path): network_file_path = directory / NETWORK_FILE defaults_file_path = directory / NETWORK_DEFAULTS_FILE diff --git a/src/warnet/main.py b/src/warnet/main.py index f0bfd74d7..40f1a63dc 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -183,6 +183,13 @@ def quickstart(): return False net_answers["connections"] = custom_connections["connections"] answers.update(net_answers) + fork_observer = click.prompt( + click.style( + "\nWould you like to enable fork-observer on the network?", fg="blue", bold=True + ), + type=bool, + default=True, + ) click.secho("\nCreating project structure...", fg="yellow", bold=True) project_path = Path(os.path.expanduser(proj_answers["project_path"])) @@ -194,6 +201,7 @@ def quickstart(): int(answers["connections"]), answers["version"], custom_network_path, + fork_observer, ) click.secho("\nSetup completed successfully!", fg="green", bold=True) click.echo("\nRun the following command to deploy this network:") @@ -359,7 +367,9 @@ def logs(pod_name: str, follow: bool): cli() -def custom_graph(num_nodes: int, num_connections: int, version: str, datadir: Path): +def custom_graph( + num_nodes: int, num_connections: int, version: str, datadir: Path, fork_observer: bool +): datadir.mkdir(parents=False, exist_ok=False) 
# Generate network.yaml nodes = [] @@ -390,12 +400,13 @@ def custom_graph(num_nodes: int, num_connections: int, version: str, datadir: Pa nodes.append(node) network_yaml_data = {"nodes": nodes} + network_yaml_data["fork_observer"] = fork_observer with open(os.path.join(datadir, "network.yaml"), "w") as f: yaml.dump(network_yaml_data, f, default_flow_style=False) - # Generate defaults.yaml - default_yaml_path = files("resources.networks").joinpath("6_node_bitcoin/node-defaults.yaml") + # Generate node-defaults.yaml + default_yaml_path = files("resources.networks").joinpath("node-defaults.yaml") with open(str(default_yaml_path)) as f: defaults_yaml_content = f.read() diff --git a/src/warnet/network.py b/src/warnet/network.py index f3735c6de..71d8b5598 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -24,6 +24,7 @@ DEFAULTS_FILE = "node-defaults.yaml" HELM_COMMAND = "helm upgrade --install --create-namespace" BITCOIN_CHART_LOCATION = str(files("resources.charts").joinpath("bitcoincore")) +FORK_OBSERVER_CHART = str(files("resources.charts").joinpath("fork-observer")) @click.group(name="network") From 70f17ae358da49f08a66ee0900803a0b8b52a14b Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 3 Sep 2024 13:24:09 +0100 Subject: [PATCH 181/710] hook FO configQueryInterval to custom graph create --- .../networks/6_node_bitcoin/network.yaml | 4 ++- src/warnet/deploy.py | 3 +- src/warnet/main.py | 33 +++++++++++++++++-- 3 files changed, 35 insertions(+), 5 deletions(-) diff --git a/resources/networks/6_node_bitcoin/network.yaml b/resources/networks/6_node_bitcoin/network.yaml index 14203fe2f..4ca444eb2 100644 --- a/resources/networks/6_node_bitcoin/network.yaml +++ b/resources/networks/6_node_bitcoin/network.yaml @@ -28,4 +28,6 @@ nodes: connect: - tank-0006 - name: tank-0006 -fork_observer: true +fork_observer: + enabled: true + configQueryInterval: 20 diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 1008374f4..110af8ae0 100644 --- 
a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -69,7 +69,7 @@ def deploy_fork_observer(directory: Path): network_file = yaml.safe_load(f) # Only start if configured in the network file - if not network_file.get("fork_observer", False): + if not network_file.get("fork_observer", {}).get("enabled", False): return namespace = get_default_namespace() @@ -99,6 +99,7 @@ def deploy_fork_observer(directory: Path): # Create yaml string using multi-line string format override_string = override_string.strip() v = {"config": override_string} + v["configQueryinterval"] = network_file.get("fork_observer", {}).get("configQueryinterval", 20) yaml_string = yaml.dump(v, default_style="|", default_flow_style=False) # Dump to yaml tempfile diff --git a/src/warnet/main.py b/src/warnet/main.py index 40f1a63dc..3d166665f 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -190,10 +190,22 @@ def quickstart(): type=bool, default=True, ) + fork_observer_query_interval = 20 + if fork_observer: + fork_observer_query_interval = click.prompt( + click.style( + "\nHow often would you like fork-observer to query node status (seconds)?", + fg="blue", + bold=True, + ), + type=int, + default=20, + ) click.secho("\nCreating project structure...", fg="yellow", bold=True) project_path = Path(os.path.expanduser(proj_answers["project_path"])) create_warnet_project(project_path) + click.secho("\nGenerating custom network...", fg="yellow", bold=True) custom_network_path = project_path / "networks" / answers["network_name"] custom_graph( @@ -202,10 +214,17 @@ def quickstart(): answers["version"], custom_network_path, fork_observer, + fork_observer_query_interval, ) click.secho("\nSetup completed successfully!", fg="green", bold=True) - click.echo("\nRun the following command to deploy this network:") + + click.echo( + f"\nEdit the network files found in {custom_network_path} before deployment if you want to customise the network." 
+ ) + + click.echo("\nWhen you're ready, run the following command to deploy this network:") click.echo(f"warnet deploy {custom_network_path}") + except Exception as e: click.echo(f"{e}\n\n") click.secho(f"An error occurred while running the quick start script:\n\n{e}\n\n", fg="red") @@ -368,7 +387,12 @@ def logs(pod_name: str, follow: bool): def custom_graph( - num_nodes: int, num_connections: int, version: str, datadir: Path, fork_observer: bool + num_nodes: int, + num_connections: int, + version: str, + datadir: Path, + fork_observer: bool, + fork_obs_query_interval: int, ): datadir.mkdir(parents=False, exist_ok=False) # Generate network.yaml @@ -400,7 +424,10 @@ def custom_graph( nodes.append(node) network_yaml_data = {"nodes": nodes} - network_yaml_data["fork_observer"] = fork_observer + network_yaml_data["fork_observer"] = { + "enabled": fork_observer, + "configQueryInterval": fork_obs_query_interval, + } with open(os.path.join(datadir, "network.yaml"), "w") as f: yaml.dump(network_yaml_data, f, default_flow_style=False) From 06d45f48e6958e6a488dace8c8cad6878e6ce244 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 3 Sep 2024 20:23:11 +0100 Subject: [PATCH 182/710] document fork-observer --- docs/quickstart.md | 10 ++++++++++ src/warnet/main.py | 6 ++++++ 2 files changed, 16 insertions(+) diff --git a/docs/quickstart.md b/docs/quickstart.md index 5168563f4..0b6c1454f 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -35,3 +35,13 @@ warnet quickstart ``` This will check you have the required dependencies and guide you through setting up and deploying your first network. + +## fork-observer + +If you enabled [fork-observer](https://github.com/0xB10C/fork-observer), you must forward the port from the cluster to your local machine: + +```bash +kubectl port-forward fork-observer 2323 +``` + +And then the GUI can be accessed via `localhost:2323` in a web browser. 
diff --git a/src/warnet/main.py b/src/warnet/main.py index 3d166665f..539404ebc 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -221,6 +221,12 @@ def quickstart(): click.echo( f"\nEdit the network files found in {custom_network_path} before deployment if you want to customise the network." ) + if fork_observer: + click.echo( + "If you enabled fork-observer you must forward the port from the cluster to your local machine:\n" + "`kubectl port-forward fork-observer 2323`\n" + "fork-observer will then be available at web address: localhost:2323" + ) click.echo("\nWhen you're ready, run the following command to deploy this network:") click.echo(f"warnet deploy {custom_network_path}") From 0af78d9ac0c41fe39144fd77d84b8ecccd2f9009 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 3 Sep 2024 21:08:02 +0100 Subject: [PATCH 183/710] update rpcauth --- resources/networks/node-defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/networks/node-defaults.yaml b/resources/networks/node-defaults.yaml index fed5a0043..55fdbbd04 100644 --- a/resources/networks/node-defaults.yaml +++ b/resources/networks/node-defaults.yaml @@ -24,6 +24,6 @@ image: config: | dns=1 debug=rpc - rpcauth=forkobserver:ef3d61b7ffecd81ffaaa7ae43091543d$a10bc25b5ca8910ebbee9b1538d38242b09d0a165e220f9a47535eccc4c089a4 + rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo rpcwhitelistdefault=0 From 1b3aafb0ec87a3c21758e455c4cc784144bdbea1 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 3 Sep 2024 21:08:17 +0100 Subject: [PATCH 184/710] deploy: add debug flag --- src/warnet/deploy.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 110af8ae0..9087c6a1a 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ 
-48,13 +48,14 @@ def validate_directory(ctx, param, value): type=click.Path(exists=True, file_okay=False, dir_okay=True), callback=validate_directory, ) -def deploy(directory): +@click.option("--debug", is_flag=True) +def deploy(directory, debug): """Deploy a warnet with topology loaded from """ directory = Path(directory) if (directory / NETWORK_FILE).exists(): - deploy_network(directory) - deploy_fork_observer(directory) + deploy_network(directory, debug) + deploy_fork_observer(directory, debug) elif (directory / NAMESPACES_FILE).exists(): deploy_namespaces(directory) else: @@ -63,7 +64,7 @@ def deploy(directory): ) -def deploy_fork_observer(directory: Path): +def deploy_fork_observer(directory: Path, debug: bool): network_file_path = directory / NETWORK_FILE with network_file_path.open() as f: network_file = yaml.safe_load(f) @@ -74,6 +75,8 @@ def deploy_fork_observer(directory: Path): namespace = get_default_namespace() cmd = f"{HELM_COMMAND} 'fork-observer' {FORK_OBSERVER_CHART} --namespace {namespace}" + if debug: + cmd += " --debug" temp_override_file_path = "" override_string = "" @@ -114,7 +117,7 @@ def deploy_fork_observer(directory: Path): return -def deploy_network(directory: Path): +def deploy_network(directory: Path, debug: bool = False): network_file_path = directory / NETWORK_FILE defaults_file_path = directory / NETWORK_DEFAULTS_FILE @@ -131,6 +134,8 @@ def deploy_network(directory: Path): node_config_override = {k: v for k, v in node.items() if k != "name"} cmd = f"{HELM_COMMAND} {node_name} {NETWORK_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}" + if debug: + cmd += " --debug" if node_config_override: with tempfile.NamedTemporaryFile( From 50f5db1abf0a3c05988bc61a1baead4061d27de9 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 3 Sep 2024 21:13:54 +0100 Subject: [PATCH 185/710] whitelistdefault=1 --- resources/networks/node-defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/resources/networks/node-defaults.yaml b/resources/networks/node-defaults.yaml index 55fdbbd04..a940ffcf9 100644 --- a/resources/networks/node-defaults.yaml +++ b/resources/networks/node-defaults.yaml @@ -26,4 +26,4 @@ config: | debug=rpc rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo - rpcwhitelistdefault=0 + rpcwhitelistdefault=1 From 78a13ce203066a4bb74f735257f01c21ca5c3245 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 3 Sep 2024 21:28:12 +0100 Subject: [PATCH 186/710] disable fo in 6_node_bitcoin --- resources/networks/6_node_bitcoin/network.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/resources/networks/6_node_bitcoin/network.yaml b/resources/networks/6_node_bitcoin/network.yaml index 4ca444eb2..6103d8a9b 100644 --- a/resources/networks/6_node_bitcoin/network.yaml +++ b/resources/networks/6_node_bitcoin/network.yaml @@ -29,5 +29,4 @@ nodes: - tank-0006 - name: tank-0006 fork_observer: - enabled: true - configQueryInterval: 20 + enabled: false From 3990a0e423653baf67bd9ebfc5f4b6fe09e32c8c Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 3 Sep 2024 21:34:59 -0400 Subject: [PATCH 187/710] test: observe fork observer --- .github/workflows/test.yml | 1 + test/data/services/network.yaml | 35 +++++++++++++++ test/data/services/node-defaults.yaml | 4 ++ test/services_test.py | 62 +++++++++++++++++++++++++++ 4 files changed, 102 insertions(+) create mode 100644 test/data/services/network.yaml create mode 100644 test/data/services/node-defaults.yaml create mode 100755 test/services_test.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f3c05cac7..1d1f78e2b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -61,6 +61,7 @@ jobs: - dag_connection_test.py - logging_test.py - rpc_test.py + - services_test.py - 
scenarios_test.py steps: - uses: actions/checkout@v4 diff --git a/test/data/services/network.yaml b/test/data/services/network.yaml new file mode 100644 index 000000000..6c19027a2 --- /dev/null +++ b/test/data/services/network.yaml @@ -0,0 +1,35 @@ +nodes: + - name: john + config: | + dns=1 + debug=rpc + rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c + rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo + rpcwhitelistdefault=0 + connect: + - paul + - name: paul + config: | + dns=1 + debug=rpc + rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c + rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo + rpcwhitelistdefault=0 + - name: george + config: | + dns=1 + debug=rpc + rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c + rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo + rpcwhitelistdefault=0 + connect: + - ringo + - name: ringo + config: | + dns=1 + debug=rpc + rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c + rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo + rpcwhitelistdefault=0 +fork_observer: + enabled: true diff --git a/test/data/services/node-defaults.yaml b/test/data/services/node-defaults.yaml new file mode 100644 index 000000000..7e021cad1 --- /dev/null +++ b/test/data/services/node-defaults.yaml @@ -0,0 +1,4 @@ +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: "27.0" diff --git a/test/services_test.py b/test/services_test.py new file mode 100755 index 000000000..ec6422765 --- /dev/null +++ b/test/services_test.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 + +import os +from pathlib import 
Path +from subprocess import PIPE, Popen + +import requests +from test_base import TestBase + + +class ServicesTest(TestBase): + def __init__(self): + super().__init__() + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "services" + + def run_test(self): + try: + self.setup_network() + self.check_fork_observer() + finally: + self.stop_server() + + def setup_network(self): + self.log.info("Setting up network") + self.log.info(self.warnet(f"deploy {self.network_dir}")) + self.wait_for_all_tanks_status(target="running") + self.wait_for_all_edges() + + def check_fork_observer(self): + self.log.info("Creating chain split") + self.warnet("bitcoin rpc john createwallet miner") + self.warnet("bitcoin rpc john -generate 1") + self.log.info("Forwarding port 2323...") + # Stays alive in background + self.fo_port_fwd_process = Popen( + ["kubectl", "port-forward", "fork-observer", "2323"], + stdout=PIPE, + stderr=PIPE, + bufsize=1, + universal_newlines=True, + ) + + def call_fo_api(): + try: + fo_res = requests.get("http://localhost:2323/api/networks.json") + network_id = fo_res.json()["networks"][0]["id"] + fo_data = requests.get(f"http://localhost:2323/api/{network_id}/data.json") + # fork observed! 
+ return len(fo_data.json()["header_infos"]) == 2 + except Exception as e: + self.log.info(f"Fork Observer API error: {e}") + self.log.info("No Fork observed yet") + return False + + self.wait_for_predicate(call_fo_api) + self.log.info("Fork observed!") + self.fo_port_fwd_process.terminate() + + +if __name__ == "__main__": + test = ServicesTest() + test.run_test() From dc52ac126b6e31a625a2a8c1bd0fb52cc4e087a7 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 10:37:48 +0200 Subject: [PATCH 188/710] remove deadcode from network.py --- src/warnet/network.py | 118 +----------------------------------------- 1 file changed, 1 insertion(+), 117 deletions(-) diff --git a/src/warnet/network.py b/src/warnet/network.py index f3735c6de..bceb74fa6 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -1,15 +1,12 @@ import json import shutil -import tempfile from importlib.resources import files from pathlib import Path -import click -import yaml from rich import print from .bitcoin import _rpc -from .k8s import delete_namespace, get_default_namespace, get_mission, get_pods +from .k8s import get_mission from .process import stream_command WAR_MANIFESTS_FILES = files("resources.manifests") @@ -26,30 +23,6 @@ BITCOIN_CHART_LOCATION = str(files("resources.charts").joinpath("bitcoincore")) -@click.group(name="network") -def network(): - """Network commands""" - - -class Edge: - def __init__(self, src: str, dst: str, data: dict[str, any]): - self.src = src - self.dst = dst - self.data = data - - def to_dict(self): - return {"src": self.src, "dst": self.dst, "data": self.data} - - -def edges_from_network_file(network_file: dict[str, any]) -> list[Edge]: - edges = [] - for node in network_file["nodes"]: - if "connect" in node: - for connection in node["connect"]: - edges.append(Edge(node["name"], connection, "")) - return edges - - def setup_logging_helm() -> bool: helm_commands = [ "helm repo add grafana 
https://grafana.github.io/helm-charts", @@ -104,76 +77,6 @@ def copy_scenario_defaults(directory: Path): ) -@network.command() -@click.argument( - "network_dir", - type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), -) -@click.option("--logging/--no-logging", default=False) -def deploy(network_dir: Path, logging: bool): - """Deploy a warnet with topology loaded from """ - network_file_path = network_dir / NETWORK_FILE - defaults_file_path = network_dir / DEFAULTS_FILE - - with network_file_path.open() as f: - network_file = yaml.safe_load(f) - - namespace = get_default_namespace() - - for node in network_file["nodes"]: - print(f"Deploying node: {node.get('name')}") - try: - temp_override_file_path = "" - node_name = node.get("name") - # all the keys apart from name - node_config_override = {k: v for k, v in node.items() if k != "name"} - - cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}" - - if node_config_override: - with tempfile.NamedTemporaryFile( - mode="w", suffix=".yaml", delete=False - ) as temp_file: - yaml.dump(node_config_override, temp_file) - temp_override_file_path = Path(temp_file.name) - cmd = f"{cmd} -f {temp_override_file_path}" - - if not stream_command(cmd): - print(f"Failed to run Helm command: {cmd}") - return - except Exception as e: - print(f"Error: {e}") - return - finally: - if temp_override_file_path: - Path(temp_override_file_path).unlink() - - -@network.command() -def down(): - """Bring down a running warnet""" - if delete_namespace("warnet-logging"): - print("Warnet logging deleted") - else: - print("Warnet logging NOT deleted") - tanks = get_mission("tank") - for tank in tanks: - cmd = f"helm uninstall {tank.metadata.name}" - stream_command(cmd) - # Clean up scenarios and other pods - # TODO: scenarios should be helm-ified as well - pods = get_pods() - for pod in pods.items: - cmd = f"kubectl delete pod {pod.metadata.name}" - 
stream_command(cmd) - - -@network.command() -def connected(): - """Determine if all p2p connections defined in graph are established""" - print(_connected()) - - def _connected(): tanks = get_mission("tank") for tank in tanks: @@ -193,22 +96,3 @@ def _connected(): return False print("Network connected") return True - - -@network.command() -def status(): - """Return pod status""" - # TODO: make it a pretty table - print(_status()) - - -def _status(): - tanks = get_mission("tank") - stats = [] - for tank in tanks: - status = { - "tank": tank.metadata.name, - "bitcoin_status": tank.status.phase.lower(), - } - stats.append(status) - return stats From 2014f09b9769d8e74f261cb607a5a643474e4683 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 10:41:29 +0200 Subject: [PATCH 189/710] remove deadcode from namespaces.py --- src/warnet/namespaces.py | 51 ---------------------------------------- 1 file changed, 51 deletions(-) diff --git a/src/warnet/namespaces.py b/src/warnet/namespaces.py index 0b14de4dd..b193380dd 100644 --- a/src/warnet/namespaces.py +++ b/src/warnet/namespaces.py @@ -1,10 +1,8 @@ import shutil -import tempfile from importlib.resources import files from pathlib import Path import click -import yaml from .process import run_command, stream_command @@ -35,58 +33,9 @@ def namespaces(): """Namespaces commands""" -@namespaces.command() @click.argument( "namespaces_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path) ) -def deploy(namespaces_dir: Path): - """Deploy namespaces with users from a """ - namespaces_file_path = namespaces_dir / NAMESPACES_FILE - defaults_file_path = namespaces_dir / DEFAULTS_FILE - - with namespaces_file_path.open() as f: - namespaces_file = yaml.safe_load(f) - - # validate names before deploying - names = [n.get("name") for n in namespaces_file["namespaces"]] - for n in names: - if not n.startswith("warnet-"): - print( - f"Failed to create namespace: {n}. 
Namespaces must start with a 'warnet-' prefix." - ) - - # deploy namespaces - for namespace in namespaces_file["namespaces"]: - print(f"Deploying namespace: {namespace.get('name')}") - try: - temp_override_file_path = Path() - namespace_name = namespace.get("name") - # all the keys apart from name - namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} - - cmd = ( - f"{HELM_COMMAND} {namespace_name} {BITCOIN_CHART_LOCATION} -f {defaults_file_path}" - ) - - if namespace_config_override: - with tempfile.NamedTemporaryFile( - mode="w", suffix=".yaml", delete=False - ) as temp_file: - yaml.dump(namespace_config_override, temp_file) - temp_override_file_path = Path(temp_file.name) - cmd = f"{cmd} -f {temp_override_file_path}" - - if not stream_command(cmd): - print(f"Failed to run Helm command: {cmd}") - return - except Exception as e: - print(f"Error: {e}") - return - finally: - if temp_override_file_path.exists(): - temp_override_file_path.unlink() - - @namespaces.command() def list(): """List all namespaces with 'warnet-' prefix""" From 57d7b4961a6a9f7c8417ed3d145c9c9eb6f2606a Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 12:02:41 +0200 Subject: [PATCH 190/710] fix tests --- test/scenarios_test.py | 6 +++--- test/test_base.py | 10 ++++------ 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 30578f9b1..30d9a3f7e 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -7,7 +7,7 @@ from warnet.k8s import delete_pod from warnet.process import run_command -from warnet.scenarios import _active as scenarios_active +from warnet.status import _get_active_scenarios as scenarios_active class ScenariosTest(TestBase): @@ -36,7 +36,7 @@ def scenario_running(self, scenario_name: str): """Check that we are only running a single scenario of the correct name""" active = scenarios_active() assert len(active) == 1 - return scenario_name in active[0]["commander"] + 
return scenario_name in active[0]["name"] def run_and_check_scenario_from_file(self): scenario_file = "test/data/scenario_p2p_interface.py" @@ -80,7 +80,7 @@ def stop_scenario(self): running = scenarios_active() assert len(running) == 1, f"Expected one running scenario, got {len(running)}" assert running[0]["status"] == "running", "Scenario should be running" - delete_pod(running[0]["commander"]) + delete_pod(running[0]["name"]) self.wait_for_predicate(self.check_scenario_stopped) def check_scenario_stopped(self): diff --git a/test/test_base.py b/test/test_base.py index 9326cc9a7..582ca5c8f 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -14,7 +14,7 @@ from warnet.control import get_active_scenarios from warnet.k8s import get_pod_exit_status from warnet.network import _connected as network_connected -from warnet.network import _status as network_status +from warnet.status import _get_tank_status as network_status class TestBase: @@ -112,11 +112,9 @@ def check_status(): if len(tanks) == 0: return True for tank in tanks: - for service in ["bitcoin", "lightning", "circuitbreaker"]: - status = tank.get(f"{service}_status") - if status: - stats["total"] += 1 - stats[status] = stats.get(status, 0) + 1 + status = tank["status"] + stats["total"] += 1 + stats[status] = stats.get(status, 0) + 1 self.log.info(f"Waiting for all tanks to reach '{target}': {stats}") return target in stats and stats[target] == stats["total"] From fa30684ca1c41fa0f28efde0d35058414d1ca847 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 12:05:10 +0200 Subject: [PATCH 191/710] remove unused graph_schema.json --- src/warnet/graph_schema.json | 83 ------------------------------------ 1 file changed, 83 deletions(-) delete mode 100644 src/warnet/graph_schema.json diff --git a/src/warnet/graph_schema.json b/src/warnet/graph_schema.json deleted file mode 100644 index ac1f7aa9f..000000000 --- a/src/warnet/graph_schema.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "graph": { - "type": 
"object", - "properties": { - "node": {"type": "object"}, - "edge": {"type": "object"}, - "node_default": {"type": "object"}, - "edge_default": {"type": "object"}, - "services": { - "type": "string", - "comment": "A space-separated list of extra service containers to deploy in the network. See [docs/services.md](services.md) for complete list of available services" - } - }, - "additionalProperties": false - }, - "node": { - "type": "object", - "properties": { - "version": { - "type": "string", - "comment": "Bitcoin Core version with an available Warnet tank image on Dockerhub. May also be a GitHub repository with format user/repository:branch to build from source code"}, - "image": { - "type": "string", - "comment": "Bitcoin Core Warnet tank image on Dockerhub with the format repository/image:tag"}, - "bitcoin_config": { - "type": "string", - "default": "", - "comment": "A string of Bitcoin Core options in command-line format, e.g. '-debug=net -blocksonly'"}, - "tc_netem": { - "type": "string", - "comment": "A tc-netem command as a string beginning with 'tc qdisc add dev eth0 root netem'"}, - "exporter": { - "type": "boolean", - "default": false, - "comment": "Whether to attach a Prometheus data exporter to the tank"}, - "metrics": { - "type": "string", - "comment": "A space-separated string of RPC queries to scrape by Prometheus"}, - "collect_logs": { - "type": "boolean", - "default": false, - "comment": "Whether to collect Bitcoin Core debug logs with Promtail"}, - "build_args": { - "type": "string", - "default": "", - "comment": "A string of configure options used when building Bitcoin Core from source code, e.g. 
'--without-gui --disable-tests'"}, - "ln": { - "type": "string", - "comment": "Attach a lightning network node of this implementation (currently only supports 'lnd' or 'cln')"}, - "ln_image": { - "type": "string", - "comment": "Specify a lightning network node image from Dockerhub with the format repository/image:tag"}, - "ln_cb_image": { - "type": "string", - "comment": "Specify a lnd Circuit Breaker image from Dockerhub with the format repository/image:tag"}, - "ln_config": { - "type": "string", - "comment": "A string of arguments for the lightning network node in command-line format, e.g. '--protocol.wumbo-channels --bitcoin.timelockdelta=80'"} - }, - "additionalProperties": false, - "oneOf": [ - {"required": ["version"]}, - {"required": ["image"]} - ], - "required": [] - }, - "edge": { - "type": "object", - "properties": { - "channel_open": { - "type": "string", - "comment": "Indicate that this edge is a lightning channel with these arguments passed to lnd openchannel"}, - "source_policy": { - "type": "string", - "comment": "Update the channel originator policy by passing these arguments passed to lnd updatechanpolicy"}, - "target_policy": { - "type": "string", - "comment": "Update the channel partner policy by passing these arguments passed to lnd updatechanpolicy"} - }, - "additionalProperties": false, - "required": [] - } -} From 884d94b3845d7a73d22dc4cc9106af8022900976 Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Wed, 4 Sep 2024 14:57:19 +0100 Subject: [PATCH 192/710] answered the call to remove hardcoded values --- resources/charts/fork-observer/templates/configmap.yaml | 2 +- resources/charts/fork-observer/values.yaml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/resources/charts/fork-observer/templates/configmap.yaml b/resources/charts/fork-observer/templates/configmap.yaml index 8fb4dcad6..f1fc97930 100644 --- a/resources/charts/fork-observer/templates/configmap.yaml +++ 
b/resources/charts/fork-observer/templates/configmap.yaml @@ -7,5 +7,5 @@ metadata: data: config.toml: | {{- .Values.configQueryInterval | nindent 4 }} - {{- .Values.baseConfig | nindent 4 }} + {{- tpl .Values.baseConfig . | nindent 4 }} {{- .Values.config | nindent 8 }} diff --git a/resources/charts/fork-observer/values.yaml b/resources/charts/fork-observer/values.yaml index 513c97744..a6543de7c 100644 --- a/resources/charts/fork-observer/values.yaml +++ b/resources/charts/fork-observer/values.yaml @@ -82,6 +82,8 @@ configQueryInterval: | # Interval for checking for new blocks query_interval = 20 +maxInterestingHeights: 100 + baseConfig: | # Database path of the key value store. Will be created if non-existing. database_path = "db" @@ -106,7 +108,7 @@ baseConfig: | name = "Warnet" description = "A Warnet" min_fork_height = 0 - max_interesting_heights = 100 + max_interesting_heights = {{ .Values.maxInterestingHeights }} [pool_identification] enable = false From fee6e0d0be21ad6366abe0ada1eeb78ddc0b99e1 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 4 Sep 2024 15:37:13 +0100 Subject: [PATCH 193/710] set rpcwhitelistdefault to 0 --- resources/networks/node-defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/networks/node-defaults.yaml b/resources/networks/node-defaults.yaml index a940ffcf9..55fdbbd04 100644 --- a/resources/networks/node-defaults.yaml +++ b/resources/networks/node-defaults.yaml @@ -26,4 +26,4 @@ config: | debug=rpc rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo - rpcwhitelistdefault=1 + rpcwhitelistdefault=0 From 4a472709f678e501e329e2336812236d130a6d28 Mon Sep 17 00:00:00 2001 From: Grant Date: Tue, 3 Sep 2024 16:06:58 -0500 Subject: [PATCH 194/710] remove quick_start.sh --- resources/scripts/quick_start.sh | 99 -------------------------------- 1 
file changed, 99 deletions(-) delete mode 100755 resources/scripts/quick_start.sh diff --git a/resources/scripts/quick_start.sh b/resources/scripts/quick_start.sh deleted file mode 100755 index 7d444de75..000000000 --- a/resources/scripts/quick_start.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/bash -set -euo pipefail - - -is_cygwin_etal() { - uname -s | grep -qE "CYGWIN|MINGW|MSYS" -} -is_wsl() { - grep -qEi "(Microsoft|WSL)" /proc/version &> /dev/null -} -if is_cygwin_etal || is_wsl; then - echo "Quick start does not support Windows" - exit 1 -fi - - -# Colors and styles -RESET='\033[0m' -BOLD='\033[1m' - -# Use colors if we can and have the color space -if command -v tput &> /dev/null; then - ncolors=$(tput colors) - if [ -n "$ncolors" ] && [ "$ncolors" -ge 8 ]; then - RESET=$(tput sgr0) - BOLD=$(tput bold) - fi -fi - -print_message() { - local color="$1" - local message="$2" - local format="${3:-}" - echo -e "${format}${color}${message}${RESET}" -} - -print_partial_message() { - local pre_message="$1" - local formatted_part="$2" - local post_message="$3" - local format="${4:-}" # Default to empty string if not provided - local color="${5:-$RESET}" - - echo -e "${color}${pre_message}${format}${formatted_part}${RESET}${color}${post_message}${RESET}" -} - -print_message "" "" "" -print_message "" " ╭───────────────────────────────────╮" "" -print_message "" " │ Welcome to the Warnet Quickstart │" "" -print_message "" " ╰───────────────────────────────────╯" "" -print_message "" "" "" -print_message "" " Let's find out if your system has what it takes to run Warnet..." "" -print_message "" "" "" - -kubectl_path=$(command -v kubectl || true) -if [ -n "$kubectl_path" ]; then - print_partial_message " ⭐️ Found " "kubectl" ": $kubectl_path " "$BOLD" -else - print_partial_message " 💥 Could not find " "kubectl" ". Please follow this link to install it..." 
"$BOLD" - print_message "" " https://kubernetes.io/docs/tasks/tools/" "$BOLD" - exit 127 -fi - -docker_path=$(command -v docker || true) -if [ -n "$docker_path" ]; then - print_partial_message " ⭐️ Found " "docker" ": $docker_path" "$BOLD" -else - print_partial_message " 💥 Could not find " "docker" ". Please follow this link to install Docker Engine..." "$BOLD" - print_message "" " https://docs.docker.com/engine/install/" "$BOLD" - exit 127 -fi - -helm_path=$(command -v helm || true) -if [ -n "$helm_path" ]; then - print_partial_message " ⭐️ Found " "helm" ": $helm_path" "$BOLD" -else - print_partial_message " 💥 Could not find " "helm" ". Please follow this link to install it..." "$BOLD" - print_message "" " https://helm.sh/docs/intro/install/" "$BOLD" - exit 127 -fi - -python_path=$(command -v python3 || true) -if [ -n "$python_path" ]; then - print_partial_message " ⭐️ Found " "python3" ": $python_path " "$BOLD" -else - print_partial_message " 💥 Could not find " "python3" ". Please follow this link to install it (or use your package manager)..." "$BOLD" - print_message "" " https://www.python.org/downloads/" "$BOLD" - exit 127 -fi - -if [ -n "$VIRTUAL_ENV" ]; then - print_partial_message " ⭐️ Running in virtual environment: " "$VIRTUAL_ENV" "$BOLD" -else - print_partial_message " 💥 Not running in a virtual environment. " "Please activate a venv before proceeding." 
"$BOLD" - exit 127 -fi - -echo " ✅ Everything needed found" - From 1090984f41158ea20387b4ef787e901cdef0aa96 Mon Sep 17 00:00:00 2001 From: Grant Date: Tue, 3 Sep 2024 14:40:21 -0500 Subject: [PATCH 195/710] move quickstart checks to python --- src/warnet/main.py | 240 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 222 insertions(+), 18 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 539404ebc..25fc7bff0 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -1,10 +1,14 @@ import json import os +import platform import random import subprocess import sys +from dataclasses import dataclass +from enum import Enum, auto from importlib.resources import files from pathlib import Path +from typing import Callable import click import inquirer @@ -46,25 +50,225 @@ def cli(): @cli.command() def quickstart(): """Setup warnet""" + + class ToolStatus(Enum): + Satisfied = auto() + Unsatisfied = auto() + + @dataclass + class ToolInfo: + tool_name: str + is_installed_func: Callable[[], tuple[bool, str]] + install_instruction: str + install_url: str + + __slots__ = ["tool_name", "is_installed_func", "install_instruction", "install_url"] + + def is_minikube_installed() -> tuple[bool, str]: + try: + version_result = subprocess.run( + ["minikube", "version", "--short"], + capture_output=True, + text=True, + ) + location_result = subprocess.run( + ["which", "minikube"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return True, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_minikube_version_valid_on_darwin() -> tuple[bool, str]: + try: + version_result = subprocess.run( + ["minikube", "version", "--short"], + capture_output=True, + text=True, + ) + location_result = subprocess.run( + ["which", "minikube"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and 
location_result.returncode == 0: + version = version_result.stdout.strip().split()[-1] # Get the version number + return version not in [ + "v1.32.0", + "1.33.0", + ], f"{location_result.stdout.strip()} ({version})" + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_platform_darwin() -> bool: + return platform.system() == "Darwin" + + def is_docker_installed() -> tuple[bool, str]: + try: + version_result = subprocess.run(["docker", "--version"], capture_output=True, text=True) + location_result = subprocess.run( + ["which", "docker"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return True, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_docker_desktop_running() -> tuple[bool, str]: + try: + version_result = subprocess.run(["docker", "info"], capture_output=True, text=True) + location_result = subprocess.run( + ["which", "docker"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return "Docker Desktop" in version_result.stdout, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_kubectl_installed() -> tuple[bool, str]: + try: + version_result = subprocess.run( + ["kubectl", "version", "--client"], + capture_output=True, + text=True, + ) + location_result = subprocess.run( + ["which", "kubectl"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return True, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_helm_installed() -> tuple[bool, str]: + try: + version_result = subprocess.run(["helm", "version"], capture_output=True, text=True) + location_result = subprocess.run( + ["which", 
"helm"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return version_result.returncode == 0, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def check_installation(tool_info: ToolInfo) -> ToolStatus: + has_good_version, location = tool_info.is_installed_func() + if not has_good_version: + instruction_label = click.style(" Instruction: ", fg="yellow", bold=True) + instruction_text = click.style(f"{tool_info.install_instruction}", fg="yellow") + url_label = click.style(" URL: ", fg="yellow", bold=True) + url_text = click.style(f"{tool_info.install_url}", fg="yellow") + + click.secho(f" 💥 {tool_info.tool_name} is not installed. {location}", fg="yellow") + click.echo(instruction_label + instruction_text) + click.echo(url_label + url_text) + return ToolStatus.Unsatisfied + else: + click.secho(f" ⭐️ {tool_info.tool_name} is satisfied: {location}", bold=False) + return ToolStatus.Satisfied + + docker_info = ToolInfo( + tool_name="Docker", + is_installed_func=is_docker_installed, + install_instruction="Install Docker from Docker's official site.", + install_url="https://docs.docker.com/engine/install/", + ) + docker_desktop_info = ToolInfo( + tool_name="Docker Desktop", + is_installed_func=is_docker_desktop_running, + install_instruction="Make sure Docker Desktop is installed and running.", + install_url="https://docs.docker.com/desktop/", + ) + kubectl_info = ToolInfo( + tool_name="Kubectl", + is_installed_func=is_kubectl_installed, + install_instruction="Install kubectl.", + install_url="https://kubernetes.io/docs/tasks/tools/install-kubectl/", + ) + helm_info = ToolInfo( + tool_name="Helm", + is_installed_func=is_helm_installed, + install_instruction="Install Helm from Helm's official site.", + 
install_url="https://helm.sh/docs/intro/install/", + ) + minikube_info = ToolInfo( + tool_name="Minikube", + is_installed_func=is_minikube_installed, + install_instruction="Install Minikube from the official Minikube site.", + install_url="https://minikube.sigs.k8s.io/docs/start/", + ) + minikube_version_info = ToolInfo( + tool_name="Minikube's version", + is_installed_func=is_minikube_version_valid_on_darwin, + install_instruction="Install the latest Minikube from the official Minikube site.", + install_url="https://minikube.sigs.k8s.io/docs/start/", + ) + + print("") + print(" ╭───────────────────────────────────╮") + print(" │ Welcome to the Warnet Quickstart │") + print(" ╰───────────────────────────────────╯") + print("") + print(" Let's find out if your system has what it takes to run Warnet...") + print("") + try: - # Requirements checks - with subprocess.Popen( - ["/bin/bash", str(QUICK_START_PATH)], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True, - env=dict(os.environ, TERM="xterm-256color"), - ) as process: - if process.stdout: - for line in iter(process.stdout.readline, ""): - click.echo(line, nl=False) - return_code = process.wait() - if return_code != 0: - click.secho( - f"Quick start script failed with return code {return_code}", fg="red", bold=True - ) - click.secho("Install missing requirements before proceeding", fg="yellow") - return False + questions = [ + inquirer.List( + "platform", + message=click.style("Which platform would you like to use?", fg="blue", bold=True), + choices=["Minikube", "Docker Desktop"], + ) + ] + answers = inquirer.prompt(questions) + + check_results: list[ToolStatus] = [] + if answers: + if answers["platform"] == "Docker Desktop": + check_results.append(check_installation(docker_info)) + check_results.append(check_installation(docker_desktop_info)) + check_results.append(check_installation(kubectl_info)) + 
check_results.append(check_installation(helm_info)) + elif answers["platform"] == "Minikube": + check_results.append(check_installation(docker_info)) + check_results.append(check_installation(minikube_info)) + if is_platform_darwin(): + check_results.append(check_installation(minikube_version_info)) + check_results.append(check_installation(kubectl_info)) + check_results.append(check_installation(helm_info)) + else: + click.secho("Please re-run Quickstart.", fg="yellow") + sys.exit(1) + + if ToolStatus.Unsatisfied in check_results: + click.secho( + "Please fix the installation issues above and try quickstart again.", fg="yellow" + ) + sys.exit(1) + else: + click.secho(" ⭐️ Warnet prerequisites look good.\n") # New project setup questions = [ From 327ffe74977e3a9edfe0d880c5346e3d8592f052 Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Wed, 4 Sep 2024 17:47:29 +0100 Subject: [PATCH 196/710] no need to copy node-defaults (#518) --- src/warnet/network.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/warnet/network.py b/src/warnet/network.py index d3e5170de..bbf66de3d 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -65,7 +65,12 @@ def should_copy(item: Path) -> bool: def copy_network_defaults(directory: Path): """Create the project structure for a warnet project's network""" - copy_defaults(directory, WAR_NETWORK_DIR, WAR_NETWORK_FILES.joinpath(), []) + copy_defaults( + directory, + WAR_NETWORK_DIR, + WAR_NETWORK_FILES.joinpath(), + ["node-defaults.yaml", "__pycache__", "__init__.py"], + ) def copy_scenario_defaults(directory: Path): From d2b41039e2a981f1e0dc3f0ddd80aafa3d14bfaa Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 5 Sep 2024 13:09:24 -0400 Subject: [PATCH 197/710] charts: make bitcoin config chain-agnostic --- .../charts/bitcoincore/templates/_helpers.tpl | 4 ++-- .../charts/bitcoincore/templates/configmap.yaml | 7 ++----- resources/charts/bitcoincore/values.yaml | 17 +++++++++-------- 
test/data/signet/network.yaml | 8 ++++++++ test/data/signet/node-defaults.yaml | 6 ++++++ 5 files changed, 27 insertions(+), 15 deletions(-) create mode 100644 test/data/signet/network.yaml create mode 100644 test/data/signet/node-defaults.yaml diff --git a/resources/charts/bitcoincore/templates/_helpers.tpl b/resources/charts/bitcoincore/templates/_helpers.tpl index f22ca65e4..1adc5d205 100644 --- a/resources/charts/bitcoincore/templates/_helpers.tpl +++ b/resources/charts/bitcoincore/templates/_helpers.tpl @@ -60,10 +60,10 @@ Create the name of the service account to use {{/* Add network section heading in bitcoin.conf after v0.17.0 */}} -{{- define "bitcoincore.check_semver.regtest" -}} +{{- define "bitcoincore.check_semver" -}} {{- $tag := .Values.image.tag | trimPrefix "v" -}} {{- $version := semverCompare ">=0.17.0" $tag -}} {{- if $version -}} -[regtest] +[{{ .Values.chain }}] {{- end -}} {{- end -}} diff --git a/resources/charts/bitcoincore/templates/configmap.yaml b/resources/charts/bitcoincore/templates/configmap.yaml index 37952ff48..c7a791031 100644 --- a/resources/charts/bitcoincore/templates/configmap.yaml +++ b/resources/charts/bitcoincore/templates/configmap.yaml @@ -6,12 +6,9 @@ metadata: {{- include "bitcoincore.labels" . | nindent 4 }} data: bitcoin.conf: | - {{- if eq .Values.chain "regtest" }} - regtest=1 + {{ .Values.chain }}=1 - {{ template "bitcoincore.check_semver.regtest" . }} - {{- tpl .Values.regtestConfig . | nindent 4 }} - {{- end }} + {{ template "bitcoincore.check_semver" . 
}} {{- .Values.baseConfig | nindent 4 }} {{- .Values.config | nindent 4 }} {{- range .Values.connect }} diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 00c639eb5..85dd71540 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -112,14 +112,6 @@ collectLogs: false metricsExport: false prometheusMetricsPort: 9332 -regtestConfig: | - rpcuser=user - rpcpassword=password - rpcport=18443 - rpcallowip=0.0.0.0/0 - rpcbind=0.0.0.0 - rest=1 - baseConfig: | checkmempool=0 acceptnonstdtxn=1 @@ -133,6 +125,15 @@ baseConfig: | zmqpubrawblock=tcp://0.0.0.0:28332 zmqpubrawtx=tcp://0.0.0.0:28333 + rpcuser=user + rpcpassword=password + # use regtest port by default for all networks + rpcport=18443 + rpcallowip=0.0.0.0/0 + rpcbind=0.0.0.0 + rest=1 + + config: "" connect: [] diff --git a/test/data/signet/network.yaml b/test/data/signet/network.yaml new file mode 100644 index 000000000..5677909cf --- /dev/null +++ b/test/data/signet/network.yaml @@ -0,0 +1,8 @@ +nodes: + - name: miner + - name: tank-0001 + connect: + - miner + - name: tank-0002 + connect: + - miner \ No newline at end of file diff --git a/test/data/signet/node-defaults.yaml b/test/data/signet/node-defaults.yaml new file mode 100644 index 000000000..a7fac79a1 --- /dev/null +++ b/test/data/signet/node-defaults.yaml @@ -0,0 +1,6 @@ +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: "27.0" + +chain: signet \ No newline at end of file From d562df20a49e2ffb048fc6566ecba9a61f799241 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 6 Sep 2024 10:06:24 -0400 Subject: [PATCH 198/710] prettify _connected() output --- src/warnet/network.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/warnet/network.py b/src/warnet/network.py index bbf66de3d..d9d5d1e85 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -88,16 +88,15 @@ def _connected(): for 
tank in tanks: # Get actual peerinfo = json.loads(_rpc(tank.metadata.name, "getpeerinfo", "")) - manuals = 0 + actual = 0 for peer in peerinfo: if peer["connection_type"] == "manual": - manuals += 1 + actual += 1 + expected = int(tank.metadata.annotations["init_peers"]) + print(f"Tank {tank.metadata.name} peers expected: {expected}, actual: {actual}") # Even if more edges are specified, bitcoind only allows # 8 manual outbound connections - - print("manual " + str(manuals)) - print(tank.metadata.annotations["init_peers"]) - if min(8, int(tank.metadata.annotations["init_peers"])) > manuals: + if min(8, expected) > actual: print("Network not connected") return False print("Network connected") From 288f50f3bd79e6264f833ae0370ac05f7e475230 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 6 Sep 2024 10:54:41 -0400 Subject: [PATCH 199/710] charts: add signet values and make chain selection work --- .../bitcoincore/templates/configmap.yaml | 3 +++ .../charts/bitcoincore/templates/pod.yaml | 12 +++++++----- .../charts/bitcoincore/templates/service.yaml | 8 ++++---- resources/charts/bitcoincore/values.yaml | 18 ++++++++---------- 4 files changed, 22 insertions(+), 19 deletions(-) diff --git a/resources/charts/bitcoincore/templates/configmap.yaml b/resources/charts/bitcoincore/templates/configmap.yaml index c7a791031..10985e49c 100644 --- a/resources/charts/bitcoincore/templates/configmap.yaml +++ b/resources/charts/bitcoincore/templates/configmap.yaml @@ -10,6 +10,9 @@ data: {{ template "bitcoincore.check_semver" . }} {{- .Values.baseConfig | nindent 4 }} + rpcport={{ index .Values .Values.chain "RPCPort" }} + zmqpubrawblock=tcp://0.0.0.0:{{ .Values.ZMQBlockPort }} + zmqpubrawtx=tcp://0.0.0.0:{{ .Values.ZMQTxPort }} {{- .Values.config | nindent 4 }} {{- range .Values.connect }} {{- print "connect=" . 
| nindent 4}} diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index 7135dd893..72b8f149d 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -29,21 +29,23 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: rpc - containerPort: {{ .Values.regtest.RPCPort }} + containerPort: {{ index .Values .Values.chain "RPCPort" }} protocol: TCP - name: p2p - containerPort: {{ .Values.regtest.P2PPort }} + containerPort: {{ index .Values .Values.chain "P2PPort" }} protocol: TCP - name: zmq-tx - containerPort: {{ .Values.regtest.ZMQTxPort }} + containerPort: {{ .Values.ZMQTxPort }} protocol: TCP - name: zmq-block - containerPort: {{ .Values.regtest.ZMQBlockPort }} + containerPort: {{ .Values.ZMQBlockPort }} protocol: TCP livenessProbe: {{- toYaml .Values.livenessProbe | nindent 8 }} readinessProbe: {{- toYaml .Values.readinessProbe | nindent 8 }} + tcpSocket: + port: {{ index .Values .Values.chain "RPCPort" }} resources: {{- toYaml .Values.resources | nindent 8 }} volumeMounts: @@ -65,7 +67,7 @@ spec: - name: BITCOIN_RPC_HOST value: "127.0.0.1" - name: BITCOIN_RPC_PORT - value: "{{ .Values.regtest.RPCPort }}" + value: "{{ index .Values .Values.chain "RPCPort" }}" - name: BITCOIN_RPC_USER value: user - name: BITCOIN_RPC_PASSWORD diff --git a/resources/charts/bitcoincore/templates/service.yaml b/resources/charts/bitcoincore/templates/service.yaml index 728cfb5b4..f37c384ef 100644 --- a/resources/charts/bitcoincore/templates/service.yaml +++ b/resources/charts/bitcoincore/templates/service.yaml @@ -8,19 +8,19 @@ metadata: spec: type: {{ .Values.service.type }} ports: - - port: {{ .Values.regtest.RPCPort }} + - port: {{ index .Values .Values.chain "RPCPort" }} targetPort: rpc protocol: TCP name: rpc - - port: {{ .Values.regtest.P2PPort }} + - port: {{ index .Values .Values.chain "P2PPort" }} targetPort: p2p protocol: TCP name: p2p - - 
port: {{ .Values.regtest.ZMQTxPort }} + - port: {{ .Values.ZMQTxPort }} targetPort: zmq-tx protocol: TCP name: zmq-tx - - port: {{ .Values.regtest.ZMQBlockPort }} + - port: {{ .Values.ZMQBlockPort }} targetPort: zmq-block protocol: TCP name: zmq-block diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 85dd71540..2498fee6d 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -36,8 +36,13 @@ service: regtest: RPCPort: 18443 P2PPort: 18444 - ZMQTxPort: 28333 - ZMQBlockPort: 28332 + +signet: + RPCPort: 38332 + P2PPort: 38333 + +ZMQTxPort: 28333 +ZMQBlockPort: 28332 ingress: enabled: false @@ -82,8 +87,6 @@ readinessProbe: failureThreshold: 1 periodSeconds: 1 successThreshold: 1 - tcpSocket: - port: 18443 timeoutSeconds: 1 @@ -121,17 +124,12 @@ baseConfig: | capturemessages=1 fallbackfee=0.00001000 listen=1 - - zmqpubrawblock=tcp://0.0.0.0:28332 - zmqpubrawtx=tcp://0.0.0.0:28333 - rpcuser=user rpcpassword=password - # use regtest port by default for all networks - rpcport=18443 rpcallowip=0.0.0.0/0 rpcbind=0.0.0.0 rest=1 + # rpcport and zmq endpoints are configured by chain in configmap.yaml config: "" From faf53170de85d17f99d6e9d3c446ee2f3b11b2fe Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 6 Sep 2024 10:56:24 -0400 Subject: [PATCH 200/710] bitcoin, scenarios: pull rpc port and auth from chart --- resources/charts/bitcoincore/templates/pod.yaml | 2 ++ src/warnet/bitcoin.py | 6 ++++-- src/warnet/control.py | 4 ++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index 72b8f149d..0607b31d5 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -7,6 +7,8 @@ metadata: {{- with .Values.podLabels }} {{- toYaml . 
| nindent 4 }} {{- end }} + chain: {{ .Values.chain }} + RPCPort: "{{ index .Values .Values.chain "RPCPort" }}" app: {{ include "bitcoincore.fullname" . }} {{- if .Values.collectLogs }} collect_logs: "true" diff --git a/src/warnet/bitcoin.py b/src/warnet/bitcoin.py index d466eef1d..8be750613 100644 --- a/src/warnet/bitcoin.py +++ b/src/warnet/bitcoin.py @@ -36,10 +36,12 @@ def rpc(tank: str, method: str, params: str): def _rpc(tank: str, method: str, params: str): + # bitcoin-cli should be able to read bitcoin.conf inside the container + # so no extra args like port, chain, username or password are needed if params: - cmd = f"kubectl exec {tank} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method} {' '.join(map(str, params))}" + cmd = f"kubectl exec {tank} -- bitcoin-cli {method} {' '.join(map(str, params))}" else: - cmd = f"kubectl exec {tank} -- bitcoin-cli -regtest -rpcuser='user' -rpcpassword='password' {method}" + cmd = f"kubectl exec {tank} -- bitcoin-cli {method}" return run_command(cmd) diff --git a/src/warnet/control.py b/src/warnet/control.py index 61ac1e534..43a65c6e8 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -173,9 +173,9 @@ def run(scenario_file: str, additional_args: tuple[str]): tanks = [ { "tank": tank.metadata.name, - "chain": "regtest", + "chain": tank.metadata.labels["chain"], "rpc_host": tank.status.pod_ip, - "rpc_port": 18443, + "rpc_port": int(tank.metadata.labels["RPCPort"]), "rpc_user": "user", "rpc_password": "password", "init_peers": [], From fc202da3686bb89085b989eba5fb0bae5936aa77 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 6 Sep 2024 10:56:38 -0400 Subject: [PATCH 201/710] test signet miner scenario --- resources/scenarios/signet_miner.py | 568 ++++++++++++++++++++++++++++ test/data/signet-signer.json | 52 +++ test/data/signet/node-defaults.yaml | 7 +- test/signet_test.py | 46 +++ 4 files changed, 672 insertions(+), 1 deletion(-) create mode 100644 
resources/scenarios/signet_miner.py create mode 100644 test/data/signet-signer.json create mode 100755 test/signet_test.py diff --git a/resources/scenarios/signet_miner.py b/resources/scenarios/signet_miner.py new file mode 100644 index 000000000..2a09b1f0c --- /dev/null +++ b/resources/scenarios/signet_miner.py @@ -0,0 +1,568 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +### +# WARNET SCENARIO SIGNET MINER +# +# This file is a hacked-up version of Bitcoin Core contrib/signet/miner. +# The primary difference is that instead of using bitcoin-cli for RPCs +# we use the authproxy from the test framework. +### + +# The base class exists inside the commander container +try: + from commander import Commander +except ImportError: + from resources.scenarios.commander import Commander + +import json +import logging +import math +import re +import struct +import sys +import time +import subprocess + +from test_framework.blocktools import get_witness_script, script_BIP34_coinbase_height # noqa: E402 +from test_framework.messages import CBlock, CBlockHeader, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, from_binary, from_hex, ser_string, ser_uint256, tx_from_hex # noqa: E402 +from test_framework.psbt import PSBT, PSBTMap, PSBT_GLOBAL_UNSIGNED_TX, PSBT_IN_FINAL_SCRIPTSIG, PSBT_IN_FINAL_SCRIPTWITNESS, PSBT_IN_NON_WITNESS_UTXO, PSBT_IN_SIGHASH_TYPE # noqa: E402 +from test_framework.script import CScriptOp # noqa: E402 + +logging.basicConfig( + format='%(asctime)s %(levelname)s %(message)s', + level=logging.INFO, + datefmt='%Y-%m-%d %H:%M:%S') + +SIGNET_HEADER = b"\xec\xc7\xda\xa2" +PSBT_SIGNET_BLOCK = b"\xfc\x06signetb" # proprietary PSBT global field holding the block being signed +RE_MULTIMINER = re.compile(r"^(\d+)(-(\d+))?/(\d+)$") + + +class SignetMinerScenario(Commander): + 
def set_test_params(self): + self.num_nodes = 1 + + def add_options(self, parser): + parser.add_argument( + "--tank", + dest="tank", + type=int, + help="Index of tank with wallet loaded for block signing", + ) + get_args(parser) + + + def run_test(self): + args = self.options + args.bcli = lambda method, *args, **kwargs: self.nodes[self.options.tank].__getattr__(method)(*args, **kwargs) + return do_generate(args) + + +def create_coinbase(height, value, spk): + cb = CTransaction() + cb.vin = [CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff)] + cb.vout = [CTxOut(value, spk)] + return cb + +def signet_txs(block, challenge): + # assumes signet solution has not been added yet so does not need + # to be removed + + txs = block.vtx[:] + txs[0] = CTransaction(txs[0]) + txs[0].vout[-1].scriptPubKey += CScriptOp.encode_op_pushdata(SIGNET_HEADER) + hashes = [] + for tx in txs: + tx.rehash() + hashes.append(ser_uint256(tx.sha256)) + mroot = block.get_merkle_root(hashes) + + sd = b"" + sd += struct.pack("> 24) & 0xff + return (nbits & 0x00ffffff) * 2**(8*(shift - 3)) + +def target_to_nbits(target): + tstr = "{0:x}".format(target) + if len(tstr) < 6: + tstr = ("000000"+tstr)[-6:] + if len(tstr) % 2 != 0: + tstr = "0" + tstr + if int(tstr[0],16) >= 0x8: + # avoid "negative" + tstr = "00" + tstr + fix = int(tstr[:6], 16) + sz = len(tstr)//2 + if tstr[6:] != "0"*(sz*2-6): + fix += 1 + + return int("%02x%06x" % (sz,fix), 16) + +def seconds_to_hms(s): + if s == 0: + return "0s" + neg = (s < 0) + if neg: + s = -s + out = "" + if s % 60 > 0: + out = "%ds" % (s % 60) + s //= 60 + if s % 60 > 0: + out = "%dm%s" % (s % 60, out) + s //= 60 + if s > 0: + out = "%dh%s" % (s, out) + if neg: + out = "-" + out + return out + +def next_block_delta(last_nbits, last_hash, ultimate_target, do_poisson, max_interval): + # strategy: + # 1) work out how far off our desired target we are + # 2) cap it to a factor of 4 since that's the best we can do in a single retarget 
period + # 3) use that to work out the desired average interval in this retarget period + # 4) if doing poisson, use the last hash to pick a uniformly random number in [0,1), and work out a random multiplier to vary the average by + # 5) cap the resulting interval between 1 second and 1 hour to avoid extremes + + INTERVAL = 600.0*2016/2015 # 10 minutes, adjusted for the off-by-one bug + + current_target = nbits_to_target(last_nbits) + retarget_factor = ultimate_target / current_target + retarget_factor = max(0.25, min(retarget_factor, 4.0)) + + avg_interval = INTERVAL * retarget_factor + + if do_poisson: + det_rand = int(last_hash[-8:], 16) * 2**-32 + this_interval_variance = -math.log1p(-det_rand) + else: + this_interval_variance = 1 + + this_interval = avg_interval * this_interval_variance + this_interval = max(1, min(this_interval, max_interval)) + + return this_interval + +def next_block_is_mine(last_hash, my_blocks): + det_rand = int(last_hash[-16:-8], 16) + return my_blocks[0] <= (det_rand % my_blocks[2]) < my_blocks[1] + +def do_generate(args): + if args.max_blocks is not None: + if args.ongoing: + logging.error("Cannot specify both --ongoing and --max-blocks") + return 1 + if args.max_blocks < 1: + logging.error("N must be a positive integer") + return 1 + max_blocks = args.max_blocks + elif args.ongoing: + max_blocks = None + else: + max_blocks = 1 + + if args.set_block_time is not None and max_blocks != 1: + logging.error("Cannot specify --ongoing or --max-blocks > 1 when using --set-block-time") + return 1 + if args.set_block_time is not None and args.set_block_time < 0: + args.set_block_time = time.time() + logging.info("Treating negative block time as current time (%d)" % (args.set_block_time)) + + if args.min_nbits: + if args.nbits is not None: + logging.error("Cannot specify --nbits and --min-nbits") + return 1 + args.nbits = "1e0377ae" + logging.info("Using nbits=%s" % (args.nbits)) + + if args.set_block_time is None: + if args.nbits is None or 
len(args.nbits) != 8: + logging.error("Must specify --nbits (use calibrate command to determine value)") + return 1 + + if args.multiminer is None: + my_blocks = (0,1,1) + else: + if not args.ongoing: + logging.error("Cannot specify --multiminer without --ongoing") + return 1 + m = RE_MULTIMINER.match(args.multiminer) + if m is None: + logging.error("--multiminer argument must be k/m or j-k/m") + return 1 + start,_,stop,total = m.groups() + if stop is None: + stop = start + start, stop, total = map(int, (start, stop, total)) + if stop < start or start <= 0 or total < stop or total == 0: + logging.error("Inconsistent values for --multiminer") + return 1 + my_blocks = (start-1, stop, total) + + if args.max_interval < 960: + logging.error("--max-interval must be at least 960 (16 minutes)") + return 1 + + ultimate_target = nbits_to_target(int(args.nbits,16)) + + mined_blocks = 0 + bestheader = {"hash": None} + lastheader = None + while max_blocks is None or mined_blocks < max_blocks: + + # current status? + bci = args.bcli("getblockchaininfo") + + if bestheader["hash"] != bci["bestblockhash"]: + bestheader = args.bcli("getblockheader", bci["bestblockhash"]) + + if lastheader is None: + lastheader = bestheader["hash"] + elif bestheader["hash"] != lastheader: + next_delta = next_block_delta(int(bestheader["bits"], 16), bestheader["hash"], ultimate_target, args.poisson, args.max_interval) + next_delta += bestheader["time"] - time.time() + next_is_mine = next_block_is_mine(bestheader["hash"], my_blocks) + logging.info("Received new block at height %d; next in %s (%s)", bestheader["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup")) + lastheader = bestheader["hash"] + + # when is the next block due to be mined? 
+ now = time.time() + if args.set_block_time is not None: + logging.debug("Setting start time to %d", args.set_block_time) + mine_time = args.set_block_time + action_time = now + is_mine = True + elif bestheader["height"] == 0: + time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson, args.max_interval) + time_delta *= 100 # 100 blocks + logging.info("Backdating time for first block to %d minutes ago" % (time_delta/60)) + mine_time = now - time_delta + action_time = now + is_mine = True + else: + time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson, args.max_interval) + mine_time = bestheader["time"] + time_delta + + is_mine = next_block_is_mine(bci["bestblockhash"], my_blocks) + + action_time = mine_time + if not is_mine: + action_time += args.backup_delay + + if args.standby_delay > 0: + action_time += args.standby_delay + elif mined_blocks == 0: + # for non-standby, always mine immediately on startup, + # even if the next block shouldn't be ours + action_time = now + + # don't want fractional times so round down + mine_time = int(mine_time) + action_time = int(action_time) + + # can't mine a block 2h in the future; 1h55m for some safety + action_time = max(action_time, mine_time - 6900) + + # ready to go? 
otherwise sleep and check for new block + if now < action_time: + sleep_for = min(action_time - now, 60) + if mine_time < now: + # someone else might have mined the block, + # so check frequently, so we don't end up late + # mining the next block if it's ours + sleep_for = min(20, sleep_for) + minestr = "mine" if is_mine else "backup" + logging.debug("Sleeping for %s, next block due in %s (%s)" % (seconds_to_hms(sleep_for), seconds_to_hms(mine_time - now), minestr)) + time.sleep(sleep_for) + continue + + # gbt + tmpl = args.bcli("getblocktemplate", {"rules":["signet","segwit"]}) + if tmpl["previousblockhash"] != bci["bestblockhash"]: + logging.warning("GBT based off unexpected block (%s not %s), retrying", tmpl["previousblockhash"], bci["bestblockhash"]) + time.sleep(1) + continue + + logging.debug("GBT template: %s", tmpl) + + if tmpl["mintime"] > mine_time: + logging.info("Updating block time from %d to %d", mine_time, tmpl["mintime"]) + mine_time = tmpl["mintime"] + if mine_time > now: + logging.error("GBT mintime is in the future: %d is %d seconds later than %d", mine_time, (mine_time-now), now) + return 1 + + # address for reward + reward_addr, reward_spk = get_reward_addr_spk(args, tmpl["height"]) + + # mine block + logging.debug("Mining block delta=%s start=%s mine=%s", seconds_to_hms(mine_time-bestheader["time"]), mine_time, is_mine) + mined_blocks += 1 + psbt = generate_psbt(tmpl, reward_spk, blocktime=mine_time) + psbt_signed = args.bcli("walletprocesspsbt", psbt=psbt, sign=True, sighashtype="ALL") + if not psbt_signed.get("complete",False): + logging.debug("Generated PSBT: %s" % (psbt,)) + sys.stderr.write("PSBT signing failed\n") + return 1 + block, signet_solution = do_decode_psbt(psbt_signed["psbt"]) + block = finish_block(block, signet_solution, args.grind_cmd) + + # submit block + r = args.bcli("submitblock", block.serialize().hex()) + + # report + bstr = "block" if is_mine else "backup block" + + next_delta = next_block_delta(block.nBits, 
block.hash, ultimate_target, args.poisson, args.max_interval) + next_delta += block.nTime - time.time() + next_is_mine = next_block_is_mine(block.hash, my_blocks) + + logging.debug("Block hash %s payout to %s", block.hash, reward_addr) + logging.info("Mined %s at height %d; next in %s (%s)", bstr, tmpl["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup")) + if r != "": + logging.warning("submitblock returned %s for height %d hash %s", r, tmpl["height"], block.hash) + lastheader = block.hash + +def do_calibrate(args): + if args.nbits is not None and args.seconds is not None: + sys.stderr.write("Can only specify one of --nbits or --seconds\n") + return 1 + if args.nbits is not None and len(args.nbits) != 8: + sys.stderr.write("Must specify 8 hex digits for --nbits\n") + return 1 + + TRIALS = 600 # gets variance down pretty low + TRIAL_BITS = 0x1e3ea75f # takes about 5m to do 600 trials + + header = CBlockHeader() + header.nBits = TRIAL_BITS + targ = nbits_to_target(header.nBits) + + start = time.time() + count = 0 + for i in range(TRIALS): + header.nTime = i + header.nNonce = 0 + headhex = header.serialize().hex() + cmd = args.grind_cmd.split(" ") + [headhex] + newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip() + + avg = (time.time() - start) * 1.0 / TRIALS + + if args.nbits is not None: + want_targ = nbits_to_target(int(args.nbits,16)) + want_time = avg*targ/want_targ + else: + want_time = args.seconds if args.seconds is not None else 25 + want_targ = int(targ*(avg/want_time)) + + print("nbits=%08x for %ds average mining time" % (target_to_nbits(want_targ), want_time)) + return 0 + +def bitcoin_cli(basecmd, args, **kwargs): + cmd = basecmd + ["-signet"] + args + logging.debug("Calling bitcoin-cli: %r", cmd) + out = subprocess.run(cmd, stdout=subprocess.PIPE, **kwargs, check=True).stdout + if isinstance(out, bytes): + out = out.decode('utf8') + return out.strip() + +def get_args(parser): + 
parser.add_argument("--cli", default="bitcoin-cli", type=str, help="bitcoin-cli command") + parser.add_argument("--debug", action="store_true", help="Print debugging info") + parser.add_argument("--quiet", action="store_true", help="Only print warnings/errors") + + cmds = parser.add_subparsers(help="sub-commands") + genpsbt = cmds.add_parser("genpsbt", help="Generate a block PSBT for signing") + genpsbt.set_defaults(fn=do_genpsbt) + + solvepsbt = cmds.add_parser("solvepsbt", help="Solve a signed block PSBT") + solvepsbt.set_defaults(fn=do_solvepsbt) + + generate = cmds.add_parser("generate", help="Mine blocks") + generate.set_defaults(fn=do_generate) + generate.add_argument("--ongoing", action="store_true", help="Keep mining blocks") + generate.add_argument("--max-blocks", default=None, type=int, help="Max blocks to mine (default=1)") + generate.add_argument("--set-block-time", default=None, type=int, help="Set block time (unix timestamp)") + generate.add_argument("--nbits", default=None, type=str, help="Target nBits (specify difficulty)") + generate.add_argument("--min-nbits", action="store_true", help="Target minimum nBits (use min difficulty)") + generate.add_argument("--poisson", action="store_true", help="Simulate randomised block times") + generate.add_argument("--multiminer", default=None, type=str, help="Specify which set of blocks to mine (eg: 1-40/100 for the first 40%%, 2/3 for the second 3rd)") + generate.add_argument("--backup-delay", default=300, type=int, help="Seconds to delay before mining blocks reserved for other miners (default=300)") + generate.add_argument("--standby-delay", default=0, type=int, help="Seconds to delay before mining blocks (default=0)") + generate.add_argument("--max-interval", default=1800, type=int, help="Maximum interblock interval (seconds)") + + calibrate = cmds.add_parser("calibrate", help="Calibrate difficulty") + calibrate.set_defaults(fn=do_calibrate) + calibrate.add_argument("--nbits", type=str, default=None) + 
calibrate.add_argument("--seconds", type=int, default=None) + + for sp in [genpsbt, generate]: + sp.add_argument("--address", default=None, type=str, help="Address for block reward payment") + sp.add_argument("--descriptor", default=None, type=str, help="Descriptor for block reward payment") + + for sp in [solvepsbt, generate, calibrate]: + sp.add_argument("--grind-cmd", default=None, type=str, required=(sp==calibrate), help="Command to grind a block header for proof-of-work") + + args = parser.parse_args(sys.argv[1:]) + + args.bcli = lambda *a, input=b"", **kwargs: bitcoin_cli(args.cli.split(" "), list(a), input=input, **kwargs) + + if hasattr(args, "address") and hasattr(args, "descriptor"): + if args.address is None and args.descriptor is None: + sys.stderr.write("Must specify --address or --descriptor\n") + return 1 + elif args.address is not None and args.descriptor is not None: + sys.stderr.write("Only specify one of --address or --descriptor\n") + return 1 + args.derived_addresses = {} + + if args.debug: + logging.getLogger().setLevel(logging.DEBUG) + elif args.quiet: + logging.getLogger().setLevel(logging.WARNING) + else: + logging.getLogger().setLevel(logging.INFO) + + return args + +if __name__ == "__main__": + SignetMinerScenario().main() diff --git a/test/data/signet-signer.json b/test/data/signet-signer.json new file mode 100644 index 000000000..6cf25b9fd --- /dev/null +++ b/test/data/signet-signer.json @@ -0,0 +1,52 @@ +{ + "address": + { + "address": "tb1q6vakuyw2jhzwmnxcaxryxs6c670fr9esfvrrhj", + "scriptPubKey": "0014d33b6e11ca95c4edccd8e986434358d79e919730", + "ismine": true, + "solvable": true, + "desc": "wpkh([2e239023/84h/1h/0h/0/0]03d28a77a6ea884727049c72818c312d1f7fefb4aa5e62c9211403fd6218eaa6fa)#vp55uze6", + "parent_desc": "wpkh([2e239023/84h/1h/0h]tpubDDsmgRhtuNzhYbEiUbucryCNyjshjKf4fawmVWAAwej5HhSLmXVWD9z8U8QHgaSQmYmGBfTfab6nsM4bLQkRR1qdpazc258PGtcyVJeDLbj/0/*)#r5lwvzmj", + "iswatchonly": false, + "isscript": false, + "iswitness": true, + 
"witness_version": 0, + "witness_program": "d33b6e11ca95c4edccd8e986434358d79e919730", + "pubkey": "03d28a77a6ea884727049c72818c312d1f7fefb4aa5e62c9211403fd6218eaa6fa", + "ischange": false, + "timestamp": 1725633470, + "hdkeypath": "m/84h/1h/0h/0/0", + "hdseedid": "0000000000000000000000000000000000000000", + "hdmasterfingerprint": "2e239023", + "labels": [ + "bech32" + ] + }, + "descriptors": + [ + { + "desc": "wpkh(tprv8ZgxMBicQKsPfH87iaMtrpzTkWiyFDW7SVWqfsKAhtyEBEqMV6ctPdtc5pNrb2FpSmPcDe8NrxEouUnWj1ud7LT1X1hB1XHKAgB2Z5Z4u2s/84h/1h/0h/0/*)#5j6mshps", + "timestamp": 0, + "active": true, + "internal": false, + "range": [ + 0, + 999 + ], + "next": 0, + "next_index": 0 + }, + { + "desc": "wpkh(tprv8ZgxMBicQKsPfH87iaMtrpzTkWiyFDW7SVWqfsKAhtyEBEqMV6ctPdtc5pNrb2FpSmPcDe8NrxEouUnWj1ud7LT1X1hB1XHKAgB2Z5Z4u2s/84h/1h/0h/1/*)#9xl6dz3g", + "timestamp": 0, + "active": true, + "internal": true, + "range": [ + 0, + 999 + ], + "next": 0, + "next_index": 0 + } + ] +} \ No newline at end of file diff --git a/test/data/signet/node-defaults.yaml b/test/data/signet/node-defaults.yaml index a7fac79a1..4cf7b508b 100644 --- a/test/data/signet/node-defaults.yaml +++ b/test/data/signet/node-defaults.yaml @@ -3,4 +3,9 @@ image: pullPolicy: IfNotPresent tag: "27.0" -chain: signet \ No newline at end of file +chain: signet + +config: | + debug=rpc + debug=net + signetchallenge=0014d33b6e11ca95c4edccd8e986434358d79e919730 \ No newline at end of file diff --git a/test/signet_test.py b/test/signet_test.py new file mode 100755 index 000000000..5bbafd88e --- /dev/null +++ b/test/signet_test.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +import json +import os +from pathlib import Path + +from test_base import TestBase + + +class SignetTest(TestBase): + def __init__(self): + super().__init__() + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "signet" + signer_data_path = Path(os.path.dirname(__file__)) / "data" / "signet-signer.json" + with open(signer_data_path, "r") as f: + 
self.signer_data = json.loads(f.read()) + + def run_test(self): + try: + self.setup_network() + self.check_signet_miner() + finally: + self.stop_server() + + def setup_network(self): + self.log.info("Setting up network") + self.log.info(self.warnet(f"deploy {self.network_dir}")) + self.wait_for_all_tanks_status(target="running") + self.wait_for_all_edges() + + def check_signet_miner(self): + self.warnet("bitcoin rpc miner createwallet miner") + self.warnet(f"bitcoin rpc miner importdescriptors '{json.dumps(self.signer_data['descriptors'])}'") + self.warnet(f"run resources/scenarios/signet_miner.py --tank=0 generate --min-nbits --address={self.signer_data['address']['address']}") + + def block_one(): + for tank in ["tank-0001", "tank-0002"]: + height = int(self.warnet(f"bitcoin rpc {tank} getblockcount")) + if height != 1: + return False + return True + self.wait_for_predicate(block_one) + +if __name__ == "__main__": + test = SignetTest() + test.run_test() From e5ffc8b833cde6c0cebcb580138116cde4e55ddb Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 6 Sep 2024 11:11:54 -0400 Subject: [PATCH 202/710] ruff --- ruff.toml | 1 + test/signet_test.py | 12 +++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/ruff.toml b/ruff.toml index f66c3e7d9..1e17fe2d6 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,6 +1,7 @@ extend-exclude = [ "resources/images/commander/src/test_framework", "resources/images/exporter/authproxy.py", + "resources/scenarios/signet_miner.py", "src/test_framework/*", ] line-length = 100 diff --git a/test/signet_test.py b/test/signet_test.py index 5bbafd88e..b550596fd 100755 --- a/test/signet_test.py +++ b/test/signet_test.py @@ -12,7 +12,7 @@ def __init__(self): super().__init__() self.network_dir = Path(os.path.dirname(__file__)) / "data" / "signet" signer_data_path = Path(os.path.dirname(__file__)) / "data" / "signet-signer.json" - with open(signer_data_path, "r") as f: + with open(signer_data_path) as f: self.signer_data = 
json.loads(f.read()) def run_test(self): @@ -30,8 +30,12 @@ def setup_network(self): def check_signet_miner(self): self.warnet("bitcoin rpc miner createwallet miner") - self.warnet(f"bitcoin rpc miner importdescriptors '{json.dumps(self.signer_data['descriptors'])}'") - self.warnet(f"run resources/scenarios/signet_miner.py --tank=0 generate --min-nbits --address={self.signer_data['address']['address']}") + self.warnet( + f"bitcoin rpc miner importdescriptors '{json.dumps(self.signer_data['descriptors'])}'" + ) + self.warnet( + f"run resources/scenarios/signet_miner.py --tank=0 generate --min-nbits --address={self.signer_data['address']['address']}" + ) def block_one(): for tank in ["tank-0001", "tank-0002"]: @@ -39,8 +43,10 @@ def block_one(): if height != 1: return False return True + self.wait_for_predicate(block_one) + if __name__ == "__main__": test = SignetTest() test.run_test() From 16fe25f048c237fb317bbb9f39d7f16c59251a21 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 6 Sep 2024 11:15:00 -0400 Subject: [PATCH 203/710] ci: test signet --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1d1f78e2b..7383c59a3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -62,6 +62,7 @@ jobs: - logging_test.py - rpc_test.py - services_test.py + - signet_test.py - scenarios_test.py steps: - uses: actions/checkout@v4 From e64b547e47620c4941c6177fd0e0912abec46dfe Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 17:45:32 +0200 Subject: [PATCH 204/710] add file for config and constants --- src/warnet/constants.py | 92 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 src/warnet/constants.py diff --git a/src/warnet/constants.py b/src/warnet/constants.py new file mode 100644 index 000000000..cbc34f479 --- /dev/null +++ b/src/warnet/constants.py @@ -0,0 +1,92 @@ +import os +from importlib.resources 
import files +from pathlib import Path + +# Constants used throughout the project +# Storing as constants for now but we might want a more sophisticated config management +# at some point. +SUPPORTED_TAGS = ["27.0", "26.0", "25.1", "24.2", "23.2", "22.2"] +DEFAULT_TAG = SUPPORTED_TAGS[0] +WEIGHTED_TAGS = [ + tag for index, tag in enumerate(reversed(SUPPORTED_TAGS)) for _ in range(index + 1) +] + +DEFAULT_NAMESPACE = "warnet" +HELM_COMMAND = "helm upgrade --install --create-namespace" + +# Directories and files for non-python assets, e.g., helm charts, example scenarios, default configs +SRC_DIR = files("warnet") +RESOURCES_DIR = files("resources") +NETWORK_DIR = RESOURCES_DIR.joinpath("networks") +SCENARIOS_DIR = RESOURCES_DIR.joinpath("scenarios") +CHARTS_DIR = RESOURCES_DIR.joinpath("charts") +MANIFESTS_DIR = RESOURCES_DIR.joinpath("manifests") +NETWORK_FILE = "network.yaml" +DEFAULTS_FILE = "node-defaults.yaml" +NAMESPACES_FILE = "namespaces.yaml" + +# Helm charts +BITCOIN_CHART_LOCATION = str(CHARTS_DIR.joinpath("bitcoincore")) +FORK_OBSERVER_CHART = str(CHARTS_DIR.joinpath("fork-observer")) +NAMESPACES_CHART_LOCATION = CHARTS_DIR.joinpath("namespaces") +DEFAULT_NETWORK = Path("6_node_bitcoin") +DEFAULT_NAMESPACES = Path("two_namespaces_two_users") + +# Kubeconfig related stuffs +KUBECONFIG = os.environ.get("KUBECONFIG", os.path.expanduser("~/.kube/config")) + +# TODO: all of this logging stuff should be a helm chart +LOGGING_CONFIG = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "simple": { + "format": "%(asctime)s | %(levelname)-7s | %(name)-8s | %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + }, + "detailed": { + "format": "%(asctime)s | %(levelname)-7s | [%(module)21s:%(lineno)4d] | %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + }, + }, + "handlers": { + "stdout": { + "class": "logging.StreamHandler", + "level": "DEBUG", + "formatter": "simple", + "stream": "ext://sys.stdout", + }, + "stderr": { + "class": 
"logging.StreamHandler", + "level": "WARNING", + "formatter": "simple", + "stream": "ext://sys.stderr", + }, + "file": { + "class": "logging.handlers.RotatingFileHandler", + "level": "DEBUG", + "formatter": "detailed", + "filename": "warnet.log", + "maxBytes": 16000000, + "backupCount": 3, + }, + }, + "loggers": { + "root": {"level": "DEBUG", "handlers": ["stdout", "stderr", "file"]}, + "urllib3.connectionpool": {"level": "WARNING", "propagate": 1}, + "kubernetes.client.rest": {"level": "WARNING", "propagate": 1}, + "werkzeug": {"level": "WARNING", "propagate": 1}, + }, +} + +# Helm commands for logging setup +# TODO: also lots of hardcode stuff in these helm commands, will need to fix this when moving to helm charts +LOGGING_HELM_COMMANDS = [ + "helm repo add grafana https://grafana.github.io/helm-charts", + "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", + "helm repo update", + f"helm upgrade --install --namespace warnet-logging --create-namespace --values {MANIFESTS_DIR}/loki_values.yaml loki grafana/loki --version 5.47.2", + "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", + "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", + f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {MANIFESTS_DIR}/grafana_values.yaml", +] From 8d6129305aea57c2997158c23839ccb42a5a83bc Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 17:46:07 +0200 Subject: [PATCH 205/710] use new config in main.py --- src/warnet/main.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 25fc7bff0..19c488445 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -6,7 +6,6 @@ import sys from dataclasses import dataclass from enum 
import Enum, auto -from importlib.resources import files from pathlib import Path from typing import Callable @@ -15,6 +14,14 @@ import yaml from inquirer.themes import GreenPassion +from warnet.constants import ( + DEFAULT_TAG, + DEFAULTS_FILE, + NETWORK_DIR, + NETWORK_FILE, + SRC_DIR, + SUPPORTED_TAGS, +) from warnet.k8s import get_default_namespace from warnet.process import run_command, stream_command @@ -26,9 +33,8 @@ from .image import image from .network import copy_network_defaults, copy_scenario_defaults from .status import status as status_command -from .util import DEFAULT_TAG, SUPPORTED_TAGS -QUICK_START_PATH = files("resources.scripts").joinpath("quick_start.sh") +QUICK_START_PATH = SRC_DIR.joinpath("resources", "scripts", "quick_start.sh") @click.group() @@ -592,10 +598,6 @@ def logs(pod_name: str, follow: bool): pass # cancelled by user -if __name__ == "__main__": - cli() - - def custom_graph( num_nodes: int, num_connections: int, @@ -639,17 +641,19 @@ def custom_graph( "configQueryInterval": fork_obs_query_interval, } - with open(os.path.join(datadir, "network.yaml"), "w") as f: + with open(os.path.join(datadir, NETWORK_FILE), "w") as f: yaml.dump(network_yaml_data, f, default_flow_style=False) # Generate node-defaults.yaml - default_yaml_path = files("resources.networks").joinpath("node-defaults.yaml") + default_yaml_path = NETWORK_DIR.joinpath(DEFAULTS_FILE) with open(str(default_yaml_path)) as f: defaults_yaml_content = f.read() - with open(os.path.join(datadir, "node-defaults.yaml"), "w") as f: + with open(os.path.join(datadir, DEFAULTS_FILE), "w") as f: f.write(defaults_yaml_content) - click.echo( - f"Project '{datadir}' has been created with 'network.yaml' and 'node-defaults.yaml'." 
- ) + click.echo(f"Project '{datadir}' has been created with '{NETWORK_FILE}' and '{DEFAULTS_FILE}'.") + + +if __name__ == "__main__": + cli() From 0ae986b7a7fa0046c0887fe196fff407cd4932f9 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 17:52:08 +0200 Subject: [PATCH 206/710] refactor network.py --- src/warnet/network.py | 40 ++++++++++------------------------------ 1 file changed, 10 insertions(+), 30 deletions(-) diff --git a/src/warnet/network.py b/src/warnet/network.py index bbf66de3d..513c292a1 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -1,41 +1,21 @@ import json import shutil -from importlib.resources import files from pathlib import Path from rich import print from .bitcoin import _rpc +from .constants import ( + LOGGING_HELM_COMMANDS, + NETWORK_DIR, + SCENARIOS_DIR, +) from .k8s import get_mission from .process import stream_command -WAR_MANIFESTS_FILES = files("resources.manifests") -WAR_NETWORK_FILES = files("resources.networks") -WAR_SCENARIOS_FILES = files("resources.scenarios") - -WAR_NETWORK_DIR = WAR_NETWORK_FILES.name -WAR_SCENARIOS_DIR = WAR_SCENARIOS_FILES.name - -DEFAULT_NETWORK = Path("6_node_bitcoin") -NETWORK_FILE = "network.yaml" -DEFAULTS_FILE = "node-defaults.yaml" -HELM_COMMAND = "helm upgrade --install --create-namespace" -BITCOIN_CHART_LOCATION = str(files("resources.charts").joinpath("bitcoincore")) -FORK_OBSERVER_CHART = str(files("resources.charts").joinpath("fork-observer")) - def setup_logging_helm() -> bool: - helm_commands = [ - "helm repo add grafana https://grafana.github.io/helm-charts", - "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", - "helm repo update", - f"helm upgrade --install --namespace warnet-logging --create-namespace --values {WAR_MANIFESTS_FILES}/loki_values.yaml loki grafana/loki --version 5.47.2", - "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", - "helm upgrade 
--install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", - f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {WAR_MANIFESTS_FILES}/grafana_values.yaml", - ] - - for command in helm_commands: + for command in LOGGING_HELM_COMMANDS: if not stream_command(command): print(f"Failed to run Helm command: {command}") return False @@ -67,8 +47,8 @@ def copy_network_defaults(directory: Path): """Create the project structure for a warnet project's network""" copy_defaults( directory, - WAR_NETWORK_DIR, - WAR_NETWORK_FILES.joinpath(), + NETWORK_DIR.name, + NETWORK_DIR, ["node-defaults.yaml", "__pycache__", "__init__.py"], ) @@ -77,8 +57,8 @@ def copy_scenario_defaults(directory: Path): """Create the project structure for a warnet project's scenarios""" copy_defaults( directory, - WAR_SCENARIOS_DIR, - WAR_SCENARIOS_FILES.joinpath(), + SCENARIOS_DIR.name, + SCENARIOS_DIR, ["__init__.py", "__pycache__", "commander.py"], ) From 84152f20e0ca29f08bc74b8319d499c954efa3ef Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 17:52:22 +0200 Subject: [PATCH 207/710] refactor namespaces.py --- src/warnet/namespaces.py | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/src/warnet/namespaces.py b/src/warnet/namespaces.py index b193380dd..432a09605 100644 --- a/src/warnet/namespaces.py +++ b/src/warnet/namespaces.py @@ -1,31 +1,24 @@ import shutil -from importlib.resources import files from pathlib import Path import click +from .constants import ( + DEFAULT_NAMESPACES, + DEFAULTS_FILE, + NAMESPACES_FILE, + NETWORK_DIR, +) from .process import run_command, stream_command -WARNET_NAMESPACES_DIR = files("resources").joinpath("namespaces") -NAMESPACES_DIR = Path("namespaces") -DEFAULT_NAMESPACES = Path("two_namespaces_two_users") -NAMESPACES_FILE = "namespaces.yaml" -DEFAULTS_FILE = "namespace-defaults.yaml" 
-HELM_COMMAND = "helm upgrade --install" -BITCOIN_CHART_LOCATION = Path(str(files("resources.charts").joinpath("namespaces"))) - def copy_namespaces_defaults(directory: Path): """Create the project structure for a warnet project""" - (directory / NAMESPACES_DIR / DEFAULT_NAMESPACES).mkdir(parents=True, exist_ok=True) - target_namespaces_defaults = directory / NAMESPACES_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE - target_namespaces_example = directory / NAMESPACES_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE - shutil.copy2( - WARNET_NAMESPACES_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE, target_namespaces_defaults - ) - shutil.copy2( - WARNET_NAMESPACES_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE, target_namespaces_example - ) + (directory / NETWORK_DIR / DEFAULT_NAMESPACES).mkdir(parents=True, exist_ok=True) + target_namespaces_defaults = directory / NETWORK_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE + target_namespaces_example = directory / NETWORK_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE + shutil.copy2(NETWORK_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE, target_namespaces_defaults) + shutil.copy2(NETWORK_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE, target_namespaces_example) @click.group(name="namespaces") From 38d7aacbe27af091884e241a44157c7a40ef441a Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 17:52:39 +0200 Subject: [PATCH 208/710] refactor k8s.py --- src/warnet/k8s.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/warnet/k8s.py b/src/warnet/k8s.py index 421ed77cd..0cb223072 100644 --- a/src/warnet/k8s.py +++ b/src/warnet/k8s.py @@ -7,18 +7,17 @@ from kubernetes.client.models import CoreV1Event, V1PodList from kubernetes.dynamic import DynamicClient +from .constants import DEFAULT_NAMESPACE, KUBECONFIG from .process import run_command, stream_command -DEFAULT_NAMESPACE = "warnet" - def get_static_client() -> CoreV1Event: - config.load_kube_config() + config.load_kube_config(config_file=KUBECONFIG) return client.CoreV1Api() 
def get_dynamic_client() -> DynamicClient: - config.load_kube_config() + config.load_kube_config(config_file=KUBECONFIG) return DynamicClient(client.ApiClient()) From 12940521dd131529676c8c6f2e4744af2c64ad24 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 17:52:54 +0200 Subject: [PATCH 209/710] refactor util.py --- src/warnet/util.py | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/src/warnet/util.py b/src/warnet/util.py index 314a39eb8..90cc7b91d 100644 --- a/src/warnet/util.py +++ b/src/warnet/util.py @@ -1,18 +1,5 @@ -import logging -from importlib.resources import files - -logger = logging.getLogger("utils") - -SUPPORTED_TAGS = ["27.0", "26.0", "25.1", "24.2", "23.2", "22.2"] -DEFAULT_TAG = SUPPORTED_TAGS[0] -WEIGHTED_TAGS = [ - tag for index, tag in enumerate(reversed(SUPPORTED_TAGS)) for _ in range(index + 1) -] -SRC_DIR = files("warnet") - - def create_cycle_graph(n: int, version: str, bitcoin_conf: str | None, random_version: bool): - raise Exception("Not Implemented") + raise NotImplementedError("create_cycle_graph function is not implemented") def parse_bitcoin_conf(file_content): From 2db74c360084330910eb15f14504724b090a09f1 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 18:05:33 +0200 Subject: [PATCH 210/710] refactor admin.py also remove create cmd, we only need init and create will be used for creating networks --- src/warnet/admin.py | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/src/warnet/admin.py b/src/warnet/admin.py index 89bd6252a..f194e16bd 100644 --- a/src/warnet/admin.py +++ b/src/warnet/admin.py @@ -4,6 +4,7 @@ import click from rich import print as richprint +from .constants import NETWORK_DIR from .namespaces import copy_namespaces_defaults, namespaces from .network import copy_network_defaults @@ -17,22 +18,6 @@ def admin(): admin.add_command(namespaces) -@admin.command() -@click.argument("directory", type=Path) -def create(directory): - 
"""Create a new warnet project in the specified directory""" - if os.path.exists(directory): - richprint(f"[red]Error: Directory {directory} already exists[/red]") - return - - copy_network_defaults(directory) - copy_namespaces_defaults(directory) - richprint( - f"[green]Copied network and namespace example files to {directory / 'networks'}[/green]" - ) - richprint(f"[green]Created warnet project structure in {directory}[/green]") - - @admin.command() def init(): """Initialize a warnet project in the current directory""" @@ -45,6 +30,6 @@ def init(): copy_network_defaults(Path(current_dir)) copy_namespaces_defaults(Path(current_dir)) richprint( - f"[green]Copied network and namespace example files to {Path(current_dir) / 'networks'}[/green]" + f"[green]Copied network and namespace example files to {Path(current_dir) / NETWORK_DIR.name}[/green]" ) richprint(f"[green]Created warnet project structure in {current_dir}[/green]") From 96d9eee5df8939b38210d713ba4b9f874440290c Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 4 Sep 2024 18:12:33 +0200 Subject: [PATCH 211/710] refactor deploy.py --- src/warnet/deploy.py | 39 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 9087c6a1a..b2bf0f92c 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -4,32 +4,17 @@ import click import yaml -from .k8s import get_default_namespace -from .namespaces import ( - BITCOIN_CHART_LOCATION as NAMESPACES_CHART_LOCATION, -) -from .namespaces import ( - DEFAULTS_FILE as NAMESPACES_DEFAULTS_FILE, -) -from .namespaces import ( - NAMESPACES_FILE, -) -from .network import ( - BITCOIN_CHART_LOCATION as NETWORK_CHART_LOCATION, -) -from .network import ( - DEFAULTS_FILE as NETWORK_DEFAULTS_FILE, -) - -# Import necessary functions and variables from network.py and namespaces.py -from .network import ( +from .constants import ( + BITCOIN_CHART_LOCATION, + DEFAULTS_FILE, 
FORK_OBSERVER_CHART, + HELM_COMMAND, + NAMESPACES_FILE, NETWORK_FILE, ) +from .k8s import get_default_namespace from .process import stream_command -HELM_COMMAND = "helm upgrade --install --create-namespace" - def validate_directory(ctx, param, value): directory = Path(value) @@ -82,7 +67,6 @@ def deploy_fork_observer(directory: Path, debug: bool): override_string = "" # Add an entry for each node in the graph - # TODO: should this be moved into a chart, and only have substituted name and rpc_host values for i, node in enumerate(network_file["nodes"]): node_name = node.get("name") node_config = f""" @@ -97,7 +81,6 @@ def deploy_fork_observer(directory: Path, debug: bool): """ override_string += node_config - # End loop # Create yaml string using multi-line string format override_string = override_string.strip() @@ -119,7 +102,7 @@ def deploy_fork_observer(directory: Path, debug: bool): def deploy_network(directory: Path, debug: bool = False): network_file_path = directory / NETWORK_FILE - defaults_file_path = directory / NETWORK_DEFAULTS_FILE + defaults_file_path = directory / DEFAULTS_FILE with network_file_path.open() as f: network_file = yaml.safe_load(f) @@ -133,7 +116,7 @@ def deploy_network(directory: Path, debug: bool = False): node_name = node.get("name") node_config_override = {k: v for k, v in node.items() if k != "name"} - cmd = f"{HELM_COMMAND} {node_name} {NETWORK_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}" + cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}" if debug: cmd += " --debug" @@ -158,7 +141,7 @@ def deploy_network(directory: Path, debug: bool = False): def deploy_namespaces(directory: Path): namespaces_file_path = directory / NAMESPACES_FILE - defaults_file_path = directory / NAMESPACES_DEFAULTS_FILE + defaults_file_path = directory / DEFAULTS_FILE with namespaces_file_path.open() as f: namespaces_file = yaml.safe_load(f) @@ -178,7 +161,9 @@ def 
deploy_namespaces(directory: Path): namespace_name = namespace.get("name") namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} - cmd = f"{HELM_COMMAND} {namespace_name} {NAMESPACES_CHART_LOCATION} -f {defaults_file_path}" + cmd = ( + f"{HELM_COMMAND} {namespace_name} {BITCOIN_CHART_LOCATION} -f {defaults_file_path}" + ) if namespace_config_override: with tempfile.NamedTemporaryFile( From 242620bcc462b6f61fd7fa9049137ccb8603635f Mon Sep 17 00:00:00 2001 From: mplsgrant <58152638+mplsgrant@users.noreply.github.com> Date: Sat, 7 Sep 2024 04:08:11 -0500 Subject: [PATCH 212/710] fixup namespace directory constants (#544) --- src/warnet/constants.py | 2 ++ src/warnet/deploy.py | 8 ++++---- src/warnet/namespaces.py | 20 +++++++++++++------- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/warnet/constants.py b/src/warnet/constants.py index cbc34f479..aa53b3484 100644 --- a/src/warnet/constants.py +++ b/src/warnet/constants.py @@ -18,12 +18,14 @@ SRC_DIR = files("warnet") RESOURCES_DIR = files("resources") NETWORK_DIR = RESOURCES_DIR.joinpath("networks") +NAMESPACES_DIR = RESOURCES_DIR.joinpath("namespaces") SCENARIOS_DIR = RESOURCES_DIR.joinpath("scenarios") CHARTS_DIR = RESOURCES_DIR.joinpath("charts") MANIFESTS_DIR = RESOURCES_DIR.joinpath("manifests") NETWORK_FILE = "network.yaml" DEFAULTS_FILE = "node-defaults.yaml" NAMESPACES_FILE = "namespaces.yaml" +DEFAULTS_NAMESPACE_FILE = "namespace-defaults.yaml" # Helm charts BITCOIN_CHART_LOCATION = str(CHARTS_DIR.joinpath("bitcoincore")) diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index b2bf0f92c..5417da5ae 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -7,8 +7,10 @@ from .constants import ( BITCOIN_CHART_LOCATION, DEFAULTS_FILE, + DEFAULTS_NAMESPACE_FILE, FORK_OBSERVER_CHART, HELM_COMMAND, + NAMESPACES_CHART_LOCATION, NAMESPACES_FILE, NETWORK_FILE, ) @@ -141,7 +143,7 @@ def deploy_network(directory: Path, debug: bool = False): def 
deploy_namespaces(directory: Path): namespaces_file_path = directory / NAMESPACES_FILE - defaults_file_path = directory / DEFAULTS_FILE + defaults_file_path = directory / DEFAULTS_NAMESPACE_FILE with namespaces_file_path.open() as f: namespaces_file = yaml.safe_load(f) @@ -161,9 +163,7 @@ def deploy_namespaces(directory: Path): namespace_name = namespace.get("name") namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} - cmd = ( - f"{HELM_COMMAND} {namespace_name} {BITCOIN_CHART_LOCATION} -f {defaults_file_path}" - ) + cmd = f"{HELM_COMMAND} {namespace_name} {NAMESPACES_CHART_LOCATION} -f {defaults_file_path}" if namespace_config_override: with tempfile.NamedTemporaryFile( diff --git a/src/warnet/namespaces.py b/src/warnet/namespaces.py index 432a09605..45bcb7af5 100644 --- a/src/warnet/namespaces.py +++ b/src/warnet/namespaces.py @@ -5,20 +5,26 @@ from .constants import ( DEFAULT_NAMESPACES, - DEFAULTS_FILE, + DEFAULTS_NAMESPACE_FILE, + NAMESPACES_DIR, NAMESPACES_FILE, - NETWORK_DIR, ) from .process import run_command, stream_command def copy_namespaces_defaults(directory: Path): """Create the project structure for a warnet project""" - (directory / NETWORK_DIR / DEFAULT_NAMESPACES).mkdir(parents=True, exist_ok=True) - target_namespaces_defaults = directory / NETWORK_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE - target_namespaces_example = directory / NETWORK_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE - shutil.copy2(NETWORK_DIR / DEFAULT_NAMESPACES / DEFAULTS_FILE, target_namespaces_defaults) - shutil.copy2(NETWORK_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE, target_namespaces_example) + (directory / NAMESPACES_DIR.name / DEFAULT_NAMESPACES).mkdir(parents=True, exist_ok=True) + target_namespaces_defaults = ( + directory / NAMESPACES_DIR.name / DEFAULT_NAMESPACES / DEFAULTS_NAMESPACE_FILE + ) + target_namespaces_example = ( + directory / NAMESPACES_DIR.name / DEFAULT_NAMESPACES / NAMESPACES_FILE + ) + shutil.copy2( + NAMESPACES_DIR / 
DEFAULT_NAMESPACES / DEFAULTS_NAMESPACE_FILE, target_namespaces_defaults + ) + shutil.copy2(NAMESPACES_DIR / DEFAULT_NAMESPACES / NAMESPACES_FILE, target_namespaces_example) @click.group(name="namespaces") From c325aec47f795ad4d9654a226669511b787fbb71 Mon Sep 17 00:00:00 2001 From: Will Clark Date: Sun, 8 Sep 2024 18:52:14 +0100 Subject: [PATCH 213/710] Finish refactor of main. Align setup, new & init commands (#536) * refactor setup and create commands * tidy up main * align init behaviour with create * update docs * rename create -> new * move auth into users.py --- docs/quickstart.md | 16 +- docs/warnet.md | 29 +- src/warnet/control.py | 49 ++++ src/warnet/main.py | 650 +----------------------------------------- src/warnet/project.py | 510 +++++++++++++++++++++++++++++++++ src/warnet/users.py | 70 +++++ 6 files changed, 666 insertions(+), 658 deletions(-) create mode 100644 src/warnet/project.py create mode 100644 src/warnet/users.py diff --git a/docs/quickstart.md b/docs/quickstart.md index 0b6c1454f..dcf3ca41c 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -28,13 +28,23 @@ pip install -e . ## Running -To get started simply run: +To get started first check you have all the necessary requirements: ```bash -warnet quickstart +warnet setup ``` -This will check you have the required dependencies and guide you through setting up and deploying your first network. +Then create your first network: + +```bash +# Create a new network in the current directory +warnet init + +# Or in a directory of choice +warnet new +``` + +Follow the guide to configure network variables. 
## fork-observer diff --git a/docs/warnet.md b/docs/warnet.md index fc485b26c..105da2103 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -21,14 +21,6 @@ options: |-------------|--------|------------|-----------| | kube_config | String | yes | | -### `warnet create` -Create a new warnet project in the specified directory - -options: -| name | type | required | default | -|-----------|--------|------------|-----------| -| directory | Path | yes | | - ### `warnet deploy` Deploy a warnet with topology loaded from \ @@ -36,6 +28,7 @@ options: | name | type | required | default | |-----------|--------|------------|-----------| | directory | Path | yes | | +| debug | Bool | | False | ### `warnet down` Bring down a running warnet @@ -45,9 +38,13 @@ Bring down a running warnet Initialize a warnet project in the current directory -### `warnet quickstart` -Setup warnet +### `warnet new` +Create a new warnet project in the specified directory +options: +| name | type | required | default | +|-----------|--------|------------|-----------| +| directory | Path | yes | | ### `warnet run` Run a scenario from a file @@ -58,6 +55,10 @@ options: | scenario_file | Path | yes | | | additional_args | String | | | +### `warnet setup` +Setup warnet + + ### `warnet status` Display the unified status of the Warnet network and active scenarios @@ -72,14 +73,6 @@ options: ## Admin -### `warnet admin create` -Create a new warnet project in the specified directory - -options: -| name | type | required | default | -|-----------|--------|------------|-----------| -| directory | Func | yes | | - ### `warnet admin init` Initialize a warnet project in the current directory diff --git a/src/warnet/control.py b/src/warnet/control.py index 43a65c6e8..fa4679b77 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -4,7 +4,9 @@ import time import click +import inquirer import yaml +from inquirer.themes import GreenPassion from rich import print from rich.console import Console from 
rich.prompt import Confirm, Prompt @@ -248,3 +250,50 @@ def run(scenario_file: str, additional_args: tuple[str]): print(f"Failed to start scenario: {scenario_name}") os.unlink(temp_file_path) + + +@click.command() +@click.argument("pod_name", type=str, default="") +@click.option("--follow", "-f", is_flag=True, default=False, help="Follow logs") +def logs(pod_name: str, follow: bool): + """Show the logs of a pod""" + follow_flag = "--follow" if follow else "" + namespace = get_default_namespace() + + if pod_name: + try: + command = f"kubectl logs pod/{pod_name} -n {namespace} {follow_flag}" + stream_command(command) + return + except Exception as e: + print(f"Could not find the pod {pod_name}: {e}") + + try: + pods = run_command(f"kubectl get pods -n {namespace} -o json") + pods = json.loads(pods) + pod_list = [item["metadata"]["name"] for item in pods["items"]] + except Exception as e: + print(f"Could not fetch any pods in namespace {namespace}: {e}") + return + + if not pod_list: + print(f"Could not fetch any pods in namespace {namespace}") + return + + q = [ + inquirer.List( + name="pod", + message="Please choose a pod", + choices=pod_list, + ) + ] + selected = inquirer.prompt(q, theme=GreenPassion()) + if selected: + pod_name = selected["pod"] + try: + command = f"kubectl logs pod/{pod_name} -n {namespace} {follow_flag}" + stream_command(command) + except Exception as e: + print(f"Please consider waiting for the pod to become available. 
Encountered: {e}") + else: + pass # cancelled by user diff --git a/src/warnet/main.py b/src/warnet/main.py index 19c488445..390f699be 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -1,40 +1,14 @@ -import json -import os -import platform -import random -import subprocess -import sys -from dataclasses import dataclass -from enum import Enum, auto -from pathlib import Path -from typing import Callable - import click -import inquirer -import yaml -from inquirer.themes import GreenPassion - -from warnet.constants import ( - DEFAULT_TAG, - DEFAULTS_FILE, - NETWORK_DIR, - NETWORK_FILE, - SRC_DIR, - SUPPORTED_TAGS, -) -from warnet.k8s import get_default_namespace -from warnet.process import run_command, stream_command from .admin import admin from .bitcoin import bitcoin from .control import down, run, stop -from .deploy import deploy as deploy_command +from .deploy import deploy from .graph import graph from .image import image -from .network import copy_network_defaults, copy_scenario_defaults -from .status import status as status_command - -QUICK_START_PATH = SRC_DIR.joinpath("resources", "scripts", "quick_start.sh") +from .project import init, new, setup +from .status import status +from .users import auth @click.group() @@ -42,617 +16,19 @@ def cli(): pass +cli.add_command(admin) +cli.add_command(auth) cli.add_command(bitcoin) -cli.add_command(deploy_command) +cli.add_command(new) +cli.add_command(deploy) +cli.add_command(down) cli.add_command(graph) cli.add_command(image) -cli.add_command(status_command) -cli.add_command(admin) -cli.add_command(stop) -cli.add_command(down) +cli.add_command(init) cli.add_command(run) - - -@cli.command() -def quickstart(): - """Setup warnet""" - - class ToolStatus(Enum): - Satisfied = auto() - Unsatisfied = auto() - - @dataclass - class ToolInfo: - tool_name: str - is_installed_func: Callable[[], tuple[bool, str]] - install_instruction: str - install_url: str - - __slots__ = ["tool_name", "is_installed_func", 
"install_instruction", "install_url"] - - def is_minikube_installed() -> tuple[bool, str]: - try: - version_result = subprocess.run( - ["minikube", "version", "--short"], - capture_output=True, - text=True, - ) - location_result = subprocess.run( - ["which", "minikube"], - capture_output=True, - text=True, - ) - if version_result.returncode == 0 and location_result.returncode == 0: - return True, location_result.stdout.strip() - else: - return False, "" - except FileNotFoundError as err: - return False, str(err) - - def is_minikube_version_valid_on_darwin() -> tuple[bool, str]: - try: - version_result = subprocess.run( - ["minikube", "version", "--short"], - capture_output=True, - text=True, - ) - location_result = subprocess.run( - ["which", "minikube"], - capture_output=True, - text=True, - ) - if version_result.returncode == 0 and location_result.returncode == 0: - version = version_result.stdout.strip().split()[-1] # Get the version number - return version not in [ - "v1.32.0", - "1.33.0", - ], f"{location_result.stdout.strip()} ({version})" - else: - return False, "" - except FileNotFoundError as err: - return False, str(err) - - def is_platform_darwin() -> bool: - return platform.system() == "Darwin" - - def is_docker_installed() -> tuple[bool, str]: - try: - version_result = subprocess.run(["docker", "--version"], capture_output=True, text=True) - location_result = subprocess.run( - ["which", "docker"], - capture_output=True, - text=True, - ) - if version_result.returncode == 0 and location_result.returncode == 0: - return True, location_result.stdout.strip() - else: - return False, "" - except FileNotFoundError as err: - return False, str(err) - - def is_docker_desktop_running() -> tuple[bool, str]: - try: - version_result = subprocess.run(["docker", "info"], capture_output=True, text=True) - location_result = subprocess.run( - ["which", "docker"], - capture_output=True, - text=True, - ) - if version_result.returncode == 0 and location_result.returncode == 
0: - return "Docker Desktop" in version_result.stdout, location_result.stdout.strip() - else: - return False, "" - except FileNotFoundError as err: - return False, str(err) - - def is_kubectl_installed() -> tuple[bool, str]: - try: - version_result = subprocess.run( - ["kubectl", "version", "--client"], - capture_output=True, - text=True, - ) - location_result = subprocess.run( - ["which", "kubectl"], - capture_output=True, - text=True, - ) - if version_result.returncode == 0 and location_result.returncode == 0: - return True, location_result.stdout.strip() - else: - return False, "" - except FileNotFoundError as err: - return False, str(err) - - def is_helm_installed() -> tuple[bool, str]: - try: - version_result = subprocess.run(["helm", "version"], capture_output=True, text=True) - location_result = subprocess.run( - ["which", "helm"], - capture_output=True, - text=True, - ) - if version_result.returncode == 0 and location_result.returncode == 0: - return version_result.returncode == 0, location_result.stdout.strip() - else: - return False, "" - except FileNotFoundError as err: - return False, str(err) - - def check_installation(tool_info: ToolInfo) -> ToolStatus: - has_good_version, location = tool_info.is_installed_func() - if not has_good_version: - instruction_label = click.style(" Instruction: ", fg="yellow", bold=True) - instruction_text = click.style(f"{tool_info.install_instruction}", fg="yellow") - url_label = click.style(" URL: ", fg="yellow", bold=True) - url_text = click.style(f"{tool_info.install_url}", fg="yellow") - - click.secho(f" 💥 {tool_info.tool_name} is not installed. 
{location}", fg="yellow") - click.echo(instruction_label + instruction_text) - click.echo(url_label + url_text) - return ToolStatus.Unsatisfied - else: - click.secho(f" ⭐️ {tool_info.tool_name} is satisfied: {location}", bold=False) - return ToolStatus.Satisfied - - docker_info = ToolInfo( - tool_name="Docker", - is_installed_func=is_docker_installed, - install_instruction="Install Docker from Docker's official site.", - install_url="https://docs.docker.com/engine/install/", - ) - docker_desktop_info = ToolInfo( - tool_name="Docker Desktop", - is_installed_func=is_docker_desktop_running, - install_instruction="Make sure Docker Desktop is installed and running.", - install_url="https://docs.docker.com/desktop/", - ) - kubectl_info = ToolInfo( - tool_name="Kubectl", - is_installed_func=is_kubectl_installed, - install_instruction="Install kubectl.", - install_url="https://kubernetes.io/docs/tasks/tools/install-kubectl/", - ) - helm_info = ToolInfo( - tool_name="Helm", - is_installed_func=is_helm_installed, - install_instruction="Install Helm from Helm's official site.", - install_url="https://helm.sh/docs/intro/install/", - ) - minikube_info = ToolInfo( - tool_name="Minikube", - is_installed_func=is_minikube_installed, - install_instruction="Install Minikube from the official Minikube site.", - install_url="https://minikube.sigs.k8s.io/docs/start/", - ) - minikube_version_info = ToolInfo( - tool_name="Minikube's version", - is_installed_func=is_minikube_version_valid_on_darwin, - install_instruction="Install the latest Minikube from the official Minikube site.", - install_url="https://minikube.sigs.k8s.io/docs/start/", - ) - - print("") - print(" ╭───────────────────────────────────╮") - print(" │ Welcome to the Warnet Quickstart │") - print(" ╰───────────────────────────────────╯") - print("") - print(" Let's find out if your system has 
what it takes to run Warnet...") - print("") - - try: - questions = [ - inquirer.List( - "platform", - message=click.style("Which platform would you like to use?", fg="blue", bold=True), - choices=["Minikube", "Docker Desktop"], - ) - ] - answers = inquirer.prompt(questions) - - check_results: list[ToolStatus] = [] - if answers: - if answers["platform"] == "Docker Desktop": - check_results.append(check_installation(docker_info)) - check_results.append(check_installation(docker_desktop_info)) - check_results.append(check_installation(kubectl_info)) - check_results.append(check_installation(helm_info)) - elif answers["platform"] == "Minikube": - check_results.append(check_installation(docker_info)) - check_results.append(check_installation(minikube_info)) - if is_platform_darwin(): - check_results.append(check_installation(minikube_version_info)) - check_results.append(check_installation(kubectl_info)) - check_results.append(check_installation(helm_info)) - else: - click.secho("Please re-run Quickstart.", fg="yellow") - sys.exit(1) - - if ToolStatus.Unsatisfied in check_results: - click.secho( - "Please fix the installation issues above and try quickstart again.", fg="yellow" - ) - sys.exit(1) - else: - click.secho(" ⭐️ Warnet prerequisites look good.\n") - - # New project setup - questions = [ - inquirer.Confirm( - "create_project", - message=click.style("Do you want to create a new project?", fg="blue", bold=True), - default=True, - ), - ] - answers = inquirer.prompt(questions) - if answers is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False - if not answers["create_project"]: - click.secho("\nSetup completed successfully!", fg="green", bold=True) - return True - - # Custom project setup - questions = [ - inquirer.Path( - "project_path", - message=click.style("Enter the project directory path", fg="blue", bold=True), - path_type=inquirer.Path.DIRECTORY, - exists=False, - ), - inquirer.Confirm( - "custom_network", - message=click.style( - 
"Do you want to create a custom network?", fg="blue", bold=True - ), - default=True, - ), - ] - proj_answers = inquirer.prompt(questions) - if proj_answers is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False - if not proj_answers["custom_network"]: - project_path = Path(os.path.expanduser(proj_answers["project_path"])) - create_warnet_project(project_path) - click.secho("\nSetup completed successfully!", fg="green", bold=True) - click.echo( - "\nRun the following command to deploy this network using the default demo network:" - ) - click.echo(f"warcli deploy {proj_answers['project_path']}/networks/6_node_bitcoin") - return True - answers.update(proj_answers) - - # Custom network configuration - questions = [ - inquirer.Text( - "network_name", - message=click.style("Enter your network name", fg="blue", bold=True), - validate=lambda _, x: len(x) > 0, - ), - inquirer.List( - "nodes", - message=click.style("How many nodes would you like?", fg="blue", bold=True), - choices=["8", "12", "20", "50", "other"], - default="12", - ), - inquirer.List( - "connections", - message=click.style( - "How many connections would you like each node to have?", - fg="blue", - bold=True, - ), - choices=["0", "1", "2", "8", "12", "other"], - default="8", - ), - inquirer.List( - "version", - message=click.style( - "Which version would you like nodes to run by default?", fg="blue", bold=True - ), - choices=SUPPORTED_TAGS, - default=DEFAULT_TAG, - ), - ] - - net_answers = inquirer.prompt(questions) - if net_answers is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False - - if net_answers["nodes"] == "other": - custom_nodes = inquirer.prompt( - [ - inquirer.Text( - "nodes", - message=click.style("Enter the number of nodes", fg="blue", bold=True), - validate=lambda _, x: int(x) > 0, - ) - ] - ) - if custom_nodes is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False - net_answers["nodes"] = custom_nodes["nodes"] - - if 
net_answers["connections"] == "other": - custom_connections = inquirer.prompt( - [ - inquirer.Text( - "connections", - message=click.style( - "Enter the number of connections", fg="blue", bold=True - ), - validate=lambda _, x: int(x) >= 0, - ) - ] - ) - if custom_connections is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False - net_answers["connections"] = custom_connections["connections"] - answers.update(net_answers) - fork_observer = click.prompt( - click.style( - "\nWould you like to enable fork-observer on the network?", fg="blue", bold=True - ), - type=bool, - default=True, - ) - fork_observer_query_interval = 20 - if fork_observer: - fork_observer_query_interval = click.prompt( - click.style( - "\nHow often would you like fork-observer to query node status (seconds)?", - fg="blue", - bold=True, - ), - type=int, - default=20, - ) - - click.secho("\nCreating project structure...", fg="yellow", bold=True) - project_path = Path(os.path.expanduser(proj_answers["project_path"])) - create_warnet_project(project_path) - - click.secho("\nGenerating custom network...", fg="yellow", bold=True) - custom_network_path = project_path / "networks" / answers["network_name"] - custom_graph( - int(answers["nodes"]), - int(answers["connections"]), - answers["version"], - custom_network_path, - fork_observer, - fork_observer_query_interval, - ) - click.secho("\nSetup completed successfully!", fg="green", bold=True) - - click.echo( - f"\nEdit the network files found in {custom_network_path} before deployment if you want to customise the network." 
- ) - if fork_observer: - click.echo( - "If you enabled fork-observer you must forward the port from the cluster to your local machine:\n" - "`kubectl port-forward fork-observer 2323`\n" - "fork-observer will then be available at web address: localhost:2323" - ) - - click.echo("\nWhen you're ready, run the following command to deploy this network:") - click.echo(f"warnet deploy {custom_network_path}") - - except Exception as e: - click.echo(f"{e}\n\n") - click.secho(f"An error occurred while running the quick start script:\n\n{e}\n\n", fg="red") - click.secho( - "Please report the above context to https://github.com/bitcoin-dev-project/warnet/issues", - fg="yellow", - ) - return False - - -def create_warnet_project(directory: Path, check_empty: bool = False): - """Common function to create a warnet project""" - if check_empty and any(directory.iterdir()): - click.secho("Warning: Directory is not empty", fg="yellow") - if not click.confirm("Do you want to continue?", default=True): - return - - try: - copy_network_defaults(directory) - copy_scenario_defaults(directory) - click.echo(f"Copied network example files to {directory}/networks") - click.echo(f"Created warnet project structure in {directory}") - except Exception as e: - click.secho(f"Error creating project: {e}", fg="red") - raise e - - -@cli.command() -@click.argument( - "directory", type=click.Path(file_okay=False, dir_okay=True, resolve_path=True, path_type=Path) -) -def create(directory: Path): - """Create a new warnet project in the specified directory""" - if directory.exists(): - click.secho(f"Error: Directory {directory} already exists", fg="red") - return - create_warnet_project(directory) - - -@cli.command() -def init(): - """Initialize a warnet project in the current directory""" - current_dir = Path.cwd() - create_warnet_project(current_dir, check_empty=True) - - -@cli.command() -@click.argument("kube_config", type=str) -def auth(kube_config: str) -> None: - """ - 
Authenticate with a warnet cluster using a kube config file - """ - try: - current_kubeconfig = os.environ.get("KUBECONFIG", os.path.expanduser("~/.kube/config")) - combined_kubeconfig = ( - f"{current_kubeconfig}:{kube_config}" if current_kubeconfig else kube_config - ) - os.environ["KUBECONFIG"] = combined_kubeconfig - with open(kube_config) as file: - content = yaml.safe_load(file) - user = content["users"][0] - user_name = user["name"] - user_token = user["user"]["token"] - current_context = content["current-context"] - flatten_cmd = "kubectl config view --flatten" - result_flatten = subprocess.run( - flatten_cmd, shell=True, check=True, capture_output=True, text=True - ) - except subprocess.CalledProcessError as e: - click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") - click.secho(e.stderr, fg="red") - sys.exit(1) - - if result_flatten.returncode == 0: - with open(current_kubeconfig, "w") as file: - file.write(result_flatten.stdout) - click.secho(f"Authorization file written to: {current_kubeconfig}", fg="green") - else: - click.secho("Could not create authorization file", fg="red") - click.secho(result_flatten.stderr, fg="red") - sys.exit(result_flatten.returncode) - - try: - update_cmd = f"kubectl config set-credentials {user_name} --token {user_token}" - result_update = subprocess.run( - update_cmd, shell=True, check=True, capture_output=True, text=True - ) - if result_update.returncode != 0: - click.secho("Could not update authorization file", fg="red") - click.secho(result_flatten.stderr, fg="red") - sys.exit(result_flatten.returncode) - except subprocess.CalledProcessError as e: - click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") - click.secho(e.stderr, fg="red") - sys.exit(1) - - with open(current_kubeconfig) as file: - contents = yaml.safe_load(file) - - with open(current_kubeconfig, "w") as file: - contents["current-context"] = current_context - yaml.safe_dump(contents, file) - - 
with open(current_kubeconfig) as file: - contents = yaml.safe_load(file) - click.secho( - f"\nwarnet's current context is now set to: {contents['current-context']}", fg="green" - ) - - -@cli.command() -@click.argument("pod_name", type=str, default="") -@click.option("--follow", "-f", is_flag=True, default=False, help="Follow logs") -def logs(pod_name: str, follow: bool): - """Show the logs of a pod""" - follow_flag = "--follow" if follow else "" - namespace = get_default_namespace() - - if pod_name: - try: - command = f"kubectl logs pod/{pod_name} -n {namespace} {follow_flag}" - stream_command(command) - return - except Exception as e: - print(f"Could not find the pod {pod_name}: {e}") - - try: - pods = run_command(f"kubectl get pods -n {namespace} -o json") - pods = json.loads(pods) - pod_list = [item["metadata"]["name"] for item in pods["items"]] - except Exception as e: - print(f"Could not fetch any pods in namespace {namespace}: {e}") - return - - if not pod_list: - print(f"Could not fetch any pods in namespace {namespace}") - return - - q = [ - inquirer.List( - name="pod", - message="Please choose a pod", - choices=pod_list, - ) - ] - selected = inquirer.prompt(q, theme=GreenPassion()) - if selected: - pod_name = selected["pod"] - try: - command = f"kubectl logs pod/{pod_name} -n {namespace} {follow_flag}" - stream_command(command) - except Exception as e: - print(f"Please consider waiting for the pod to become available. 
Encountered: {e}") - else: - pass # cancelled by user - - -def custom_graph( - num_nodes: int, - num_connections: int, - version: str, - datadir: Path, - fork_observer: bool, - fork_obs_query_interval: int, -): - datadir.mkdir(parents=False, exist_ok=False) - # Generate network.yaml - nodes = [] - connections = set() - - for i in range(num_nodes): - node = {"name": f"tank-{i:04d}", "connect": [], "image": {"tag": version}} - - # Add round-robin connection - next_node = (i + 1) % num_nodes - node["connect"].append(f"tank-{next_node:04d}") - connections.add((i, next_node)) - - # Add random connections - available_nodes = list(range(num_nodes)) - available_nodes.remove(i) - if next_node in available_nodes: - available_nodes.remove(next_node) - - for _ in range(min(num_connections - 1, len(available_nodes))): - random_node = random.choice(available_nodes) - # Avoid circular loops of A -> B -> A - if (random_node, i) not in connections: - node["connect"].append(f"tank-{random_node:04d}") - connections.add((i, random_node)) - available_nodes.remove(random_node) - - nodes.append(node) - - network_yaml_data = {"nodes": nodes} - network_yaml_data["fork_observer"] = { - "enabled": fork_observer, - "configQueryInterval": fork_obs_query_interval, - } - - with open(os.path.join(datadir, NETWORK_FILE), "w") as f: - yaml.dump(network_yaml_data, f, default_flow_style=False) - - # Generate node-defaults.yaml - default_yaml_path = NETWORK_DIR.joinpath(DEFAULTS_FILE) - with open(str(default_yaml_path)) as f: - defaults_yaml_content = f.read() - - with open(os.path.join(datadir, DEFAULTS_FILE), "w") as f: - f.write(defaults_yaml_content) - - click.echo(f"Project '{datadir}' has been created with '{NETWORK_FILE}' and '{DEFAULTS_FILE}'.") +cli.add_command(setup) +cli.add_command(status) +cli.add_command(stop) if __name__ == "__main__": diff --git a/src/warnet/project.py b/src/warnet/project.py new file mode 100644 index 000000000..f30ef2721 --- /dev/null +++ b/src/warnet/project.py @@ 
-0,0 +1,510 @@ +import os +import platform +import random +import subprocess +import sys +from dataclasses import dataclass +from enum import Enum, auto +from importlib.resources import files +from pathlib import Path +from typing import Callable + +import click +import inquirer +import yaml + +from .constants import DEFAULT_TAG, SUPPORTED_TAGS +from .network import copy_network_defaults, copy_scenario_defaults + + +@click.command() +def setup(): + """Setup warnet""" + + class ToolStatus(Enum): + Satisfied = auto() + Unsatisfied = auto() + + @dataclass + class ToolInfo: + tool_name: str + is_installed_func: Callable[[], tuple[bool, str]] + install_instruction: str + install_url: str + + __slots__ = ["tool_name", "is_installed_func", "install_instruction", "install_url"] + + def is_minikube_installed() -> tuple[bool, str]: + try: + version_result = subprocess.run( + ["minikube", "version", "--short"], + capture_output=True, + text=True, + ) + location_result = subprocess.run( + ["which", "minikube"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return True, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_minikube_version_valid_on_darwin() -> tuple[bool, str]: + try: + version_result = subprocess.run( + ["minikube", "version", "--short"], + capture_output=True, + text=True, + ) + location_result = subprocess.run( + ["which", "minikube"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + version = version_result.stdout.strip().split()[-1] # Get the version number + return version not in [ + "v1.32.0", + "1.33.0", + ], f"{location_result.stdout.strip()} ({version})" + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_platform_darwin() -> bool: + return platform.system() == "Darwin" + + def is_docker_installed() 
-> tuple[bool, str]: + try: + version_result = subprocess.run(["docker", "--version"], capture_output=True, text=True) + location_result = subprocess.run( + ["which", "docker"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return True, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_docker_desktop_running() -> tuple[bool, str]: + try: + version_result = subprocess.run(["docker", "info"], capture_output=True, text=True) + location_result = subprocess.run( + ["which", "docker"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return "Docker Desktop" in version_result.stdout, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_kubectl_installed() -> tuple[bool, str]: + try: + version_result = subprocess.run( + ["kubectl", "version", "--client"], + capture_output=True, + text=True, + ) + location_result = subprocess.run( + ["which", "kubectl"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return True, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def is_helm_installed() -> tuple[bool, str]: + try: + version_result = subprocess.run(["helm", "version"], capture_output=True, text=True) + location_result = subprocess.run( + ["which", "helm"], + capture_output=True, + text=True, + ) + if version_result.returncode == 0 and location_result.returncode == 0: + return version_result.returncode == 0, location_result.stdout.strip() + else: + return False, "" + except FileNotFoundError as err: + return False, str(err) + + def check_installation(tool_info: ToolInfo) -> ToolStatus: + has_good_version, location = tool_info.is_installed_func() + if not 
has_good_version: + instruction_label = click.style(" Instruction: ", fg="yellow", bold=True) + instruction_text = click.style(f"{tool_info.install_instruction}", fg="yellow") + url_label = click.style(" URL: ", fg="yellow", bold=True) + url_text = click.style(f"{tool_info.install_url}", fg="yellow") + + click.secho(f" 💥 {tool_info.tool_name} is not installed. {location}", fg="yellow") + click.echo(instruction_label + instruction_text) + click.echo(url_label + url_text) + return ToolStatus.Unsatisfied + else: + click.secho(f" ⭐️ {tool_info.tool_name} is satisfied: {location}", bold=False) + return ToolStatus.Satisfied + + docker_info = ToolInfo( + tool_name="Docker", + is_installed_func=is_docker_installed, + install_instruction="Install Docker from Docker's official site.", + install_url="https://docs.docker.com/engine/install/", + ) + docker_desktop_info = ToolInfo( + tool_name="Docker Desktop", + is_installed_func=is_docker_desktop_running, + install_instruction="Make sure Docker Desktop is installed and running.", + install_url="https://docs.docker.com/desktop/", + ) + kubectl_info = ToolInfo( + tool_name="Kubectl", + is_installed_func=is_kubectl_installed, + install_instruction="Install kubectl.", + install_url="https://kubernetes.io/docs/tasks/tools/install-kubectl/", + ) + helm_info = ToolInfo( + tool_name="Helm", + is_installed_func=is_helm_installed, + install_instruction="Install Helm from Helm's official site.", + install_url="https://helm.sh/docs/intro/install/", + ) + minikube_info = ToolInfo( + tool_name="Minikube", + is_installed_func=is_minikube_installed, + install_instruction="Install Minikube from the official Minikube site.", + install_url="https://minikube.sigs.k8s.io/docs/start/", + ) + minikube_version_info = ToolInfo( + tool_name="Minikube's version", + is_installed_func=is_minikube_version_valid_on_darwin, + 
install_instruction="Install the latest Minikube from the official Minikube site.", + install_url="https://minikube.sigs.k8s.io/docs/start/", + ) + + print("") + print(" ╭───────────────────────────────────╮") + print(" │ Welcome to the Warnet Quickstart │") + print(" ╰───────────────────────────────────╯") + print("") + print(" Let's find out if your system has what it takes to run Warnet...") + print("") + + try: + questions = [ + inquirer.List( + "platform", + message=click.style("Which platform would you like to use?", fg="blue", bold=True), + choices=["Minikube", "Docker Desktop"], + ) + ] + answers = inquirer.prompt(questions) + + check_results: list[ToolStatus] = [] + if answers: + if answers["platform"] == "Docker Desktop": + check_results.append(check_installation(docker_info)) + check_results.append(check_installation(docker_desktop_info)) + check_results.append(check_installation(kubectl_info)) + check_results.append(check_installation(helm_info)) + elif answers["platform"] == "Minikube": + check_results.append(check_installation(docker_info)) + check_results.append(check_installation(minikube_info)) + if is_platform_darwin(): + check_results.append(check_installation(minikube_version_info)) + check_results.append(check_installation(kubectl_info)) + check_results.append(check_installation(helm_info)) + else: + click.secho("Please re-run Quickstart.", fg="yellow") + sys.exit(1) + + if ToolStatus.Unsatisfied in check_results: + click.secho( + "Please fix the installation issues above and try quickstart again.", fg="yellow" + ) + sys.exit(1) + else: + click.secho(" ⭐️ Warnet prerequisites look good.\n") + + except Exception as e: + click.echo(f"{e}\n\n") + click.secho(f"An error occurred while running the quick start script:\n\n{e}\n\n", fg="red") + click.secho( + "Please report the above context to https://github.com/bitcoin-dev-project/warnet/issues", + fg="yellow", + ) + return False + + +def 
create_warnet_project(directory: Path, check_empty: bool = False): + """Common function to create a warnet project""" + if check_empty and any(directory.iterdir()): + click.secho("Warning: Directory is not empty", fg="yellow") + if not click.confirm("Do you want to continue?", default=True): + return + + try: + copy_network_defaults(directory) + copy_scenario_defaults(directory) + click.echo(f"Copied network example files to {directory}/networks") + click.echo(f"Created warnet project structure in {directory}") + except Exception as e: + click.secho(f"Error creating project: {e}", fg="red") + raise e + + +@click.command() +@click.argument( + "directory", type=click.Path(file_okay=False, dir_okay=True, resolve_path=True, path_type=Path) +) +def new(directory: Path): + """Create a new warnet project in the specified directory""" + new_internal(directory) + + +def new_internal(directory: Path, from_init=False): + if directory.exists() and not from_init: + click.secho(f"Error: Directory {directory} already exists", fg="red") + return + + answers = {} + + # Network name + network_name = inquirer.prompt( + [ + inquirer.Text( + "network_name", + message=click.style("Choose a network name", fg="blue", bold=True), + validate=lambda _, x: len(x) > 0, + ) + ] + ) + if network_name is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + answers.update(network_name) + + # Number of nodes + nodes_question = inquirer.prompt( + [ + inquirer.List( + "nodes", + message=click.style( + "How many nodes would you like in the network?", fg="blue", bold=True + ), + choices=["8", "12", "20", "50", "other"], + default="12", + ) + ] + ) + if nodes_question is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + + if nodes_question["nodes"] == "other": + custom_nodes = inquirer.prompt( + [ + inquirer.Text( + "nodes", + message=click.style("Enter the number of nodes", fg="blue", bold=True), + validate=lambda _, x: int(x) > 0, + ) + ] + ) + if 
custom_nodes is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + answers["nodes"] = custom_nodes["nodes"] + else: + answers["nodes"] = nodes_question["nodes"] + + # Number of connections + connections_question = inquirer.prompt( + [ + inquirer.List( + "connections", + message=click.style( + "How many connections would you like each node to have?", + fg="blue", + bold=True, + ), + choices=["0", "1", "2", "8", "12", "other"], + default="8", + ) + ] + ) + if connections_question is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + + if connections_question["connections"] == "other": + custom_connections = inquirer.prompt( + [ + inquirer.Text( + "connections", + message=click.style("Enter the number of connections", fg="blue", bold=True), + validate=lambda _, x: int(x) >= 0, + ) + ] + ) + if custom_connections is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + answers["connections"] = custom_connections["connections"] + else: + answers["connections"] = connections_question["connections"] + + # Version + version_question = inquirer.prompt( + [ + inquirer.List( + "version", + message=click.style( + "Which version would you like nodes to run by default?", fg="blue", bold=True + ), + choices=SUPPORTED_TAGS, + default=DEFAULT_TAG, + ) + ] + ) + if version_question is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + answers.update(version_question) + fork_observer = click.prompt( + click.style( + "\nWould you like to enable fork-observer on the network?", fg="blue", bold=True + ), + type=bool, + default=True, + ) + fork_observer_query_interval = 20 + if fork_observer: + fork_observer_query_interval = click.prompt( + click.style( + "\nHow often would you like fork-observer to query node status (seconds)?", + fg="blue", + bold=True, + ), + type=int, + default=20, + ) + + click.secho("\nCreating project structure...", fg="yellow", bold=True) + project_path = 
Path(os.path.expanduser(directory)) + create_warnet_project(project_path) + + click.secho("\nGenerating custom network...", fg="yellow", bold=True) + custom_network_path = project_path / "networks" / answers["network_name"] + custom_graph( + int(answers["nodes"]), + int(answers["connections"]), + answers["version"], + custom_network_path, + fork_observer, + fork_observer_query_interval, + ) + click.secho("\nSetup completed successfully!", fg="green", bold=True) + + click.echo( + f"\nEdit the network files found in {custom_network_path} before deployment if you want to customise the network." + ) + if fork_observer: + click.echo( + "If you enabled fork-observer you must forward the port from the cluster to your local machine:\n" + "`kubectl port-forward fork-observer 2323`\n" + "fork-observer will then be available at web address: localhost:2323" + ) + + click.echo("\nWhen you're ready, run the following command to deploy this network:") + click.echo(f" warnet deploy {custom_network_path}") + + +@click.command() +def init(): + """Initialize a warnet project in the current directory""" + current_dir = Path.cwd() + + custom_project = click.prompt( + click.style("\nWould you like to create a custom network?", fg="blue", bold=True), + type=bool, + default=True, + ) + if not custom_project: + create_warnet_project(current_dir, check_empty=True) + return 0 + else: + new_internal(directory=current_dir, from_init=True) + + +def custom_graph( + num_nodes: int, + num_connections: int, + version: str, + datadir: Path, + fork_observer: bool, + fork_obs_query_interval: int, +): + datadir.mkdir(parents=False, exist_ok=False) + # Generate network.yaml + nodes = [] + connections = set() + + for i in range(num_nodes): + node = {"name": f"tank-{i:04d}", "connect": [], "image": {"tag": version}} + + # Add round-robin connection + next_node = (i + 1) % num_nodes + node["connect"].append(f"tank-{next_node:04d}") + connections.add((i, next_node)) + + # Add random connections + 
available_nodes = list(range(num_nodes)) + available_nodes.remove(i) + if next_node in available_nodes: + available_nodes.remove(next_node) + + for _ in range(min(num_connections - 1, len(available_nodes))): + random_node = random.choice(available_nodes) + # Avoid circular loops of A -> B -> A + if (random_node, i) not in connections: + node["connect"].append(f"tank-{random_node:04d}") + connections.add((i, random_node)) + available_nodes.remove(random_node) + + nodes.append(node) + + network_yaml_data = {"nodes": nodes} + network_yaml_data["fork_observer"] = { + "enabled": fork_observer, + "configQueryInterval": fork_obs_query_interval, + } + + with open(os.path.join(datadir, "network.yaml"), "w") as f: + yaml.dump(network_yaml_data, f, default_flow_style=False) + + # Generate node-defaults.yaml + default_yaml_path = files("resources.networks").joinpath("node-defaults.yaml") + with open(str(default_yaml_path)) as f: + defaults_yaml_content = f.read() + + with open(os.path.join(datadir, "node-defaults.yaml"), "w") as f: + f.write(defaults_yaml_content) + + click.echo( + f"Project '{datadir}' has been created with 'network.yaml' and 'node-defaults.yaml'." 
+ ) diff --git a/src/warnet/users.py b/src/warnet/users.py new file mode 100644 index 000000000..c85e53585 --- /dev/null +++ b/src/warnet/users.py @@ -0,0 +1,70 @@ +import os +import subprocess +import sys + +import click +import yaml + + +@click.command() +@click.argument("kube_config", type=str) +def auth(kube_config: str) -> None: + """ + Authenticate with a warnet cluster using a kube config file + """ + try: + current_kubeconfig = os.environ.get("KUBECONFIG", os.path.expanduser("~/.kube/config")) + combined_kubeconfig = ( + f"{current_kubeconfig}:{kube_config}" if current_kubeconfig else kube_config + ) + os.environ["KUBECONFIG"] = combined_kubeconfig + with open(kube_config) as file: + content = yaml.safe_load(file) + user = content["users"][0] + user_name = user["name"] + user_token = user["user"]["token"] + current_context = content["current-context"] + flatten_cmd = "kubectl config view --flatten" + result_flatten = subprocess.run( + flatten_cmd, shell=True, check=True, capture_output=True, text=True + ) + except subprocess.CalledProcessError as e: + click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") + click.secho(e.stderr, fg="red") + sys.exit(1) + + if result_flatten.returncode == 0: + with open(current_kubeconfig, "w") as file: + file.write(result_flatten.stdout) + click.secho(f"Authorization file written to: {current_kubeconfig}", fg="green") + else: + click.secho("Could not create authorization file", fg="red") + click.secho(result_flatten.stderr, fg="red") + sys.exit(result_flatten.returncode) + + try: + update_cmd = f"kubectl config set-credentials {user_name} --token {user_token}" + result_update = subprocess.run( + update_cmd, shell=True, check=True, capture_output=True, text=True + ) + if result_update.returncode != 0: + click.secho("Could not update authorization file", fg="red") + click.secho(result_flatten.stderr, fg="red") + sys.exit(result_flatten.returncode) + except subprocess.CalledProcessError as e: + 
click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") + click.secho(e.stderr, fg="red") + sys.exit(1) + + with open(current_kubeconfig) as file: + contents = yaml.safe_load(file) + + with open(current_kubeconfig, "w") as file: + contents["current-context"] = current_context + yaml.safe_dump(contents, file) + + with open(current_kubeconfig) as file: + contents = yaml.safe_load(file) + click.secho( + f"\nwarnet's current context is now set to: {contents['current-context']}", fg="green" + ) From 78a7d85b7417e9aae49e6e0ac8debe06ee67b556 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 8 Sep 2024 23:03:44 +0100 Subject: [PATCH 214/710] run CI on main --- .github/workflows/apidocs.yml | 2 +- .github/workflows/deploy.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/apidocs.yml b/.github/workflows/apidocs.yml index 64625f0bc..87da78b75 100644 --- a/.github/workflows/apidocs.yml +++ b/.github/workflows/apidocs.yml @@ -2,7 +2,7 @@ name: Format-api-docs on: push: branches: - - dev + - main jobs: format-api-docs: runs-on: ubuntu-latest diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 62ef02a69..77cf8e32d 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -3,7 +3,7 @@ name: Publish Commander Docker image on: push: branches: - - dev + - main paths: - resources/images/commander/Dockerfile tags-ignore: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7383c59a3..799a1b996 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,7 +4,7 @@ on: pull_request: push: branches: - - dev + - main env: UV_SYSTEM_PYTHON: 1 From b7a1980e4b8b1a9abad150f7b44f446fafa9d7a8 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 8 Sep 2024 23:55:08 +0100 Subject: [PATCH 215/710] check attr exists first --- resources/scenarios/commander.py | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/resources/scenarios/commander.py b/resources/scenarios/commander.py index b478c13ba..661f68727 100644 --- a/resources/scenarios/commander.py +++ b/resources/scenarios/commander.py @@ -135,7 +135,8 @@ def parse_args(self): help_parser = argparse.ArgumentParser(usage="%(prog)s [options]") self.add_options(help_parser) help_args, _ = help_parser.parse_known_args() - if help_args.help: + # Check if 'help' attribute exists in help_args before accessing it + if hasattr(help_args, "help") and help_args.help: help_parser.print_help() sys.exit(0) From 0427a2f2cce144ff5df1f4e777a9b09d83d440f7 Mon Sep 17 00:00:00 2001 From: josie Date: Mon, 9 Sep 2024 14:02:39 +0200 Subject: [PATCH 216/710] add logs back to cli (#554) --- src/warnet/main.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/warnet/main.py b/src/warnet/main.py index 390f699be..e39876adc 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -2,7 +2,7 @@ from .admin import admin from .bitcoin import bitcoin -from .control import down, run, stop +from .control import down, logs, run, stop from .deploy import deploy from .graph import graph from .image import image @@ -19,12 +19,13 @@ def cli(): cli.add_command(admin) cli.add_command(auth) cli.add_command(bitcoin) -cli.add_command(new) cli.add_command(deploy) cli.add_command(down) cli.add_command(graph) cli.add_command(image) cli.add_command(init) +cli.add_command(logs) +cli.add_command(new) cli.add_command(run) cli.add_command(setup) cli.add_command(status) From 2a31ed6037493254d616f94247e1fe9fea2ae77e Mon Sep 17 00:00:00 2001 From: josibake Date: Mon, 9 Sep 2024 20:39:49 +0000 Subject: [PATCH 217/710] Update apidocs on --- docs/warnet.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/warnet.md b/docs/warnet.md index 105da2103..ef06a27d5 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -38,6 +38,15 @@ Bring down a running warnet Initialize a warnet project in the current directory +### 
`warnet logs` +Show the logs of a pod + +options: +| name | type | required | default | +|----------|--------|------------|-----------| +| pod_name | String | | | +| follow | Bool | | False | + ### `warnet new` Create a new warnet project in the specified directory From dd22649322662887f6dfb4319e441ea6104652ac Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Tue, 10 Sep 2024 08:15:28 +0100 Subject: [PATCH 218/710] Add create-network cli command (#524) * add create-network cli command * add test to CI * removed create and renamaed network-create * removed stop_server from test base --- .github/workflows/test.yml | 15 +++ pyproject.toml | 1 + src/warnet/graph.py | 192 ++++++++++++++++++++++++++++++++++++ src/warnet/main.py | 3 +- src/warnet/project.py | 174 +++++--------------------------- test/conf_test.py | 2 +- test/dag_connection_test.py | 2 +- test/graph_test.py | 48 +++++++++ test/ln_test.py | 2 +- test/logging_test.py | 2 +- test/rpc_test.py | 2 +- test/scenarios_test.py | 2 +- test/services_test.py | 2 +- test/signet_test.py | 2 +- test/test_base.py | 5 - 15 files changed, 289 insertions(+), 165 deletions(-) create mode 100755 test/graph_test.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 799a1b996..d26a10fa5 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -91,3 +91,18 @@ jobs: run: | source .venv/bin/activate ./test/${{matrix.test}} + test-without-mk: + runs-on: ubuntu-latest + strategy: + matrix: + test: + - graph_test.py + steps: + - uses: actions/checkout@v4 + - uses: eifinger/setup-uv@v1 + - name: Install project + run: uv sync --all-extras --dev + - name: Run tests + run: | + source .venv/bin/activate + ./test/${{matrix.test}} diff --git a/pyproject.toml b/pyproject.toml index cb9f39384..2b1849084 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,6 +19,7 @@ dependencies = [ "rich==13.7.1", "tabulate==0.9.0", "PyYAML==6.0.2", + "pexpect==4.9.0", ] [project.scripts] diff --git 
a/src/warnet/graph.py b/src/warnet/graph.py index 1c91e1db2..34b98960d 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -1,6 +1,13 @@ +import os +import random +from importlib.resources import files from pathlib import Path import click +import inquirer +import yaml + +from .constants import DEFAULT_TAG, SUPPORTED_TAGS @click.group(name="graph", hidden=True) @@ -17,3 +24,188 @@ def import_json(infile: Path, outfile: Path, cb: str, ln_image: str): Returns XML file as string with or without --outfile option. """ raise Exception("Not Implemented") + + +def custom_graph( + num_nodes: int, + num_connections: int, + version: str, + datadir: Path, + fork_observer: bool, + fork_obs_query_interval: int, +): + datadir.mkdir(parents=False, exist_ok=False) + # Generate network.yaml + nodes = [] + connections = set() + + for i in range(num_nodes): + node = {"name": f"tank-{i:04d}", "connect": [], "image": {"tag": version}} + + # Add round-robin connection + next_node = (i + 1) % num_nodes + node["connect"].append(f"tank-{next_node:04d}") + connections.add((i, next_node)) + + # Add random connections + available_nodes = list(range(num_nodes)) + available_nodes.remove(i) + if next_node in available_nodes: + available_nodes.remove(next_node) + + for _ in range(min(num_connections - 1, len(available_nodes))): + random_node = random.choice(available_nodes) + # Avoid circular loops of A -> B -> A + if (random_node, i) not in connections: + node["connect"].append(f"tank-{random_node:04d}") + connections.add((i, random_node)) + available_nodes.remove(random_node) + + nodes.append(node) + + network_yaml_data = {"nodes": nodes} + network_yaml_data["fork_observer"] = { + "enabled": fork_observer, + "configQueryInterval": fork_obs_query_interval, + } + + with open(os.path.join(datadir, "network.yaml"), "w") as f: + yaml.dump(network_yaml_data, f, default_flow_style=False) + + # Generate node-defaults.yaml + default_yaml_path = 
files("resources.networks").joinpath("node-defaults.yaml") + with open(str(default_yaml_path)) as f: + defaults_yaml_content = f.read() + + with open(os.path.join(datadir, "node-defaults.yaml"), "w") as f: + f.write(defaults_yaml_content) + + click.echo( + f"Project '{datadir}' has been created with 'network.yaml' and 'node-defaults.yaml'." + ) + + +def inquirer_create_network(project_path: Path): + # Custom network configuration + questions = [ + inquirer.Text( + "network_name", + message=click.style("Enter your network name", fg="blue", bold=True), + validate=lambda _, x: len(x) > 0, + ), + inquirer.List( + "nodes", + message=click.style("How many nodes would you like?", fg="blue", bold=True), + choices=["8", "12", "20", "50", "other"], + default="12", + ), + inquirer.List( + "connections", + message=click.style( + "How many connections would you like each node to have?", + fg="blue", + bold=True, + ), + choices=["0", "1", "2", "8", "12", "other"], + default="8", + ), + inquirer.List( + "version", + message=click.style( + "Which version would you like nodes to run by default?", fg="blue", bold=True + ), + choices=SUPPORTED_TAGS, + default=DEFAULT_TAG, + ), + ] + + net_answers = inquirer.prompt(questions) + if net_answers is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + + if net_answers["nodes"] == "other": + custom_nodes = inquirer.prompt( + [ + inquirer.Text( + "nodes", + message=click.style("Enter the number of nodes", fg="blue", bold=True), + validate=lambda _, x: int(x) > 0, + ) + ] + ) + if custom_nodes is None: + click.secho("Setup cancelled by user.", fg="yellow") + return False + net_answers["nodes"] = custom_nodes["nodes"] + + if net_answers["connections"] == "other": + custom_connections = inquirer.prompt( + [ + inquirer.Text( + "connections", + message=click.style("Enter the number of connections", fg="blue", bold=True), + validate=lambda _, x: int(x) >= 0, + ) + ] + ) + if custom_connections is None: + 
click.secho("Setup cancelled by user.", fg="yellow") + return False + net_answers["connections"] = custom_connections["connections"] + fork_observer = click.prompt( + click.style( + "\nWould you like to enable fork-observer on the network?", fg="blue", bold=True + ), + type=bool, + default=True, + ) + fork_observer_query_interval = 20 + if fork_observer: + fork_observer_query_interval = click.prompt( + click.style( + "\nHow often would you like fork-observer to query node status (seconds)?", + fg="blue", + bold=True, + ), + type=int, + default=20, + ) + custom_network_path = project_path / "networks" / net_answers["network_name"] + click.secho("\nGenerating custom network...", fg="yellow", bold=True) + custom_graph( + int(net_answers["nodes"]), + int(net_answers["connections"]), + net_answers["version"], + custom_network_path, + fork_observer, + fork_observer_query_interval, + ) + return custom_network_path + + +@click.command() +def create(): + """Create a new warnet network""" + try: + project_path = Path(os.getcwd()) + # Check if the project has a networks directory + if not (project_path / "networks").exists(): + click.secho( + "The current directory does not have a 'networks' directory. 
Please run 'warnet init' or 'warnet create' first.", + fg="red", + bold=True, + ) + return False + custom_network_path = inquirer_create_network(project_path) + click.secho("\nNew network created successfully!", fg="green", bold=True) + click.echo("\nRun the following command to deploy this network:") + click.echo(f"warnet deploy {custom_network_path}") + except Exception as e: + click.echo(f"{e}\n\n") + click.secho(f"An error occurred while creating a new network:\n\n{e}\n\n", fg="red") + click.secho( + "Please report the above context to https://github.com/bitcoin-dev-project/warnet/issues", + fg="yellow", + ) + return False diff --git a/src/warnet/main.py b/src/warnet/main.py index e39876adc..28b933ef2 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -4,7 +4,7 @@ from .bitcoin import bitcoin from .control import down, logs, run, stop from .deploy import deploy -from .graph import graph +from .graph import create, graph from .image import image from .project import init, new, setup from .status import status @@ -30,6 +30,7 @@ def cli(): cli.add_command(setup) cli.add_command(status) cli.add_command(stop) +cli.add_command(create) if __name__ == "__main__": diff --git a/src/warnet/project.py b/src/warnet/project.py index f30ef2721..fb5e03aab 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -13,7 +13,7 @@ import inquirer import yaml -from .constants import DEFAULT_TAG, SUPPORTED_TAGS +from .graph import inquirer_create_network from .network import copy_network_defaults, copy_scenario_defaults @@ -198,7 +198,7 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: print("") print(" ╭───────────────────────────────────╮") - print(" │ Welcome to the Warnet Quickstart │") + print(" │ Welcome to Warnet setup │") print(" ╰───────────────────────────────────╯") print("") print(" Let's find out if your system has what it takes to run Warnet...") @@ -229,12 +229,12 @@ def check_installation(tool_info: ToolInfo) -> 
ToolStatus: check_results.append(check_installation(kubectl_info)) check_results.append(check_installation(helm_info)) else: - click.secho("Please re-run Quickstart.", fg="yellow") + click.secho("Please re-run setup.", fg="yellow") sys.exit(1) if ToolStatus.Unsatisfied in check_results: click.secho( - "Please fix the installation issues above and try quickstart again.", fg="yellow" + "Please fix the installation issues above and try setup again.", fg="yellow" ) sys.exit(1) else: @@ -281,154 +281,36 @@ def new_internal(directory: Path, from_init=False): click.secho(f"Error: Directory {directory} already exists", fg="red") return - answers = {} - - # Network name - network_name = inquirer.prompt( - [ - inquirer.Text( - "network_name", - message=click.style("Choose a network name", fg="blue", bold=True), - validate=lambda _, x: len(x) > 0, - ) - ] - ) - if network_name is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False - answers.update(network_name) - - # Number of nodes - nodes_question = inquirer.prompt( - [ - inquirer.List( - "nodes", - message=click.style( - "How many nodes would you like in the network?", fg="blue", bold=True - ), - choices=["8", "12", "20", "50", "other"], - default="12", - ) - ] - ) - if nodes_question is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False - - if nodes_question["nodes"] == "other": - custom_nodes = inquirer.prompt( - [ - inquirer.Text( - "nodes", - message=click.style("Enter the number of nodes", fg="blue", bold=True), - validate=lambda _, x: int(x) > 0, - ) - ] - ) - if custom_nodes is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False - answers["nodes"] = custom_nodes["nodes"] - else: - answers["nodes"] = nodes_question["nodes"] - - # Number of connections - connections_question = inquirer.prompt( - [ - inquirer.List( - "connections", - message=click.style( - "How many connections would you like each node to have?", - fg="blue", - bold=True, - ), - 
choices=["0", "1", "2", "8", "12", "other"], - default="8", - ) - ] - ) - if connections_question is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False + click.secho("\nCreating project structure...", fg="yellow", bold=True) + project_path = Path(os.path.expanduser(directory)) + create_warnet_project(project_path) - if connections_question["connections"] == "other": - custom_connections = inquirer.prompt( - [ - inquirer.Text( - "connections", - message=click.style("Enter the number of connections", fg="blue", bold=True), - validate=lambda _, x: int(x) >= 0, - ) - ] - ) - if custom_connections is None: - click.secho("Setup cancelled by user.", fg="yellow") - return False - answers["connections"] = custom_connections["connections"] - else: - answers["connections"] = connections_question["connections"] - - # Version - version_question = inquirer.prompt( + proj_answers = inquirer.prompt( [ - inquirer.List( - "version", + inquirer.Confirm( + "custom_network", message=click.style( - "Which version would you like nodes to run by default?", fg="blue", bold=True + "Do you want to create a custom network?", fg="blue", bold=True ), - choices=SUPPORTED_TAGS, - default=DEFAULT_TAG, - ) + default=True, + ), ] ) - if version_question is None: + if proj_answers is None: click.secho("Setup cancelled by user.", fg="yellow") return False - answers.update(version_question) - fork_observer = click.prompt( - click.style( - "\nWould you like to enable fork-observer on the network?", fg="blue", bold=True - ), - type=bool, - default=True, - ) - fork_observer_query_interval = 20 - if fork_observer: - fork_observer_query_interval = click.prompt( - click.style( - "\nHow often would you like fork-observer to query node status (seconds)?", - fg="blue", - bold=True, - ), - type=int, - default=20, - ) - - click.secho("\nCreating project structure...", fg="yellow", bold=True) - project_path = Path(os.path.expanduser(directory)) - create_warnet_project(project_path) - - 
click.secho("\nGenerating custom network...", fg="yellow", bold=True) - custom_network_path = project_path / "networks" / answers["network_name"] - custom_graph( - int(answers["nodes"]), - int(answers["connections"]), - answers["version"], - custom_network_path, - fork_observer, - fork_observer_query_interval, - ) - click.secho("\nSetup completed successfully!", fg="green", bold=True) + if proj_answers["custom_network"]: + click.secho("\nGenerating custom network...", fg="yellow", bold=True) + custom_network_path = inquirer_create_network(directory) click.echo( f"\nEdit the network files found in {custom_network_path} before deployment if you want to customise the network." ) - if fork_observer: - click.echo( - "If you enabled fork-observer you must forward the port from the cluster to your local machine:\n" - "`kubectl port-forward fork-observer 2323`\n" - "fork-observer will then be available at web address: localhost:2323" - ) + click.echo( + "If you enabled fork-observer you must forward the port from the cluster to your local machine:\n" + "`kubectl port-forward fork-observer 2323`\n" + "fork-observer will then be available at web address: localhost:2323" + ) click.echo("\nWhen you're ready, run the following command to deploy this network:") click.echo(f" warnet deploy {custom_network_path}") @@ -438,17 +320,7 @@ def new_internal(directory: Path, from_init=False): def init(): """Initialize a warnet project in the current directory""" current_dir = Path.cwd() - - custom_project = click.prompt( - click.style("\nWould you like to create a custom network?", fg="blue", bold=True), - type=bool, - default=True, - ) - if not custom_project: - create_warnet_project(current_dir, check_empty=True) - return 0 - else: - new_internal(directory=current_dir, from_init=True) + new_internal(directory=current_dir, from_init=True) def custom_graph( diff --git a/test/conf_test.py b/test/conf_test.py index c6495ada6..bc717a732 100755 --- a/test/conf_test.py +++ b/test/conf_test.py 
@@ -20,7 +20,7 @@ def run_test(self): self.setup_network() self.check_uacomment() finally: - self.stop_server() + self.cleanup() def setup_network(self): self.log.info("Setting up network") diff --git a/test/dag_connection_test.py b/test/dag_connection_test.py index 195ae0e7e..258052fc4 100755 --- a/test/dag_connection_test.py +++ b/test/dag_connection_test.py @@ -16,7 +16,7 @@ def run_test(self): self.setup_network() self.run_connect_dag_scenario() finally: - self.stop_server() + self.cleanup() def setup_network(self): self.log.info("Setting up network") diff --git a/test/graph_test.py b/test/graph_test.py new file mode 100755 index 000000000..5cc3200a6 --- /dev/null +++ b/test/graph_test.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 + +import os +import shutil + +import pexpect +from test_base import TestBase + +NETWORKS_DIR = "networks" + + +class GraphTest(TestBase): + def __init__(self): + super().__init__() + + def run_test(self): + try: + self.directory_not_exist() + os.mkdir(NETWORKS_DIR) + self.directory_exists() + + finally: + shutil.rmtree(NETWORKS_DIR) if os.path.exists(NETWORKS_DIR) else None + + def directory_not_exist(self): + self.sut = pexpect.spawn("warnet create") + self.sut.expect("init", timeout=50) + + def directory_exists(self): + self.sut = pexpect.spawn("warnet create") + self.sut.expect("name", timeout=10) + self.sut.sendline("ANewNetwork") + self.sut.expect("many", timeout=10) + self.sut.sendline("") + self.sut.expect("connections", timeout=10) + self.sut.sendline("") + self.sut.expect("version", timeout=10) + self.sut.sendline("") + self.sut.expect("enable fork-observer", timeout=10) + self.sut.sendline("") + self.sut.expect("seconds", timeout=10) + self.sut.sendline("") + self.sut.expect("successfully", timeout=50) + + +if __name__ == "__main__": + test = GraphTest() + test.run_test() diff --git a/test/ln_test.py b/test/ln_test.py index 22a86d1ba..576846b6b 100755 --- a/test/ln_test.py +++ b/test/ln_test.py @@ -24,7 +24,7 @@ def 
run_test(self): self.test_ln_payment_2_to_0() self.test_simln() finally: - self.stop_server() + self.cleanup() def setup_network(self): self.log.info("Setting up network") diff --git a/test/logging_test.py b/test/logging_test.py index 5f547fb53..9b8c0f9f3 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -29,7 +29,7 @@ def run_test(self): if self.connect_logging_process is not None: self.log.info("Terminating background connect_logging.sh process...") self.connect_logging_process.terminate() - self.stop_server() + self.cleanup() def start_logging(self): self.log.info("Running install_logging.sh") diff --git a/test/rpc_test.py b/test/rpc_test.py index 9cc0ee336..ba466ea53 100755 --- a/test/rpc_test.py +++ b/test/rpc_test.py @@ -20,7 +20,7 @@ def run_test(self): self.test_message_exchange() self.test_address_manager() finally: - self.stop_server() + self.cleanup() def setup_network(self): self.log.info("Setting up network") diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 30d9a3f7e..01521ff92 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -20,7 +20,7 @@ def run_test(self): self.setup_network() self.test_scenarios() finally: - self.stop_server() + self.cleanup() def setup_network(self): self.log.info("Setting up network") diff --git a/test/services_test.py b/test/services_test.py index ec6422765..59048d3ce 100755 --- a/test/services_test.py +++ b/test/services_test.py @@ -18,7 +18,7 @@ def run_test(self): self.setup_network() self.check_fork_observer() finally: - self.stop_server() + self.cleanup() def setup_network(self): self.log.info("Setting up network") diff --git a/test/signet_test.py b/test/signet_test.py index b550596fd..5307ec9d9 100755 --- a/test/signet_test.py +++ b/test/signet_test.py @@ -20,7 +20,7 @@ def run_test(self): self.setup_network() self.check_signet_miner() finally: - self.stop_server() + self.cleanup() def setup_network(self): self.log.info("Setting up network") diff --git a/test/test_base.py 
b/test/test_base.py index 582ca5c8f..3250ecd95 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -1,4 +1,3 @@ -import atexit import json import logging import logging.config @@ -21,7 +20,6 @@ class TestBase: def __init__(self): self.setup_environment() self.setup_logging() - atexit.register(self.cleanup) self.log_expected_msgs: None | [str] = None self.log_unexpected_msgs: None | [str] = None self.log_msg_assertions_passed = False @@ -80,9 +78,6 @@ def output_reader(self, pipe, func): if line: func(line) - def stop_server(self): - self.cleanup() - def wait_for_predicate(self, predicate, timeout=5 * 60, interval=5): self.log.debug(f"Waiting for predicate with timeout {timeout}s and interval {interval}s") while timeout > 0: From 5d0e1875f5c48196f5a110b58bdbe51cf0c1eb53 Mon Sep 17 00:00:00 2001 From: josibake Date: Tue, 10 Sep 2024 07:15:55 +0000 Subject: [PATCH 219/710] Update apidocs on --- docs/warnet.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/warnet.md b/docs/warnet.md index ef06a27d5..fe2ff71a1 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -21,6 +21,10 @@ options: |-------------|--------|------------|-----------| | kube_config | String | yes | | +### `warnet create` +Create a new warnet network + + ### `warnet deploy` Deploy a warnet with topology loaded from \ From aeb81ebe1fa8e4e1bba6e55c9f9aed4ec5d3304a Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 10 Sep 2024 03:16:19 -0400 Subject: [PATCH 220/710] setup: add "No Backend" option (#556) * setup: add "No Backend" option * reccomend warnet auth --- src/warnet/project.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/warnet/project.py b/src/warnet/project.py index fb5e03aab..e2a4962a7 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -209,7 +209,11 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: inquirer.List( "platform", message=click.style("Which platform would you like to use?", fg="blue", bold=True), 
- choices=["Minikube", "Docker Desktop"], + choices=[ + "Minikube", + "Docker Desktop", + "No Backend (Interacting with remote cluster, see `warnet auth --help`)", + ], ) ] answers = inquirer.prompt(questions) @@ -219,15 +223,13 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: if answers["platform"] == "Docker Desktop": check_results.append(check_installation(docker_info)) check_results.append(check_installation(docker_desktop_info)) - check_results.append(check_installation(kubectl_info)) - check_results.append(check_installation(helm_info)) elif answers["platform"] == "Minikube": check_results.append(check_installation(docker_info)) check_results.append(check_installation(minikube_info)) if is_platform_darwin(): check_results.append(check_installation(minikube_version_info)) - check_results.append(check_installation(kubectl_info)) - check_results.append(check_installation(helm_info)) + check_results.append(check_installation(kubectl_info)) + check_results.append(check_installation(helm_info)) else: click.secho("Please re-run setup.", fg="yellow") sys.exit(1) From 4fff537082fdcc563ee06409f1978011c7a1d9e9 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 10 Sep 2024 03:46:34 -0400 Subject: [PATCH 221/710] status: check connections (#557) --- src/warnet/network.py | 8 ++++---- src/warnet/status.py | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/warnet/network.py b/src/warnet/network.py index 8d32781e5..92e5cda16 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -63,7 +63,7 @@ def copy_scenario_defaults(directory: Path): ) -def _connected(): +def _connected(end="\n"): tanks = get_mission("tank") for tank in tanks: # Get actual @@ -73,11 +73,11 @@ def _connected(): if peer["connection_type"] == "manual": actual += 1 expected = int(tank.metadata.annotations["init_peers"]) - print(f"Tank {tank.metadata.name} peers expected: {expected}, actual: {actual}") + print(f"Tank {tank.metadata.name} peers expected: 
{expected}, actual: {actual}", end=end) # Even if more edges are specified, bitcoind only allows # 8 manual outbound connections if min(8, expected) > actual: - print("Network not connected") + print("\nNetwork not connected") return False - print("Network connected") + print("Network connected ") return True diff --git a/src/warnet/status.py b/src/warnet/status.py index 8aa5c95b0..beb4de6f9 100644 --- a/src/warnet/status.py +++ b/src/warnet/status.py @@ -5,6 +5,7 @@ from rich.text import Text from .k8s import get_mission +from .network import _connected @click.command() @@ -53,6 +54,7 @@ def status(): summary.append(f"\nTotal Tanks: {len(tanks)}", style="bold cyan") summary.append(f" | Active Scenarios: {len(scenarios)}", style="bold green") console.print(summary) + _connected(end="\r") def _get_tank_status(): From 65fa357ac01933e2de6df0eb4fc5a339e5429043 Mon Sep 17 00:00:00 2001 From: Will Clark Date: Tue, 10 Sep 2024 09:05:37 +0100 Subject: [PATCH 222/710] deploy commander using helm (#550) * deploy commander using helm * teardown commanders using helm, fix status * stop scenario faster * stop warnet faster * fix scenarios_test User proper scenarios delete function --- resources/charts/commander/.helmignore | 23 ++ resources/charts/commander/Chart.yaml | 24 ++ .../charts/commander/templates/NOTES.txt | 1 + .../charts/commander/templates/_helpers.tpl | 60 +++++ .../charts/commander/templates/configmap.yaml | 17 ++ resources/charts/commander/templates/pod.yaml | 32 +++ resources/charts/commander/values.yaml | 78 +++++++ src/warnet/constants.py | 1 + src/warnet/control.py | 208 +++++++++--------- test/scenarios_test.py | 4 +- 10 files changed, 339 insertions(+), 109 deletions(-) create mode 100644 resources/charts/commander/.helmignore create mode 100644 resources/charts/commander/Chart.yaml create mode 100644 resources/charts/commander/templates/NOTES.txt create mode 100644 resources/charts/commander/templates/_helpers.tpl create mode 100644 
resources/charts/commander/templates/configmap.yaml create mode 100644 resources/charts/commander/templates/pod.yaml create mode 100644 resources/charts/commander/values.yaml diff --git a/resources/charts/commander/.helmignore b/resources/charts/commander/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/resources/charts/commander/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/resources/charts/commander/Chart.yaml b/resources/charts/commander/Chart.yaml new file mode 100644 index 000000000..202456e92 --- /dev/null +++ b/resources/charts/commander/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: commander +description: A Helm chart for a commander + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. 
Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: 0.1.0 diff --git a/resources/charts/commander/templates/NOTES.txt b/resources/charts/commander/templates/NOTES.txt new file mode 100644 index 000000000..29639a44e --- /dev/null +++ b/resources/charts/commander/templates/NOTES.txt @@ -0,0 +1 @@ +Commander beginning their mission. diff --git a/resources/charts/commander/templates/_helpers.tpl b/resources/charts/commander/templates/_helpers.tpl new file mode 100644 index 000000000..9383f0ff9 --- /dev/null +++ b/resources/charts/commander/templates/_helpers.tpl @@ -0,0 +1,60 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "commander.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "commander.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "commander.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "commander.labels" -}} +helm.sh/chart: {{ include "commander.chart" . }} +{{ include "commander.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.podLabels }} +{{ toYaml . 
}} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "commander.selectorLabels" -}} +app.kubernetes.io/name: {{ include "commander.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "commander.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "commander.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/resources/charts/commander/templates/configmap.yaml b/resources/charts/commander/templates/configmap.yaml new file mode 100644 index 000000000..9c45ea0d2 --- /dev/null +++ b/resources/charts/commander/templates/configmap.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "commander.fullname" . }}-scenario + labels: + {{- include "commander.labels" . | nindent 4 }} +binaryData: + scenario.py: {{ .Values.scenario }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "commander.fullname" . }}-warnet + labels: + {{- include "commander.labels" . | nindent 4 }} +binaryData: + warnet.json: {{ .Values.warnet }} diff --git a/resources/charts/commander/templates/pod.yaml b/resources/charts/commander/templates/pod.yaml new file mode 100644 index 000000000..94c79205f --- /dev/null +++ b/resources/charts/commander/templates/pod.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "commander.fullname" . }} + labels: + {{- include "commander.labels" . | nindent 4 }} + app: {{ include "commander.name" . 
}} + mission: commander +spec: + restartPolicy: {{ .Values.restartPolicy }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/sh", "-c"] + args: + - | + python3 /scenario.py {{ .Values.args }} + volumeMounts: + - name: scenario + mountPath: /scenario.py + subPath: scenario.py + - name: warnet + mountPath: /warnet.json + subPath: warnet.json + volumes: + - name: scenario + configMap: + name: {{ include "commander.fullname" . }}-scenario + - name: warnet + configMap: + name: {{ include "commander.fullname" . }}-warnet diff --git a/resources/charts/commander/values.yaml b/resources/charts/commander/values.yaml new file mode 100644 index 000000000..fc7e8233d --- /dev/null +++ b/resources/charts/commander/values.yaml @@ -0,0 +1,78 @@ +# Default values for commander. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +namespace: warnet + +restartPolicy: Never + +image: + repository: bitcoindevproject/warnet-commander + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +podLabels: + app: "warnet" + mission: "commander" + +podSecurityContext: {} + +securityContext: {} + +service: + type: ClusterIP + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +# livenessProbe: +# exec: +# command: +# - pidof +# - commander +# failureThreshold: 3 +# initialDelaySeconds: 5 +# periodSeconds: 5 +# successThreshold: 1 +# timeoutSeconds: 1 +# readinessProbe: +# failureThreshold: 1 +# periodSeconds: 1 +# successThreshold: 1 +# tcpSocket: +# port: 2323 +# timeoutSeconds: 1 + +# Additional volumes on the output Deployment definition. +volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +# Additional volumeMounts on the output Deployment definition. +volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +port: + +scenario: "" + +warnet: "" + +args: "" diff --git a/src/warnet/constants.py b/src/warnet/constants.py index aa53b3484..bdd9dce9d 100644 --- a/src/warnet/constants.py +++ b/src/warnet/constants.py @@ -30,6 +30,7 @@ # Helm charts BITCOIN_CHART_LOCATION = str(CHARTS_DIR.joinpath("bitcoincore")) FORK_OBSERVER_CHART = str(CHARTS_DIR.joinpath("fork-observer")) +COMMANDER_CHART = str(CHARTS_DIR.joinpath("commander")) NAMESPACES_CHART_LOCATION = CHARTS_DIR.joinpath("namespaces") DEFAULT_NETWORK = Path("6_node_bitcoin") DEFAULT_NAMESPACES = Path("two_namespaces_two_users") diff --git a/src/warnet/control.py b/src/warnet/control.py index fa4679b77..203130f80 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -1,20 +1,20 @@ +import base64 import json -import os -import tempfile +import subprocess import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from pathlib import Path import click import inquirer -import yaml from inquirer.themes import GreenPassion from rich import print from rich.console import Console from rich.prompt import Confirm, Prompt from rich.table import Table +from .constants import COMMANDER_CHART from .k8s import ( - apply_kubernetes_yaml, - delete_namespace, get_default_namespace, get_mission, get_pods, @@ -77,16 +77,27 @@ def 
stop(scenario_name): def stop_scenario(scenario_name): - """Stop a single scenario""" - cmd = f"kubectl delete pod {scenario_name}" + """Stop a single scenario using Helm""" + # Stop the pod immediately (faster than uninstalling) + cmd = f"kubectl delete pod {scenario_name} --grace-period=0 --force" if stream_command(cmd): console.print(f"[bold green]Successfully stopped scenario: {scenario_name}[/bold green]") else: console.print(f"[bold red]Failed to stop scenario: {scenario_name}[/bold red]") + # Then uninstall via helm (non-blocking) + namespace = get_default_namespace() + command = f"helm uninstall {scenario_name} --namespace {namespace} --wait=false" + + # Run the helm uninstall command in the background + subprocess.Popen(command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + console.print( + f"[bold yellow]Initiated helm uninstall for release: {scenario_name}[/bold yellow]" + ) + def stop_all_scenarios(scenarios): - """Stop all active scenarios""" + """Stop all active scenarios using Helm""" with console.status("[bold yellow]Stopping all scenarios...[/bold yellow]"): for scenario in scenarios: stop_scenario(scenario) @@ -95,8 +106,8 @@ def stop_all_scenarios(scenarios): def list_active_scenarios(): """List all active scenarios""" - commanders = get_mission("commander") - if not commanders: + active_scenarios = get_active_scenarios() + if not active_scenarios: print("No active scenarios found.") return @@ -105,43 +116,53 @@ def list_active_scenarios(): table.add_column("Name", style="cyan") table.add_column("Status", style="green") - for commander in commanders: - table.add_row(commander.metadata.name, commander.status.phase.lower()) + for scenario in active_scenarios: + table.add_row(scenario, "deployed") console.print(table) @click.command() def down(): - """Bring down a running warnet""" - with console.status("[bold yellow]Bringing down the warnet...[/bold yellow]"): - # Delete warnet-logging namespace - if 
delete_namespace("warnet-logging"): - console.print("[green]Warnet logging deleted[/green]") - else: - console.print("[red]Warnet logging NOT deleted[/red]") - - # Uninstall tanks - tanks = get_mission("tank") - with console.status("[yellow]Uninstalling tanks...[/yellow]"): - for tank in tanks: - cmd = f"helm uninstall {tank.metadata.name} --namespace {get_default_namespace()}" - if stream_command(cmd): - console.print(f"[green]Uninstalled tank: {tank.metadata.name}[/green]") - else: - console.print(f"[red]Failed to uninstall tank: {tank.metadata.name}[/red]") - - # Clean up scenarios and other pods - pods = get_pods() - with console.status("[yellow]Cleaning up remaining pods...[/yellow]"): + """Bring down a running warnet quickly""" + console.print("[bold yellow]Bringing down the warnet...[/bold yellow]") + + namespaces = [get_default_namespace(), "warnet-logging"] + + def uninstall_release(namespace, release_name): + cmd = f"helm uninstall {release_name} --namespace {namespace} --wait=false" + subprocess.Popen(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + return f"Initiated uninstall for: {release_name} in namespace {namespace}" + + def delete_pod(pod_name, namespace): + cmd = f"kubectl delete pod --ignore-not-found=true {pod_name} -n {namespace} --grace-period=0 --force" + subprocess.Popen(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + return f"Initiated deletion of pod: {pod_name} in namespace {namespace}" + + with ThreadPoolExecutor(max_workers=10) as executor: + futures = [] + + # Uninstall Helm releases + for namespace in namespaces: + command = f"helm list --namespace {namespace} -o json" + result = run_command(command) + if result: + releases = json.loads(result) + for release in releases: + futures.append(executor.submit(uninstall_release, namespace, release["name"])) + + # Delete remaining pods + pods = get_pods() for pod in pods.items: - cmd = f"kubectl delete pod --ignore-not-found=true 
{pod.metadata.name} -n {get_default_namespace()}" - if stream_command(cmd): - console.print(f"[green]Deleted pod: {pod.metadata.name}[/green]") - else: - console.print(f"[red]Failed to delete pod: {pod.metadata.name}[/red]") + futures.append(executor.submit(delete_pod, pod.metadata.name, pod.metadata.namespace)) + + # Wait for all tasks to complete and print results + for future in as_completed(futures): + console.print(f"[yellow]{future.result()}[/yellow]") - console.print("[bold green]Warnet has been brought down.[/bold green]") + console.print("[bold yellow]Teardown process initiated for all components.[/bold yellow]") + console.print("[bold yellow]Note: Some processes may continue in the background.[/bold yellow]") + console.print("[bold green]Warnet teardown process completed.[/bold green]") def get_active_network(namespace): @@ -163,11 +184,11 @@ def get_active_network(namespace): @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) def run(scenario_file: str, additional_args: tuple[str]): """Run a scenario from a file""" - scenario_path = os.path.abspath(scenario_file) - scenario_name = os.path.splitext(os.path.basename(scenario_path))[0] + scenario_path = Path(scenario_file).resolve() + scenario_name = scenario_path.stem - with open(scenario_path) as file: - scenario_text = file.read() + with open(scenario_path, "rb") as file: + scenario_data = base64.b64encode(file.read()).decode() name = f"commander-{scenario_name.replace('_', '')}-{int(time.time())}" namespace = get_default_namespace() @@ -184,72 +205,45 @@ def run(scenario_file: str, additional_args: tuple[str]): } for tank in tankpods ] - kubernetes_objects = [ - { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "warnetjson", - "namespace": namespace, - }, - "data": {"warnet.json": json.dumps(tanks)}, - }, - { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "scenariopy", - "namespace": namespace, - }, - "data": {"scenario.py": 
scenario_text}, - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": name, - "namespace": namespace, - "labels": {"mission": "commander"}, - }, - "spec": { - "restartPolicy": "Never", - "containers": [ - { - "name": name, - "image": "bitcoindevproject/warnet-commander:latest", - "args": additional_args, - "volumeMounts": [ - { - "name": "warnetjson", - "mountPath": "warnet.json", - "subPath": "warnet.json", - }, - { - "name": "scenariopy", - "mountPath": "scenario.py", - "subPath": "scenario.py", - }, - ], - } - ], - "volumes": [ - {"name": "warnetjson", "configMap": {"name": "warnetjson"}}, - {"name": "scenariopy", "configMap": {"name": "scenariopy"}}, - ], - }, - }, - ] - with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: - yaml.dump_all(kubernetes_objects, temp_file) - temp_file_path = temp_file.name - if apply_kubernetes_yaml(temp_file_path): - print(f"Successfully started scenario: {scenario_name}") - print(f"Commander pod name: {name}") - else: - print(f"Failed to start scenario: {scenario_name}") + # Encode warnet data + warnet_data = base64.b64encode(json.dumps(tanks).encode()).decode() - os.unlink(temp_file_path) + try: + # Construct Helm command + helm_command = [ + "helm", + "upgrade", + "--install", + "--namespace", + namespace, + "--set", + f"fullnameOverride={name}", + "--set", + f"scenario={scenario_data}", + "--set", + f"warnet={warnet_data}", + ] + + # Add additional arguments + if additional_args: + helm_command.extend(["--set", f"args={' '.join(additional_args)}"]) + + helm_command.extend([name, COMMANDER_CHART]) + + # Execute Helm command + result = subprocess.run(helm_command, check=True, capture_output=True, text=True) + + if result.returncode == 0: + print(f"Successfully started scenario: {scenario_name}") + print(f"Commander pod name: {name}") + else: + print(f"Failed to start scenario: {scenario_name}") + print(f"Error: {result.stderr}") + + except subprocess.CalledProcessError as e: 
+ print(f"Failed to start scenario: {scenario_name}") + print(f"Error: {e.stderr}") @click.command() diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 01521ff92..8be7f4a14 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -5,7 +5,7 @@ from test_base import TestBase -from warnet.k8s import delete_pod +from warnet.control import stop_scenario from warnet.process import run_command from warnet.status import _get_active_scenarios as scenarios_active @@ -80,7 +80,7 @@ def stop_scenario(self): running = scenarios_active() assert len(running) == 1, f"Expected one running scenario, got {len(running)}" assert running[0]["status"] == "running", "Scenario should be running" - delete_pod(running[0]["name"]) + stop_scenario(running[0]["name"]) self.wait_for_predicate(self.check_scenario_stopped) def check_scenario_stopped(self): From a6b212c5dc4fb05fc6df5491f6b1d8b1626e3738 Mon Sep 17 00:00:00 2001 From: josibake Date: Tue, 10 Sep 2024 08:05:59 +0000 Subject: [PATCH 223/710] Update apidocs on --- docs/warnet.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/warnet.md b/docs/warnet.md index fe2ff71a1..69f4be092 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -35,7 +35,7 @@ options: | debug | Bool | | False | ### `warnet down` -Bring down a running warnet +Bring down a running warnet quickly ### `warnet init` From 0d45b031282fd833bc78ebcedc3be2a8aec957a1 Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Tue, 10 Sep 2024 12:23:27 +0100 Subject: [PATCH 224/710] fix init bug --- src/warnet/project.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/warnet/project.py b/src/warnet/project.py index e2a4962a7..41b5fa09a 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -298,6 +298,7 @@ def new_internal(directory: Path, from_init=False): ), ] ) + custom_network_path = "" if proj_answers is None: click.secho("Setup cancelled by user.", fg="yellow") return False @@ -305,18 
+306,19 @@ def new_internal(directory: Path, from_init=False): click.secho("\nGenerating custom network...", fg="yellow", bold=True) custom_network_path = inquirer_create_network(directory) - click.echo( - f"\nEdit the network files found in {custom_network_path} before deployment if you want to customise the network." - ) + if custom_network_path: + click.echo( + f"\nEdit the network files found in {custom_network_path} before deployment if you want to customise the network." + ) + click.echo("\nWhen you're ready, run the following command to deploy this network:") + click.echo(f" warnet deploy {custom_network_path}") + click.echo( "If you enabled fork-observer you must forward the port from the cluster to your local machine:\n" "`kubectl port-forward fork-observer 2323`\n" "fork-observer will then be available at web address: localhost:2323" ) - click.echo("\nWhen you're ready, run the following command to deploy this network:") - click.echo(f" warnet deploy {custom_network_path}") - @click.command() def init(): From 0c4f76c624826c408918f0859b463a4502a106dc Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Tue, 10 Sep 2024 12:29:45 +0100 Subject: [PATCH 225/710] move fork observer comment to after it's installed --- resources/charts/fork-observer/templates/NOTES.txt | 6 +++++- src/warnet/project.py | 6 ------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/resources/charts/fork-observer/templates/NOTES.txt b/resources/charts/fork-observer/templates/NOTES.txt index 80e50940d..9894b7843 100644 --- a/resources/charts/fork-observer/templates/NOTES.txt +++ b/resources/charts/fork-observer/templates/NOTES.txt @@ -1 +1,5 @@ -Fork-observer is watching you +To view forkobserver you must forward the port from the cluster to your local machine + +kubectl port-forward fork-observer 2323 + +fork-observer will then be available at web address: http://localhost:2323 \ No newline at end of file diff --git a/src/warnet/project.py 
b/src/warnet/project.py index 41b5fa09a..e3a2fed0e 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -313,12 +313,6 @@ def new_internal(directory: Path, from_init=False): click.echo("\nWhen you're ready, run the following command to deploy this network:") click.echo(f" warnet deploy {custom_network_path}") - click.echo( - "If you enabled fork-observer you must forward the port from the cluster to your local machine:\n" - "`kubectl port-forward fork-observer 2323`\n" - "fork-observer will then be available at web address: localhost:2323" - ) - @click.command() def init(): From 29d911f4edc292b9f1d325f8ef8f23ccb528eaaa Mon Sep 17 00:00:00 2001 From: josie Date: Tue, 10 Sep 2024 13:30:58 +0200 Subject: [PATCH 226/710] add CVE images with signet (based on #498) (#539) * add CVE images with signet (based on #498) add base image for building old versions with CVEs. these images are pulled from a branch where for each version signet has been packported and the patches applied. the image also supports building from a local source, which is useful when adding new images. Co-authored-by: Will Clark <6606587+willcl-ark@users.noreply.github.com> * remove acceptnonstdtx from baseConfig this option is not allowed on signet. could probably figure out a fix to allow it but doesnt seem worth it at this time and definitely not something we want in the base config, anyways. * fix 16.1 p2sh activation * remove unused tor section simplify entrypoint.sh by removing the tor dead code. we can add this in later if we revisit tor, but its likely not that we are using helm we will take a different approach for configuring this, anyways. 
* update tests to work with CVE images Co-authored-by: Matthew Zipkin <2084648+pinheadmz@users.noreply.github.com> --------- Co-authored-by: willcl-ark Co-authored-by: Will Clark <6606587+willcl-ark@users.noreply.github.com> Co-authored-by: Matthew Zipkin <2084648+pinheadmz@users.noreply.github.com> --- .../charts/bitcoincore/templates/_helpers.tpl | 3 +- resources/charts/bitcoincore/values.yaml | 1 - resources/images/bitcoin/insecure/Dockerfile | 168 ++++++++++++++++++ .../bitcoin/insecure/addrman_v0.16.1.patch | 13 ++ .../bitcoin/insecure/addrman_v0.17.0.patch | 13 ++ .../bitcoin/insecure/addrman_v0.19.2.patch | 13 ++ .../bitcoin/insecure/addrman_v0.20.0.patch | 13 ++ .../bitcoin/insecure/addrman_v0.21.1.patch | 13 ++ resources/images/bitcoin/insecure/build.md | 89 ++++++++++ .../images/bitcoin/insecure/entrypoint.sh | 28 +++ .../bitcoin/insecure/isroutable_v0.16.1.patch | 13 ++ .../bitcoin/insecure/isroutable_v0.17.0.patch | 13 ++ .../bitcoin/insecure/isroutable_v0.19.2.patch | 13 ++ .../bitcoin/insecure/isroutable_v0.20.0.patch | 13 ++ .../bitcoin/insecure/isroutable_v0.21.1.patch | 13 ++ src/warnet/image.py | 15 +- src/warnet/image_build.py | 20 +-- src/warnet/network.py | 7 +- test/data/bitcoin_conf/network.yaml | 10 +- test/data/signet/network.yaml | 50 +++++- test/data/signet/node-defaults.yaml | 3 + test/signet_test.py | 4 +- test/test_base.py | 7 +- 23 files changed, 503 insertions(+), 32 deletions(-) create mode 100644 resources/images/bitcoin/insecure/Dockerfile create mode 100644 resources/images/bitcoin/insecure/addrman_v0.16.1.patch create mode 100644 resources/images/bitcoin/insecure/addrman_v0.17.0.patch create mode 100644 resources/images/bitcoin/insecure/addrman_v0.19.2.patch create mode 100644 resources/images/bitcoin/insecure/addrman_v0.20.0.patch create mode 100644 resources/images/bitcoin/insecure/addrman_v0.21.1.patch create mode 100644 resources/images/bitcoin/insecure/build.md create mode 100755 
resources/images/bitcoin/insecure/entrypoint.sh create mode 100644 resources/images/bitcoin/insecure/isroutable_v0.16.1.patch create mode 100644 resources/images/bitcoin/insecure/isroutable_v0.17.0.patch create mode 100644 resources/images/bitcoin/insecure/isroutable_v0.19.2.patch create mode 100644 resources/images/bitcoin/insecure/isroutable_v0.20.0.patch create mode 100644 resources/images/bitcoin/insecure/isroutable_v0.21.1.patch diff --git a/resources/charts/bitcoincore/templates/_helpers.tpl b/resources/charts/bitcoincore/templates/_helpers.tpl index 1adc5d205..dc3bf9ce6 100644 --- a/resources/charts/bitcoincore/templates/_helpers.tpl +++ b/resources/charts/bitcoincore/templates/_helpers.tpl @@ -61,8 +61,7 @@ Create the name of the service account to use Add network section heading in bitcoin.conf after v0.17.0 */}} {{- define "bitcoincore.check_semver" -}} -{{- $tag := .Values.image.tag | trimPrefix "v" -}} -{{- $version := semverCompare ">=0.17.0" $tag -}} +{{- $version := semverCompare ">=0.17.0" .Values.image.tag -}} {{- if $version -}} [{{ .Values.chain }}] {{- end -}} diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 2498fee6d..2c9962b8a 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -117,7 +117,6 @@ prometheusMetricsPort: 9332 baseConfig: | checkmempool=0 - acceptnonstdtxn=1 debuglogfile=0 logips=1 logtimemicros=1 diff --git a/resources/images/bitcoin/insecure/Dockerfile b/resources/images/bitcoin/insecure/Dockerfile new file mode 100644 index 000000000..e8bcb986c --- /dev/null +++ b/resources/images/bitcoin/insecure/Dockerfile @@ -0,0 +1,168 @@ +# Base stage +# ---------- +# +# We use the alpine version to get the +# correct version of glibc / gcc for building older bitcoin +# core versions. 
+ +# Default is set here to quiet a warning from Docker, but the caller must +# be sure to ALWAYS set this correct per the version of bitcoin core they are +# trying to build +ARG ALPINE_VERSION=3.7 +FROM alpine:${ALPINE_VERSION} AS base + +# Setup deps stage +# ---------------- +# +# this installs the common dependencies for all of the old versions +# and then version specific dependencies are passed via the +# EXTRA_PACKAGES ARG +FROM base AS deps +ARG EXTRA_PACKAGES="" +RUN --mount=type=cache,target=/var/cache/apk \ + sed -i 's/http\:\/\/dl-cdn.alpinelinux.org/https\:\/\/alpine.global.ssl.fastly.net/g' /etc/apk/repositories \ + && apk --no-cache add \ + autoconf \ + automake \ + boost-dev \ + build-base \ + ccache \ + chrpath \ + file \ + gnupg \ + git \ + libevent-dev \ + libressl \ + libtool \ + linux-headers \ + zeromq-dev \ + ${EXTRA_PACKAGES} + +ENV BERKELEYDB_VERSION=db-4.8.30.NC +ENV BERKELEYDB_PREFIX=/opt/${BERKELEYDB_VERSION} + +RUN wget https://download.oracle.com/berkeley-db/${BERKELEYDB_VERSION}.tar.gz +RUN tar -xzf *.tar.gz +RUN sed s/__atomic_compare_exchange/__atomic_compare_exchange_db/g -i ${BERKELEYDB_VERSION}/dbinc/atomic.h +RUN mkdir -p ${BERKELEYDB_PREFIX} + +WORKDIR /${BERKELEYDB_VERSION}/build_unix + +RUN ../dist/configure --enable-cxx --disable-shared --with-pic --prefix=${BERKELEYDB_PREFIX} +RUN make -j$(nproc) +RUN make install +RUN rm -rf ${BERKELEYDB_PREFIX}/docs + +# Build stage +# ----------- +# +# We can build from a git repo using the REPO and COMMIT_SHA args +# or from a local directory using FROM_SRC=true and specifying the local +# source directory. 
Build args are set using a default but can be changed +# on an imnage by image basis, if needed +# +# PRE_CONFIGURE_COMMANDS is used for version specific fixes needed before +# running ./autogen.sh && ./configure +# +# EXTRA_BUILD_ARGS is used for version specific build flags +FROM deps AS build +ARG FROM_SRC="false" +ARG REPO="" +ARG COMMIT_SHA="" +ARG BUILD_ARGS="--disable-tests --without-gui --disable-bench --disable-fuzz-binary --enable-suppress-external-warnings" +ARG EXTRA_BUILD_ARGS="" +ARG PRE_CONFIGURE_COMMANDS="" + +COPY --from=deps /opt /opt +ENV BITCOIN_PREFIX=/opt/bitcoin +WORKDIR /build + +# Even if not being used, --build-context bitcoin-src must be specified else +# this line will error. If building from a remote repo, use something like +# --build-context bitcoin-src="." +COPY --from=bitcoin-src . /tmp/bitcoin-source +RUN if [ "$FROM_SRC" = "true" ]; then \ + # run with --progress=plain to see these log outputs + echo "Using local files from /tmp/bitcoin-source"; \ + if [ -d "/tmp/bitcoin-source" ] && [ "$(ls -A /tmp/bitcoin-source)" ]; then \ + cp -R /tmp/bitcoin-source /build/bitcoin; \ + else \ + echo "Error: Local source directory is empty or does not exist" && exit 1; \ + fi \ + else \ + echo "Cloning from git repository"; \ + git clone --depth 1 "https://github.com/${REPO}" /build/bitcoin \ + && cd /build/bitcoin \ + && git fetch --depth 1 origin "$COMMIT_SHA" \ + && git checkout "$COMMIT_SHA"; \ + fi; + +# This is not our local ccache, but ccache in the docker cache +# this does speed up builds substantially when building from source or building +# multiple versions sequentially +ENV CCACHE_DIR=/ccache +RUN --mount=type=cache,target=/ccache \ + set -ex \ + && cd /build/bitcoin \ + && if [ -n "$PRE_CONFIGURE_COMMANDS" ]; then \ + eval ${PRE_CONFIGURE_COMMANDS}; \ + fi \ + && ./autogen.sh \ + && ./configure \ + LDFLAGS=-L`ls -d /opt/db*`/lib/ \ + CPPFLAGS="-g0 -I`ls -d /opt/db*`/include/ --param ggc-min-expand=1 --param 
ggc-min-heapsize=32768" \ + --prefix=${BITCOIN_PREFIX} \ + ${BUILD_ARGS} \ + ${EXTRA_BUILD_ARGS} \ + --with-daemon \ + && make -j$(nproc) \ + && make install \ + && strip ${BITCOIN_PREFIX}/bin/bitcoin-cli \ + && strip ${BITCOIN_PREFIX}/bin/bitcoind \ + && rm -f ${BITCOIN_PREFIX}/lib/libbitcoinconsensus.a \ + && rm -f ${BITCOIN_PREFIX}/lib/libbitcoinconsensus.so.0.0.0 \ + && rm -f ${BITCOIN_PREFIX}/bin/bitcoin-tx \ + && rm -f ${BITCOIN_PREFIX}/bin/bitcoin-wallet + +# verify ccache is working, specify --progress=plain to see output in build logs +RUN ccache -s + +# Final clean stage +# ----------------- +# +# EXTRA_RUNTIME_PACKAGES is used for version specific runtime deps +FROM alpine:${ALPINE_VERSION} +ARG EXTRA_RUNTIME_PACKAGES="" +ARG UID=100 +ARG GID=101 +ARG BITCOIN_VERSION +ENV BITCOIN_DATA=/root/.bitcoin +ENV BITCOIN_PREFIX=/opt/bitcoin +ENV PATH=${BITCOIN_PREFIX}/bin:$PATH +ENV BITCOIN_VERSION=${BITCOIN_VERSION} +LABEL maintainer.0="bitcoindevproject" + +RUN addgroup -g ${GID} -S bitcoin +RUN adduser -u ${UID} -S bitcoin -G bitcoin +RUN --mount=type=cache,target=/var/cache/apk sed -i 's/http\:\/\/dl-cdn.alpinelinux.org/https\:\/\/alpine.global.ssl.fastly.net/g' /etc/apk/repositories \ + && apk --no-cache add \ + bash \ + boost-filesystem \ + boost-system \ + boost-thread \ + libevent \ + libzmq \ + shadow \ + sqlite-dev \ + su-exec \ + ${EXTRA_RUNTIME_PACKAGES} + +COPY --from=build /opt/bitcoin /usr/local +COPY entrypoint.sh /entrypoint.sh + +VOLUME ["/home/bitcoin/.bitcoin"] +EXPOSE 8332 8333 18332 18333 18443 18444 38333 38332 + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["bitcoind"] + diff --git a/resources/images/bitcoin/insecure/addrman_v0.16.1.patch b/resources/images/bitcoin/insecure/addrman_v0.16.1.patch new file mode 100644 index 000000000..bc8a73bf8 --- /dev/null +++ b/resources/images/bitcoin/insecure/addrman_v0.16.1.patch @@ -0,0 +1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index 4fbfa2b5c85..0d8d5751268 100644 +--- 
a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -455,6 +455,8 @@ std::vector CNetAddr::GetGroup() const + vchRet.push_back(NET_IPV4); + vchRet.push_back(GetByte(3) ^ 0xFF); + vchRet.push_back(GetByte(2) ^ 0xFF); ++ vchRet.push_back(GetByte(1) ^ 0xFF); ++ vchRet.push_back(GetByte(0) ^ 0xFF); + return vchRet; + } + else if (IsTor()) diff --git a/resources/images/bitcoin/insecure/addrman_v0.17.0.patch b/resources/images/bitcoin/insecure/addrman_v0.17.0.patch new file mode 100644 index 000000000..265a30b6e --- /dev/null +++ b/resources/images/bitcoin/insecure/addrman_v0.17.0.patch @@ -0,0 +1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index 778c2700f95..03d97bcd673 100644 +--- a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -354,6 +354,8 @@ std::vector CNetAddr::GetGroup() const + vchRet.push_back(NET_IPV4); + vchRet.push_back(GetByte(3) ^ 0xFF); + vchRet.push_back(GetByte(2) ^ 0xFF); ++ vchRet.push_back(GetByte(1) ^ 0xFF); ++ vchRet.push_back(GetByte(0) ^ 0xFF); + return vchRet; + } + else if (IsTor()) diff --git a/resources/images/bitcoin/insecure/addrman_v0.19.2.patch b/resources/images/bitcoin/insecure/addrman_v0.19.2.patch new file mode 100644 index 000000000..bc8a73bf8 --- /dev/null +++ b/resources/images/bitcoin/insecure/addrman_v0.19.2.patch @@ -0,0 +1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index 4fbfa2b5c85..0d8d5751268 100644 +--- a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -455,6 +455,8 @@ std::vector CNetAddr::GetGroup() const + vchRet.push_back(NET_IPV4); + vchRet.push_back(GetByte(3) ^ 0xFF); + vchRet.push_back(GetByte(2) ^ 0xFF); ++ vchRet.push_back(GetByte(1) ^ 0xFF); ++ vchRet.push_back(GetByte(0) ^ 0xFF); + return vchRet; + } + else if (IsTor()) diff --git a/resources/images/bitcoin/insecure/addrman_v0.20.0.patch b/resources/images/bitcoin/insecure/addrman_v0.20.0.patch new file mode 100644 index 000000000..db638357c --- /dev/null +++ b/resources/images/bitcoin/insecure/addrman_v0.20.0.patch @@ -0,0 
+1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index 228caf74a93..a6728321d1d 100644 +--- a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -517,6 +517,8 @@ std::vector CNetAddr::GetGroup(const std::vector &asmap) co + uint32_t ipv4 = GetLinkedIPv4(); + vchRet.push_back((ipv4 >> 24) & 0xFF); + vchRet.push_back((ipv4 >> 16) & 0xFF); ++ vchRet.push_back((ipv4 >> 8) & 0xFF); ++ vchRet.push_back(ipv4 & 0xFF); + return vchRet; + } else if (IsTor()) { + nStartByte = 6; diff --git a/resources/images/bitcoin/insecure/addrman_v0.21.1.patch b/resources/images/bitcoin/insecure/addrman_v0.21.1.patch new file mode 100644 index 000000000..c85679b16 --- /dev/null +++ b/resources/images/bitcoin/insecure/addrman_v0.21.1.patch @@ -0,0 +1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index e0d4638dd6a..a84b3980f30 100644 +--- a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -742,6 +742,8 @@ std::vector CNetAddr::GetGroup(const std::vector &asmap) co + uint32_t ipv4 = GetLinkedIPv4(); + vchRet.push_back((ipv4 >> 24) & 0xFF); + vchRet.push_back((ipv4 >> 16) & 0xFF); ++ vchRet.push_back((ipv4 >> 8) & 0xFF); ++ vchRet.push_back(ipv4 & 0xFF); + return vchRet; + } else if (IsTor() || IsI2P() || IsCJDNS()) { + nBits = 4; diff --git a/resources/images/bitcoin/insecure/build.md b/resources/images/bitcoin/insecure/build.md new file mode 100644 index 000000000..68502d51b --- /dev/null +++ b/resources/images/bitcoin/insecure/build.md @@ -0,0 +1,89 @@ +# Historic CVE images + +These images are for old versions of Bitcoin Core with known CVEs. These images have signet backported +and the addrman and isroutable patches applied. + +# Build incantations + +Run from top-level of project + +## v0.21.1 + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." 
\ + --build-arg ALPINE_VERSION="3.17" \ + --build-arg BITCOIN_VERSION="0.21.1" \ + --build-arg EXTRA_PACKAGES="sqlite-dev" \ + --build-arg EXTRA_RUNTIME_PACKAGES="boost-filesystem sqlite-dev" \ + --build-arg REPO="josibake/bitcoin" \ + --build-arg COMMIT_SHA="e0a22f14c15b4877ef6221f9ee2dfe510092d734" \ + --tag bitcoindevproject/bitcoin:0.21.1 \ + resources/images/bitcoin/insecure +``` + +## v0.20.0 + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." \ + --build-arg ALPINE_VERSION="3.12.12" \ + --build-arg BITCOIN_VERSION="0.20.0" \ + --build-arg EXTRA_PACKAGES="sqlite-dev miniupnpc" \ + --build-arg EXTRA_RUNTIME_PACKAGES="boost-filesystem sqlite-dev" \ + --build-arg REPO="josibake/bitcoin" \ + --build-arg COMMIT_SHA="0bbff8feff0acf1693dfe41184d9a4fd52001d3f" \ + --tag bitcoindevproject/bitcoin:0.20.0 \ + resources/images/bitcoin/insecure +``` + +## v0.19.2 + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." \ + --build-arg ALPINE_VERSION="3.12.12" \ + --build-arg BITCOIN_VERSION="0.19.2" \ + --build-arg EXTRA_PACKAGES="sqlite-dev libressl-dev" \ + --build-arg EXTRA_RUNTIME_PACKAGES="boost-chrono boost-filesystem libressl sqlite-dev" \ + --build-arg REPO="josibake/bitcoin" \ + --build-arg COMMIT_SHA="e20f83eb5466a7d68227af14a9d0cf66fb520ffc" \ + --tag bitcoindevproject/bitcoin:0.19.2 \ + resources/images/bitcoin/insecure +``` + +## v0.17.0 + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." 
\ + --build-arg ALPINE_VERSION="3.9" \ + --build-arg BITCOIN_VERSION="0.17.0" \ + --build-arg EXTRA_PACKAGES="protobuf-dev libressl-dev" \ + --build-arg EXTRA_RUNTIME_PACKAGES="boost boost-program_options libressl sqlite-dev" \ + --build-arg REPO="josibake/bitcoin" \ + --build-arg COMMIT_SHA="f6b2db49a707e7ad433d958aee25ce561c66521a" \ + --tag bitcoindevproject/bitcoin:0.17.0 \ + resources/images/bitcoin/insecure +``` + +## v0.16.1 + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." \ + --build-arg ALPINE_VERSION="3.7" \ + --build-arg BITCOIN_VERSION="0.16.1" \ + --build-arg EXTRA_PACKAGES="protobuf-dev libressl-dev" \ + --build-arg PRE_CONFIGURE_COMMANDS="sed -i '/AC_PREREQ/a\AR_FLAGS=cr' src/univalue/configure.ac && sed -i '/AX_PROG_CC_FOR_BUILD/a\AR_FLAGS=cr' src/secp256k1/configure.ac && sed -i 's:sys/fcntl.h:fcntl.h:' src/compat.h" \ + --build-arg EXTRA_RUNTIME_PACKAGES="boost boost-program_options libressl" \ + --build-arg REPO="josibake/bitcoin" \ + --build-arg COMMIT_SHA="dc94c00e58c60412a4e1a540abdf0b56093179e8" \ + --tag bitcoindevproject/bitcoin:0.16.1 \ + resources/images/bitcoin/insecure +``` diff --git a/resources/images/bitcoin/insecure/entrypoint.sh b/resources/images/bitcoin/insecure/entrypoint.sh new file mode 100755 index 000000000..56d55b5d6 --- /dev/null +++ b/resources/images/bitcoin/insecure/entrypoint.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -e + +if [ "$(echo "$1" | cut -c1)" = "-" ]; then + echo "$0: assuming arguments for bitcoind" + + set -- bitcoind "$@" +fi + +if [ "$(echo "$1" | cut -c1)" = "-" ] || [ "$1" = "bitcoind" ]; then + mkdir -p "$BITCOIN_DATA" + chmod 700 "$BITCOIN_DATA" + echo "$0: setting data directory to $BITCOIN_DATA" + set -- "$@" -datadir="$BITCOIN_DATA" +fi + +# Incorporate additional arguments for bitcoind if BITCOIN_ARGS is set. 
+if [ -n "$BITCOIN_ARGS" ]; then + IFS=' ' read -ra ARG_ARRAY <<< "$BITCOIN_ARGS" + set -- "$@" "${ARG_ARRAY[@]}" +fi + +# Conditionally add -printtoconsole for Bitcoin version 0.16.1 +if [ "${BITCOIN_VERSION}" == "0.16.1" ]; then + exec "$@" -printtoconsole +else + exec "$@" +fi diff --git a/resources/images/bitcoin/insecure/isroutable_v0.16.1.patch b/resources/images/bitcoin/insecure/isroutable_v0.16.1.patch new file mode 100644 index 000000000..0d9d4ad54 --- /dev/null +++ b/resources/images/bitcoin/insecure/isroutable_v0.16.1.patch @@ -0,0 +1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index 81f72879f40..8aae93a6b68 100644 +--- a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -231,7 +231,7 @@ bool CNetAddr::IsValid() const + + bool CNetAddr::IsRoutable() const + { +- return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsLocal() || IsInternal()); ++ return true; + } + + bool CNetAddr::IsInternal() const diff --git a/resources/images/bitcoin/insecure/isroutable_v0.17.0.patch b/resources/images/bitcoin/insecure/isroutable_v0.17.0.patch new file mode 100644 index 000000000..f8d1fef08 --- /dev/null +++ b/resources/images/bitcoin/insecure/isroutable_v0.17.0.patch @@ -0,0 +1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index 778c2700f95..9655b01efba 100644 +--- a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -226,7 +226,7 @@ bool CNetAddr::IsValid() const + + bool CNetAddr::IsRoutable() const + { +- return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsLocal() || IsInternal()); ++ return true; + } + + bool CNetAddr::IsInternal() const diff --git a/resources/images/bitcoin/insecure/isroutable_v0.19.2.patch b/resources/images/bitcoin/insecure/isroutable_v0.19.2.patch new file mode 100644 index 000000000..295569cd2 --- /dev/null 
+++ b/resources/images/bitcoin/insecure/isroutable_v0.19.2.patch @@ -0,0 +1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index 228caf74a93..d1290d4de49 100644 +--- a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -300,7 +300,7 @@ bool CNetAddr::IsValid() const + */ + bool CNetAddr::IsRoutable() const + { +- return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsRFC7343() || IsLocal() || IsInternal()); ++ return true; + } + + /** diff --git a/resources/images/bitcoin/insecure/isroutable_v0.20.0.patch b/resources/images/bitcoin/insecure/isroutable_v0.20.0.patch new file mode 100644 index 000000000..295569cd2 --- /dev/null +++ b/resources/images/bitcoin/insecure/isroutable_v0.20.0.patch @@ -0,0 +1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index 228caf74a93..d1290d4de49 100644 +--- a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -300,7 +300,7 @@ bool CNetAddr::IsValid() const + */ + bool CNetAddr::IsRoutable() const + { +- return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsRFC7343() || IsLocal() || IsInternal()); ++ return true; + } + + /** diff --git a/resources/images/bitcoin/insecure/isroutable_v0.21.1.patch b/resources/images/bitcoin/insecure/isroutable_v0.21.1.patch new file mode 100644 index 000000000..ab8cbb7a5 --- /dev/null +++ b/resources/images/bitcoin/insecure/isroutable_v0.21.1.patch @@ -0,0 +1,13 @@ +diff --git a/src/netaddress.cpp b/src/netaddress.cpp +index e0d4638dd6a..2615e076b50 100644 +--- a/src/netaddress.cpp ++++ b/src/netaddress.cpp +@@ -465,7 +465,7 @@ bool CNetAddr::IsValid() const + */ + bool CNetAddr::IsRoutable() const + { +- return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsRFC7343() || 
IsLocal() || IsInternal()); ++ return true; + } + + /** diff --git a/src/warnet/image.py b/src/warnet/image.py index 6965838b6..c43323f15 100644 --- a/src/warnet/image.py +++ b/src/warnet/image.py @@ -7,22 +7,27 @@ @click.group(name="image") def image(): - """Build a a custom Warnet Bitcoin Core image""" + """Build a custom Warnet Bitcoin Core image""" @image.command() @click.option("--repo", required=True, type=str) @click.option("--commit-sha", required=True, type=str) @click.option("--registry", required=True, type=str) -@click.option("--tag", required=True, type=str) +@click.option( + "--tags", + required=True, + type=str, + help="Comma-separated list of full tags including image names", +) @click.option("--build-args", required=False, type=str) @click.option("--arches", required=False, type=str) @click.option("--action", required=False, type=str, default="load") -def build(repo, commit_sha, registry, tag, build_args, arches, action): +def build(repo, commit_sha, registry, tags, build_args, arches, action): """ - Build bitcoind and bitcoin-cli from at as :. + Build bitcoind and bitcoin-cli from at with the specified . Optionally deploy to remote registry using --action=push, otherwise image is loaded to local registry. 
""" - res = build_image(repo, commit_sha, registry, tag, build_args, arches, action) + res = build_image(repo, commit_sha, registry, tags, build_args, arches, action) if not res: sys.exit(1) diff --git a/src/warnet/image_build.py b/src/warnet/image_build.py index 98f502e23..67367afab 100644 --- a/src/warnet/image_build.py +++ b/src/warnet/image_build.py @@ -18,7 +18,7 @@ def build_image( repo: str, commit_sha: str, docker_registry: str, - tag: str, + tags: str, build_args: str, arches: str, action: str, @@ -42,7 +42,7 @@ def build_image( print(f"{repo=:}") print(f"{commit_sha=:}") print(f"{docker_registry=:}") - print(f"{tag=:}") + print(f"{tags=:}") print(f"{build_args=:}") print(f"{build_arches=:}") @@ -52,14 +52,13 @@ def build_image( use_builder_cmd = f"docker buildx use --builder {builder_name}" cleanup_builder_cmd = f"docker buildx rm {builder_name}" - if not run_command(create_builder_cmd): # noqa: SIM102 - # try to use existing - if not run_command(use_builder_cmd): - print(f"Could not create or use builder {builder_name} and create new builder") - return False + if not run_command(create_builder_cmd) and not run_command(use_builder_cmd): + print(f"Could not create or use builder {builder_name} and create new builder") + return False - image_full_name = f"{docker_registry}:{tag}" - print(f"{image_full_name=}") + tag_list = tags.split(",") + tag_args = " ".join([f"--tag {tag.strip()}" for tag in tag_list]) + print(f"{tag_args=}") platforms = ",".join([f"linux/{arch}" for arch in build_arches]) @@ -69,7 +68,7 @@ def build_image( f" --build-arg REPO={repo}" f" --build-arg COMMIT_SHA={commit_sha}" f" --build-arg BUILD_ARGS={build_args}" - f" --tag {image_full_name}" + f" {tag_args}" f" --file {dockerfile_path}" f" {dockerfile_path.parent}" f" --{action}" @@ -82,7 +81,6 @@ def build_image( except Exception as e: print(f"Error:\n{e}") finally: - # Tidy up the buildx builder if not run_command(cleanup_builder_cmd): print("Warning: Failed to remove the buildx 
builder.") else: diff --git a/src/warnet/network.py b/src/warnet/network.py index 92e5cda16..6c42e0b5d 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -63,6 +63,11 @@ def copy_scenario_defaults(directory: Path): ) +def is_connection_manual(peer): + # newer nodes specify a "connection_type" + return bool(peer.get("connection_type") == "manual" or peer.get("addnode") is True) + + def _connected(end="\n"): tanks = get_mission("tank") for tank in tanks: @@ -70,7 +75,7 @@ def _connected(end="\n"): peerinfo = json.loads(_rpc(tank.metadata.name, "getpeerinfo", "")) actual = 0 for peer in peerinfo: - if peer["connection_type"] == "manual": + if is_connection_manual(peer): actual += 1 expected = int(tank.metadata.annotations["init_peers"]) print(f"Tank {tank.metadata.name} peers expected: {expected}, actual: {actual}", end=end) diff --git a/test/data/bitcoin_conf/network.yaml b/test/data/bitcoin_conf/network.yaml index 0ceb8b059..f1d1a8124 100644 --- a/test/data/bitcoin_conf/network.yaml +++ b/test/data/bitcoin_conf/network.yaml @@ -1,35 +1,35 @@ nodes: - name: tank-0016 image: - tag: "v0.16.1" + tag: "0.16.1" connect: - tank-0017 config: uacomment=tank-0016 - name: tank-0017 image: - tag: "v0.17.0" + tag: "0.17.0" connect: - tank-0019 config: uacomment=tank-0017 - name: tank-0019 image: - tag: "v0.19.2" + tag: "0.19.2" connect: - tank-0020 config: uacomment=tank-0019 - name: tank-0020 image: - tag: "v0.20.0" + tag: "0.20.0" connect: - tank-0021 config: uacomment=tank-0020 - name: tank-0021 image: - tag: "v0.21.1" + tag: "0.21.1" connect: - tank-0024 config: diff --git a/test/data/signet/network.yaml b/test/data/signet/network.yaml index 5677909cf..eb422fddf 100644 --- a/test/data/signet/network.yaml +++ b/test/data/signet/network.yaml @@ -1,8 +1,52 @@ nodes: - name: miner - - name: tank-0001 + - name: tank-1 + image: + tag: "0.16.1" connect: - miner - - name: tank-0002 + - name: tank-2 + image: + tag: "0.17.0" connect: - - miner \ No newline at end of 
file + - miner + - name: tank-3 + image: + tag: "0.19.2" + connect: + - miner + - name: tank-4 + image: + tag: "0.20.0" + connect: + - miner + - name: tank-5 + image: + tag: "0.21.1" + connect: + - miner + - name: tank-6 + image: + tag: "24.2" + connect: + - miner + - name: tank-7 + image: + tag: "25.1" + connect: + - miner + - name: tank-8 + image: + tag: "26.0" + connect: + - miner + - name: tank-9 + image: + tag: "27.0" + connect: + - miner + - name: tank-10 + image: + tag: "0.16.1" + connect: + - miner diff --git a/test/data/signet/node-defaults.yaml b/test/data/signet/node-defaults.yaml index 4cf7b508b..43523c669 100644 --- a/test/data/signet/node-defaults.yaml +++ b/test/data/signet/node-defaults.yaml @@ -5,6 +5,9 @@ image: chain: signet +spec: + restartPolicy: Always + config: | debug=rpc debug=net diff --git a/test/signet_test.py b/test/signet_test.py index 5307ec9d9..68ec78713 100755 --- a/test/signet_test.py +++ b/test/signet_test.py @@ -38,8 +38,8 @@ def check_signet_miner(self): ) def block_one(): - for tank in ["tank-0001", "tank-0002"]: - height = int(self.warnet(f"bitcoin rpc {tank} getblockcount")) + for n in range(1, 11): + height = int(self.warnet(f"bitcoin rpc tank-{n} getblockcount")) if height != 1: return False return True diff --git a/test/test_base.py b/test/test_base.py index 3250ecd95..fbea5e79d 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -81,8 +81,11 @@ def output_reader(self, pipe, func): def wait_for_predicate(self, predicate, timeout=5 * 60, interval=5): self.log.debug(f"Waiting for predicate with timeout {timeout}s and interval {interval}s") while timeout > 0: - if predicate(): - return + try: + if predicate(): + return + except Exception: + pass sleep(interval) timeout -= interval import inspect From 1da25477bf9444ce0ccb7fcd79d79f0c92991924 Mon Sep 17 00:00:00 2001 From: josibake Date: Tue, 10 Sep 2024 11:31:20 +0000 Subject: [PATCH 227/710] Update apidocs on --- docs/warnet.md | 4 ++-- 1 file changed, 2 insertions(+), 
2 deletions(-) diff --git a/docs/warnet.md b/docs/warnet.md index 69f4be092..84e4cc46b 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -146,7 +146,7 @@ Create a cycle graph with nodes imported from lnd `describegraph` JSON file, ## Image ### `warnet image build` -Build bitcoind and bitcoin-cli from \ at \ as \:\. +Build bitcoind and bitcoin-cli from \ at \ with the specified \. Optionally deploy to remote registry using --action=push, otherwise image is loaded to local registry. options: @@ -155,7 +155,7 @@ options: | repo | String | yes | | | commit_sha | String | yes | | | registry | String | yes | | -| tag | String | yes | | +| tags | String | yes | | | build_args | String | | | | arches | String | | | | action | String | | "load" | From c1c70092d43f971e1cc15601e717990710f6f1bb Mon Sep 17 00:00:00 2001 From: josibake Date: Mon, 9 Sep 2024 17:18:58 +0200 Subject: [PATCH 228/710] create initContainer for loading snapshots if key is set, download and untar a blocks directory into the bitcoin datadir. its up to the user to make sure they are choosing an appropriate snapshot, e.g., if the snapshot was created from a signet chain then it can only be loaded into a signet node with the correct signet challenge. we can expand on this in the future to better export all of the relevant config details in the snapshot, but for now this just adds the ability to load files into pods before the pod starts. 
--- .../charts/bitcoincore/templates/pod.yaml | 22 +++++++++++++++++-- resources/charts/bitcoincore/values.yaml | 3 +++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index 0607b31d5..bde1c9ea6 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -23,6 +23,20 @@ spec: {{- end }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 4 }} + {{- if .Values.loadSnapshot.enabled }} + initContainers: + - name: download-blocks + image: alpine:latest + command: ["/bin/sh", "-c"] + args: + - | + apk add --no-cache curl + mkdir -p /root/.bitcoin/{{ .Values.chain }} + curl -L {{ .Values.loadSnapshot.url }} | tar -xz -C /root/.bitcoin/{{ .Values.chain }} + volumeMounts: + - name: data + mountPath: /root/.bitcoin + {{- end }} containers: - name: {{ .Chart.Name }} securityContext: @@ -54,6 +68,8 @@ spec: {{- with .Values.volumeMounts }} {{- toYaml . | nindent 8 }} {{- end }} + - mountPath: /root/.bitcoin + name: data - mountPath: /root/.bitcoin/bitcoin.conf name: config subPath: bitcoin.conf @@ -83,9 +99,11 @@ spec: {{- with .Values.volumes }} {{- toYaml . | nindent 4 }} {{- end }} - - configMap: + - name: data + emptyDir: {} + - name: config + configMap: name: {{ include "bitcoincore.fullname" . }} - name: config {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 4 }} diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 2c9962b8a..3b89023dc 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -134,3 +134,6 @@ baseConfig: | config: "" connect: [] +loadSnapshot: + enabled: false + url: "" From dba4a13588c441cb48372022d3989f07fc78f8c3 Mon Sep 17 00:00:00 2001 From: josibake Date: Tue, 10 Sep 2024 13:48:33 +0200 Subject: [PATCH 229/710] add k8s utility for exporting datadir tars a datadir based on a passed in filter (if no filter, zip everything) and copies the zip to the users host. --- src/warnet/k8s.py | 112 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/src/warnet/k8s.py b/src/warnet/k8s.py index 0cb223072..bbe32f4ac 100644 --- a/src/warnet/k8s.py +++ b/src/warnet/k8s.py @@ -1,4 +1,5 @@ import json +import os import tempfile from pathlib import Path @@ -6,6 +7,7 @@ from kubernetes import client, config from kubernetes.client.models import CoreV1Event, V1PodList from kubernetes.dynamic import DynamicClient +from kubernetes.stream import stream from .constants import DEFAULT_NAMESPACE, KUBECONFIG from .process import run_command, stream_command @@ -115,3 +117,113 @@ def get_default_namespace() -> str: command = "kubectl config view --minify -o jsonpath='{..namespace}'" kubectl_namespace = run_command(command) return kubectl_namespace if kubectl_namespace else DEFAULT_NAMESPACE + + +def snapshot_bitcoin_datadir( + pod_name: str, chain: str, local_path: str = "./", filters: list[str] = None +) -> None: + namespace = get_default_namespace() + sclient = get_static_client() + + try: + sclient.read_namespaced_pod(name=pod_name, namespace=namespace) + + # Filter down to the specified list of directories and files + # This allows for creating snapshots of only the relevant data, e.g., + # we may want to snapshot the blocks but not snapshot peers.dat or the node + # wallets. 
+ # + # TODO: never snapshot bitcoin.conf, as this is managed by the helm config + if filters: + find_command = [ + "find", + f"/root/.bitcoin/{chain}", + "(", + "-type", + "f", + "-o", + "-type", + "d", + ")", + "(", + "-name", + filters[0], + ] + for f in filters[1:]: + find_command.extend(["-o", "-name", f]) + find_command.append(")") + else: + # If no filters, get everything in the Bitcoin directory (TODO: exclude bitcoin.conf) + find_command = ["find", f"/root/.bitcoin/{chain}"] + + resp = stream( + sclient.connect_get_namespaced_pod_exec, + pod_name, + namespace, + command=find_command, + stderr=True, + stdin=False, + stdout=True, + tty=False, + _preload_content=False, + ) + + file_list = [] + while resp.is_open(): + resp.update(timeout=1) + if resp.peek_stdout(): + file_list.extend(resp.read_stdout().strip().split("\n")) + if resp.peek_stderr(): + print(f"Error: {resp.read_stderr()}") + + resp.close() + if not file_list: + print("No matching files or directories found.") + return + tar_command = ["tar", "-czf", "/tmp/bitcoin_data.tar.gz", "-C", f"/root/.bitcoin/{chain}"] + tar_command.extend( + [os.path.relpath(f, f"/root/.bitcoin/{chain}") for f in file_list if f.strip()] + ) + resp = stream( + sclient.connect_get_namespaced_pod_exec, + pod_name, + namespace, + command=tar_command, + stderr=True, + stdin=False, + stdout=True, + tty=False, + _preload_content=False, + ) + while resp.is_open(): + resp.update(timeout=1) + if resp.peek_stdout(): + print(f"Tar output: {resp.read_stdout()}") + if resp.peek_stderr(): + print(f"Error: {resp.read_stderr()}") + resp.close() + local_file_path = Path(local_path) / f"{pod_name}_bitcoin_data.tar.gz" + copy_command = ( + f"kubectl cp {namespace}/{pod_name}:/tmp/bitcoin_data.tar.gz {local_file_path}" + ) + if not stream_command(copy_command): + raise Exception("Failed to copy tar file from pod to local machine") + + print(f"Bitcoin data exported successfully to {local_file_path}") + cleanup_command = ["rm", 
"/tmp/bitcoin_data.tar.gz"] + stream( + sclient.connect_get_namespaced_pod_exec, + pod_name, + namespace, + command=cleanup_command, + stderr=True, + stdin=False, + stdout=True, + tty=False, + ) + + print("To untar and repopulate the directory, use the following command:") + print(f"tar -xzf {local_file_path} -C /path/to/destination/.bitcoin/{chain}") + + except Exception as e: + print(f"An error occurred: {str(e)}") From 7157a78f1b8e6fe28d0a8a16c8c9cc766c5fcd39 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 10 Sep 2024 13:36:36 -0400 Subject: [PATCH 230/710] bitcoincore: lazier probe checks to accomdate slower nodes / systems (#572) --- resources/charts/bitcoincore/values.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 2c9962b8a..301b329b3 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -78,16 +78,16 @@ livenessProbe: command: - pidof - bitcoind - failureThreshold: 3 + failureThreshold: 12 initialDelaySeconds: 5 periodSeconds: 5 successThreshold: 1 - timeoutSeconds: 1 + timeoutSeconds: 10 readinessProbe: - failureThreshold: 1 - periodSeconds: 1 + failureThreshold: 12 + periodSeconds: 5 successThreshold: 1 - timeoutSeconds: 1 + timeoutSeconds: 10 # Additional volumes on the output Deployment definition. 
From 71cbcec8f3d4ea4b7cf90308795ffd8bf0a506e5 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 10 Sep 2024 13:37:10 -0400 Subject: [PATCH 231/710] test: drastically reduce memory in "all versions" test (#558) --- test/data/bitcoin_conf/node-defaults.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/data/bitcoin_conf/node-defaults.yaml b/test/data/bitcoin_conf/node-defaults.yaml index 7e021cad1..a7ca1a373 100644 --- a/test/data/bitcoin_conf/node-defaults.yaml +++ b/test/data/bitcoin_conf/node-defaults.yaml @@ -2,3 +2,11 @@ image: repository: bitcoindevproject/bitcoin pullPolicy: IfNotPresent tag: "27.0" + +resources: + limits: + cpu: 4000m + memory: 500Mi + requests: + cpu: 100m + memory: 200Mi \ No newline at end of file From 2b8764812d437a1526983b30d0004dbb1c167829 Mon Sep 17 00:00:00 2001 From: Will Clark Date: Wed, 11 Sep 2024 12:30:01 +0100 Subject: [PATCH 232/710] bitcoin: save debug log to normal file (#579) --- resources/charts/bitcoincore/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 301b329b3..07e8a29db 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -117,7 +117,7 @@ prometheusMetricsPort: 9332 baseConfig: | checkmempool=0 - debuglogfile=0 + debuglogfile=debug.log logips=1 logtimemicros=1 capturemessages=1 From 866fe39e25a0cc72b63e1bcc9d67d89fc4e2f3cb Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 11 Sep 2024 11:30:23 +0000 Subject: [PATCH 233/710] Update apidocs on --- docs/warnet.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/warnet.md b/docs/warnet.md index 84e4cc46b..dc8bca233 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -138,16 +138,16 @@ options: ### `warnet graph import-json` Create a cycle graph with nodes imported from lnd `describegraph` JSON file, - and additionally include 7 extra random outbounds 
per node. Include lightning - channels and their policies as well. - Returns XML file as string with or without --outfile option. +and additionally include 7 extra random outbounds per node. Include lightning +channels and their policies as well. +Returns XML file as string with or without --outfile option. ## Image ### `warnet image build` Build bitcoind and bitcoin-cli from \ at \ with the specified \. - Optionally deploy to remote registry using --action=push, otherwise image is loaded to local registry. +Optionally deploy to remote registry using --action=push, otherwise image is loaded to local registry. options: | name | type | required | default | From a1dd9457f3ee70030e2b65c41d95467f80d320dc Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Wed, 11 Sep 2024 14:00:01 +0100 Subject: [PATCH 234/710] install logging as part of deploy (#566) --- .../6_node_bitcoin/node-defaults.yaml | 2 +- resources/scripts/install_logging.sh | 14 ------- src/warnet/deploy.py | 39 +++++++++++++++++++ src/warnet/network.py | 10 ----- test/data/logging/node-defaults.yaml | 1 + test/logging_test.py | 10 ++--- 6 files changed, 44 insertions(+), 32 deletions(-) delete mode 100755 resources/scripts/install_logging.sh diff --git a/resources/networks/6_node_bitcoin/node-defaults.yaml b/resources/networks/6_node_bitcoin/node-defaults.yaml index 8ecb0c79f..a1454d8a1 100644 --- a/resources/networks/6_node_bitcoin/node-defaults.yaml +++ b/resources/networks/6_node_bitcoin/node-defaults.yaml @@ -1,7 +1,7 @@ chain: regtest collectLogs: true -metricsExport: false +metricsExport: true resources: {} # We usually recommend not to specify default resources and to leave this as a conscious diff --git a/resources/scripts/install_logging.sh b/resources/scripts/install_logging.sh deleted file mode 100755 index 15fd1ab1e..000000000 --- a/resources/scripts/install_logging.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -e - -THIS_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd 
) -MANIFESTS_DIR=$( cd -- "$THIS_DIR/../manifests" &> /dev/null && pwd ) - -helm repo add grafana https://grafana.github.io/helm-charts -helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -helm repo update - -helm upgrade --install --namespace warnet-logging --create-namespace --values "${MANIFESTS_DIR}/loki_values.yaml" loki grafana/loki --version 5.47.2 -helm upgrade --install --namespace warnet-logging promtail grafana/promtail -helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false -helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values "${MANIFESTS_DIR}/grafana_values.yaml" \ No newline at end of file diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 5417da5ae..7154c7b0b 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -10,6 +10,7 @@ DEFAULTS_NAMESPACE_FILE, FORK_OBSERVER_CHART, HELM_COMMAND, + LOGGING_HELM_COMMANDS, NAMESPACES_CHART_LOCATION, NAMESPACES_FILE, NETWORK_FILE, @@ -41,6 +42,7 @@ def deploy(directory, debug): directory = Path(directory) if (directory / NETWORK_FILE).exists(): + deploy_logging_stack(directory, debug) deploy_network(directory, debug) deploy_fork_observer(directory, debug) elif (directory / NAMESPACES_FILE).exists(): @@ -51,6 +53,43 @@ def deploy(directory, debug): ) +def check_logging_required(directory: Path): + # check if node-defaults has logging or metrics enabled + default_file_path = directory / DEFAULTS_FILE + with default_file_path.open() as f: + default_file = yaml.safe_load(f) + if default_file.get("collectLogs", False): + return True + if default_file.get("metricsExport", False): + return True + + # check to see if individual nodes have logging enabled + network_file_path = directory / NETWORK_FILE + with network_file_path.open() as f: + network_file = yaml.safe_load(f) + nodes = 
network_file.get("nodes", []) + for node in nodes: + if node.get("collectLogs", False): + return True + if node.get("metricsExport", False): + return True + + return False + + +def deploy_logging_stack(directory: Path, debug: bool): + if not check_logging_required(directory): + return + + click.echo("Found collectLogs or metricsExport in network definition, Deploying logging stack") + + for command in LOGGING_HELM_COMMANDS: + if not stream_command(command): + print(f"Failed to run Helm command: {command}") + return False + return True + + def deploy_fork_observer(directory: Path, debug: bool): network_file_path = directory / NETWORK_FILE with network_file_path.open() as f: diff --git a/src/warnet/network.py b/src/warnet/network.py index 6c42e0b5d..fcb5a5ee7 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -6,20 +6,10 @@ from .bitcoin import _rpc from .constants import ( - LOGGING_HELM_COMMANDS, NETWORK_DIR, SCENARIOS_DIR, ) from .k8s import get_mission -from .process import stream_command - - -def setup_logging_helm() -> bool: - for command in LOGGING_HELM_COMMANDS: - if not stream_command(command): - print(f"Failed to run Helm command: {command}") - return False - return True def copy_defaults(directory: Path, target_subdir: str, source_path: Path, exclude_list: list[str]): diff --git a/test/data/logging/node-defaults.yaml b/test/data/logging/node-defaults.yaml index 7e021cad1..b914c8bba 100644 --- a/test/data/logging/node-defaults.yaml +++ b/test/data/logging/node-defaults.yaml @@ -1,3 +1,4 @@ +collectLogs: true image: repository: bitcoindevproject/bitcoin pullPolicy: IfNotPresent diff --git a/test/logging_test.py b/test/logging_test.py index 9b8c0f9f3..f5c7134d4 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -5,7 +5,7 @@ import threading from datetime import datetime from pathlib import Path -from subprocess import PIPE, Popen, run +from subprocess import PIPE, Popen import requests from test_base import TestBase @@ -22,8 +22,8 
@@ def __init__(self): def run_test(self): try: - self.start_logging() self.setup_network() + self.start_logging() self.test_prometheus_and_grafana() finally: if self.connect_logging_process is not None: @@ -32,10 +32,6 @@ def run_test(self): self.cleanup() def start_logging(self): - self.log.info("Running install_logging.sh") - # Block until complete - run([f"{self.scripts_dir / 'install_logging.sh'}"]) - self.log.info("Running connect_logging.sh") # Stays alive in background self.connect_logging_process = Popen( [f"{self.scripts_dir / 'connect_logging.sh'}"], @@ -51,13 +47,13 @@ def start_logging(self): ) self.connect_logging_thread.daemon = True self.connect_logging_thread.start() + self.wait_for_endpoint_ready() def setup_network(self): self.log.info("Setting up network") self.log.info(self.warnet(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running", timeout=10 * 60) self.wait_for_all_edges() - self.wait_for_endpoint_ready() def wait_for_endpoint_ready(self): self.log.info("Waiting for Grafana to be ready to receive API calls...") From c1d76ef52919b25ac1282bbe2cab93c621b1a686 Mon Sep 17 00:00:00 2001 From: hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:26:13 +0200 Subject: [PATCH 235/710] Stop relying on Bash being located at /bin/bash (#580) --- resources/images/bitcoin/insecure/entrypoint.sh | 2 +- resources/scripts/connect_logging.sh | 2 +- resources/scripts/setup_user_contexts.sh | 2 +- src/warnet/bitcoin.py | 2 +- src/warnet/process.py | 6 ++---- 5 files changed, 6 insertions(+), 8 deletions(-) diff --git a/resources/images/bitcoin/insecure/entrypoint.sh b/resources/images/bitcoin/insecure/entrypoint.sh index 56d55b5d6..c81d95aa9 100755 --- a/resources/images/bitcoin/insecure/entrypoint.sh +++ b/resources/images/bitcoin/insecure/entrypoint.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e if [ "$(echo "$1" | cut -c1)" = "-" ]; then diff --git a/resources/scripts/connect_logging.sh 
b/resources/scripts/connect_logging.sh index 20ea4a63e..f09ffeeaf 100755 --- a/resources/scripts/connect_logging.sh +++ b/resources/scripts/connect_logging.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # NO `set -e` here so an error does not exit the script POD_NAME=$(kubectl get pods --namespace warnet-logging -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=loki-grafana" -o jsonpath="{.items[0].metadata.name}") diff --git a/resources/scripts/setup_user_contexts.sh b/resources/scripts/setup_user_contexts.sh index 8d1a06eec..5a4b631b2 100755 --- a/resources/scripts/setup_user_contexts.sh +++ b/resources/scripts/setup_user_contexts.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Function to check if a command exists command_exists() { diff --git a/src/warnet/bitcoin.py b/src/warnet/bitcoin.py index 8be750613..c4a4aae53 100644 --- a/src/warnet/bitcoin.py +++ b/src/warnet/bitcoin.py @@ -189,7 +189,7 @@ def get_messages(tank_a: str, tank_b: str, chain: str): import subprocess blob = subprocess.run( - cmd, shell=True, capture_output=True, executable="/bin/bash" + cmd, shell=True, capture_output=True, executable="bash" ).stdout # Parse the blob diff --git a/src/warnet/process.py b/src/warnet/process.py index fd5b2a04b..6161774b1 100644 --- a/src/warnet/process.py +++ b/src/warnet/process.py @@ -2,9 +2,7 @@ def run_command(command: str) -> str: - result = subprocess.run( - command, shell=True, capture_output=True, text=True, executable="/bin/bash" - ) + result = subprocess.run(command, shell=True, capture_output=True, text=True, executable="bash") if result.returncode != 0: raise Exception(result.stderr) return result.stdout @@ -12,7 +10,7 @@ def run_command(command: str) -> str: def stream_command(command: str) -> bool: process = subprocess.Popen( - ["/bin/bash", "-c", command], + ["bash", "-c", command], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, From ec1680f47f37a753e8dda2a9e392a577aec27359 Mon Sep 17 00:00:00 2001 From: 
hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:31:37 +0200 Subject: [PATCH 236/710] Project.py messages clarification (#581) * project.py: create_warnet_project - Include directory path in warning * project.py: new_internal - Clarify that it is a directory Although it says files plural, I accidentally tried to edit the folder as if it was a file. --- src/warnet/project.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/warnet/project.py b/src/warnet/project.py index e3a2fed0e..33fb8ce47 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -255,7 +255,7 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: def create_warnet_project(directory: Path, check_empty: bool = False): """Common function to create a warnet project""" if check_empty and any(directory.iterdir()): - click.secho("Warning: Directory is not empty", fg="yellow") + click.secho(f"Warning: Directory {directory} is not empty", fg="yellow") if not click.confirm("Do you want to continue?", default=True): return @@ -308,7 +308,7 @@ def new_internal(directory: Path, from_init=False): if custom_network_path: click.echo( - f"\nEdit the network files found in {custom_network_path} before deployment if you want to customise the network." + f"\nEdit the network files found under {custom_network_path}/ before deployment if you want to customise the network." ) click.echo("\nWhen you're ready, run the following command to deploy this network:") click.echo(f" warnet deploy {custom_network_path}") From 278a15b1871ed7586814126aa04e4e6d6be58194 Mon Sep 17 00:00:00 2001 From: mplsgrant <58152638+mplsgrant@users.noreply.github.com> Date: Wed, 11 Sep 2024 09:14:54 -0500 Subject: [PATCH 237/710] provide users with appropriate access to resources (#543) Users need access to a number of resources so that they can run scenarios. 
--- resources/charts/namespaces/values.yaml | 26 +++++++++- .../namespace-defaults.yaml | 2 +- .../two_namespaces_two_users/namespaces.yaml | 52 ++++++++++++++++++- 3 files changed, 76 insertions(+), 4 deletions(-) diff --git a/resources/charts/namespaces/values.yaml b/resources/charts/namespaces/values.yaml index c28d2d0df..61f946879 100644 --- a/resources/charts/namespaces/values.yaml +++ b/resources/charts/namespaces/values.yaml @@ -9,8 +9,32 @@ roles: - apiGroups: [""] resources: ["pods"] verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] + verbs: ["get"] + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get"] - name: pod-manager rules: - apiGroups: [""] resources: ["pods"] - verbs: ["get", "list", "watch", "create", "update", "delete"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get"] \ No newline at end of file diff --git a/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml b/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml index c28d2d0df..91ac2fc67 100644 --- a/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml +++ b/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml @@ -12,5 +12,5 @@ roles: - name: pod-manager rules: - apiGroups: [""] - resources: ["pods"] + resources: ["pods", "configmaps"] verbs: ["get", "list", "watch", "create", "update", "delete"] diff --git 
a/resources/namespaces/two_namespaces_two_users/namespaces.yaml b/resources/namespaces/two_namespaces_two_users/namespaces.yaml index 03b31696a..4172657b8 100644 --- a/resources/namespaces/two_namespaces_two_users/namespaces.yaml +++ b/resources/namespaces/two_namespaces_two_users/namespaces.yaml @@ -14,11 +14,35 @@ namespaces: - apiGroups: [""] resources: ["pods"] verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] + verbs: ["get"] + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get"] - name: pod-manager rules: - apiGroups: [""] resources: ["pods"] - verbs: ["get", "list", "watch", "create", "update", "delete"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get"] - name: warnet-blue-team users: - name: mallory @@ -34,8 +58,32 @@ namespaces: - apiGroups: [""] resources: ["pods"] verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] + verbs: ["get"] + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get"] - name: pod-manager rules: - apiGroups: [""] resources: ["pods"] - verbs: ["get", "list", "watch", "create", "update", "delete"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["pods/log", 
"pods/exec", "pods/attach", "pods/portforward"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get"] From b1a3fcfb9bea95d83075dbb69e3596c20810248e Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Wed, 11 Sep 2024 15:23:35 +0100 Subject: [PATCH 238/710] Updated install docs with guidance about testing minikube (#574) --- README.md | 2 +- docs/contributing.md | 20 ++++++++ docs/install.md | 107 +++++++++++++++++++++---------------------- docs/quickstart.md | 57 ----------------------- 4 files changed, 74 insertions(+), 112 deletions(-) create mode 100644 docs/contributing.md delete mode 100644 docs/quickstart.md diff --git a/README.md b/README.md index 6e050e7c5..7d0aef39c 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,6 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. ## Documentation - [Installation](/docs/install.md) -- [Quick Start](/docs/quickstart.md) - [CLI Commands](/docs/warnet.md) - [Scenarios](/docs/scenarios.md) - [Monitoring](/docs/logging_monitoring.md) @@ -24,5 +23,6 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. 
- [Scaling](/docs/scaling.md) - [Connecting to local nodes](https://github.com/bitcoin-dev-project/warnet/blob/main/docs/) - [Understanding network configuration](/docs/config.md) +- [Contributing](/docs/contributing.md) ![warnet-art](https://raw.githubusercontent.com/bitcoin-dev-project/warnet/main/docs/machines.webp) diff --git a/docs/contributing.md b/docs/contributing.md new file mode 100644 index 000000000..8d5671ce7 --- /dev/null +++ b/docs/contributing.md @@ -0,0 +1,20 @@ +# Contributing / Local Warnet Development + +## Download the code repository + +```bash +git clone https://github.com/bitcoin-dev-project/warnet +cd warnet +``` + +## Recommended: use a virtual Python environment such as `venv` + +```bash +python3 -m venv .venv # Use alternative venv manager if desired +source .venv/bin/activate +``` + +```bash +pip install --upgrade pip +pip install -e . +``` \ No newline at end of file diff --git a/docs/install.md b/docs/install.md index e6c4bcdfd..c7ef14c69 100644 --- a/docs/install.md +++ b/docs/install.md @@ -1,93 +1,92 @@ -# Install Warnet +# Installing Warnet -Warnet requires Kubernetes in order to run the network. Kubernetes can be run -remotely or locally (with minikube or Docker Desktop). `kubectl` must be run -locally to administer the network. +Warnet requires Kubernetes (k8s) and helm in order to run the network. Kubernetes can be run remotely or locally (with minikube or Docker Desktop). `kubectl` and `helm` must be run locally to administer the network. ## Dependencies -### Kubernetes +### Remote (cloud) cluster -Install [`kubectl`](https://kubernetes.io/docs/setup/) (or equivalent) and -configure your cluster. This can be done locally with `minikube` (or Docker Desktop) -or using a managed cluster. +The only two dependencies of Warnet are `helm` and `kubectl` configured to talk to your cloud cluster. 
-#### Docker engine with minikube +### Running Warnet Locally -If using Minikube to run a smaller-sized local cluster, you will require docker engine. -To install docker engine, see: https://docs.docker.com/engine/install/ +If the number of nodes you are running can run on one machine (think a dozen or so) then Warnet can happily run on a local Kubernetes. Two supported k8s implementations are Minikube and K8s as part of Docker Desktop. -e.g. For Ubuntu: +#### Docker Desktop -```bash -# First uninstall any old versions -for pkg in docker.io docker-doc podman-docker containerd runc; do sudo apt-get remove $pkg; done - -# Add Docker's official GPG key: -sudo apt-get update -sudo apt-get install ca-certificates curl gnupg -sudo install -m 0755 -d /etc/apt/keyrings -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -sudo chmod a+r /etc/apt/keyrings/docker.gpg - -# Add the repository to Apt sources: -echo \ - "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ - "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ - sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt-get update - -# Install the docker packages -sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin +[Docker desktop](https://www.docker.com/products/docker-desktop/) includes the docker engine itself and has an option to enable Kubernetes. Simply installing it and enabling Kubernetes should be enough. + +[Helm](https://helm.sh/docs/intro/install/) is also required to be installed. + +#### Minikube + +Minikube requires a backend to run on with the supported backend being Docker. So if installing Minikube, you may need to install docker first. 
Please see [Installing Docker](https://docs.docker.com/engine/install/) and [Installing Minkube](https://minikube.sigs.k8s.io/docs/start/). + +After installing Minikube don't forget to start it with: + +```shell +minikube start ``` -#### Using Docker +Minikube has a [guide](https://kubernetes.io/docs/tutorials/hello-minikube/) on getting started which could be useful to validate that your minikube is running correctly. -If you have never used Docker before you may need to take a few more steps to run the Docker daemon on your system. -The Docker daemon MUST be running before stating Warnet. +### Testing kubectl and helm + +The following commands should run on both local and remote clusters. Do not proceed unless kubectl and helm are working. + +```shell +helm repo add examples https://helm.github.io/examples +helm install hello examples/hello-world +helm list +kubectl get pods +helm uninstall hello +``` #### Managing Kubernetes cluster The use of a k8s cluster management tool is highly recommended. 
We like to use `k9s`: https://k9scli.io/ -##### Linux +## Install Warnet -- [Check Docker user/group permissions](https://stackoverflow.com/a/48957722/1653320) -- or [`chmod` the Docker UNIX socket](https://stackoverflow.com/a/51362528/1653320) +Either install warnet via pip, or clone the source and install: -## Install Warnet +### via pip -### Recommended: use a virtual Python environment such as `venv` +You can install warnet via `pip` into a virtual environment with ```bash -python3 -m venv .venv # Use alternative venv manager if desired +python3 -m venv .venv source .venv/bin/activate -``` - -```bash -pip install --upgrade pip pip install warnet ``` -## Contributing / Local Warnet Development +### via cloned source -### Download the code repository +You can install warnet from source into a virtual environment with ```bash -git clone https://github.com/bitcoin-dev-project/warnet +git clone https://github.com/bitcoin-dev-project/warnet.git cd warnet +python3 -m venv .venv +source .venv/bin/activate +pip install -e . ``` -### Recommended: use a virtual Python environment such as `venv` +## Running + +To get started first check you have all the necessary requirements: ```bash -python3 -m venv .venv # Use alternative venv manager if desired -source .venv/bin/activate +warnet setup ``` +Then create your first network: + ```bash -pip install --upgrade pip -pip install -e . 
-``` +# Create a new network in the current directory +warnet init +# Or in a directory of choice +warnet new +``` \ No newline at end of file diff --git a/docs/quickstart.md b/docs/quickstart.md deleted file mode 100644 index dcf3ca41c..000000000 --- a/docs/quickstart.md +++ /dev/null @@ -1,57 +0,0 @@ -# Quick run - -## Installation - -Either install warnet via pip, or clone the source and install: - -### via pip - -You can install warnet via `pip` into a virtual environment with - -```bash -python3 -m venv .venv -source .venv/bin/activate -pip install warnet -``` - -### via cloned source - -You can install warnet from source into a virtual environment with - -```bash -git clone https://github.com/bitcoin-dev-project/warnet.git -cd warnet -python3 -m venv .venv -source .venv/bin/activate -pip install -e . -``` - -## Running - -To get started first check you have all the necessary requirements: - -```bash -warnet setup -``` - -Then create your first network: - -```bash -# Create a new network in the current directory -warnet init - -# Or in a directory of choice -warnet new -``` - -Follow the guide to configure network variables. - -## fork-observer - -If you enabled [fork-observer](https://github.com/0xB10C/fork-observer), you must forward the port from the cluster to your local machine: - -```bash -kubectl port-forward fork-observer 2323 -``` - -And then the GUI can be accessed via `localhost:2323` in a web browser. 
From 20d514c063c10f89f7518abb0987388c688e9d1e Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Wed, 11 Sep 2024 15:35:49 +0100 Subject: [PATCH 239/710] moved doc/contributing.md to CONTRIBUTING.md --- docs/contributing.md => CONTRIBUTING.md | 0 README.md | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename docs/contributing.md => CONTRIBUTING.md (100%) diff --git a/docs/contributing.md b/CONTRIBUTING.md similarity index 100% rename from docs/contributing.md rename to CONTRIBUTING.md diff --git a/README.md b/README.md index 7d0aef39c..d1770eaeb 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,6 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. - [Scaling](/docs/scaling.md) - [Connecting to local nodes](https://github.com/bitcoin-dev-project/warnet/blob/main/docs/) - [Understanding network configuration](/docs/config.md) -- [Contributing](/docs/contributing.md) +- [Contributing](CONTRIBUTING.md) ![warnet-art](https://raw.githubusercontent.com/bitcoin-dev-project/warnet/main/docs/machines.webp) From 5ba11a106ca267c9f303d51fd56b077730cb1149 Mon Sep 17 00:00:00 2001 From: josibake Date: Tue, 10 Sep 2024 13:49:34 +0200 Subject: [PATCH 240/710] add snapshot cli command allow a user to snapshot a single tank (with a filter) or snapshot all tanks with a filter. 
this creates a zip that can then be loaded via the initContainer in the first commit --- src/warnet/control.py | 103 ++++++++++++++++++++++++++++++++++++++++++ src/warnet/main.py | 3 +- 2 files changed, 105 insertions(+), 1 deletion(-) diff --git a/src/warnet/control.py b/src/warnet/control.py index 203130f80..5c35b131a 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -1,5 +1,6 @@ import base64 import json +import os import subprocess import time from concurrent.futures import ThreadPoolExecutor, as_completed @@ -18,6 +19,7 @@ get_default_namespace, get_mission, get_pods, + snapshot_bitcoin_datadir, ) from .process import run_command, stream_command @@ -291,3 +293,104 @@ def logs(pod_name: str, follow: bool): print(f"Please consider waiting for the pod to become available. Encountered: {e}") else: pass # cancelled by user + + +@click.command() +@click.argument("tank_name", required=False) +@click.option("--all", "-a", "snapshot_all", is_flag=True, help="Snapshot all running tanks") +@click.option( + "--output", + "-o", + type=click.Path(), + default="./warnet-snapshots", + help="Output directory for snapshots", +) +@click.option( + "--filter", + "-f", + type=str, + help="Comma-separated list of directories and/or files to include in the snapshot", +) +def snapshot(tank_name, snapshot_all, output, filter): + """Create a snapshot of a tank's Bitcoin data or snapshot all tanks""" + tanks = get_mission("tank") + + if not tanks: + console.print("[bold red]No active tanks found.[/bold red]") + return + + # Create the output directory if it doesn't exist + os.makedirs(output, exist_ok=True) + + filter_list = [f.strip() for f in filter.split(",")] if filter else None + if snapshot_all: + snapshot_all_tanks(tanks, output, filter_list) + elif tank_name: + snapshot_single_tank(tank_name, tanks, output, filter_list) + else: + select_and_snapshot_tank(tanks, output, filter_list) + + +def find_tank_by_name(tanks, tank_name): + for tank in tanks: + if 
tank.metadata.name == tank_name: + return tank + return None + + +def snapshot_all_tanks(tanks, output_dir, filter_list): + with console.status("[bold yellow]Snapshotting all tanks...[/bold yellow]"): + for tank in tanks: + tank_name = tank.metadata.name + chain = tank.metadata.labels["chain"] + snapshot_tank(tank_name, chain, output_dir, filter_list) + console.print("[bold green]All tank snapshots completed.[/bold green]") + + +def snapshot_single_tank(tank_name, tanks, output_dir, filter_list): + tank = find_tank_by_name(tanks, tank_name) + if tank: + chain = tank.metadata.labels["chain"] + snapshot_tank(tank_name, chain, output_dir, filter_list) + else: + console.print(f"[bold red]No active tank found with name: {tank_name}[/bold red]") + + +def select_and_snapshot_tank(tanks, output_dir, filter_list): + table = Table(title="Active Tanks", show_header=True, header_style="bold magenta") + table.add_column("Number", style="cyan", justify="right") + table.add_column("Tank Name", style="green") + + for idx, tank in enumerate(tanks, 1): + table.add_row(str(idx), tank.metadata.name) + + console.print(table) + + choices = [str(i) for i in range(1, len(tanks) + 1)] + ["q"] + choice = Prompt.ask( + "[bold yellow]Enter the number of the tank to snapshot, or 'q' to quit[/bold yellow]", + choices=choices, + show_choices=False, + ) + + if choice == "q": + console.print("[bold blue]Operation cancelled.[/bold blue]") + return + + selected_tank = tanks[int(choice) - 1] + tank_name = selected_tank.metadata.name + chain = selected_tank.metadata.labels["chain"] + snapshot_tank(tank_name, chain, output_dir, filter_list) + + +def snapshot_tank(tank_name, chain, output_dir, filter_list): + try: + output_path = Path(output_dir).resolve() + snapshot_bitcoin_datadir(tank_name, chain, str(output_path), filter_list) + console.print( + f"[bold green]Successfully created snapshot for tank: {tank_name}[/bold green]" + ) + except Exception as e: + console.print( + f"[bold red]Failed to create 
snapshot for tank {tank_name}: {str(e)}[/bold red]" + ) diff --git a/src/warnet/main.py b/src/warnet/main.py index 28b933ef2..341d5757e 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -2,7 +2,7 @@ from .admin import admin from .bitcoin import bitcoin -from .control import down, logs, run, stop +from .control import down, logs, run, snapshot, stop from .deploy import deploy from .graph import create, graph from .image import image @@ -28,6 +28,7 @@ def cli(): cli.add_command(new) cli.add_command(run) cli.add_command(setup) +cli.add_command(snapshot) cli.add_command(status) cli.add_command(stop) cli.add_command(create) From 366465c99dae07d2e1c4880702b6a7ef6d2bd345 Mon Sep 17 00:00:00 2001 From: josibake Date: Wed, 11 Sep 2024 17:16:49 +0200 Subject: [PATCH 241/710] docs: add snapshot doc --- docs/snapshots.md | 94 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 docs/snapshots.md diff --git a/docs/snapshots.md b/docs/snapshots.md new file mode 100644 index 000000000..ab0001fbd --- /dev/null +++ b/docs/snapshots.md @@ -0,0 +1,94 @@ +# Creating and loading warnet snapshots + +The `snapshot` command allows users to create snapshots of Bitcoin data directories from active bitcoin nodes. These snapshots can be used for backup purposes, to recreate specific network states, or to quickly initialize new bitcoin nodes with existing data. + +## Usage Examples + +### Snapshot a Specific bitcoin node + +To create a snapshot of a specific bitcoin node: + +```bash +warnet snapshot my-node-name -o +``` + +This will create a snapshot of the bitcoin node named "my-node-name" in the ``. If a directory the directory does not exist, it will be created. If no directory is specified, snapshots will be placed in `./warnet-snapshots` by default. 
+ +### Snapshot all nodes + +To snapshot all running bitcoin nodes: + +```bash +warnet snapshot --all -o `` +``` + +### Use Filters + +In the previous examples, everything in the bitcoin datadir was included in the snapshot, e.g., peers.dat. But there maybe use cases where only certain directories are needed. For example, assuming you only want to save the chain up to that point, you can use the filter argument: + +```bash +warnet snapshot my-node-name --filter "chainstate,blocks" +``` + +This will create a snapshot containing only the 'blocks' and 'chainstate' directories. You would only need to snapshot this for a single node since the rest of the nodes will get this data via IBD when this snapshot is later loaded. A few other useful filters are detailed below: + +```bash +# snapshot the logs from all nodes +warnet snapshot --all -f debug.log -o ./node-logs + +# snapshot the chainstate and wallet from a mining node +# this is particularly userful for premining a signet chain that +# can be used later for starting a signet network +warnet snapshot mining-node -f "chainstate,blocks,wallets" + +# snapshot only the wallets from a node +warnet snapshot my-node -f wallets + +# snapshot a specific wallet +warnet snapshot my-node -f mining_wallet +``` + +## End-to-End Example + +Here's a step-by-step guide on how to create a snapshot, upload it, and configure Warnet to use this snapshot when deploying. This particular example is for creating a premined signet chain: + +1. Create a snapshot of the mining node: + ```bash + warnet snapshot miner --output /tmp/snapshots --filter "blocks,chainstate,wallets" + ``` + +2. The snapshot will be created as a tar.gz file in the specified output directory. The filename will be in the format `{node_name}_{chain}_bitcoin_data.tar.gz`, i.e., `miner_bitcoin_data.tar.gz`. + +3. Upload the snapshot to a location accessible by your Kubernetes cluster. 
This could be a cloud storage service like AWS S3, Google Cloud Storage, or a GitHub repository. If working in a warnet project directory, you can commit your snapshot in a `snapshots/` folder. + +4. Note the URL of the uploaded snapshot, e.g., `https://github.com/your-username/your-repo/raw/main/my-warnet-project/snapshots/miner_bitcoin_data.tar.gz` + +5. Update your Warnet configuration to use this snapshot. This involves modifying your `network.yaml` configuration file. Here's an example of how to configure the mining node to use the snapshot: + + ```yaml + nodes: + - name: miner + image: + tag: "27.0" + loadSnapshot: + enabled: true + url: "https://github.com/your-username/your-repo/raw/main/snapshots/miner_bitcoin_data.tar.gz" + # ... other nodes ... + ``` + +6. Deploy Warnet with the updated configuration: + ```bash + warnet deploy networks/your_cool_network/network.yaml + ``` + +7. Warnet will now use the uploaded snapshot to initialize the Bitcoin data directory when creating the "miner" node. In this particular example, the blocks will then be distibuted to the other nodes via IBD and the mining node can resume signet mining off the chaintip by loading the wallet from the snapshot: + ```bash + warnet bitcoin rpc miner loadwallet mining_wallet + ``` + +## Notes + +- Snapshots are specific to the chain (signet, regtest) of the bitcoin node they were created from. Ensure you're using snapshots with the correct network when deploying. +- Large snapshots may take considerable time to upload and download. Consider using filters to reduce snapshot size if you don't need the entire data directory. +- Ensure that your Kubernetes cluster has the necessary permissions to access the location where you've uploaded the snapshot. +- When using GitHub to host snapshots, make sure to use the "raw" URL of the file for direct download. 
From 6ac8f9f7f42c8505e5549310671e9211eca1c78f Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 11 Sep 2024 11:58:45 -0400 Subject: [PATCH 242/710] scenarios: add recon scenario (#577) * scenarios: add recon scenario * scenario: support signet in recon * scenarios: support signet in recon and test * lint --- resources/scenarios/reconnaissance.py | 90 +++++++++++++++++++++++++++ test/scenarios_test.py | 19 +++--- test/signet_test.py | 14 +++++ 3 files changed, 116 insertions(+), 7 deletions(-) create mode 100755 resources/scenarios/reconnaissance.py diff --git a/resources/scenarios/reconnaissance.py b/resources/scenarios/reconnaissance.py new file mode 100755 index 000000000..1440b119b --- /dev/null +++ b/resources/scenarios/reconnaissance.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 + +import socket + +# The base class exists inside the commander container when deployed, +# but requires a relative path inside the python source code for other functions. +try: + from commander import Commander +except ImportError: + from resources.scenarios.commander import Commander + +# The entire Bitcoin Core test_framework directory is available as a library +from test_framework.messages import MSG_TX, CInv, hash256, msg_getdata +from test_framework.p2p import MAGIC_BYTES, P2PInterface + + +# This message is provided to the user when they describe the scenario +def cli_help(): + return "Demonstrate network reconnaissance using a scenario and P2PInterface" + + +def get_signet_network_magic_from_node(node): + template = node.getblocktemplate({"rules": ["segwit", "signet"]}) + challenge = template["signet_challenge"] + challenge_bytes = bytes.fromhex(challenge) + data = len(challenge_bytes).to_bytes() + challenge_bytes + digest = hash256(data) + return digest[0:4] + + +# The actual scenario is a class like a Bitcoin Core functional test. +# Commander is a subclass of BitcoinTestFramework instide Warnet +# that allows to operate on containerized nodes instead of local nodes. 
+class Reconnaissance(Commander): + def set_test_params(self): + # This setting is ignored but still required as + # a sub-class of BitcoinTestFramework + self.num_nodes = 1 + + # Scenario entrypoint + def run_test(self): + self.log.info("Getting peer info") + + # Just like a typical Bitcoin Core functional test, this executes an + # RPC on a node in the network. The actual node at self.nodes[0] may + # be different depending on the user deploying the scenario. Users in + # Warnet may have different namepsace access but everyone should always + # have access to at least one node. + peerinfo = self.nodes[0].getpeerinfo() + for peer in peerinfo: + # You can print out the the scenario logs with `warnet logs` + # which have a list of all this node's peers' addresses and version + self.log.info(f"{peer['addr']} {peer['subver']}") + + # We pick a node on the network to attack + victim = peerinfo[0] + + # regtest or signet + chain = self.nodes[0].chain + + # The victim's address could be an explicit IP address + # OR a kubernetes hostname (use default chain p2p port) + if ":" in victim["addr"]: + dstaddr = victim["addr"].split(":")[0] + else: + dstaddr = socket.gethostbyname(victim["addr"]) + if chain == "regtest": + dstport = 18444 + if chain == "signet": + dstport = 38333 + MAGIC_BYTES["signet"] = get_signet_network_magic_from_node(self.nodes[0]) + + # Now we will use a python-based Bitcoin p2p node to send very specific, + # unusual or non-standard messages to a "victim" node. 
+ self.log.info(f"Attacking {dstaddr}:{dstport}") + attacker = P2PInterface() + attacker.peer_connect(dstaddr=dstaddr, dstport=dstport, net=chain, timeout_factor=1)() + attacker.wait_until(lambda: attacker.is_connected, check_connected=False) + + # Send a harmless network message we expect a response to and wait for it + # Ask for TX with a 0 hash + msg = msg_getdata() + msg.inv.append(CInv(t=MSG_TX, h=0)) + attacker.send_and_ping(msg) + attacker.wait_until(lambda: attacker.message_count["notfound"] > 0) + self.log.info(f"Got notfound message from {dstaddr}:{dstport}") + + +if __name__ == "__main__": + Reconnaissance().main() diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 8be7f4a14..0765c3ebf 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -31,6 +31,7 @@ def setup_network(self): def test_scenarios(self): self.run_and_check_miner_scenario_from_file() self.run_and_check_scenario_from_file() + self.check_regtest_recon() def scenario_running(self, scenario_name: str): """Check that we are only running a single scenario of the correct name""" @@ -40,15 +41,9 @@ def scenario_running(self, scenario_name: str): def run_and_check_scenario_from_file(self): scenario_file = "test/data/scenario_p2p_interface.py" - - def check_scenario_clean_exit(): - active = scenarios_active() - assert len(active) == 1 - return active[0]["status"] == "succeeded" - self.log.info(f"Running scenario from: {scenario_file}") self.warnet(f"run {scenario_file}") - self.wait_for_predicate(lambda: check_scenario_clean_exit()) + self.wait_for_predicate(self.check_scenario_clean_exit) def run_and_check_miner_scenario_from_file(self): scenario_file = "resources/scenarios/miner_std.py" @@ -59,6 +54,16 @@ def run_and_check_miner_scenario_from_file(self): self.wait_for_predicate(lambda: self.check_blocks(2, start=start)) self.stop_scenario() + def check_regtest_recon(self): + scenario_file = "resources/scenarios/reconnaissance.py" + self.log.info(f"Running scenario 
from file: {scenario_file}") + self.warnet(f"run {scenario_file}") + self.wait_for_predicate(self.check_scenario_clean_exit) + + def check_scenario_clean_exit(self): + active = scenarios_active() + return all(scenario["status"] == "succeeded" for scenario in active) + def check_blocks(self, target_blocks, start: int = 0): count = int(self.warnet("bitcoin rpc tank-0000 getblockcount")) self.log.debug(f"Current block count: {count}, target: {start + target_blocks}") diff --git a/test/signet_test.py b/test/signet_test.py index 68ec78713..f7ac74ee1 100755 --- a/test/signet_test.py +++ b/test/signet_test.py @@ -6,6 +6,8 @@ from test_base import TestBase +from warnet.status import _get_active_scenarios as scenarios_active + class SignetTest(TestBase): def __init__(self): @@ -19,6 +21,7 @@ def run_test(self): try: self.setup_network() self.check_signet_miner() + self.check_signet_recon() finally: self.cleanup() @@ -46,6 +49,17 @@ def block_one(): self.wait_for_predicate(block_one) + def check_signet_recon(self): + scenario_file = "resources/scenarios/reconnaissance.py" + self.log.info(f"Running scenario from file: {scenario_file}") + self.warnet(f"run {scenario_file}") + + def check_scenario_clean_exit(): + active = scenarios_active() + return all(scenario["status"] == "succeeded" for scenario in active) + + self.wait_for_predicate(check_scenario_clean_exit) + if __name__ == "__main__": test = SignetTest() From cf5e3632cf448e4f04ae5de92d786f6ea416fb7a Mon Sep 17 00:00:00 2001 From: Abubakar Sadiq Ismail Date: Wed, 11 Sep 2024 18:20:00 +0100 Subject: [PATCH 243/710] doc: link `Connecting to local nodes` page correctly (#584) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d1770eaeb..beeb264b1 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. 
- [Monitoring](/docs/logging_monitoring.md) - [Lightning Network](/docs/lightning.md) - [Scaling](/docs/scaling.md) -- [Connecting to local nodes](https://github.com/bitcoin-dev-project/warnet/blob/main/docs/) +- [Connecting to local nodes](/docs/connecting-local-nodes.md) - [Understanding network configuration](/docs/config.md) - [Contributing](CONTRIBUTING.md) From bc02ce6763dab8488a55392338a573c9ac8fa789 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 5 Sep 2024 09:51:47 +0100 Subject: [PATCH 244/710] add caddy charts --- resources/charts/caddy/.helmignore | 23 ++++ resources/charts/caddy/Chart.yaml | 24 ++++ resources/charts/caddy/templates/NOTES.txt | 1 + resources/charts/caddy/templates/_helpers.tpl | 57 ++++++++ .../charts/caddy/templates/configmap.yaml | 11 ++ resources/charts/caddy/templates/pod.yaml | 38 ++++++ resources/charts/caddy/templates/service.yaml | 16 +++ resources/charts/caddy/values.yaml | 123 ++++++++++++++++++ .../networks/6_node_bitcoin/network.yaml | 4 +- 9 files changed, 296 insertions(+), 1 deletion(-) create mode 100644 resources/charts/caddy/.helmignore create mode 100644 resources/charts/caddy/Chart.yaml create mode 100644 resources/charts/caddy/templates/NOTES.txt create mode 100644 resources/charts/caddy/templates/_helpers.tpl create mode 100644 resources/charts/caddy/templates/configmap.yaml create mode 100644 resources/charts/caddy/templates/pod.yaml create mode 100644 resources/charts/caddy/templates/service.yaml create mode 100644 resources/charts/caddy/values.yaml diff --git a/resources/charts/caddy/.helmignore b/resources/charts/caddy/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/resources/charts/caddy/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/resources/charts/caddy/Chart.yaml b/resources/charts/caddy/Chart.yaml new file mode 100644 index 000000000..4fbb87241 --- /dev/null +++ b/resources/charts/caddy/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: caddy-server +description: A Helm chart for Caddy + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: 0.1.0 diff --git a/resources/charts/caddy/templates/NOTES.txt b/resources/charts/caddy/templates/NOTES.txt new file mode 100644 index 000000000..9d2cc4cf0 --- /dev/null +++ b/resources/charts/caddy/templates/NOTES.txt @@ -0,0 +1 @@ +Caddy is serving your every need. 
diff --git a/resources/charts/caddy/templates/_helpers.tpl b/resources/charts/caddy/templates/_helpers.tpl new file mode 100644 index 000000000..7cfc3d479 --- /dev/null +++ b/resources/charts/caddy/templates/_helpers.tpl @@ -0,0 +1,57 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "caddy.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "caddy.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "caddy.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "caddy.labels" -}} +helm.sh/chart: {{ include "caddy.chart" . }} +{{ include "caddy.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "caddy.selectorLabels" -}} +app.kubernetes.io/name: {{ include "caddy.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "caddy.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "caddy.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/resources/charts/caddy/templates/configmap.yaml b/resources/charts/caddy/templates/configmap.yaml new file mode 100644 index 000000000..a80ac8ea9 --- /dev/null +++ b/resources/charts/caddy/templates/configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "caddy.fullname" . }} + labels: + {{- include "caddy.labels" . | nindent 4 }} +data: + Caddyfile: | + {{- .Values.caddyConfig | nindent 4 }} + index: | + {{- .Values.htmlConfig | nindent 4 }} diff --git a/resources/charts/caddy/templates/pod.yaml b/resources/charts/caddy/templates/pod.yaml new file mode 100644 index 000000000..6e034934a --- /dev/null +++ b/resources/charts/caddy/templates/pod.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "caddy.fullname" . }} + labels: + {{- include "caddy.labels" . | nindent 4 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + app: {{ include "caddy.fullname" . }} +spec: + restartPolicy: "{{ .Values.restartPolicy }}" + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 4 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 4 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 8 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: web + containerPort: {{ .Values.port }} + protocol: TCP + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 8 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 8 }} + resources: + {{- toYaml .Values.resources | nindent 8 }} + volumeMounts: + {{- toYaml .Values.volumeMounts | nindent 8 }} + volumes: + {{- toYaml .Values.volumes | nindent 4 }} diff --git a/resources/charts/caddy/templates/service.yaml b/resources/charts/caddy/templates/service.yaml new file mode 100644 index 000000000..a25c46946 --- /dev/null +++ b/resources/charts/caddy/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "caddy.fullname" . }} + labels: + {{- include "caddy.labels" . | nindent 4 }} + app: {{ include "caddy.fullname" . }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.port }} + targetPort: web + protocol: TCP + name: http + selector: + {{- include "caddy.selectorLabels" . | nindent 4 }} diff --git a/resources/charts/caddy/values.yaml b/resources/charts/caddy/values.yaml new file mode 100644 index 000000000..509ef9e37 --- /dev/null +++ b/resources/charts/caddy/values.yaml @@ -0,0 +1,123 @@ +# Default values for caddy. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +namespace: warnet + +restartPolicy: Always + +image: + repository: caddy + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "2.8.4" + +imagePullSecrets: [] + +nameOverride: "" + +fullnameOverride: "" + +podLabels: + app: "warnet" + mission: "proxy" + +podSecurityContext: {} +# fsGroup: 2000 + +securityContext: {} +# capabilities: +# drop: +# - ALL +# readOnlyRootFilesystem: true +# runAsNonRoot: true +# runAsUser: 1000 + +service: + type: ClusterIP + +resources: {} +# We usually recommend not to specify default resources and to leave this as a conscious +# choice for the user. This also increases chances charts run on environments with little +# resources, such as Minikube. If you do want to specify resources, uncomment the following +# lines, adjust them as necessary, and remove the curly braces after 'resources:'. +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +livenessProbe: + httpGet: + path: /live + port: 80 + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + +readinessProbe: + httpGet: + path: /ready + port: 80 + failureThreshold: 1 + periodSeconds: 1 + successThreshold: 1 + timeoutSeconds: 1 + +volumes: + - name: caddy-config + configMap: + name: caddy + items: + - key: Caddyfile + path: Caddyfile + - key: index + path: index + +volumeMounts: + - name: caddy-config + mountPath: /etc/caddy/Caddyfile + subPath: Caddyfile + - name: caddy-config + mountPath: /usr/share/caddy/index.html + subPath: index + +port: 80 + +caddyConfig: | + :80 { + respond /live 200 + respond /ready 200 + + root * /usr/share/caddy + file_server + + handle_path /fork-observer/* { + reverse_proxy fork-observer:2323 + } + + handle_path /grafana/* { + reverse_proxy grafana:3000 + } + + } + +htmlConfig: | + + + + + + Welcome + + +

Welcome to the Warnet dashboard

+

You can access the following services:

+ + + diff --git a/resources/networks/6_node_bitcoin/network.yaml b/resources/networks/6_node_bitcoin/network.yaml index 6103d8a9b..21b05875d 100644 --- a/resources/networks/6_node_bitcoin/network.yaml +++ b/resources/networks/6_node_bitcoin/network.yaml @@ -29,4 +29,6 @@ nodes: - tank-0006 - name: tank-0006 fork_observer: - enabled: false + enabled: true +caddy: + enabled: true From 1429a316b8c4f8523cddbfd96262e8d768c7b08a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 5 Sep 2024 09:52:14 +0100 Subject: [PATCH 245/710] deploy caddy --- src/warnet/constants.py | 3 +++ src/warnet/deploy.py | 21 +++++++++++++++++++++ src/warnet/network.py | 11 +++++++++++ 3 files changed, 35 insertions(+) diff --git a/src/warnet/constants.py b/src/warnet/constants.py index bdd9dce9d..aaabe5258 100644 --- a/src/warnet/constants.py +++ b/src/warnet/constants.py @@ -32,6 +32,9 @@ FORK_OBSERVER_CHART = str(CHARTS_DIR.joinpath("fork-observer")) COMMANDER_CHART = str(CHARTS_DIR.joinpath("commander")) NAMESPACES_CHART_LOCATION = CHARTS_DIR.joinpath("namespaces") +FORK_OBSERVER_CHART = str(files("resources.charts").joinpath("fork-observer")) +CADDY_CHART = str(files("resources.charts").joinpath("caddy")) + DEFAULT_NETWORK = Path("6_node_bitcoin") DEFAULT_NAMESPACES = Path("two_namespaces_two_users") diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 7154c7b0b..7ee919f9c 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -6,6 +6,7 @@ from .constants import ( BITCOIN_CHART_LOCATION, + CADDY_CHART, DEFAULTS_FILE, DEFAULTS_NAMESPACE_FILE, FORK_OBSERVER_CHART, @@ -45,6 +46,7 @@ def deploy(directory, debug): deploy_logging_stack(directory, debug) deploy_network(directory, debug) deploy_fork_observer(directory, debug) + deploy_caddy(directory, debug) elif (directory / NAMESPACES_FILE).exists(): deploy_namespaces(directory) else: @@ -90,6 +92,25 @@ def deploy_logging_stack(directory: Path, debug: bool): return True +def deploy_caddy(directory: Path, debug: bool): + 
network_file_path = directory / NETWORK_FILE + with network_file_path.open() as f: + network_file = yaml.safe_load(f) + + # Only start if configured in the network file + if not network_file.get("fork_observer", {}).get("enabled", False): + return + + namespace = get_default_namespace() + cmd = f"{HELM_COMMAND} 'caddy' {CADDY_CHART} --namespace {namespace}" + if debug: + cmd += " --debug" + + if not stream_command(cmd): + click.echo(f"Failed to run Helm command: {cmd}") + return + + def deploy_fork_observer(directory: Path, debug: bool): network_file_path = directory / NETWORK_FILE with network_file_path.open() as f: diff --git a/src/warnet/network.py b/src/warnet/network.py index fcb5a5ee7..c4950861c 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -6,6 +6,8 @@ from .bitcoin import _rpc from .constants import ( + CHARTS_DIR, + DEFAULT_NETWORK, NETWORK_DIR, SCENARIOS_DIR, ) @@ -41,6 +43,15 @@ def copy_network_defaults(directory: Path): NETWORK_DIR, ["node-defaults.yaml", "__pycache__", "__init__.py"], ) + # Copy caddy files to the network directory + networks_dir = directory / NETWORK_DIR.name + copy_caddy_files(networks_dir / DEFAULT_NETWORK) + + +def copy_caddy_files(directory: Path): + """Copy caddy files to the specified directory""" + copy_defaults(directory, "caddy", CHARTS_DIR.joinpath("caddy"), []) + print(f"Copied caddy files to {directory / 'caddy'}") def copy_scenario_defaults(directory: Path): From 258fb0aca67560d7cce46ccf75b41cac09def2aa Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 5 Sep 2024 11:34:21 +0100 Subject: [PATCH 246/710] auto port-forward and teardown --- src/warnet/control.py | 3 +++ src/warnet/deploy.py | 51 +++++++++++++++++++++++++++++++++++++++++-- src/warnet/k8s.py | 20 ++++++++++++++++- 3 files changed, 71 insertions(+), 3 deletions(-) diff --git a/src/warnet/control.py b/src/warnet/control.py index 5c35b131a..5faf30762 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -15,6 +15,7 @@ from 
rich.table import Table from .constants import COMMANDER_CHART +from .deploy import _port_stop_internal from .k8s import ( get_default_namespace, get_mission, @@ -162,6 +163,8 @@ def delete_pod(pod_name, namespace): for future in as_completed(futures): console.print(f"[yellow]{future.result()}[/yellow]") + # Shutdown any port forwarding + _port_stop_internal() console.print("[bold yellow]Teardown process initiated for all components.[/bold yellow]") console.print("[bold yellow]Note: Some processes may continue in the background.[/bold yellow]") console.print("[bold green]Warnet teardown process completed.[/bold green]") diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 7ee919f9c..33d4eaac4 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -1,3 +1,6 @@ +import os +import subprocess +import sys import tempfile from pathlib import Path @@ -16,7 +19,7 @@ NAMESPACES_FILE, NETWORK_FILE, ) -from .k8s import get_default_namespace +from .k8s import get_default_namespace, wait_for_caddy_ready from .process import stream_command @@ -102,7 +105,8 @@ def deploy_caddy(directory: Path, debug: bool): return namespace = get_default_namespace() - cmd = f"{HELM_COMMAND} 'caddy' {CADDY_CHART} --namespace {namespace}" + name = "caddy" + cmd = f"{HELM_COMMAND} {name} {CADDY_CHART} --namespace {namespace}" if debug: cmd += " --debug" @@ -110,6 +114,9 @@ def deploy_caddy(directory: Path, debug: bool): click.echo(f"Failed to run Helm command: {cmd}") return + wait_for_caddy_ready(name, namespace) + _port_start_internal() + def deploy_fork_observer(directory: Path, debug: bool): network_file_path = directory / NETWORK_FILE @@ -242,3 +249,43 @@ def deploy_namespaces(directory: Path): finally: if temp_override_file_path.exists(): temp_override_file_path.unlink() + + +def is_windows(): + return sys.platform.startswith("win") + + +def run_detached_process(command): + if is_windows(): + # For Windows, use CREATE_NEW_PROCESS_GROUP and DETACHED_PROCESS + 
subprocess.Popen( + command, + shell=True, + stdin=None, + stdout=None, + stderr=None, + close_fds=True, + creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.DETACHED_PROCESS, + ) + else: + # For Unix-like systems, use nohup and redirect output + command = f"nohup {command} > /dev/null 2>&1 &" + subprocess.Popen(command, shell=True, stdin=None, stdout=None, stderr=None, close_fds=True) + + print(f"Started detached process: {command}") + + +def _port_start_internal(): + command = "kubectl port-forward service/caddy 2019:80" + run_detached_process(command) + click.echo( + "Port forwarding on port 2019 started in the background. To access landing page visit localhost:2019." + ) + + +def _port_stop_internal(): + if is_windows(): + os.system("taskkill /F /IM kubectl.exe") + else: + os.system("pkill -f 'kubectl port-forward service/caddy-service 2019:80'") + click.echo("Port forwarding stopped.") diff --git a/src/warnet/k8s.py b/src/warnet/k8s.py index bbe32f4ac..62a918320 100644 --- a/src/warnet/k8s.py +++ b/src/warnet/k8s.py @@ -4,7 +4,7 @@ from pathlib import Path import yaml -from kubernetes import client, config +from kubernetes import client, config, watch from kubernetes.client.models import CoreV1Event, V1PodList from kubernetes.dynamic import DynamicClient from kubernetes.stream import stream @@ -227,3 +227,21 @@ def snapshot_bitcoin_datadir( except Exception as e: print(f"An error occurred: {str(e)}") + + +def wait_for_caddy_ready(name, namespace, timeout=300): + sclient = get_static_client() + w = watch.Watch() + for event in w.stream( + sclient.list_namespaced_pod, namespace=namespace, timeout_seconds=timeout + ): + pod = event["object"] + if pod.metadata.name == name and pod.status.phase == "Running": + conditions = pod.status.conditions or [] + ready_condition = next((c for c in conditions if c.type == "Ready"), None) + if ready_condition and ready_condition.status == "True": + print(f"Caddy pod {name} is ready.") + w.stop() + return True + 
print(f"Timeout waiting for Caddy pod {name} to be ready.") + return False From a28d262fdce857309bd78ada91f66c1283fad04a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 5 Sep 2024 11:35:53 +0100 Subject: [PATCH 247/710] document FO access via landing page --- docs/quickstart.md | 51 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 docs/quickstart.md diff --git a/docs/quickstart.md b/docs/quickstart.md new file mode 100644 index 000000000..8aea137b3 --- /dev/null +++ b/docs/quickstart.md @@ -0,0 +1,51 @@ +# Quick run + +## Installation + +Either install warnet via pip, or clone the source and install: + +### via pip + +You can install warnet via `pip` into a virtual environment with + +```bash +python3 -m venv .venv +source .venv/bin/activate +pip install warnet +``` + +### via cloned source + +You can install warnet from source into a virtual environment with + +```bash +git clone https://github.com/bitcoin-dev-project/warnet.git +cd warnet +python3 -m venv .venv +source .venv/bin/activate +pip install -e . +``` + +## Running + +To get started first check you have all the necessary requirements: + +```bash +warnet setup +``` + +Then create your first network: + +```bash +# Create a new network in the current directory +warnet init + +# Or in a directory of choice +warnet new +``` + +Follow the guide to configure network variables. + +## fork-observer + +If you enabled [fork-observer](https://github.com/0xB10C/fork-observer), it will be available from the landing page at `localhost:2019`. 
From 5df196d18344e0bf8becf1f7875dd76a3c85013c Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 5 Sep 2024 13:55:52 +0100 Subject: [PATCH 248/710] move fork-observer and caddy into warnet-logging namespace --- src/warnet/constants.py | 1 + src/warnet/control.py | 6 ++-- src/warnet/deploy.py | 24 +++++++++------- src/warnet/graph.py | 5 ++++ src/warnet/project.py | 62 ----------------------------------------- 5 files changed, 23 insertions(+), 75 deletions(-) diff --git a/src/warnet/constants.py b/src/warnet/constants.py index aaabe5258..3f837328d 100644 --- a/src/warnet/constants.py +++ b/src/warnet/constants.py @@ -12,6 +12,7 @@ ] DEFAULT_NAMESPACE = "warnet" +LOGGING_NAMESPACE = "warnet-logging" HELM_COMMAND = "helm upgrade --install --create-namespace" # Directories and files for non-python assets, e.g., helm charts, example scenarios, default configs diff --git a/src/warnet/control.py b/src/warnet/control.py index 5faf30762..aa5991a92 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -14,7 +14,7 @@ from rich.prompt import Confirm, Prompt from rich.table import Table -from .constants import COMMANDER_CHART +from .constants import COMMANDER_CHART, LOGGING_NAMESPACE from .deploy import _port_stop_internal from .k8s import ( get_default_namespace, @@ -130,7 +130,7 @@ def down(): """Bring down a running warnet quickly""" console.print("[bold yellow]Bringing down the warnet...[/bold yellow]") - namespaces = [get_default_namespace(), "warnet-logging"] + namespaces = [get_default_namespace(), LOGGING_NAMESPACE] def uninstall_release(namespace, release_name): cmd = f"helm uninstall {release_name} --namespace {namespace} --wait=false" @@ -164,7 +164,7 @@ def delete_pod(pod_name, namespace): console.print(f"[yellow]{future.result()}[/yellow]") # Shutdown any port forwarding - _port_stop_internal() + _port_stop_internal("caddy", namespaces[1]) console.print("[bold yellow]Teardown process initiated for all components.[/bold yellow]") console.print("[bold 
yellow]Note: Some processes may continue in the background.[/bold yellow]") console.print("[bold green]Warnet teardown process completed.[/bold green]") diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 33d4eaac4..3453db3b2 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -15,6 +15,7 @@ FORK_OBSERVER_CHART, HELM_COMMAND, LOGGING_HELM_COMMANDS, + LOGGING_NAMESPACE, NAMESPACES_CHART_LOCATION, NAMESPACES_FILE, NETWORK_FILE, @@ -100,12 +101,14 @@ def deploy_caddy(directory: Path, debug: bool): with network_file_path.open() as f: network_file = yaml.safe_load(f) + namespace = LOGGING_NAMESPACE + # TODO: get this from the helm chart + name = "caddy" + # Only start if configured in the network file - if not network_file.get("fork_observer", {}).get("enabled", False): + if not network_file.get(name, {}).get("enabled", False): return - namespace = get_default_namespace() - name = "caddy" cmd = f"{HELM_COMMAND} {name} {CADDY_CHART} --namespace {namespace}" if debug: cmd += " --debug" @@ -115,7 +118,7 @@ def deploy_caddy(directory: Path, debug: bool): return wait_for_caddy_ready(name, namespace) - _port_start_internal() + _port_start_internal(name, namespace) def deploy_fork_observer(directory: Path, debug: bool): @@ -127,7 +130,8 @@ def deploy_fork_observer(directory: Path, debug: bool): if not network_file.get("fork_observer", {}).get("enabled", False): return - namespace = get_default_namespace() + default_namespace = get_default_namespace() + namespace = LOGGING_NAMESPACE cmd = f"{HELM_COMMAND} 'fork-observer' {FORK_OBSERVER_CHART} --namespace {namespace}" if debug: cmd += " --debug" @@ -143,7 +147,7 @@ def deploy_fork_observer(directory: Path, debug: bool): id = {i} name = "{node_name}" description = "A node. Just A node." 
-rpc_host = "{node_name}" +rpc_host = "{node_name}.{default_namespace}.svc" rpc_port = 18443 rpc_user = "forkobserver" rpc_password = "tabconf2024" @@ -275,17 +279,17 @@ def run_detached_process(command): print(f"Started detached process: {command}") -def _port_start_internal(): - command = "kubectl port-forward service/caddy 2019:80" +def _port_start_internal(name, namespace): + command = f"kubectl port-forward -n {namespace} service/{name} 2019:80" run_detached_process(command) click.echo( "Port forwarding on port 2019 started in the background. To access landing page visit localhost:2019." ) -def _port_stop_internal(): +def _port_stop_internal(name, namespace): if is_windows(): os.system("taskkill /F /IM kubectl.exe") else: - os.system("pkill -f 'kubectl port-forward service/caddy-service 2019:80'") + os.system(f"pkill -f 'kubectl port-forward -n {namespace} service/{name} 2019:80'") click.echo("Port forwarding stopped.") diff --git a/src/warnet/graph.py b/src/warnet/graph.py index 34b98960d..2e1e31a3e 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -33,6 +33,7 @@ def custom_graph( datadir: Path, fork_observer: bool, fork_obs_query_interval: int, + caddy: bool, ): datadir.mkdir(parents=False, exist_ok=False) # Generate network.yaml @@ -68,6 +69,9 @@ def custom_graph( "enabled": fork_observer, "configQueryInterval": fork_obs_query_interval, } + network_yaml_data["caddy"] = { + "enabled": caddy, + } with open(os.path.join(datadir, "network.yaml"), "w") as f: yaml.dump(network_yaml_data, f, default_flow_style=False) @@ -180,6 +184,7 @@ def inquirer_create_network(project_path: Path): custom_network_path, fork_observer, fork_observer_query_interval, + fork_observer, # This enables caddy whenever fork-observer is enabled ) return custom_network_path diff --git a/src/warnet/project.py b/src/warnet/project.py index 33fb8ce47..4b54e2bd3 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -1,17 +1,14 @@ import os import platform -import random 
import subprocess import sys from dataclasses import dataclass from enum import Enum, auto -from importlib.resources import files from pathlib import Path from typing import Callable import click import inquirer -import yaml from .graph import inquirer_create_network from .network import copy_network_defaults, copy_scenario_defaults @@ -319,62 +316,3 @@ def init(): """Initialize a warnet project in the current directory""" current_dir = Path.cwd() new_internal(directory=current_dir, from_init=True) - - -def custom_graph( - num_nodes: int, - num_connections: int, - version: str, - datadir: Path, - fork_observer: bool, - fork_obs_query_interval: int, -): - datadir.mkdir(parents=False, exist_ok=False) - # Generate network.yaml - nodes = [] - connections = set() - - for i in range(num_nodes): - node = {"name": f"tank-{i:04d}", "connect": [], "image": {"tag": version}} - - # Add round-robin connection - next_node = (i + 1) % num_nodes - node["connect"].append(f"tank-{next_node:04d}") - connections.add((i, next_node)) - - # Add random connections - available_nodes = list(range(num_nodes)) - available_nodes.remove(i) - if next_node in available_nodes: - available_nodes.remove(next_node) - - for _ in range(min(num_connections - 1, len(available_nodes))): - random_node = random.choice(available_nodes) - # Avoid circular loops of A -> B -> A - if (random_node, i) not in connections: - node["connect"].append(f"tank-{random_node:04d}") - connections.add((i, random_node)) - available_nodes.remove(random_node) - - nodes.append(node) - - network_yaml_data = {"nodes": nodes} - network_yaml_data["fork_observer"] = { - "enabled": fork_observer, - "configQueryInterval": fork_obs_query_interval, - } - - with open(os.path.join(datadir, "network.yaml"), "w") as f: - yaml.dump(network_yaml_data, f, default_flow_style=False) - - # Generate node-defaults.yaml - default_yaml_path = files("resources.networks").joinpath("node-defaults.yaml") - with open(str(default_yaml_path)) as f: - 
defaults_yaml_content = f.read() - - with open(os.path.join(datadir, "node-defaults.yaml"), "w") as f: - f.write(defaults_yaml_content) - - click.echo( - f"Project '{datadir}' has been created with 'network.yaml' and 'node-defaults.yaml'." - ) From 31c08e1d64dc1d5a70b9c9591cc348aff54399f4 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 11 Sep 2024 19:29:01 +0100 Subject: [PATCH 249/710] deploy caddy if either fork-observer or logging enabled --- src/warnet/deploy.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 3453db3b2..a4a8970f1 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -47,10 +47,11 @@ def deploy(directory, debug): directory = Path(directory) if (directory / NETWORK_FILE).exists(): - deploy_logging_stack(directory, debug) + dl = deploy_logging_stack(directory, debug) deploy_network(directory, debug) - deploy_fork_observer(directory, debug) - deploy_caddy(directory, debug) + df = deploy_fork_observer(directory, debug) + if dl | df: + deploy_caddy(directory, debug) elif (directory / NAMESPACES_FILE).exists(): deploy_namespaces(directory) else: @@ -83,9 +84,9 @@ def check_logging_required(directory: Path): return False -def deploy_logging_stack(directory: Path, debug: bool): +def deploy_logging_stack(directory: Path, debug: bool) -> bool: if not check_logging_required(directory): - return + return False click.echo("Found collectLogs or metricsExport in network definition, Deploying logging stack") @@ -121,14 +122,14 @@ def deploy_caddy(directory: Path, debug: bool): _port_start_internal(name, namespace) -def deploy_fork_observer(directory: Path, debug: bool): +def deploy_fork_observer(directory: Path, debug: bool) -> bool: network_file_path = directory / NETWORK_FILE with network_file_path.open() as f: network_file = yaml.safe_load(f) # Only start if configured in the network file if not network_file.get("fork_observer", {}).get("enabled", False): - 
return + return False default_namespace = get_default_namespace() namespace = LOGGING_NAMESPACE @@ -170,7 +171,8 @@ def deploy_fork_observer(directory: Path, debug: bool): if not stream_command(cmd): click.echo(f"Failed to run Helm command: {cmd}") - return + return False + return True def deploy_network(directory: Path, debug: bool = False): From 4879b0342ed90e65ee5bd524b70039d8f9792598 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 5 Sep 2024 22:08:04 +0100 Subject: [PATCH 250/710] add warnet dashboard --- src/warnet/dashboard.py | 11 +++++++++++ src/warnet/deploy.py | 6 +++--- src/warnet/main.py | 2 ++ 3 files changed, 16 insertions(+), 3 deletions(-) create mode 100644 src/warnet/dashboard.py diff --git a/src/warnet/dashboard.py b/src/warnet/dashboard.py new file mode 100644 index 000000000..28ac0cba6 --- /dev/null +++ b/src/warnet/dashboard.py @@ -0,0 +1,11 @@ +import click + + +@click.command() +def dashboard(): + """Open the Warnet dashboard in default browser""" + import webbrowser + + url = "http://localhost:2019" + webbrowser.open(url) + click.echo("warnet dashboard opened in default browser") diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index a4a8970f1..0bcbd2c0f 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -282,11 +282,11 @@ def run_detached_process(command): def _port_start_internal(name, namespace): + click.echo("Starting port-forwarding to warnet dashboard") command = f"kubectl port-forward -n {namespace} service/{name} 2019:80" run_detached_process(command) - click.echo( - "Port forwarding on port 2019 started in the background. To access landing page visit localhost:2019." 
- ) + click.echo("Port forwarding on port 2019 started in the background.") + click.echo("\nTo access the warnet dashboard visit localhost:2019 or run:\n warnet dashboard") def _port_stop_internal(name, namespace): diff --git a/src/warnet/main.py b/src/warnet/main.py index 341d5757e..76893575c 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -3,6 +3,7 @@ from .admin import admin from .bitcoin import bitcoin from .control import down, logs, run, snapshot, stop +from .dashboard import dashboard from .deploy import deploy from .graph import create, graph from .image import image @@ -21,6 +22,7 @@ def cli(): cli.add_command(bitcoin) cli.add_command(deploy) cli.add_command(down) +cli.add_command(dashboard) cli.add_command(graph) cli.add_command(image) cli.add_command(init) From 48acddeff5e1ad17cf51cd5b873c04c2af40c5c3 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 5 Sep 2024 22:17:44 +0100 Subject: [PATCH 251/710] test fork-observer access via caddy dashboard --- test/data/services/network.yaml | 2 ++ test/services_test.py | 17 ++++------------- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/test/data/services/network.yaml b/test/data/services/network.yaml index 6c19027a2..d523fbf97 100644 --- a/test/data/services/network.yaml +++ b/test/data/services/network.yaml @@ -33,3 +33,5 @@ nodes: rpcwhitelistdefault=0 fork_observer: enabled: true +caddy: + enabled: true diff --git a/test/services_test.py b/test/services_test.py index 59048d3ce..a80717db9 100755 --- a/test/services_test.py +++ b/test/services_test.py @@ -2,7 +2,6 @@ import os from pathlib import Path -from subprocess import PIPE, Popen import requests from test_base import TestBase @@ -30,21 +29,14 @@ def check_fork_observer(self): self.log.info("Creating chain split") self.warnet("bitcoin rpc john createwallet miner") self.warnet("bitcoin rpc john -generate 1") - self.log.info("Forwarding port 2323...") - # Stays alive in background - self.fo_port_fwd_process = Popen( - 
["kubectl", "port-forward", "fork-observer", "2323"], - stdout=PIPE, - stderr=PIPE, - bufsize=1, - universal_newlines=True, - ) + # Port will be auto-forwarded by `warnet deploy`, routed through the enabled Caddy pod def call_fo_api(): + fo_root = "http://localhost:2019/fork-observer" try: - fo_res = requests.get("http://localhost:2323/api/networks.json") + fo_res = requests.get(f"{fo_root}/api/networks.json") network_id = fo_res.json()["networks"][0]["id"] - fo_data = requests.get(f"http://localhost:2323/api/{network_id}/data.json") + fo_data = requests.get(f"{fo_root}/api/{network_id}/data.json") # fork observed! return len(fo_data.json()["header_infos"]) == 2 except Exception as e: @@ -54,7 +46,6 @@ def call_fo_api(): self.wait_for_predicate(call_fo_api) self.log.info("Fork observed!") - self.fo_port_fwd_process.terminate() if __name__ == "__main__": From 121d303b9fc5bc7220f636e38e74796ec17e8b33 Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Fri, 6 Sep 2024 11:19:49 +0100 Subject: [PATCH 252/710] grafana running behind caddy --- resources/charts/caddy/values.yaml | 2 +- resources/manifests/grafana_values.yaml | 3 +++ src/warnet/graph.py | 2 +- test/data/logging/network.yaml | 4 +++- test/logging_test.py | 8 +++++--- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/resources/charts/caddy/values.yaml b/resources/charts/caddy/values.yaml index 509ef9e37..01338e9ac 100644 --- a/resources/charts/caddy/values.yaml +++ b/resources/charts/caddy/values.yaml @@ -99,7 +99,7 @@ caddyConfig: | } handle_path /grafana/* { - reverse_proxy grafana:3000 + reverse_proxy loki-grafana:80 } } diff --git a/resources/manifests/grafana_values.yaml b/resources/manifests/grafana_values.yaml index 110622911..a6d16e3ab 100644 --- a/resources/manifests/grafana_values.yaml +++ b/resources/manifests/grafana_values.yaml @@ -16,6 +16,9 @@ grafana.ini: auth: disable_login_form: true disable_signout_menu: true + 
server: + # this is required to use Grafana behind a reverse proxy (caddy) + root_url: "%(protocol)s://%(domain)s:%(http_port)s/grafana/" auth.anonymous: enabled: true org_name: Main Org. diff --git a/src/warnet/graph.py b/src/warnet/graph.py index 2e1e31a3e..5ddaad417 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -184,7 +184,7 @@ def inquirer_create_network(project_path: Path): custom_network_path, fork_observer, fork_observer_query_interval, - fork_observer, # This enables caddy whenever fork-observer is enabled + fork_observer, # This enables caddy whenever fork-observer is enabled ) return custom_network_path diff --git a/test/data/logging/network.yaml b/test/data/logging/network.yaml index 59de12158..a06a5ea24 100644 --- a/test/data/logging/network.yaml +++ b/test/data/logging/network.yaml @@ -10,4 +10,6 @@ nodes: metrics: txrate=getchaintxstats(10)["txrate"] - name: tank-0002 connect: - - tank-0000 \ No newline at end of file + - tank-0000 +caddy: + enabled: true \ No newline at end of file diff --git a/test/logging_test.py b/test/logging_test.py index f5c7134d4..218f51380 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -10,6 +10,8 @@ import requests from test_base import TestBase +GRAFANA_URL = "http://localhost:2019/grafana/" + class LoggingTest(TestBase): def __init__(self): @@ -60,7 +62,7 @@ def wait_for_endpoint_ready(self): def check_endpoint(): try: - response = requests.get("http://localhost:3000/login") + response = requests.get(f"{GRAFANA_URL}login") return response.status_code == 200 except requests.RequestException: return False @@ -75,7 +77,7 @@ def make_grafana_api_request(self, ds_uid, start, metric): "from": f"{start}", "to": "now", } - reply = requests.post("http://localhost:3000/api/ds/query", json=data) + reply = requests.post(f"{GRAFANA_URL}api/ds/query", json=data) if reply.status_code != 200: self.log.error(f"Grafana API request failed with 
status code {reply.status_code}") self.log.error(f"Response content: {reply.text}") @@ -92,7 +94,7 @@ def test_prometheus_and_grafana(self): self.warnet(f"run {miner_file} --allnodes --interval=5 --mature") self.warnet(f"run {tx_flood_file} --interval=1") - prometheus_ds = requests.get("http://localhost:3000/api/datasources/name/Prometheus") + prometheus_ds = requests.get(f"{GRAFANA_URL}api/datasources/name/Prometheus") assert prometheus_ds.status_code == 200 prometheus_uid = prometheus_ds.json()["uid"] self.log.info(f"Got Prometheus data source uid from Grafana: {prometheus_uid}") From 20b97a0a78a1788ea93a4ea51330d6b58d758f5b Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 11 Sep 2024 19:30:43 +0100 Subject: [PATCH 253/710] Update fork-observer chat note --- resources/charts/fork-observer/templates/NOTES.txt | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/resources/charts/fork-observer/templates/NOTES.txt b/resources/charts/fork-observer/templates/NOTES.txt index 9894b7843..36a37bdc8 100644 --- a/resources/charts/fork-observer/templates/NOTES.txt +++ b/resources/charts/fork-observer/templates/NOTES.txt @@ -1,5 +1 @@ -To view forkobserver you must forward the port from the cluster to your local machine - -kubectl port-forward fork-observer 2323 - -fork-observer will then be available at web address: http://localhost:2323 \ No newline at end of file +Fork observer enabled. 
From 13f14c3d94ffe23911d21f65938ceaf03682d742 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 11 Sep 2024 19:37:43 +0100 Subject: [PATCH 254/710] dont' copy caddy files to project dir --- src/warnet/network.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/warnet/network.py b/src/warnet/network.py index c4950861c..fcb5a5ee7 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -6,8 +6,6 @@ from .bitcoin import _rpc from .constants import ( - CHARTS_DIR, - DEFAULT_NETWORK, NETWORK_DIR, SCENARIOS_DIR, ) @@ -43,15 +41,6 @@ def copy_network_defaults(directory: Path): NETWORK_DIR, ["node-defaults.yaml", "__pycache__", "__init__.py"], ) - # Copy caddy files to the network directory - networks_dir = directory / NETWORK_DIR.name - copy_caddy_files(networks_dir / DEFAULT_NETWORK) - - -def copy_caddy_files(directory: Path): - """Copy caddy files to the specified directory""" - copy_defaults(directory, "caddy", CHARTS_DIR.joinpath("caddy"), []) - print(f"Copied caddy files to {directory / 'caddy'}") def copy_scenario_defaults(directory: Path): From e75cace74d1e5b594471136e09cfc2f80cf2b137 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 11 Sep 2024 19:55:30 +0100 Subject: [PATCH 255/710] logging: add to create inquirer --- src/warnet/graph.py | 20 +++++++++++++++++--- test/graph_test.py | 2 ++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/warnet/graph.py b/src/warnet/graph.py index 5ddaad417..0f25b9d02 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -34,6 +34,7 @@ def custom_graph( fork_observer: bool, fork_obs_query_interval: int, caddy: bool, + logging: bool, ): datadir.mkdir(parents=False, exist_ok=False) # Generate network.yaml @@ -79,10 +80,13 @@ def custom_graph( # Generate node-defaults.yaml default_yaml_path = files("resources.networks").joinpath("node-defaults.yaml") with open(str(default_yaml_path)) as f: - defaults_yaml_content = f.read() + defaults_yaml_content = yaml.safe_load(f) + + # 
Configure logging + defaults_yaml_content["collectLogs"] = logging with open(os.path.join(datadir, "node-defaults.yaml"), "w") as f: - f.write(defaults_yaml_content) + yaml.dump(defaults_yaml_content, f, default_flow_style=False, sort_keys=False) click.echo( f"Project '{datadir}' has been created with 'network.yaml' and 'node-defaults.yaml'." @@ -175,6 +179,15 @@ def inquirer_create_network(project_path: Path): type=int, default=20, ) + + logging = click.prompt( + click.style( + "\nWould you like to enable grafana logging on the network?", fg="blue", bold=True + ), + type=bool, + default=False, + ) + caddy = fork_observer | logging custom_network_path = project_path / "networks" / net_answers["network_name"] click.secho("\nGenerating custom network...", fg="yellow", bold=True) custom_graph( @@ -184,7 +197,8 @@ def inquirer_create_network(project_path: Path): custom_network_path, fork_observer, fork_observer_query_interval, - fork_observer, # This enables caddy whenever fork-observer is enabled + caddy, + logging, ) return custom_network_path diff --git a/test/graph_test.py b/test/graph_test.py index 5cc3200a6..482c555ab 100755 --- a/test/graph_test.py +++ b/test/graph_test.py @@ -40,6 +40,8 @@ def directory_exists(self): self.sut.sendline("") self.sut.expect("seconds", timeout=10) self.sut.sendline("") + self.sut.expect("enable grafana", timeout=10) + self.sut.sendline("") self.sut.expect("successfully", timeout=50) From 3a4606434d0b83574589b8b10770f0b90106d156 Mon Sep 17 00:00:00 2001 From: Grant Date: Wed, 11 Sep 2024 15:19:05 -0400 Subject: [PATCH 256/710] check if docker and minikube are running --- src/warnet/project.py | 45 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/src/warnet/project.py b/src/warnet/project.py index 33fb8ce47..0ec22e8a2 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -53,6 +53,36 @@ def is_minikube_installed() -> tuple[bool, str]: except FileNotFoundError as err: return False, 
str(err) + def is_minikube_running() -> tuple[bool, str]: + try: + result = subprocess.run( + ["minikube", "status"], + capture_output=True, + text=True, + ) + if result.returncode == 0 and "Running" in result.stdout: + return True, "minikube is running" + else: + return False, "" + except FileNotFoundError: + # Minikube command not found + return False, "" + + def is_docker_running() -> tuple[bool, str]: + try: + result = subprocess.run( + ["docker", "info"], + capture_output=True, + text=True, + ) + if result.returncode == 0: + return True, "docker is running" + else: + return False, "" + except FileNotFoundError: + # Docker command not found + return False, "" + def is_minikube_version_valid_on_darwin() -> tuple[bool, str]: try: version_result = subprocess.run( @@ -171,6 +201,18 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: install_instruction="Make sure Docker Desktop is installed and running.", install_url="https://docs.docker.com/desktop/", ) + docker_running_info = ToolInfo( + tool_name="Running Docker", + is_installed_func=is_docker_running, + install_instruction="Please make sure docker is running", + install_url="https://docs.docker.com/engine/install/", + ) + minikube_running_info = ToolInfo( + tool_name="Running Minikube", + is_installed_func=is_minikube_running, + install_instruction="Please make sure minikube is running", + install_url="https://minikube.sigs.k8s.io/docs/start/", + ) kubectl_info = ToolInfo( tool_name="Kubectl", is_installed_func=is_kubectl_installed, @@ -223,11 +265,14 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: if answers["platform"] == "Docker Desktop": check_results.append(check_installation(docker_info)) check_results.append(check_installation(docker_desktop_info)) + check_results.append(check_installation(docker_running_info)) elif answers["platform"] == "Minikube": check_results.append(check_installation(docker_info)) + 
check_results.append(check_installation(docker_running_info)) check_results.append(check_installation(minikube_info)) if is_platform_darwin(): check_results.append(check_installation(minikube_version_info)) + check_results.append(check_installation(minikube_running_info)) check_results.append(check_installation(kubectl_info)) check_results.append(check_installation(helm_info)) else: From d912b218e8e3d67fe9f2b4d5290d2834cc542865 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 11 Sep 2024 15:23:16 -0400 Subject: [PATCH 257/710] "Running Minikube is not satisfied" --- src/warnet/project.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/project.py b/src/warnet/project.py index 0ec22e8a2..6b0567506 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -181,7 +181,7 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: url_label = click.style(" URL: ", fg="yellow", bold=True) url_text = click.style(f"{tool_info.install_url}", fg="yellow") - click.secho(f" 💥 {tool_info.tool_name} is not installed. {location}", fg="yellow") + click.secho(f" 💥 {tool_info.tool_name} is not satisfied. 
{location}", fg="yellow") click.echo(instruction_label + instruction_text) click.echo(url_label + url_text) return ToolStatus.Unsatisfied From be589c212daa396c565f76560307a50773abe9f3 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 10 Sep 2024 11:21:20 +0100 Subject: [PATCH 258/710] improve help if custom network exists --- src/warnet/graph.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/warnet/graph.py b/src/warnet/graph.py index 34b98960d..b0c49f309 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -1,5 +1,6 @@ import os import random +import sys from importlib.resources import files from pathlib import Path @@ -34,7 +35,13 @@ def custom_graph( fork_observer: bool, fork_obs_query_interval: int, ): - datadir.mkdir(parents=False, exist_ok=False) + try: + datadir.mkdir(parents=False, exist_ok=False) + except FileExistsError as e: + print(e) + print("Exiting network builder without overwriting") + sys.exit(1) + # Generate network.yaml nodes = [] connections = set() From 4d40531329006ef90f0d6e73f3e728790b139f0a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 11 Sep 2024 21:04:05 +0100 Subject: [PATCH 259/710] graph: fix helptext --- src/warnet/graph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/warnet/graph.py b/src/warnet/graph.py index b0c49f309..48351afe1 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -199,7 +199,7 @@ def create(): # Check if the project has a networks directory if not (project_path / "networks").exists(): click.secho( - "The current directory does not have a 'networks' directory. Please run 'warnet init' or 'warnet create' first.", + "The current directory does not have a 'networks' directory. 
Please run 'warnet init' or 'warnet new' first.", fg="red", bold=True, ) From 14c9ca0caa29ec8232def0581c80720307baecca Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 11 Sep 2024 16:10:43 -0400 Subject: [PATCH 260/710] fix scenario help --- resources/scenarios/ln_init.py | 8 ++--- resources/scenarios/miner_std.py | 6 ++-- resources/scenarios/reconnaissance.py | 9 +++--- resources/scenarios/sens_relay.py | 46 --------------------------- resources/scenarios/signet_miner.py | 2 ++ resources/scenarios/tx_flood.py | 8 ++--- src/warnet/control.py | 8 ++++- 7 files changed, 23 insertions(+), 64 deletions(-) delete mode 100644 resources/scenarios/sens_relay.py diff --git a/resources/scenarios/ln_init.py b/resources/scenarios/ln_init.py index e915ee721..59df5e38e 100644 --- a/resources/scenarios/ln_init.py +++ b/resources/scenarios/ln_init.py @@ -9,14 +9,14 @@ from resources.scenarios.commander import Commander -def cli_help(): - return "Fund LN wallets and open channels" - - class LNInit(Commander): def set_test_params(self): self.num_nodes = None + def add_options(self, parser): + parser.description = "Fund LN wallets and open channels" + parser.usage = "warnet run /path/to/ln_init.py" + def run_test(self): self.log.info("Lock out of IBD") miner = self.ensure_miner(self.nodes[0]) diff --git a/resources/scenarios/miner_std.py b/resources/scenarios/miner_std.py index fcfea9841..5aa368d40 100755 --- a/resources/scenarios/miner_std.py +++ b/resources/scenarios/miner_std.py @@ -9,10 +9,6 @@ from resources.scenarios.commander import Commander -def cli_help(): - return "Generate blocks over time. 
Options: [--allnodes | --interval= | --mature ]" - - class Miner: def __init__(self, node, mature): self.node = node @@ -28,6 +24,8 @@ def set_test_params(self): self.miners = [] def add_options(self, parser): + parser.description = "Generate blocks over time" + parser.usage = "warnet run /path/to/miner_std.py [options]" parser.add_argument( "--allnodes", dest="allnodes", diff --git a/resources/scenarios/reconnaissance.py b/resources/scenarios/reconnaissance.py index 1440b119b..3fc2269e4 100755 --- a/resources/scenarios/reconnaissance.py +++ b/resources/scenarios/reconnaissance.py @@ -14,11 +14,6 @@ from test_framework.p2p import MAGIC_BYTES, P2PInterface -# This message is provided to the user when they describe the scenario -def cli_help(): - return "Demonstrate network reconnaissance using a scenario and P2PInterface" - - def get_signet_network_magic_from_node(node): template = node.getblocktemplate({"rules": ["segwit", "signet"]}) challenge = template["signet_challenge"] @@ -37,6 +32,10 @@ def set_test_params(self): # a sub-class of BitcoinTestFramework self.num_nodes = 1 + def add_options(self, parser): + parser.description = "Demonstrate network reconnaissance using a scenario and P2PInterface" + parser.usage = "warnet run /path/to/reconnaissance.py" + # Scenario entrypoint def run_test(self): self.log.info("Getting peer info") diff --git a/resources/scenarios/sens_relay.py b/resources/scenarios/sens_relay.py deleted file mode 100644 index 210b43760..000000000 --- a/resources/scenarios/sens_relay.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python3 - -# The base class exists inside the commander container -try: - from commander import Commander -except ImportError: - from resources.scenarios.commander import Commander - - -def cli_help(): - return "Send a transaction using sensitive relay" - - -class MinerStd(Commander): - def set_test_params(self): - self.num_nodes = 12 - - def run_test(self): - # PR branch node - test_node = self.nodes[11] - 
test_wallet = self.ensure_miner(test_node) - addr = test_wallet.getnewaddress() - - self.log.info("generating 110 blocks...") - self.generatetoaddress(test_node, 110, addr, sync_fun=self.no_op) - - self.log.info("adding onion addresses from all peers...") - for i in range(11): - info = self.nodes[i].getnetworkinfo() - for addr in info["localaddresses"]: - if "onion" in addr["address"]: - self.log.info(f"adding {addr['address']}:{addr['port']}") - test_node.addpeeraddress(addr["address"], addr["port"]) - - self.log.info("getting address from recipient...") - # some other node - recip = self.nodes[5] - recip_wallet = self.ensure_miner(recip) - recip_addr = recip_wallet.getnewaddress() - - self.log.info("sending transaction...") - self.log.info(test_wallet.sendtoaddress(recip_addr, 0.5)) - - -if __name__ == "__main__": - MinerStd().main() diff --git a/resources/scenarios/signet_miner.py b/resources/scenarios/signet_miner.py index 2a09b1f0c..0edc635e3 100644 --- a/resources/scenarios/signet_miner.py +++ b/resources/scenarios/signet_miner.py @@ -46,6 +46,8 @@ def set_test_params(self): self.num_nodes = 1 def add_options(self, parser): + parser.description = "Generate blocks on a signet network" + parser.usage = "warnet run /path/to/signet_miner.py [options]" parser.add_argument( "--tank", dest="tank", diff --git a/resources/scenarios/tx_flood.py b/resources/scenarios/tx_flood.py index 5da5f8b53..a4896e958 100755 --- a/resources/scenarios/tx_flood.py +++ b/resources/scenarios/tx_flood.py @@ -10,10 +10,6 @@ from resources.scenarios.commander import Commander -def cli_help(): - return "Make a big transaction mess. 
Options: [--interval=]" - - class TXFlood(Commander): def set_test_params(self): self.num_nodes = 1 @@ -21,6 +17,10 @@ def set_test_params(self): self.threads = [] def add_options(self, parser): + parser.description = ( + "Sends random transactions between all nodes with available balance in their wallet" + ) + parser.usage = "warnet run /path/to/tx_flood.py [options]" parser.add_argument( "--interval", dest="interval", diff --git a/src/warnet/control.py b/src/warnet/control.py index 5c35b131a..5b7b2af14 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -2,6 +2,7 @@ import json import os import subprocess +import sys import time from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path @@ -185,7 +186,10 @@ def get_active_network(namespace): @click.argument("scenario_file", type=click.Path(exists=True, file_okay=True, dir_okay=False)) @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) def run(scenario_file: str, additional_args: tuple[str]): - """Run a scenario from a file""" + """ + Run a scenario from a file. 
+ Pass `-- --help` to get individual scenario help + """ scenario_path = Path(scenario_file).resolve() scenario_name = scenario_path.stem @@ -230,6 +234,8 @@ def run(scenario_file: str, additional_args: tuple[str]): # Add additional arguments if additional_args: helm_command.extend(["--set", f"args={' '.join(additional_args)}"]) + if "--help" in additional_args or "-h" in additional_args: + return subprocess.run([sys.executable, scenario_path, "--help"]) helm_command.extend([name, COMMANDER_CHART]) From 571a65eb410f7e621baf44e4ebcb1a3caadf915f Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 11 Sep 2024 21:09:19 +0100 Subject: [PATCH 261/710] handle more pathtypes --- resources/scripts/apidocs.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/resources/scripts/apidocs.py b/resources/scripts/apidocs.py index 79d664e85..eb64fce64 100755 --- a/resources/scripts/apidocs.py +++ b/resources/scripts/apidocs.py @@ -26,11 +26,7 @@ def print_cmd(cmd, super=""): p["name"], p["type"]["param_type"] if p["type"]["param_type"] != "Unprocessed" else "String", "yes" if p["required"] else "", - '"' + p["default"] + '"' - if p["default"] and p["type"]["param_type"] == "String" - else Path(p["default"]).relative_to(Path.cwd()) - if p["default"] and p["type"]["param_type"] == "Path" - else p["default"], + format_default_value(p["default"], p["type"]["param_type"]), ] for p in cmd["params"] if p["name"] != "help" @@ -39,6 +35,16 @@ def print_cmd(cmd, super=""): doc += "\n\n" +def format_default_value(default, param_type): + if default is None: + return "" + if param_type == "String": + return f'"{default}"' + if param_type == "Path": + return str(default) + return default + + with Context(cli) as ctx: info = ctx.to_info_dict() # root-level commands first From 50d724e53868062a6732f7dab0e7f4af451f39b2 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 11 Sep 2024 21:09:34 +0100 Subject: [PATCH 262/710] run apidocs.py --- docs/warnet.md | 21 
++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/docs/warnet.md b/docs/warnet.md index dc8bca233..06f4047b6 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -48,7 +48,7 @@ Show the logs of a pod options: | name | type | required | default | |----------|--------|------------|-----------| -| pod_name | String | | | +| pod_name | String | | "" | | follow | Bool | | False | ### `warnet new` @@ -72,6 +72,17 @@ options: Setup warnet +### `warnet snapshot` +Create a snapshot of a tank's Bitcoin data or snapshot all tanks + +options: +| name | type | required | default | +|--------------|--------|------------|--------------------| +| tank_name | String | | | +| snapshot_all | Bool | | False | +| output | Path | | ./warnet-snapshots | +| filter | String | | | + ### `warnet status` Display the unified status of the Warnet network and active scenarios @@ -138,16 +149,16 @@ options: ### `warnet graph import-json` Create a cycle graph with nodes imported from lnd `describegraph` JSON file, -and additionally include 7 extra random outbounds per node. Include lightning -channels and their policies as well. -Returns XML file as string with or without --outfile option. + and additionally include 7 extra random outbounds per node. Include lightning + channels and their policies as well. + Returns XML file as string with or without --outfile option. ## Image ### `warnet image build` Build bitcoind and bitcoin-cli from \ at \ with the specified \. -Optionally deploy to remote registry using --action=push, otherwise image is loaded to local registry. + Optionally deploy to remote registry using --action=push, otherwise image is loaded to local registry. 
options: | name | type | required | default | From 6afb54057accc9977913665f59c3e1ba0d34e2eb Mon Sep 17 00:00:00 2001 From: Will Clark Date: Thu, 12 Sep 2024 09:39:08 +0100 Subject: [PATCH 263/710] add k8s log collector (#553) --- .github/workflows/test.yml | 21 +++++++++++++++++++++ resources/scripts/k8s-log-collector.sh | 23 +++++++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100755 resources/scripts/k8s-log-collector.sh diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d26a10fa5..ac678a67f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -91,6 +91,27 @@ jobs: run: | source .venv/bin/activate ./test/${{matrix.test}} + - name: Collect Kubernetes logs + if: always() + run: | + echo "Installing stern..." + STERN_VERSION="1.30.0" + curl -Lo stern.tar.gz https://github.com/stern/stern/releases/download/v${STERN_VERSION}/stern_${STERN_VERSION}_linux_amd64.tar.gz + tar zxvf stern.tar.gz + chmod +x stern + sudo mv stern /usr/local/bin/ + + # Run script + curl -O https://raw.githubusercontent.com/willcl-ark/warnet/main/resources/scripts/k8s-log-collector.sh + chmod +x k8s-log-collector.sh + ./k8s-log-collector.sh default + - name: Upload log artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: kubernetes-logs-${{ matrix.test }} + path: ./k8s-logs + retention-days: 5 test-without-mk: runs-on: ubuntu-latest strategy: diff --git a/resources/scripts/k8s-log-collector.sh b/resources/scripts/k8s-log-collector.sh new file mode 100755 index 000000000..d98c5a38c --- /dev/null +++ b/resources/scripts/k8s-log-collector.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Set variables +NAMESPACE=${1:-default} +LOG_DIR="./k8s-logs" +TIMESTAMP=$(date +"%Y%m%d_%H%M%S") + +# Ensure log directory exists +mkdir -p "$LOG_DIR" + +# Collect logs using stern (includes logs from terminated pods) +echo "Collecting stern logs..." 
+stern "(tank|commander).*" --namespace="$NAMESPACE" --output default --since 1h --no-follow > "$LOG_DIR/${TIMESTAMP}_stern_logs" + +# Collect descriptions of all resources +echo "Collecting resource descriptions..." +kubectl describe all --namespace="$NAMESPACE" > "$LOG_DIR/${TIMESTAMP}_resource_descriptions.txt" + +# Collect events +echo "Collecting events..." +kubectl get events --namespace="$NAMESPACE" --sort-by='.metadata.creationTimestamp' > "$LOG_DIR/${TIMESTAMP}_events.txt" + +echo "Log collection complete. Logs saved in $LOG_DIR" From b439ad26b7f03f955764c4666535202044382169 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Thu, 12 Sep 2024 12:29:58 +0000 Subject: [PATCH 264/710] Update apidocs on --- docs/warnet.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/warnet.md b/docs/warnet.md index 06f4047b6..dab8f62d4 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -149,16 +149,16 @@ options: ### `warnet graph import-json` Create a cycle graph with nodes imported from lnd `describegraph` JSON file, - and additionally include 7 extra random outbounds per node. Include lightning - channels and their policies as well. - Returns XML file as string with or without --outfile option. +and additionally include 7 extra random outbounds per node. Include lightning +channels and their policies as well. +Returns XML file as string with or without --outfile option. ## Image ### `warnet image build` Build bitcoind and bitcoin-cli from \ at \ with the specified \. - Optionally deploy to remote registry using --action=push, otherwise image is loaded to local registry. +Optionally deploy to remote registry using --action=push, otherwise image is loaded to local registry. 
options: | name | type | required | default | From c3504a5831db6bdde14830d9bdb9c5015e6f4b3d Mon Sep 17 00:00:00 2001 From: m3dwards Date: Thu, 12 Sep 2024 12:53:46 +0000 Subject: [PATCH 265/710] Update apidocs on --- docs/warnet.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/warnet.md b/docs/warnet.md index dab8f62d4..0e03b97df 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -25,6 +25,10 @@ options: Create a new warnet network +### `warnet dashboard` +Open the Warnet dashboard in default browser + + ### `warnet deploy` Deploy a warnet with topology loaded from \ From 8aca2c1407615ba4c59e1e7ddb1839b1889c0693 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 12 Sep 2024 10:51:40 -0400 Subject: [PATCH 266/710] add podname-indexed dict "self.tanks" to Commander --- resources/scenarios/commander.py | 5 +++++ test/data/scenario_connect_dag.py | 20 ++++++++++---------- test/data/scenario_p2p_interface.py | 2 +- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/resources/scenarios/commander.py b/resources/scenarios/commander.py index 661f68727..1ecf0b6c4 100644 --- a/resources/scenarios/commander.py +++ b/resources/scenarios/commander.py @@ -9,6 +9,7 @@ import sys import tempfile from pathlib import Path +from typing import Dict from test_framework.authproxy import AuthServiceProxy from test_framework.p2p import NetworkThread @@ -80,6 +81,9 @@ def setup(self): ch.setFormatter(formatter) self.log.addHandler(ch) + # Keep a separate index of tanks by pod name + self.tanks: Dict[str, TestNode] = {} + for i, tank in enumerate(WARNET): self.log.info( f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}" @@ -106,6 +110,7 @@ def setup(self): node.rpc_connected = True node.init_peers = tank["init_peers"] self.nodes.append(node) + self.tanks[tank["tank"]] = node self.num_nodes = len(self.nodes) diff --git a/test/data/scenario_connect_dag.py b/test/data/scenario_connect_dag.py index 039f161c3..0508fee62 100644 --- 
a/test/data/scenario_connect_dag.py +++ b/test/data/scenario_connect_dag.py @@ -56,16 +56,16 @@ def run_test(self): self.connect_nodes(6, 7) self.sync_all() - zero_peers = self.nodes[0].getpeerinfo() - one_peers = self.nodes[1].getpeerinfo() - two_peers = self.nodes[2].getpeerinfo() - three_peers = self.nodes[3].getpeerinfo() - four_peers = self.nodes[4].getpeerinfo() - five_peers = self.nodes[5].getpeerinfo() - six_peers = self.nodes[6].getpeerinfo() - seven_peers = self.nodes[7].getpeerinfo() - eight_peers = self.nodes[8].getpeerinfo() - nine_peers = self.nodes[9].getpeerinfo() + zero_peers = self.tanks["tank-0000"].getpeerinfo() + one_peers = self.tanks["tank-0001"].getpeerinfo() + two_peers = self.tanks["tank-0002"].getpeerinfo() + three_peers = self.tanks["tank-0003"].getpeerinfo() + four_peers = self.tanks["tank-0004"].getpeerinfo() + five_peers = self.tanks["tank-0005"].getpeerinfo() + six_peers = self.tanks["tank-0006"].getpeerinfo() + seven_peers = self.tanks["tank-0007"].getpeerinfo() + eight_peers = self.tanks["tank-0008"].getpeerinfo() + nine_peers = self.tanks["tank-0009"].getpeerinfo() for node in self.nodes: self.log.info(f"Node {node.index}: tank={node.tank} ip={node.rpchost}") diff --git a/test/data/scenario_p2p_interface.py b/test/data/scenario_p2p_interface.py index 27508681f..9c3b38a52 100644 --- a/test/data/scenario_p2p_interface.py +++ b/test/data/scenario_p2p_interface.py @@ -34,7 +34,7 @@ def run_test(self): self.log.info("Adding the p2p connection") p2p_block_store = self.nodes[0].add_p2p_connection( - P2PStoreBlock(), dstaddr=self.nodes[0].rpchost, dstport=18444 + P2PStoreBlock(), dstaddr=self.tanks["tank-0000"].rpchost, dstport=18444 ) self.log.info("test that an invalid GETDATA doesn't prevent processing of future messages") From f8a6c04a6dd12acaee8619fd916a8a9a791c9d78 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Thu, 12 Sep 2024 12:00:58 -0400 Subject: [PATCH 267/710] docs: overhaul all (#585) --- CONTRIBUTING.md | 20 ----- 
README.md | 20 +++-- docs/connecting-local-nodes.md | 3 +- docs/developer-notes.md | 54 ++++++++++++++ docs/lightning.md | 97 ------------------------- docs/logging_monitoring.md | 93 +++++++++--------------- docs/random_internet_as_graph_n100.png | Bin 471797 -> 0 bytes docs/release-process.md | 29 -------- docs/scaling.md | 2 +- docs/scenarios.md | 7 +- src/warnet/graph.py | 11 --- 11 files changed, 106 insertions(+), 230 deletions(-) delete mode 100644 CONTRIBUTING.md delete mode 100644 docs/lightning.md delete mode 100644 docs/random_internet_as_graph_n100.png delete mode 100644 docs/release-process.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 8d5671ce7..000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,20 +0,0 @@ -# Contributing / Local Warnet Development - -## Download the code repository - -```bash -git clone https://github.com/bitcoin-dev-project/warnet -cd warnet -``` - -## Recommended: use a virtual Python environment such as `venv` - -```bash -python3 -m venv .venv # Use alternative venv manager if desired -source .venv/bin/activate -``` - -```bash -pip install --upgrade pip -pip install -e . -``` \ No newline at end of file diff --git a/README.md b/README.md index beeb264b1..383e7faf1 100644 --- a/README.md +++ b/README.md @@ -5,24 +5,22 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. ## Major Features -* Launch a bitcoin network with a specified number of nodes connected to each other according to a network topology from a graphml file. -* Scenarios can be run across the network which can be programmed using the Bitcoin Core functional [test_framework language](https://github.com/bitcoin/bitcoin/tree/master/test/functional). -* Nodes can have traffic shaping parameters assigned to them via the graph using [tc-netem](https://manpages.ubuntu.com/manpages/trusty/man8/tc-netem.8.html) tool. 
-* Data from nodes can be collected and searched including log files and p2p messages. -* Performance data from containers can be monitored and visualized. -* Lightning Network nodes can be deployed and operated. -* Networks can be deployed using Kubernetes, e.g. via MiniKube (small network graphs) or a managed cluster for larger network graphs. +* Launch a bitcoin network with a specified number of nodes connected to each other according to a network topology. +* Run scenarios of network behavior across the network which can be programmed using the Bitcoin Core functional [test_framework language](https://github.com/bitcoin/bitcoin/tree/master/test/functional). +* Collect and search data from nodes including log files and p2p messages. +* Monitor and visualize performance data from Bitcoin nodes. +* Connect to a large network running in a remote cluster, or a smaller network running locally. ## Documentation - [Installation](/docs/install.md) - [CLI Commands](/docs/warnet.md) +- [Network configuration with yaml files](/docs/config.md) - [Scenarios](/docs/scenarios.md) - [Monitoring](/docs/logging_monitoring.md) -- [Lightning Network](/docs/lightning.md) +- [Snapshots](/docs/snapshots.md) +- [Connecting to local nodes outside the cluster](/docs/connecting-local-nodes.md) - [Scaling](/docs/scaling.md) -- [Connecting to local nodes](/docs/connecting-local-nodes.md) -- [Understanding network configuration](/docs/config.md) -- [Contributing](CONTRIBUTING.md) +- [Contributing](/docs/developer-notes.md) ![warnet-art](https://raw.githubusercontent.com/bitcoin-dev-project/warnet/main/docs/machines.webp) diff --git a/docs/connecting-local-nodes.md b/docs/connecting-local-nodes.md index 96407b3f2..89bade682 100644 --- a/docs/connecting-local-nodes.md +++ b/docs/connecting-local-nodes.md @@ -7,8 +7,7 @@ ### Run Warnet network ```shell -warnet cluster deploy -warnet network start +warnet deploy path/to/network/directory ``` ### 
Install Telepresence diff --git a/docs/developer-notes.md b/docs/developer-notes.md index 0c51bc8b3..14d1b8d5b 100644 --- a/docs/developer-notes.md +++ b/docs/developer-notes.md @@ -1,8 +1,62 @@ # Developer notes +## Contributing / Local Warnet Development + +### Download the code repository + +```bash +git clone https://github.com/bitcoin-dev-project/warnet +cd warnet +``` + +### Recommended: use a virtual Python environment such as `venv` + +```bash +python3 -m venv .venv # Use alternative venv manager if desired +source .venv/bin/activate +``` + +```bash +pip install --upgrade pip +pip install -e . +``` + +## Lint + This project primarily uses the `uv` python packaging tool: https://docs.astral.sh/uv/ along with the sister formatter/linter `ruff` https://docs.astral.sh/ruff/ With `uv` installed you can add/remove dependencies using `uv add ` or `uv remove . This will update the [`uv.lock`](https://docs.astral.sh/uv/guides/projects/#uvlock) file automatically. + `uv` can also run tools (like `ruff`) without external installation, simply run `uvx ruff check .` or `uvx ruff format .` to use a uv-managed format/lint on the project. 
+ +## Release process + +Once a tag is pushed to GH this will start an image build using the tag + +### Prerequisites + +- [ ] Update version in pyproject.toml +- [ ] Tag git with new version +- [ ] Push tag to GitHub + +### Manual Builds + +```bash +# Install build dependencies +pip install -e .[build] + +# Remove previous release metadata +rm -i -Rf build/ dist/ + +# Build wheel +python3 -m build +``` + +#### Upload + +```bash +# Upload to Pypi +python3 -m twine upload dist/* +``` \ No newline at end of file diff --git a/docs/lightning.md b/docs/lightning.md deleted file mode 100644 index 72404bc4c..000000000 --- a/docs/lightning.md +++ /dev/null @@ -1,97 +0,0 @@ -# Lightning Network - -## Adding LN nodes to graph - -LN nodes can be added to any Bitcoin Core node by adding a data element with key -`"ln"` to the node in the graph file. The value is the LN implementation desired. - -**Currently only `lnd` is supported** - -Example: - -``` - - lnd - -``` - -## Adding LN channels to graph - -LN channels are represented in the graphml file as edges with extra data elements -that correspond to arguments to the lnd `openchannel` and `updatechanpolicy` RPC -commands. The keys are: - -- `"channel_open"` (arguments added to `openchannel`) -- `"target_policy"` or `"source_policy"` (arguments added to `updatechanpolicy`) - -The key `"channel_open"` is required to open a LN channel in warnet, and to -identify an edge in the graphml file as a LN channel. - -Example: - -``` - - --local_amt=100000 - --base_fee_msat=100 --fee_rate_ppm=5 --time_lock_delta=18 - --base_fee_msat=2200 --fee_rate_ppm=13 --time_lock_delta=20 - -``` - -A complete example graph with LN nodes and channels is included in the test -data directory: [ln.graphml](../test/data/ln.graphml) - -## Running the Lightning network - -When warnet is started with `warnet network start` the bitcoin containers will -be started first followed by the lightning node containers. 
It may require a few -automatic restarts before the lightning nodes start up and connect to their -corresponding bitcoin nodes. Use `warnet network status` to monitor container status -and wait for all containers to be `running`. - -To create the lightning channels specified in the graph file, run the included -scenario: - -`warnet scenarios run ln_init` - -This [scenario](../src/scenarios/ln_init.py) will generate blocks, fund the wallets -in the bitcoin nodes, and open the channels from the graph. Each of these steps -requires some waiting as transactions are confirmed in the warnet blockchain -and lightning nodes gossip their channel announcements to each other. -Use `warnet scenarios active` to monitor the status of the scenario. When it is -complete the subprocess will exit and it will indicate `Active: False`. At that -point, the lightning network is ready for activity. - -## sim-ln compatibility - -Warnet can export data required to run [sim-ln](https://github.com/bitcoin-dev-project/sim-ln) -with a warnet network. - -With a network running, execute: `warnet network export` with optional argument -`--network=` (default is "warnet"). This will copy all lightning -node credentials like SSL certificates and macaroons into a local directory as -well as generate a JSON file required by sim-ln. - -Example (see sim-ln docs for exact API): - -``` -$ warnet network export -/Users/bitcoin-dev-project/.warnet/warnet/warnet/simln - -$ ls /Users/bitcoin-dev-project/.warnet/warnet/warnet/simln -sim.json warnet_ln_000000_tls.cert warnet_ln_000001_tls.cert warnet_ln_000002_tls.cert -warnet_ln_000000_admin.macaroon warnet_ln_000001_admin.macaroon warnet_ln_000002_admin.macaroon - -$ sim-cli --data-dir /Users/bitcoin-dev-project/.warnet/warnet/warnet/simln -2023-11-18T16:58:28.731Z INFO [sim_cli] Connected to warnet_ln_000000 - Node ID: 031b1404744431b01ee4fa2bfc3c5caa1f1044ff5a9cb553d2c8ec6eb0f9d8040c. 
-2023-11-18T16:58:28.747Z INFO [sim_cli] Connected to warnet_ln_000001 - Node ID: 02318b75bd91bf6265b30fe97f8ebbb0eda85194cf9d4467d43374de0248c7bf05. -2023-11-18T16:58:28.760Z INFO [sim_cli] Connected to warnet_ln_000002 - Node ID: 0393aa24d777e2391b5238c485ecce08b35bd9aa4ddf4f2226016107c6829804d5. -2023-11-18T16:58:28.760Z INFO [sim_lib] Running the simulation forever. -2023-11-18T16:58:28.815Z INFO [sim_lib] Simulation is running on regtest. -2023-11-18T16:58:28.815Z INFO [sim_lib] Simulating 0 activity on 3 nodes. -2023-11-18T16:58:28.815Z INFO [sim_lib] Summary of results will be reported every 60s. -2023-11-18T16:58:28.826Z INFO [sim_lib] Generating random activity with multiplier: 2, average payment amount: 3800000. -2023-11-18T16:58:28.826Z INFO [sim_lib] Created network generator: network graph view with: 3 channels. -2023-11-18T16:58:28.826Z INFO [sim_lib] Started random activity producer for warnet_ln_000000(031b14...d8040c): activity generator for capacity: 50000000 with multiplier 2: 26.31578947368421 payments per month (0.03654970760233918 per hour). -2023-11-18T16:58:28.826Z INFO [sim_lib] Started random activity producer for warnet_ln_000001(02318b...c7bf05): activity generator for capacity: 100000000 with multiplier 2: 52.63157894736842 payments per month (0.07309941520467836 per hour). -2023-11-18T16:58:28.826Z INFO [sim_lib] Started random activity producer for warnet_ln_000002(0393aa...9804d5): activity generator for capacity: 50000000 with multiplier 2: 26.31578947368421 payments per month (0.03654970760233918 per hour). -``` diff --git a/docs/logging_monitoring.md b/docs/logging_monitoring.md index 07699469f..cd3a037dd 100644 --- a/docs/logging_monitoring.md +++ b/docs/logging_monitoring.md @@ -1,31 +1,23 @@ # Logging and Monitoring -Warnet allows different granularity of logging. - ## Logging -### Warnet network level logging - -Fetch logs from the warnet RPC server `rpc-0`, which is in charge of orchestrating the network. 
- -Examples of information provided: +### Pod logs -- how many tanks are running -- what scenarios are running -- warnet RPC requests +The command `warnet logs` will bring up a menu of pods to print log output from, +such as Bitcoin tanks, or scenario commanders. Follow the output with the `-f` option. -Commands: `warnet network logs` or `warnet network logs --follow`. - -See more details in [warnet](/docs/warnet.md#warnet-network-logs) +See command [`warnet logs`](/docs/warnet.md#warnet-logs) ### Bitcoin Core logs -These are tank level or pod level log output from a Bitcoin Core node, useful for things like net logging and transaction propagation, retrieved by RPC `debug-log` using its network name and graph node index. +Entire debug log files from a Bitcoin tank can be dumped by using the tank's +pod name. Example: ```sh -$ warnet bitcoin debug-log 0 +$ warnet bitcoin debug-log tank-0000 2023-10-11T17:54:39.616974Z Bitcoin Core version v25.0.0 (release build) @@ -34,50 +26,46 @@ $ warnet bitcoin debug-log 0 ... (etc) ``` -For logs of lightning nodes, kubectl is required. +See command [`warnet bitcoin debug-log`](/docs/warnet.md#warnet-bitcoin-debug-log) -### Aggregated logs from all nodes +### Aggregated logs from all Bitcoin nodes Aggregated logs can be searched using `warnet bitcoin grep-logs` with regex patterns. 
+See more details in [`warnet bitcoin grep-logs`](/docs/warnet.md#warnet-bitcoin-grep-logs) + Example: ```sh $ warnet bitcoin grep-logs 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d -warnet_test_uhynisdj_tank_000001: 2023-10-11T17:44:48.716582Z [miner] AddToWallet 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d newupdate -warnet_test_uhynisdj_tank_000001: 2023-10-11T17:44:48.717787Z [miner] Submitting wtx 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d to mempool for relay -warnet_test_uhynisdj_tank_000001: 2023-10-11T17:44:48.717929Z [validation] Enqueuing TransactionAddedToMempool: txid=94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d wtxid=0cc875e73bb0bd8f892b70b8d1e5154aab64daace8d571efac94c62b8c1da3cf -warnet_test_uhynisdj_tank_000001: 2023-10-11T17:44:48.718040Z [validation] TransactionAddedToMempool: txid=94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d wtxid=0cc875e73bb0bd8f892b70b8d1e5154aab64daace8d571efac94c62b8c1da3cf -warnet_test_uhynisdj_tank_000001: 2023-10-11T17:44:48.723017Z [miner] AddToWallet 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d -warnet_test_uhynisdj_tank_000007: 2023-10-11T17:44:52.173199Z [validation] Enqueuing TransactionAddedToMempool: txid=94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d wtxid=0cc875e73bb0bd8f892b70b8d1e5154aab64daace8d571efac94c62b8c1da3cf +tank-0001: 2023-10-11T17:44:48.716582Z [miner] AddToWallet 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d newupdate +tank-0001: 2023-10-11T17:44:48.717787Z [miner] Submitting wtx 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d to mempool for relay +tank-0001: 2023-10-11T17:44:48.717929Z [validation] Enqueuing TransactionAddedToMempool: txid=94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d wtxid=0cc875e73bb0bd8f892b70b8d1e5154aab64daace8d571efac94c62b8c1da3cf +tank-0001: 2023-10-11T17:44:48.718040Z [validation] 
TransactionAddedToMempool: txid=94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d wtxid=0cc875e73bb0bd8f892b70b8d1e5154aab64daace8d571efac94c62b8c1da3cf +tank-0001: 2023-10-11T17:44:48.723017Z [miner] AddToWallet 94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d +tank-0002: 2023-10-11T17:44:52.173199Z [validation] Enqueuing TransactionAddedToMempool: txid=94cacabc09b024b56dcbed9ccad15c90340c596e883159bcb5f1d2152997322d wtxid=0cc875e73bb0bd8f892b70b8d1e5154aab64daace8d571efac94c62b8c1da3cf ... (etc) ``` -See more details in [warnet](/docs/warnet.md#warnet-bitcoin-grep-logs) ## Monitoring and Metrics ## Install logging infrastructure -Ensure that [`helm`](https://helm.sh/docs/intro/install/) is installed, then simply run the following scripts: +If any tank in a network is configured with `collectLogs: true` or `metricsExport: true` +then the logging stack will be installed automatically when `warnet deploy` is executed. -```bash -./resources/scripts/install_logging.sh -``` +The logging stack includes Loki, Prometheus, and Grafana. Together these programs +aggregate logs and data from Bitcoin RPC queries into a web-based dashboard. -To forward port `3000` and view the [Grafana](#grafana) dashboard run the `connect_logging` script: +## Connect to logging dashboard -```bash -./resources/scripts/connect_logging.sh -``` - -It might take a couple minutes to get the pod running. If you see `error: unable to forward port because pod is not running. Current status=Pending`, hang tight. - -The Grafana dashboard (and API) will be accessible without requiring authentication -at `http://localhost:3000`. +The logging stack including the user interface web server runs inside the kubernetes cluster. +To access that from a local web browser, you must use kubernetes port-forwarding. 
-The `install_logging` script will need to be installed before starting the network in order to collect the information for monitoring and metrics. Restart the network with `warnet network down && warnet network up` if necessary. +Run the script `./resources/scripts/connect_logging.sh` to forward port 3000. +The Grafana dashboard will then be available locally at `localhost:3000`. ### Prometheus @@ -86,14 +74,7 @@ to any Bitcoin Tank and configured to scrape any available RPC results. The `bitcoin-exporter` image is defined in `resources/images/exporter` and maintained in the BitcoinDevProject dockerhub organization. To add the exporter -in the Tank pod with Bitcoin Core add the `"exporter"` key to the node in the graphml file: - -```xml - - 27.0 - true - -``` +in the Tank pod with Bitcoin Core add the `metricsExport: true` value to the node in the yaml file. The default metrics are defined in the `bitcoin-exporter` image: - Block count @@ -101,25 +82,23 @@ The default metrics are defined in the `bitcoin-exporter` image: - Number of outbound peers - Mempool size (# of TXs) -Metrics can be configured by setting a `"metrics"` key to the node in the graphml file. -The metrics value is a space-separated list of labels, RPC commands with arguments, and +Metrics can be configured by setting an additional `metrics` value to the node in the yaml file. The metrics value is a space-separated list of labels, RPC commands with arguments, and JSON keys to resolve the desired data: ``` label=method(arguments)[JSON result key][...] 
``` -For example, the default metrics listed above are defined as: +For example, the default metrics listed above would be explicitly configured as follows: -```xml - - 27.0 - true - blocks=getblockcount() inbounds=getnetworkinfo()["connections_in"] outbounds=getnetworkinfo()["connections_in"] mempool_size=getmempoolinfo()["size"] - +```yaml +nodes: + - name: tank-0000 + metricsExport: true + metrics: blocks=getblockcount() inbounds=getnetworkinfo()["connections_in"] outbounds=getnetworkinfo()["connections_in"] mempool_size=getmempoolinfo()["size"] ``` -The data can be retrieved from the Prometheus exporter on port `9332`, example: +The data can be retrieved directly from the Prometheus exporter container in the tank pod via port `9332`, example: ``` # HELP blocks getblockcount() @@ -138,7 +117,7 @@ mempool_size 0.0 ### Grafana -Data from Prometheus exporters can be collected and fed into Grafana for a +Data from Prometheus exporters is collected and fed into Grafana for a web-based interface. 
#### Dashboards diff --git a/docs/random_internet_as_graph_n100.png b/docs/random_internet_as_graph_n100.png deleted file mode 100644 index 3e0fd342225451d4620586eb55d8dc12fa535155..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 471797 zcmeEuWn7fq7wsU3s34&T0)h-;5F#K9A)yW-pyUi4lG5ERiZmi6jnWL=9ny`!&?Vj7 zdC$Q6zW@9Ae!l)tN1S<{^PIEK+H0@94&GAlMF?(C+<-tJ1aC#*G7tzZFZfN5cLjXs z;Ox)_{Dp1#`mG!u9^S}b$w}}}A}b*UD_K)rE87niIuO~9)>c+J7FwUWuR|aYAaCKX zY z&Ky;iXKYx+YHZ^Wsnfg4uWzKOWn;GPg@9UL_2wiB6+}a>;Cku_3pzCpI-HeE+ED2> zeuyoJIIK{&pXI=>>oBYJYY$&OE*DX)c2Na`#QYj7@u$Ch<(BnL0qf$Wpt@+r<@e%Q z9wIMab(4`WUcOqm=}mh1YUfKmW=_Dbqt}{OFJC#+=3-_A0&%PQ|Cjs!b9WgO{||wZ zpoj@0!Fh=eW~hv7+28G81e8r{=??MR$dHeIzB=X*h>L#T?(%>lCh!E~!24+KtQZ=q{}y1;*Q4$=I3%gLLk+z zF2Z-Avx)xe`7Bwrbod+1+tAR@M-QJE0Pnfpc1cat$^S$sn% zE*WGPr!&chU;n@UiIgW&Z|^7J9Q-u`ZoBPwgC~@JfjcYI{xiJ6HcYeiUpyg zxY&U4Hc5;ntcEJe>w&Wi-_UqUGT}|xx067L!Mg%UF`6( z_(uw*f%+gi1q3f19ACg#JjC#J29thoh5`ao(=2rP?dqdm0=67Ju*om?vWt$9S!!9$ z6UP7E0v_g{9e>g~J5Fs~OHS5i0D%ZUxtMv41NwJjcY^Yn{h7cjU1G$7PtluJo$$_B z;vmy!kxiOcAi~f7dGoz#O+;g#wC(SXHynsC?`FR=+5c-a3%}{?eH&IaHRQE2%QkjP zxtX3qG-jX|aAWm~G9)YM{WC7tWL3D!;po1CB&5bm_;OXhh($&+w5detqR|SdqK|3Z zYY>Q^$_22wsW~~D1Vrj0EH?t*5M-877@~1d>iqkB#^rY*5W2q9<$>&c492;+oLDRu zO^{LF2|C!x$oYInk3aG6;9~<@GzcT=+W;TNG?xJodwHiS}e618c zJ02TQcDo({rn`&rWv zLP^!!%Rd!_t(1+ACC2t7#i!^#DZYTX#eE3G0C+r0IX|N$K4DvQNc{MT zb#3lqV~+iz_tHBMo5SzXMr*41)4g9G-*fo}3l_qL97K#LTx>~!R!NwT*IjvU(O3S8 zz9=}{@Vw#PCsRs#*OxjbODj5KU@kl_$3~_^BUL`9q4P#9y>@(+jGod)K0<&Ar4>c0 z(bCr^qdc);uM_HJ)WV`x&SooobZ0l~urxXzLVoX`UBKm(?$XjH$5AE5K1Qy)z9&0f zx7Q!32oto7KiHTLMJR9iGG~u|S7(A{4V7t4Q&=zxo?m`c^@+gPflyNf>AsGVT4`FU z!^qO{s&jK53MrM`jo-{fL_@qr`T18++p18DT_eZ1 zQfWBSc#t7)x4AsZe>3{S?QWL2%l(tHuif!IEiG(-D#M6~_?om_G_3;Mgp!^n44vzS zr+v+b5LIH}NBhA^tiQgUzE+l+oHFJmCy)J3iIg?1T`M-RWgK~2&qP_wX?+=X!&gNf z!D23X%xXSA!6j4)cdu`?SUJ*A-lr~iQ9av(eF!1F((gW1hvGCM4Py2DJo%K6Yn!$b z@wLx|k>Zn^Np2G+M9FnQynsl^^+Cb4WuN1&OisSboJ9d1 
zATjVh__Oib11Oj?k&jmwh%Vyvc{B)!XAzM+b{^=(qS)P=Jtl{L%{?cDT09+Z|Gr_s zrT^ec=W+R-cQg5$-%`^I^k4!Q3S^qF;u$aYU`^?74c9)+9BKi9Nq zBcB`Iyt&-INnCpen;5Ua>TO$;${zagS^w>B+RKZIm<(Mm$}_nuLJvEMDxM5=8?7ag zdqHw-bh5lujCdC3%sU4FN(cBGD#-jZ)e!X(9U7_8R2+jF7qf!nDI(8w7YsLncO>Ik z@orQ3wx#9&-koikNBG19a|2VGJTEDHxEmO3S#^gG0n*LnGFPe1OVCKcIY|+cKDVq7 z^Xu<@0^B_WYqn$EOX2iv&;z1+2KL!clo8olcHmh^z5#x zW=8f%HgD1^wWw@U*{-E&*}Q1{Ob67h-3UtiqH8;T7<9yY0AQlaMKk8Eyg%zhiFTD= zr!ancLnpB?hXF7Ux_N&4_=D%ZxG?U27J~ZF?~AoA@lUV>fBuX*=(`@eo>qf=v|XqZ zL%+`R5l^kW+p;t>D}P3Hib3%Fv5KlO2FB?Pk+qgDAv z#`R283O_6d_(u!g$il1x)Ih2%iZTQK-(vd*;w6*gRS*0bJ(^<|P%bmEo*P{WSxmct zA#m4u!?uEIK>(Q&@G72(q}aj2K)$Ut-BvfZ*grRLAP}s-f}0<9#TpMgy*Q_|S6fHt zU3Xp>di}JhB78!fj>7=(?KZKG`-WqfIeJph(Xl_P-Px)^GY<@YPz&&dlQ=+;@Fh0* z)Rl|qtACiUn7MBQd>WFIt8{r4DZ+0$zB2QP&YDdM|K!!UL>1YF0OvFVaYS5Huk`l> zp0PN2q<3@JiRNlO#nZRm+K3v_T1@txoLBKhy%>Jactl73eXG|WIt60G{s8?uFjjpg zt*h!p{tL~SSrE@k5qdwr=y;@XDm*m^3H6#uqWDpL>pb|p(PoOFX8j_RU6W&0x(Z9guKUC>KmoM9 zd&kW5G@3P$Ki-Q9W^KQBOTwS9*T>z7cT#U_V$fxOr==d@HbIBsH)oQ(S|J!@(}^OD zom*_V;P4l7lV{L+grM7li(7QXh{dxxsHBz&vJwRl6cVONz&!0wVwuF&Bx|4G z{uT%c5AkVkUC-8i>IS*I#oa{`r&Aw?h08Tr9TJQG4&zny&-+D==W1sb8scJqa>tyM zND70RZWHO5rbNUXpIV(`d2XHTk?wXMyG?;x3j61g__&iV5AY8%>w5*EDiW=_=2pS*Ar}nLtWpEda3e zvxshw-}hz5A?uGQ)YN$8A7Oe|_{J0i1cUK4ajSzR!tiIDF;?4Lm@Sil1*^=mDFhFd zjpNh&YvwF>4czS_*^=KQWc>GI)oHo1&U3=HKJMWoN9A%CxH-C0HjD-jm%7pH7%He^ z;Pit+CrJvw9?wo8G-Pe4!g;X_jakvlplK(kDaT;7RrtE~;RB71Bxgz6ZOi(avW)}+?Q#387LhU2lBR4jw>(Sw{ z3m)ZVPln9OPK6o6G#~7EgFSVTc_+`o2N^7Q)Rp^{t(zkvkOR+lc98d%*kL}HiRNuN}UvRaaACZG+j`z3z_DD|bkE^XU=J*i-1ApRWKv91Cw%9E>MBG&QPrJkrJt zK^UkpF`zhrV9f%&Bvvf^fU2a-P>ncV@M&l~7$lq+qKCG{snq^@wVnUz)Wjr*2Lxyp zg=Uh`n4<)|pDE%g^j%D)r0Mxi!aUh#=4Wb9L*#tDn3!jzs@~0jCqvojoW;asetD5k zYZZg~!4bLP}8h28tg(P8K58~&~p0vLb+BLWY5 z`oQ$jge?9%c6#DY+jI)j#g#)GtLD+8Y-|XGi-h=o1gE7>`ZZ`xslAKf>?MKOw{?-; zdKVYMj;m=0R=9*7b6+=Qg|iu-B`rjZ@qH#C!Ri{D;}f^Y#m46o6Xz8}EmuX3x&mA7 zVY9XeJ_0lP{sPcS3Qemwc9gS?wC-Qozq+^BSozrSC5gQ81upO~sSAA=^LIwH<$nhS 
z$uzI;2fG*Gc4oIQ+^r__4*|Xz^&)m_P5bVLd-hq66gZ-e_Yn;uIAnc4hIFseFtR)y z%eUt>Cxz%;@G7n^CK{7fjr?PFlG-IAoL~ZvuIktAeqj>hj5b;SGfov|u7tPnRp#wt zY{y8P{kBlp6*u+^HW@7hdk(RVULAAHd!%RaT_k|hFI14YB`U@#FHPJrSo49?Ti;9M zfplIxm)YI18i>__+$ktPf@rB1Hq&m5=g3{*2*U*SZ#^#*q{I!g_!@W%xVZgoE0_$I z#y<&|`G<`EK$KF49;AS%CSvde;TKPUPXI3acjyv1`|Ab}W;4RKziB?GIO8%f0nd8e zV~z4gLGVd0CaAlc0H26@1-n&{-b~Hm*EUU0zjn{rb{|Mg8zFm#~P?WgtPo zd9`XZyWA2sg2D`Ck5obM@58F?_WiLJfjzt%j69-fy(}HpyCYy zrzGbh+x3>n_0E>9N!GxqvSi9Ss);$m?DHO7sU-XJ$6Zw{TMLrvzLH`-0kAR~$-DFu zFUozugKj;5W>L_BiV%8Pn4zcDpIZYIYdRsBl9d ze>s1ve&aczNe)TSS9Ru-g+rme@sWP*qlR>yB?~1xFIH`*or7}xS6YRXKUmaEcwJkp z_MCPj@|P&W_mw%K96{}nsmRv5Rb{N~e7+G|PvoKt=mxuaXlF#E5+E0*;x4(Blz?#R zx|rO$`Q^jq)0TB~ot^w`BGc2>$-Co{aAKuZ7m-D}Z@=Z}kdyang)sLh<6wHM*kJ^iqjw={uV0AJtlZQ>CYcO zn2lqgQ}d-|b6@xi!Qu~B{qfrrml3x><~e}Ed~a}%-r;wXl=bJT1;RRnz5)c&~5 z*cznnzS)59whDp=gT_oiD0kzPqtE%35uHT7_|oM+{F*EGv2}g`)&EQT#vcWr8BfhA z*dQXt#9a|bgy&AKeSW5TS~@%K7CXe22;2yixo*HU zbv!@sv@>4SS#&E*b4F(VT6;wFjK9aoOYup)1{xGQwr+z+nayC5zBeeSTf(<5_KI$s zAt)6AP#v1$&{oYk65^rn>Q11zV7#t=L7vRy;Z`$sfh=b` zkm}YzjjxhumWW7-dsBx>qshMv9N7niv_Du>OiM2Zq!ozMtG~&+s0GpEet~$M;v6rm z_qKu|WPl$6!q<1f5Utt(&%MX@Om4M638b^Mv@Z>xyDnhH4l-5~bm}m|>nwPWOFaJF z@FCXN&m`41%((CU2^qOGRk0Jv@#Lxb>k(xstNs?WSv~wxF)|ycs>ceF2lyE5antK2 zp!!)c3>XQJ8gSJQ6*rrfKHa>Xkx_!gkG?}H#!9sIFFc2K>QiQna*Ilx$Pr5i=RhVd z5Of2-%$+V|A5`agKp2v-O0C>a&uMwr_-*r<(}j`)_QZXLEQh%cGQ}HwmaZP5fES<3 z-fIy2X9P`P1okYYk}#J)=8cr#Nm6kdMFu;5tK5vrfCpBKtK8S&I1ux3H`~i)Cz2Ox zY{;QFO5t&)=EnQ)5|957P$}-kW+<|8pdFiv6|Ywejwu54%W+)=l0v4iqBQ>JU@w&L zw=PByMFHQQ>=Vugc|bZTJ4gUiu>00epmH{I1Nmk>yX-=IS(?K|`HscFD)mid$vSOc z7#|t!Oy4e>a**_DKAe*cgYjBUbvY=w|K{1I@P!p>#(Ow_fS4m{%AB3ZeIb`&u#w}J z(cU=h>62%HAo=`W-8#->)G=?aq2f!JJdal#Q+BaW99-LJbS7%t&##&y47xR&5XdAo z0Do653IFs+I2D%N*#7aPB|kZTek`tH+~(}NSG$vTb3*nsel5^2I_A_kdc_oDLr*>^ zjuCF@Zr#2zLKywMhcBv_MYTAx#6WvfW3Yg}`rwd;ujBjgYoZx$v{JIT{+qc#J+8?G zd!b^twTN8H2yXQd#i~NpBt+JrN~#fKhAvE0A$TP>!wByP`nydT!AS3q+D>*n|^$6YApqntzSE>D(qZ86<4cPf^@Wmad`M~5$iNQ={5Wf)a 
z>FyYZd{;|A%HMl5egckmFjL-&OO{@gE5vHcMt}2p%%cU0rX?0H-3zC5&3Ny2t zeoSTu=od!X`Wr@>VIlw0V#QXZ3S%`gw|M_KJHS0BXxe#Lq&QX?PVbf#z;Ui06hUB}(J|iJ+ zwG6GTl!lbTQm}51&ET^52FX)X2ObxS^ot69?hwm#ao}nbCfBdz%A9Q+u>?Bj@G-xv zFEmC1gqI1ekrzlya z4C?bHauOhuPHw$=R*a6xW>zj|3(sBNxJSY``AA76X%E4DD37S&Zq-$xhu+I)W7E} znW6lgNej!`vk6Mp{p(}_@l)xAjF#T0NpX;0O}d3M%lBT6D_e`Au>&j0Bf4)uH1y42 z-^L`X=A$rW7m&!mY7L;@r*gZ7N8WK6iQ`Y>M_3=dUX2ra2v&%kZQR^Mr^>E0BQ4>m zU6D8fgt6<%pK7i%rknCiM8r*h(EXK7$&Ft-VqALf13_wvLEtv`{@Z&;6LSr}q@B5P(cK&5-pE|MlVI}bF zvbPh@1PEh}3@y8M3wHV#EAPjho)K76I+lj&An849!F# ztK2K&}O&}F|#pYWN4SeHhSa~I56(0t@4UDLl_0EO!-M(ZbI+2d*@nzf1D(t47kUMRn} zjRL1}k*OI;jV;%~(EOL!^-x{Wh@@%^q4*=ipfg8@vXp;Ga^KWy?(8F9?P>JP)FXU97q-Du?1Sd$?tifdaRBvyjJ=0;Wss7eB3bo zvaTjQ6v&7-8U!KT1gfFy!lU-sr}MML*+*`g)g-BG38tqTX}l~&`k-scq>`&~p7o10 zVYYVm=H4t>hdNKn>0^PT1B18@k3fHkqxk_fOEy&A=IxKJ)tJ@W(tWIq(QU}%1H-@f zumr|Iv$nLB`rJ>?%aXLcn^&hzhx^i2Q6xbiu4q(qe}r?A-vzM+hu^wGvvw}jT?yv3 zk(}hH-G6<3xQUYyvQZMm@9>h2F+aEH=dTPWg-%NH5|(P`!AqH91UX@9@86F@MJ4aN z{8jRFP2P0kjN@%7Y&UyA2>v}=buq$9{d4U>2S;EbXW?C=5o-PqeXaBT&PSVnuifpO zIE*S5nJ`aT7I43{uT1a^_gR-%GC8?fAlXzpWaA_-HWuKgaZ!(rG(pj2Rxrx^{TxJ~ z3AxrrO`IBn^;-juoAQlz_6hr?3U{;EE}($q)V5m4elN;=s)cMUJ!)l~d(>8vURY`^ zCTvhg#*sg-UA=oW0s?;Fu!WHRne$(ttA92d0|M5}rW5bw%O*_wCp5_wa27v$w_QV~ z8KJDd&unUA0g82$vJ5`{i&^?4M^4j;jiz&rrzhhlqtOB1sdn{_0eL83md7NtEU%pG z8xEk`JBsOZrQB)&JzV!k&jFUh;UfGe2UC)SRLPVJ3G$OEl;INZcHhxRDONvtS6<;b zzqZsGiCKKFRIkn+KB&y_0W1@LaCp=p9X1wYBq708^rFy5w0`0!%V%din26u(bI9T; z;kOn0p`>f#>s=lETW=S?i-eWB3S65FxC!)o|NlFEUlK3Gtc zosyq#)fMA;FqLpxo`Zgv2DUcQU#9dDEk;}m(lH^u8`jq|svD}1rujfX{Onxg9uWPEPgk&Naxi3|#Gz>H zlp6_&ht*5+z+v4mCyX+m+^I(F9f((CbY*YX$|j_Ol5O=BsXRiyKXqAMv04W#WZcTr z)(+)(lXL$jvb_^+kI`USD6w=uA>%4V;2rGD)Y0^n?Iqaxwy(!IN^tDW?lT{EcQ+u8 z@dKq13T5v}@e;Bhxvw6x4x0ORfjTFNI+ASl2XC)R)=SoTnZ&cGeLk)rv@6S;-<&Zo zw_UqSjK(SO1Nb4#+$h7FUd1V9v3*w}Zs=UMekhhYXpU?8H#cD#cI47KEhZObNpZQI>Q*{O@*;2q(1Y8q|Ehv2*NX`2Yp2R6?6F1}UxxZ1kt z=SXeJM`IIz|G+c>X}UnWvojRGhTZQmC040fNSNgR~kE#>jNZ_MyZ 
z)K8nc73iOrSAdZ#D<5|f8QIgEZA`d2YD`)AxSypYlp-1s5tjLi1Q1N?)C;4l4P>>C zfwC%hQF|lgnhJmCY|&Y@VEVQRguK)%ZW7xHyRyE?S&&2fv?k4RIh-%P0U_W14iE{G zWVQzg{FRUT=p#1+4}|I4T(6Ve*a#V<*81&+`i37%@1Rqani`kA4$G9sO5=LOrKr{H z<@0LnF1{BnY%RaN`u`=BpB@=May9?D25PNSpyAqyyO@JE_NZt^2!Q{bShp-9fmg*~ z#-hQD4S(AWDL+fkT4WldgAxyk+pdphPBF+<>JMqP_4%U@7|_?`isdOY9K?OzoEuN) zYKfr9pC*;%J+j~OA7zFz{3^@S9nykA#~E?&_D5v81h)JLCGHXTTlQ#?6fW=H-WH2nboJ6!hPa;~C+l`vt8~!Z+gFH%Q0`Iq;h7 z{A2s&-nz?&-eI2v(itS(eREPbVqnxM`3nSKJ%QY;u2DPghX$a?{>Hxtk{_wOu}a}L zu?LecryjeR#9uqiAWV&PgKqs!F2wL!S{Ag%(MI7neZu#mVgpm@08jb|3N{fe|gUO%is|Vc`RW!)1+>F+j zU<;#WQr#QOE1#bYGj<-KY!KvVHQ3k%a%bH5*&^WfI%|3jV1(h?(k@Z;q zh>{yfCD4?*V&w&9t_5U~`;^Tp3=#x(e+X8s|R9H>?{V2pVU=O_xU67x}o`r+FrKMs<>>&LXRi(IW;|$dt2o z{6J7O10#__?`sslrB+I*IcS&;4x55m#4s_?dDuLXe49rUrMja%%)qZUa7>SV+$q}ag1vQjBdik+F&XMwsPTn7<-lt6Q)1H!RJ9hfq@^+%nc zsq2+`Dhm{&MskxAl8mc}R?x@D(3RKG*3s40L576(5`Bgd31GQ+o5Z&{bzkell~%Nw z=tRrM@@>YwV%@*0T9z@D{-cDvO-}syiKU31`Kv$EhOs^T$Po?n_Lk?+c~Nd==3BUl zVumbCs%}89tHq@&s-dPfAtS@by1GUQAW*OX zsk$44pe$;Q4-td^({x>Z=)tM(h)3{h^v33G2*H@(fE{Q75-1k?p!)Q&{M+JA*HB;f zV@wsepJb8vb|4KV%lapKITcj(O-O=R)^DVp&!nOQ();I-QTrn05wvX3xnX*i7}gxX zMs>SGIep#xt53cuFG||KT?}uM%*3$hD;F!t^~>hHb;!(*NG0IeC|H*LP9jwQ3TZvP z&q?tK%k$v68}9;c*_Od*l(S&en|_@KSw78b7?~4}oYT>3Z_VMcjpeLbI>^0YJXuzz z2Ij}MtO{(-So4G>Q)s<3V7bQ8pvsuCHQrY!;9QKC+GBB&sND|=ep+TybL#kb@m?COF}%)vT?_1Xe%i8 zb^nDfq%!KrfydF1zr2@7fc9x+ex>EW+8coJ--2RG9Oq3?C1U>BBtcW3pl$BU!?=t< zE>gRb22(Gl(4@Cv%K${lpex*~JvP{RCRhgWm75KM%QU*`v&bbDU(m!(gXtwo5(fbx zI^5NMJMJ1-Q@0?{_}Um_B>G9ql~F+<5X6VI@u1ATb6bW+%J526j(+Cst}|cEefo7z z2}h|01ZMxBZ>InY#aN|fF>A}mdzhpQ{RJ}4WZ7s%ZF+H{0{rOFgymMQB@U(y;X~~9&79GuTorIGI?hEiuM%9 zfei`FAQ-FwxEAviSLyuj%;M-0dZ1WeG1SH(8tB)3Ur>4@Xq+Ap?mk8YHKSsnnR0D) z4BIW}6PUqW-*490r$0R7>DL=1U#tUHpcS_1>8DX%=bABI+qQVxWj3JSbvkQGeEjT# zx|^lY?dT|~gr=1}MWD}QK|)|lB~0a1_#(1@?w>e6OYm3V6wPfpG9=Eq=AAaKZJHN4 z>u0n64{V^~8&|YhyN|9pJ}72GN2zpXG8H3%^#i9t@;=|Qz9mi|HS4ru1KFeRZJfOE 
zV3}3|&ijv;-W$dbRJY9oKPjLUJ;8y*zJ_(P2JT}OQW=Y%h0A>z-EySGw`O_1Rc#-+(+Vy}KR>p*#RdpNH3KRAc?_3%fs3p=RIvg8n(l$SE ziYS&!fz=iIh!h6Gjh4(8F9m;8brP_qq2EReX=~1XuT>090>&6HM1)D9L0x|#BP#a% zPp`{)_k*k}lQ1CF{bhwxd;%Nfx!(dVkHfpQMgDkBdxMSVX#j2k)A5*2&)YH}KHTdt z51l<=K=H>9Yv|a|IMo!n#3Dw-b(+hJ}7bU&$KN?f>Eh9Lz67{ zi)GA1D>D(C{{HXpVk|E}paI)rb3F#hkfl^+!eLJ8$kwh8GWR0&V8?cp z%Tqlxk)eN-RP`;rLbZsciH8@jG(mKq4TK z@xPWmFqwv`GG{U$pUK(f6x5FsR23h^S>#zNJSkzJr#Cq%K0B-5>|J65*QqCPC|CH6T?AL3L%_u&Tt|Y2Xx!0Dg$Qbp|n?l6NB-)`xaF zDo9VSAFa0rDS^NMM#(q#K5AV|ToV1B>j*A0=Q!;1)?|01?GFyGEZ}qHIM7$RJWG1Uu7qfKO9T!CGgOm$7Qg7@fpqfnMkMGBE0)(-4 z-I_?=y2v)YBz=GH+wRTReXGn(z}(s!{^%k|t=ByW_2W)kW1SG!jTXNc8w!F{yTKO*4izQo-i?wfP{F6QZ#NtL-puV#_w zs}1jVa8ADmw%NXg*0MGNY7@R;%lbBLF+%_%_>(f#E|G8{{`wQ_l&b|~^7`Qz|5+KU zgtOP|LHcwP1H2xXyF54*{JD1<)NvWyp_&qGx2h;(R$~IHZ)ko0BWK!E^?9<|dDdT3 z#eMah+>@vwyg%yXK!sUJ4ksxRK0=28|rFG5l5w5_XR_MQ4@0H1H(F_wQa|F_) zDE*9EvOyUnQLr};c9zz2zCW96x8+oiXzuO;!`skuA0@ELCrtH^%quMvgY$^*dx^sL zWZFiQ8xf5U6Lbn*0JAhlRqI@hbz!Z+T2EA?lD^(`tLODVZ$fX%Csq-){apr!Dt-x5 zZ*H_UT{tT3>*VlvVu4;XHy^N~SO7ZfC)D1E>bj)UUaGGY@=tr&Qegd@R7}hfY~22@ zR;UBxY)#1?yO5!>KRziI%AApqIj!TS?fH(hs5pd5Ep zcy-NR1)*!K?-c<056Zf4d4EY)-+^vPw05Y?Jt*Dx;n;HJr#mA7*0-CWy>TWaqV>P01h42mkQF_=7>BtWK<3|bz?T*rQsF<0L( zSiW71pAUrB9*?B3ed8_p%JCz*)HM9qJ|Ok@gBsB157&qLGw1%C7kCgQ>pppVb$PQz=2mNAQo}z!?1DMwvSR z@#N&N;UiBl;Y^laYYDOe-H?~>=NeR+eXWv|sLHREUY)LLnoszo2$)HP=r>M6m)WG& zEd9|N*2LFcHvS%EnmSgHb3J9)A)}Gc&fOjd8%rA??G)6)M5@6A|gkOVx8NZ{~ho9E=|wk22iwG6<%Pv)4$AXGO~P2zQEx-Z*a z{kaYPu;Mk0eQj=j?&G@}GS19GTWxD4!KRCxiWNR~H0Hn^>mPx#UD6QZk) z^0(^|>WtTH@d&h9MpbA~f0xxjRd*fPVI!=VrWtSQLJiz-__14VQP-qHYeEw1i4VO1 zjUW;g%}7it6jC+Q^9cR*F5iMScT3|$o7+~!EAGVC0IzDd71RV$*`wN%J8vU_R165f z+qYs5Yp$FqxLIK{AG>8&RdsAFnqZunYk1z@-M`Dnm70SF2^5;Q%gZi9eL831_)ZY0 zdIh~#e1KRVm7WC#5Nba(JpKgK7rBKPCS3BE%O=F(77MT0-uJr4hYr2W%I=(Bvwgi_ zKJJ%JY@6TB3M9aXi3^Ior`F z{Mfg7_S+L;Zgu5H(DUu>;lqiNkLx*mQw*5`3kUU|mdO%z5+A3x?H##s!W0T)($^_y z&bl-FZmPyWd#pFhJR~}X$Ra%6b8tDn-*Z%Zf;-m%PRYo?Y#n~B?mJYIr(Tr+tvH2K 
z2)_5xb?I|>0MljT+kOw5hzlSRey+v2)Xxv6=T0t1wWf7VV?rl5EYbwEK6zU}sStYe zyqoF?|H~Zj`14TZ^ZdgSsa?kt;EV)~yMaB5l|j*uR}UUnuj8ikYXqj*Z`K5lgYyu# ze%%OF20U&plP572+n66K7L{G0X#xrW9eWbNcMX4h-ZiX(2h$%IT*3Bqd4-ZPpMb}p2dj5-JXYlX8Wi&~C*iV@eJ zdIO0g6L>q&ctjwk-pxvZ-Y96r;YiwRfHNGsab>j&=05=$_DFg8oYk`BAcP6y+gk?7 zM{5z{diqrmezK*Gg}zo{u@*fW92-n41Vq7t$2EXaGf$uu%jSJbpF$neDofJyD|^RF zuQCQ8XUKrR_N6ZYlJgj|l)GVf1{#u3L`y2lvRVlLX<8%@$vT~Khqm)FLHW}>=yy+q z|LN|E?SFRd!b3F#zR}Xk?vi8!5F)_-5Sr2KeM`oFVxc^SH>Lq9i3b`%3U7b01_{+P zJ7;cfO#2Ern9}@2Jm}Uf@sn(kQ4IBW+L@mjEk=y0oA0pYyWX1BVg4tn9f{ zjs&I?V{(|NC**hq7|4OSLkJGaUtaP3QGKt#X0`M&BQurVa`j4M_AHVhppkU?Q>Pf` z$T-*3nunGpd3_cUQC1t;-om~2>5p74YTMeyvfIq6-mlZYoyYR=fFfHUwiFBuG;agPge)*@A%r{;?kme zKW|fR;)Xx)GJx7n0S;QT;pdf_?sI^olghsXKKn?P_f@)4KT6@?NZxspHTLPEFN46? z6DVJ@?rud2I_@hl1=bxtis+&A)Ey*MN;&tS?u%g zpDyRWWGn?G;c6U#mQ%(sE$HQD6fb=XrU4phR=HDl*~#|q`9o-S#)OP{fhNw&yJBMd zM&nghV+KHOa`&!Z!eutK7>Woub4^hnQ{NkPJf1jBJjs{!Hy*3ZV6O1@jCbwTmG_ry zp;(#;_%r>wFb0WSC5O0dOS&CM!*ZsL*2jZ6xPyaLT*iA-Qw@Pqe0Y9l++1+&eiH%^ zG#euMh^2R+Lwv)aJ)OkvMB$4U*Pw<@(z;b8^7@^iU{bWl?w4e9$TH7(Tme4!OjU!!vJ%ws=3sQa5v{i*qF)UC(B7H+RmfSa zq^h;OR%$ApSG!5zB!zz`2$+r~N6`~2yz0uy7L23z`fCR zzM#MQ*#lHQU@p}L3y?dJ%D`}4b=cnJg_>li<|e*3Y5ouih5FJcJv1T1Hmv4K3iuWr za~t-Di~iZ)G;TsZj`^Q@WXWI(-X+P?Tc-@VTwVV|@hK|ql$+pH^-p6zi+P=>yqE{I_Rz2LDczUK5?uqTy_U#v~0i5;Qk;3-V#f@cDlsR&t#L|(6&^TE1Qq`a;`cGy4i7E8^J<3>;dyk-- ze`){R_Bgbud6!PT)z3IW?c$VMhA%LA)>Z_L!-3uqI9|pFq-jOT8kV>d;1dCE5s@|a z>LqL!+xMr2es%N)oqVt;W#$su5UdY){szXpR5-H=i|hw73YRb zWk19WjYsSiXlgipCUgCXNQmJ)*%W|lyO_Kec^h!l+X+nAN-xshKjpeKVi#q%{;AW$ zs4ad#g19A(X#Up=0QP$xsHcJUtLN!uzw9okauqUMJ3Z>7t5zg{g;%(@aEo zewMK1kegrL3VJh@`xi#-S8$)3K1R2|VMPMuKL5ct^e!KZX^A0P0M#}>9Ql&BUJMTX z1x;r(4CB*vJ_Xo5POd`_1r{*t+GN7EC2**;*HPfHK|$~=hrxN@$tV@<+`ZR=k;6lM zWxZlM{SD0-I3EDHqvU=HM2Wr^D1kL5@+CYQ=B%UfC07T7#i_?EVXlgMEtc!6=PPmv z39eg@DW)G$%qMrc8g#zdPI>S0lGpmsFMIZVmH(prM*Dj9pP&~wr{27wf(;f4Sj*QB zmf6pN=Mm|eMQ&ceA3xh0(#Ae;?RcoVnx89cxEj^`N+qdLUVi`_gSmfMF@So?kupP$ 
zek1Ft{!&MNvg}(+>atlPRIOzimAJU%eU*nhGOT9Od4H|S$J9eBRq>fvp6=R2ChX$u zuQQ}`DgB*J*Z8l~!ap}>M^)oEsAXN(ivmvQYvST(CHW5+VIwUL@jwapVy3}uenX|_N;cJ zp9o}KS+pOrM5rXe$Af=bFml^W_0o~Wv}fL7uH>;j9DP`)NX%`VMH)2jJ5bJIW2;JD zm|r2xl&sEeuYj|KA_J^mjw89Ksy5;$HAFadRx)ewWTk68Zi4kwnY-E&wvvOFaIfh>(>i}!WRZ&%p!UMl(z3W3P^%Jht#JIrf!IOczyQ# zlKy+rduv)V*Is5sRfm2n)=wpaPEm_cdD*Q zW`}hh4A;l!XsO38mOW2Dp0Xy@;0udjXgB$KKb=-V8d(M04f#ViASdI+>UKw9yQP_w zoGI>MkuoEyZGJYLYQ|G&f-_L5<8q50vu95kIln4%P-iO{rw~5b`lz zlZ>$&R-wG}a636UqJ2!2tgNo_os+d|M9%4rJ(r2r);mE!oqB9kbVClwG6Fe3c`Y;- zM92yQw}dWD4rq)Z4?ZX8b(XrG7?K@5ewKD81Mpw-DUE#Q5@5w$Q)e0UMNt`HTADJr zHnSJ$Xj{HM0;f~Xl%j{4kvNyz=`>(L!SoKcP~wG~v&dH3K7o9}V-rz)#@7m(6D%VE zZhWl5oRe8OK?N{rsD~@EN-|;a+_BL&r=;B3T|u80vo#clDT^PluN5umN=fGcw=Cep3~DfUql}A6;w?7;v5z{x1B2x4w@eyS$}VmG-z}S z_o}se={TRML2E_Y-+2~Q0oS+i&#Uqa=SiGA7U1y1N4$Usm$pEgbDa@zSoZF1yeCKN zxVP3YBC#^OYPT*W%V`%aIsSBo9j4|RxAb1k72_ra@(FPe5UP;Vs28BddnYMbuurQA z3VsCrhKh19h30JH58D2To*v`%q%C1qDgV6IZ3L?sIO``qu*lfGDQ5!e7)ZcaBMBD^ z=UlS?b%VB^L&gEgh%T$WJs!)?&5mrkPV?$QGkcO}8-c{&M=O->@44Ef|Dm%r{XKZy z{I);%vXq_IBiBPQc91P+sbu?Jr`cWkUEr!7n>m#}9FPTOA)9|!FNI>H%<*szHxg75 zUuo1&{x}{3TA@e~;|LC^-g~lB`lixoB-flk{00tQCS__@dCHL+;M09kKv@M@9H^|% z9E}v=a-UiGk;Z;ygH}DWl0WHp0XMc;A9ZdkX_Yh0D|0VRu#}Z9^pO0J4jZ~??&9W> z_c`}l8Ccm>z&yce(u1SPWuS7ZgczA^sAZBboIUP&L({!xDtkdEZGgWh*`b$lxj6N} zLR5Vibm6>F@z>Bz=@*g|y4$suXCO(n(~n@i{E_Wf&v0~%%^4mhv5ZzP7^+j>WJiqd zBj!7|C#hzpda&^4rOt=f_(s^JIHnesQ%Wu{7#wb15Rb7oF zB+XL;Sb9MdxKo#rPnI$P9Cp{I`}`H0O&6?@8567=Ljmuxx>a;bbV&EmcqwS~%2qBa z3plkhNV7FI8FaaTA1IQgy6`7<|99h6?*_HUQAV+R8wa^Qc}oG{LlROh7ad=UxD>koL(SZ`aOFSaI~Bu<#NYb7mUrpEo>x zTyNy8=CH{+R>N4IGR$H=T|)13HX!yfmW?-Tg!xo{e#iQ=iJ)se}i9p{65Z7 zB@Rk2n)X*!ry%$+y-s0m`no;u-u@`%ZI`tkQsP6JuMGQfsRF&dj!EkV;_})1Yvi8N zawhIofFsY7iPhHjmgr(LweFG!QC%((wjy(` zPCEv+ZN|2v*2f3lom4>OP+f1)#Q11zl`MXxjnCK(+5;^q@&EAkmr+@D-`g;J5>ir9 zQqmntNO!1!v~bcPDIH3QbV+xYbc1xKv~+iOcgMT%`u*?c+dBrIbjWb6z1N(_JYsEn zNw4zEOSjuwxy}UvT6%s~hW3pQ`-{pr*tWB9f16pa8uu49<^{-i4x29B3b^DRY(Cs} 
zl*k}q$k`SDa=2`W_$iJI}{DY_AHp>pR%)2wR?Wqh-ewtiMEMa zye=~XK~JZu0xn*vFChDA|9HN=BNLd2Z~ll7;&OKTm|VHi8nylD{m>s~1{wzNL&q&4 zUiEl$DbgyUz^VYPCNK6xfAz2x=|`&8$1ChNrO#myj@`zzs5DzhLF0{TmpC2_w-EW_ z>ti~=AlSv{=0E7f_<84ghf4^(F0_*y$Fx54Dk^?mDj6Zd7Gz)+JbM}X#^6?&th>AS zDIRBnv=cuOvR%acHWOnHkZ(r1ubdp%|02QDG}^)a{g)$|`H^W0&StAdL1Euu|FFHf zT};ESWBE83ZlCTOIe2n@llI21OKWmC{^Qaw9?KmL6l9W}njbxW>SEc-Ng%->b1A@l z!ew4~k5dF^7jPOPYhjVI&m+4z(xg>XSui>|8b3Z$TExD&UopB`!Co}OJ5$X@VRZOr@$gciq)!Jf zh0_t-A)kQ(hOGP4o#agc|83E4 zO`ld>7C{dIsNm>l4sUPoqd13$na+S9y4o4@Y7C(=U2Wx0fhC-b@Sm$p1+IlkS<5_0 z4IR|Y-582Ul$)a|yPc^60%#gk8oefn=4C8mh(<;KsIrUcUCsFfTvdL>opR;ymy}7h zASIls-Xxte$Mya_tMoWwQlW1%27!Nc)^- z$#tA8VSC}IhF7HQi-n${V5Jqerr`9Mo#J74D>~X=RL24;G2wLa*uDthPJIn+7(^|vZ&S)V<$8OWNlyEPvP3I?6 zV1%!SN20II2DX?^*M7*AyhC6tmS4M38r0xZK0Q@g)Uwb~agL(GYp^qUHn4&l$T+A} zHyDraf-Du4KC}a|~hK zAKLtDsCS13{ge~_F!bZ^LXmcM#_`-=7Zf$tzl1C>=fOe@gCHHqU9Q!vFj^{x^z7wP z9|RdqN2^}<^x#RhJ4$;ZI++KRu9TOZd~FvXs)h1;x1a6N~;rlByIn`}fY{)>O90g+9I*JXw48l%SLv!p_v=vtG;AE?5@ zpAZp$7V(ne;em*7@e9pklg}RrO*t^EY#)^jHtqtA87CjlJ`)NJ+4*b3)8n9i;|ofW zS615CpW^K?c%|?6(3@7)@Jd6>_iuovn4cL!NGM>=RLEeCG&TfoEp!Fco@rv z1&Bt9mRzF)YTU#eEFEiD@KIqBxU9QU-MG*l#1Tdp13xxLBI*1c%zDUQCW(~!^ol$v zSkZtx1+F%26w+cLgBuY!$D5Oc_f^Bj=fu6)#pO1B28!i|$)-x3WnAMj(T0_|XhA>1 zHD}@_dna`;d}ETCp7t zf|t*NALRT_KdH<^;@ypM&9VPVLmurwN)+s(wo@f6t=r4b@EwoTu9dLR$a3FHu^ zP;1c$y^|O=GTa-IE8{0?Qz)+c>R(BQy$z*)m(@;r1^-n2i*-ACjcZW3+$cqx$d!#R zV+?x@(#Wh6aXrZ~>7iCE+JU1w+%S;95w@4qUCLZl!7jD9$T*EEptQdbK^C!#-sv`e z$4!p!y;m~PaIxB{va#c#IL)Icq`CIU#Rif({Bv%$Q%+! 
zMva5v;dX{Pl|-V@i3IsoSIW%Dx@;gu89a-XV>sY;gA1v-yA&cU$pZ`ApH6xXc|*b= zG9q{6o>@>^*0)a26My0Z8RGj=G!&)$rI)v`4!r=Kxr&a*5|m!BTLOt+b$a_nF`6_K z&sWkS1_$%=iDo_bU1V(7%A;%~k3lOdt&{TkK8SKT%=&KTnD$-!6dGYhBB06=fU@ zS+T+hKXzk;5KiP$sC3))^U4jr)~u?z&i7ql{$5lj+S+0t8T^A)`5iSxTCmo1qQT&V zE4r5KbX{u}=2vE}l2!(|6Y7`;#j8~p>33HzX}f}@<_aBtE{8fEbr<#XXCK~Zq5t-r zcgk=en!Y`^kT#i1ui$$rCbC{_>m6{30{MM!D(a5yDe`k*uG~%mg#__wP5O~&OoEzo z@&zL2n^wfkX(@=}w$YB)F$9|7v*Xu5mOrMs&*>kQTB%GPcqnEc|f9%H(OY;k*C zD2S;)wildP9eT=brSFA;ru`VBB_(kfqYyK>@Z*|U!W()~=-}ZLT*j*!OJuU)f&(<| z4n_aAT4k9{^x)d=Rk!Ja0vmU6vs<#Na1@PRKu$&F-L=Lo*c}kGbx@keTR)oRsS0yh z@jj6bhlPUT%iQZv_{<8g8x5;>-y|Ug>t<(WcOLP~9sT~pAmb3)FUXJeXA^$&XMda^ zB|`*rKtKpKl{JCq^6UlZtzs%JiR+yeL%*BIHI&v~i7@yM`Y(SSS*0@DWEjcvY!cyv z{5Z3)=ovRUBjIDN`sI=snfN8%V7qSJcc3``+JD!o3|=Uc{fHc5|f#5lsGDbIw$6R;UnAcz~iyg*N7d*Tg%vtBNjJetnLwkgdI)M z3Dp>Xef>QY@{2=Vn_tNjD;#^}UY5;Tl*MsMrzre+ts^n0xo$y)S^JKS zzUe^x;5Xu`4#e9G?^edJ`hN9ZD`OPTLmeh8=Z2wtSbPlzW=YzD5mGZ?kyES?iC+l$ zwON#0?2OeJ?jA}Fh4Jj26cDJlCg?Baiw|pjtuXbOrk;cn;|XTds<|k;BY%j(?qgQ{ zq^NXrQE<4FL}pYmyvut`rt689gmQ)`7`X~9jX0Y62lt$#$77=7td~S9dyKG4KTr?6 z_lAdYr;ng8%&9BgpO462LPf4??p^Njf21txwXntXpe04c0xP5|X14anMesrn8tz7* zP(Y@J8Kwk~S?R?6j{r?#Hm@lDZdDBm@frgpS4g||+n;1Y`iQ7h?3A4X$kNKRR2M%P zq@j*x+^#etedGh@=6ChxqI6;i{A@~-L%L<|dUxdctM5Wmg_&&IbS?*?rMcJnoee<) zx@3^6SE%AnEy+6fb|b`U`2oG4X%V@-+6wXu;SphA*VSXe@5_XDF8qS9P19{u$;;ss zWe!DZGaq+cBkeRZg&=Jx+Lv@Y+Ey(mJw)gc9b(^n zl%4nYC!)b#D>W1!mByNrA39{c&C8sk7TJ0R_LR&Fms}-JeP&`%^4Rn@;Q=cFqhJo5 z%6u(+gk|zp6soNf&(p^ z&&^=t<7Yq*l?!sgs}Ai#;fDO$**hqkVOv=x?4PHZ-P)m%B^QCHMiKkbEG7P|!);S& zTNj7!KH+jCV^KNc4Q7Cg)(orb$UF25%9BP$F#rQ6;GB1!I z0#^HP*|l!dbkFR>w`p^@FDDKKixSsE< zqUm4a|{%&r!N)l8cWD=p>6 z0Wt&Mc3u+-Wao9B%0`JzUsvU~^P2at$X?@}k3WX@Y2>M_2lR>jKb)aeLWaJHr6w$g zl5(~z6!@z5|4HN*{QK~Lx8K@g=P$|-$b$d}$K;H{*{NG6_F?;UUuChCM+6KxxGe$E zj{q^uNvRZU&?zO)=MNk(LkNy-cmwUuW(tX4IkxSkrtTAYWpAi>u|p$MWfUC-QG07rlT^Z%IU_Pn6oCV*q&Kr$ z(j4-k72`XCIV>I*8-Lq5J=8$lA+%_BL$^+;B_L>t%?Qd-nJGc7U#>6x`CAnKbbjNE 
z{mo+93+ZeYUf)4AK9ymU(pONEn&r=|KL*yWkkTop6~fIGl)fdAeO|jjf}EkN6V>fl z&D_4X3_U}e27g^lDubFDNkR+$ZTM?kZy#S+_lYml4r#?}rtDn&g`;}U(U3pCd%t8= zQzHJCO8WKb-$osl+P*LG@EF~>Bg6BFFDS#1xgF^M5@kN}Zg_L)8lDv1rpubgRAP2` zw>A1E<@WFA`=#Hr&w9;@V<$BMsY|iWWFy)@)YBzj=*6p%luxe%gebsliIu@SoaSOt~D;J=dmenoi=FMz4{Qc#Usxg z8OMkMX#{LY;cUc*4kghPVsR(ot~R?POk{Bc#s~yPjdxBMe4Tcp!X~B&!aa`HI$&ss zi29MM-|6^&UVvc|)F|X>$k6AiG*Ss}#ql$;+lkD)62g~Y{aKTq(vYy;%o}up3ysfr z*)p>M&V?)@&o5N3dya};K$ckNorzV$Ur?(gcKtQ-(M8Fi?#+TT@~XP}WKabQX|p=d z8QJMstF~>&IKysS*uO^P)CeYV9@L(WDTjpqsmU-qtT>+sKL+I) zM_+Vlc!$~iH+t=EhcK`6Zs#8#t~#1sks<8fHhW`x@vQV|%i-WB zFybD!-sg0MYMJAe8+U$KtmjSF`)$h1%${&-^jaQ^=B0}mvqsF&oM{}1pg>309k|}f zecsaHdvpm2W3l_}@PtU&wn-t~$7*VtkUqb4#zwr^GZS(fy)v_p$g#%<^uhP#Z#bLl zEU<_0y%bI6=> zWswEJvA)F=IpKeU0`YRS=%B0=;0B8q3h>Hf=A55Rx#F^P>z?Eu|FPiPsTNtea${LsjRLpi79%@Ne8DGse{(w@wE+usC?Hy#EzS;Tkj&@MT z%C25`c{?FAK$gMS->Rajp823v*OM3_uxE{}WMV8Un~&(dG{Wff!TSf&%^mkLO&=t* zP=ofYMUuT+%OTF9jL+*k$`3ut9e#n)(^4IK@Q|?fo^CEFZ5?F?$ycfA*1d4BE%7rG zhA18yUgJrK_=9WJN# z{!pqmQXKqH95r6`CfncR)%G!)?7~-K3yzSt=0e4#m(-(X)@#pVUl}nz(*G0J5N7gu z5p4!VZW1S@_;`#nxD}qqvfN{b@ccmNKwu;jk#wEG^S8u=d@R(EQ2RvKH%H7%vshFv zS;*e>AmX}@4zH(5-bHyKo4;BwIESCCCJ_F>?NRvq`*cUBsI+HSGdH1Y?KmOsJ`Qq) z-2zf5G`GO?E7#f$FBGAX+`0pWE~v@iTREEOg3%y|3 z>%R_iCM0ewHY+4FtxPKOd@+YRPb80Obz4|Qt*wB?191Sml`e}us_e5H|C|B45Au(7 z`Q^^fArJ#<8sZvx=4IQ?LeF_z3MwRsba)gwriU0kpH!nvOv7?-46;-){|IU_8LsZn3`s{}o?-&LIO9<&->l_h*5lp>MbEBMXLD+oXC${W?3q zT}Q@}YYcY5ga@3`lcXhcS7vYWo^UOwiRoW1VR+@7gsuJI(>0f)Kha83cwB_oL*$Xy zWy6J?pdn3Shcg|rFY3y?0r!_ORmgS5QJZ2z7?ik!XR@EmO?scHk!l9{sQ1BpbtBAt@O>`CtZ2k& z+%J1~z59p(i905crQRKjA6Q8VRiLqw=i*7(8SciX7A{F|-N?-&boaZTxBtqDHlC(wLhIRapWb(w~@ z-IWoQ2dqyYdu>YJ2U%A181pKaJK)fc0I=qeFb8Bv85Z`_FqCL<=OD=Z^tTd1iyO(=HJRV28lBNI#GGtnCkc(d$G&Pv^RMG={s}#GI{y&v53c(JKZU_t}wD20#-X3?9A`a@PutK z4^n?%Lc(yi-d-;$Az`d|>n{C|wsj=#L=ZLCYacj1ucYI77va&S>lwo**_BYAsHJX`9IOR=n#gqAC+P!;6@7S!CqDZX^3!2H<5+- zFh$AeGg$W}XTHX#F>RoHI{hXekPOj|RQ7rZaLgU^0`I`7{KUn*52m*|5AMyztue?l*KJ7u*l;l98aH*-<&AG=U2Gy8q5 
zoZ?RF1h=sR0iq~KgwZkOelVgYsrGO!MM3Qka2(|5-SDI%3oG_ByWpGA%Znb|6Eva4 z-}a|sFMAK-;d9Eb-dc7Wu%w{@{zw+MAnM*n;6_ZUjB6>vxzl=$18D+6Hiq4%`))+n z=SzekceL`kT_v0_88y_p*@O`jkoIBr@7oG-852uPzuXy|zN8^(oLal;E}g$Uwdbx5 z1p^wt;`mz+^xz(HmHB!Sjj-#J;{DoZW?;h?UC&?rLmy9A_nG%USk~cIVH&cc}DD#7l>d{c8HvY%N-6@M^i*mccM}g?gnJeyh2`_DgfED9^6X=47bQhjVF4{L-(!{TALC@SzMe-SmQ*s1P~9OZX8Nz+70& zKq-1(=~7{`is5saTk;*BZ!b)|mfJuVEw+tF-3e8BJ-?(aDoTj$ubg@JWqZRom2A3H zWik^>=o70dj4;sug6SEdi_OE@TKYSoC#L8xN5@l0$h7}X`sjt9UHqE6%3R~?tceMlfG*##E`F>(SUc*U&j(`+}N^MOMF+`7Ha9eXidB4M@fDTRYQr@IMjT`VX0@%WUrlK-lyV5}zUpW_ z4`HM71!@hRB!=R}VUak_FT{-oa#-={u*)W?QTX~ea#OHZH-ovwU3O6RHm=SuPyG3vJW(05iBgVSq$O)lP^)-e2HmLl z+UlUu_gkalGat=HepOxD{Ov(n{(-QVxrH%2P&k2|D3Et~Zt8R${@Wj7byk|!qTdhhng zFc4&4)dsX2Z#mzNJN#Y*Pb`Q}wG<^P1tw8tJ77yksBpx*_Es0FC;GcmjfX<^FHVjj z(4n-%JjKtk@-^D(aNkLc6~KSSg2=0sbkCN6LZ&O~gJ>ffhEmfGe%sLV@MfZ0z7eW- z4ZB`zw&S4usC4w2x#0G~MF6f^SH%P_P4xvDl<4vFT}12@LvNx1K_0vw+X)sMyl(`q1s0xq)B(I#e>)0*p3A!x2;)`tt$2Z}U}H z==W6O)q4Awvg)Ml*2>eVd8TXbhod`Ro0WO{b$w+kPM}M-K-Z-(#0zJZ&AB=nCe+lU#`59;b$Xa4zgeU??v9$f}O%|f$@Mt}f1 z&ZY-ZNMPMRegHJ%j_ zsUn+v25y%Sg63lJ9$M?D@XfFWPz+CzsdOtGMG~i}j{Q*9=N!q1eKp%i3VH2-;jD%A z3@+RfpC8jUI!^t?)EdHh*(yMH2<+O@&iCh&$8=Zy|8XijN_YncrD}-25aI5Go&bRJ z{=Z?&+Vd`%&1V>EeFUaF?R@Rg`#np1+if8tApd4+17rjD<+8?s)P^sJ=*D%Y-+4x$ z5WbfyVV^|i@fQ9mxyI^%zjqtY`PTP+>DRvhDPpC2 zIwdQoVEoIi#j-l5&u*f@ci$dxXsp%VzP|Pqb~VLAxF4(EmF!ALJy){)q^v{(ZgX&) zk9gl`_c_9$qsyo?tJl%|y?7WgrE0OQn-fiN$p3g8-}^&S-QNVRZau86olo?TU{nHt zJI`piTS*FpSr;o5HL=c1>SqdNIp4}gaeZWqBwOl%-}co$#FZw##Z3^ygd7ojhAd$f zl=@{-B=A;SszY>v!^e@Ibrrn~`eaLS6?cZMR;%Sub&|Ot=hgTYLJd{gDd^rrq~gwJ z&5+QQRG^2*v#OpqRF0mD0LJP=O3(x)n(+3J!iMBUS%;~Y5quZH?{V@iNCLPH$uEaOwT`L`f z4%mpW4C&u@u5)q=*X~L`eU0rieqTCNaKgFH8iy+P{8JyCrx+EcAu7>86!ZKM()CA# zvDo?dut0mp=M$=z$x-Qp!bilM;4oAW1}~{FU_zMY@zffE4B8LouzsgA&BvIGigr94 zxB-7hISTZ}DV!8{5E$s`5a2kHC*IMo=3x`?o&C>V_dR(L^Xnn+{zK9)5vvx3R{w6{ zN8zqu`y`Zhc;+ea?r8d2xuJ_TF^qj`T0ijWHd6j?VDdk|==lGTa+?W_NDW`SE|^L` zNBcU$yBTkFj609>eX(FQBZnN~!bjc*#EDt|PuI!bAoh$#D+<*2xtbO11dgpV2b8~l 
z{T9{wn^zX)qTwxGZjN}9#rmDkfS*UbntlLTVgNYke4io5?y4_aw!cwa$BbKxPhwVUFR*J>ywuP$|em5Mbx%o3>Z6gu(7xB4H%lA~;IBp_S~LBxDBCwEpNPfKW1OXAR0_YAy#b zf8rEyVX`CFjSI@x;-*MjSBNyKd^L<0KU6;a#Syl^D9%!$V`xMVCW$W$gXBLcJ@hep z!D}V#+~{V8zhf%uaE{+s^Q&0F?Pfu+R`lU0+Yc_b0C*hKguYoNF|$E`_}1lGnifdo zrefIDz9h7~yqqd_``E4gQfN_T4FCIdNFc0!|LC3?9 zXozzH6&A8&6eIi}3IMz*HC~A-Xr&5jC$cO@cdL)&>uYQcv= z9;(IZn~M9E&C0+|2drO=#Xv&R(hXJq1xO%4W>Ur_VyMY~1Eu#z!&0}s;T3*Jw_3c*5ylaq&|sFb zs{-VWjFDDb40}BROmdwx4MKYUb&l0GH3~bKxsGS|WyZ^N4L!LrFor_x0*Cl{7Ke2^ zaKA5R(Wt$*^eCs80Jl)9_ygXER;AnIQSj}UDapc`OR2SFkY2G+vt`)IYSqK`R!6q+ zq|lNb+BJO}rLs&jFih@Y;4!HTJ5~DL12Nv$(i&{bH&rxpa&mr}@f4bWvS9FF5)b8D z@$Ljk9JnkA>aJqVWK|2OEQ`-D&;##%u)Uyvnd|5C=1?H=GKx7J7da!QnwA_!zZMu3 zl->^iJ6>D)wBJk8Dem`pU#(s+A_Ufco3|A+o@^3$lo*is{5$T`8und&6#=N?&&}<} zfQ&+ln|ueZ-)^i#B#w}uyvaiH13!8PNLvpqCr>GkhL|^QJd$!E0%Uw z=H=(DOzn+_ej%!WiBW}d&(;=m4#oBA;a0I1G?B%M4)-Vit<^LV7PaAJnU$X_t(~w8 zJq~~3-58@87?ERHUn@gp=QZ%v$!?0k)IS)VZ2X&`W|1L|^_#uTRTBLIejwmK^l*=& z=YZPvt5~&l{^4BtD^AW%qW^;(MAzF#Yj-S1g7)YdmZNO3Z1()6Yz=1_r93?!M0ehF zk3irY<$&`b=l>tFy6OtvnQET9?ZC;?FZnf9h%t42hPp!{w%q(>0hKg z-kgJncoP3v!ON>&$91Pe2j}iE*lgiykQRGoJ=a)3ordQ`@(`n_3D@Cz^|#b1 zhv{$aiPD<#iH~$@HQ`zDOn(1qo5=tsnca1r+Hd z9v()H2A?=6|2s$iMZuSNDjrJ1fS=HR!cV5OJ4TodCAoHp_&@Xe0?==kFMbwS`p}5) z*1Oub&mcpLRpe4dtFGkasHh#lax9PoC9Ac8P-x~2Wq)^`kc%pN3f-hawnwo>%I|71_beI=zfiN;M z^7g^uu_X9>4Y}Dy-p$6kWNEDBH(9nsT$mSFsKL6v-rl~0-qAu5jcH1afLCCfI|B2~;zJ;8riKgG7ctiYkE*&^FPA^_b<37o zi|6HHTIv)Sc1IY4f>qbFKk|-JOYVGd|8}M08dU&#l63Eeu?#vK0T1uqv@x?#kQ3t9 zS&|w_v24qq91yR$5liYaoo+$RB^UilXuJsABI}Y+f}N|=BtNQZ<8&JG_^xT{f9`>} zljNhSL59et!%{nf5q7L#F7+5o$M##9`8U-mN=)iiM)9LalWuk__O2I|W-}YR%+ov` zF%!v*d_sf)xGF70&6=sAMHvj4RdvM~iBc?KR2UL02Bg;)-)TE^m+Eugh6=@TBlw}l z6m;SjYCL0CEp3D6sc}k102U=39g5{r<>Yxuy(T~rMgz)tJ{%5smA>H>JlW2sBXtda zzQ5?Ce6*hJMn})u14Au-@`z=ST#&XI-|_kf9mg}Z3<9dk_s?%&%@wmD!iDZ)#5h#H zP%c2404=g2M9Zoa#yx7VQ|GTPnXzG2DMfwZ9Wg@^f&-A_jeU^^A7}l-$aT z4TjT=iTj1^-u_QUsecq~#bV~=+kdl_SM)Hn1Pob-!^EC1dk0z#aEiWdknn%h{q6CM 
zQOlVY@+DCNhD`DW2E<$5ZgE!Z9`Uj${W^WTFD|@`nAzLi7Lm1iw`vh>xxY1lYgj^5b83jFpUV2+TgIG1+ zpg&P7s65=b5^!i*%qw)R1~pctugJ}e($wn>dm2P5?f2)Lqp^d2{_K1W7t8uJy;_Y2 zOE7X7AL|=<8y(qNmy9YRy_T$QalJaEWl{y`u{}X~gt&h!^u!z+i^P2MVSc2zNB};9 zn}9LKEo((@CLNXE5}(k9NiByF83_U`Dbi13J3lf9LfMsVM>-cJ@f zJC6Qo-<}zn2Wh2r+jL-%#x2K9tG4>uVKA(&bLtNHya-y{;}t_JkY8Gju%xpk zL&Tno#5dpB{Gm1NrnY4Mk<A1eH!_?2#zRC?Lm!U&|P1J0E8|gFC8<*Pr zEM`e6vK~0zcP}ywvHGKTWfh|$HGRl>X9$?GC%I~lkArm{Tn|I5HrO*%EnkYA!YK~I z!3x*+Ks9H>ZOY@ndyQL+cN7#sFT4Vy0+W*)WN^ETN?gUYGEztcll4C0Aj93mW#p(T zJozmS#UskQ{+2=OO4flDz_#PZP986^pUs&bHb6fAGd%?j$FXAmRTe>-DDs3URQ>Fk zB+n1O+MUl6@TBrQhJ*%A?HQ!~s;ab5Y%&8Osc$`Xt}e6xsf+ve@$Y(UG-Bwv{wYAI zsmf{jl+xA|t38JN*P3b@CsD*WxO=ZEpkI0!-Wmdd(NFoqpny=IAU4LDy zgGCN2GWVQ5m}f}-MObpxXF?{;8hVw1aN?~WV+=$DVDIF#BX|{S6Hx#Wrh4%dlL@eO z)_P3-i-6&;9))-&3G~L8;*_y8%c#SQ5#Uz+CoE15r9Mdr&5^2^j976Atm=~1wW7sl z**8RLba;|h7zx2NlR5G((IwRKv@oqa}XeIaL84v=l12%(8(5R_nmT7|t<%W1Z5i~RBo zB9S72b&65&c0AZZR?Hh&wDWzVXMKk;*lM6HfLn{7Ln_|?1=4K&;Mhf1M?u}K12FQ| zNpYkhV#lh=unm!3D6GCgSS1Axik!oJG3Jf8#dtl+Nwo#(I0O*u2JLcJS(F|@GM*mE zH(#UDK?r8Eew2wC5|)%6?)(vlnazGn(sU9yAB+1gYxjNto*Tl__GIam%=$lRan52~ z7Np7c!3Tc=cck}W?$OWfFeuqKb@+ETe%P5`J~X^)nvOCMy{Hra>=!?xJm*{3o8S+gwfn9_IJd z%^oG?aFS%Ae+CJb_&Su-MI%tm6Z>bbMAhutl8p0W_x_8z2rujE`v4>iUmsr@+77?| zB%9*`_AIm_RFW=>Tjis#`Ncki0qsxkQ zAvXzT%r67M6wHsYC0Q)`%ly|ye)dh@yAw9Y2y?Rh644X+jJkzS{?VjP!tKVJiB%<% zNU3-fDd3y223&I!fV?VBYX-i=&Vj*$eIj|4@eRvvw7c>l&My`MoYmW+1;gA!#*wtQ zkH$g3oFCa?Xi4d3xqMVx+$ya6&hTUeRmf<%VA!5xr)U)mP;=wnlfHh-z#Eu^+P8Wd@ zdZxQqe&ym-6!hYongt+KW;qM;vtxSPUdG1oJZ`JRjc}iYkwKWGLd0IM6l%z5c1kzG zx}$~St`%vGD4dbtTW{z)1(X86=J_(1_Z{MB^)EpYGytPGI@IgEivxen+`!Q;wtpsZRDrYxoPoig= zAO#=-SKc5E0 z1;@(+BK(mBvqu@ES^;`rS;^G+yp7@&@2T4nfKU_U-XhLu$<52r#*WN zNh*Zu#v2s)R2Hu&FTX08K;>P)%Ikdwdeu zx-Ej=*R)z-_wspA41++_%tGO$G4S1s#716@+Wb7~M~~}I>)NlGu_0$KWg(DHRE9`% zI>eOQX&~Y8TS|=Iv)LP)kU&oMj?_dMWU7IGvTKQw^|`?MppR?}8~aBmpq_DB>_#M+ zhl>Y=@MXGM!fchz%+x!>2%A;>vKo8J1-GMcV=Y`sk55h+%ETNURqK^`9{g$&u*DiI 
z4>^X%^0$3HF;uJ!aL~h1+vgWr3Ejo&^q6Kw0Z#WRc<+%Z7xN=arzET zNbnG~|Mrdp^g)G6Gf6|mohL_!JQFOWe0Z$bgj`nBm7O-m?M!d9e3$fB84C)+1I;7M zV-*N09?oAN#1zmF#!J`j|Oh81%sVl*v)t~KrVftR_e^v-ktf84lCT)V`I zf&J~!wSLYBp5fAP3%AHm@w1phxEAYBNu3j}jY4Vd8{4j~kHwlA_@s$zwxgm7#ZT&8 z9~8?W6csPB$56W{@1snrOp{QE;XV=R;?|W?Mx84!-^eyZ#AmlW0>>3~Tc|l-t(NY* zbGrl#gxE8d5^cbr;HptsmvsGE`yaZM}UPRMM`U@Ulb;nmm^OGYJldqJ6G4wG}%ni^Yj% zn?m(ka4(pKTbtuDjxE**d75T`1|bhjmKm~)ImY4rruo7C+|2Awe?BcSQz6wvxs3wxLK7X_{Rjs4US@AjqcO}$y#Oom2`Z-_c3ku{b;8*$IKJ};` z^RP0Lc>q(8U9D7c7+?G=O|yJ{k*LagK@KzRvQ;5KG2An1et0X6l1a|FgHTZ1xlyn~ z1M;qrpAqe_?s75I3_+$XLz7^!Pin4wVss1xU3?fC1gD_z4*iKf2yp-EiQhK{a^Dr7 zw-3*6E4`sUyhTYV{Xat!c`cwJEbtV1G$OXcD8DT^ID~h`0R(Iw|DPcU9TM^bDjt;+ zMw+-f;_5@`@yQ#I@82_uUx2a$&!85|xKoDb7eVQQ7_$_0LGjUbv z(>k)8;XW98l1b==)OU22>+0U$_e`pse)9fDN>kzJ;40L}QGZJv9#xKhhfEC_k}rr? zt)nHN$!lkm8rm)CGERJ*5K=$b8KWPYZ1gO>| z?4-G0Yq~&cl|ZkGs{}VTC`EVy6MK#~84sP1gg4?JQ8AdM-%_jiB&cY}HoUGbhLgwn zLHHih?#+##VE+%8tbbfFaD1E12C%M$Kvjc`Qw1#y%&-5#y~iiasClQo^BXXEf0gTu zl4)Inr8wn5;?_FMExAkFy9hKcNVynjC6BN?@)ZvMXA?NwtLn?kpd{C%`KaMu-jDytSJnEpy4 z^XjLgXAn@aPiZv*2|YV$t@r0VxUx?}*j$6`C^KTDev=8>RyFsIPW236{$G~GAfSYFhqQDfT@nIHOLs|2w}6y% zcXuN#&A0Kr_x|~2nBfOAj%S~}_j=Y7Ys(ODSN%O$0JpkqVyWbl3yN|FxO(`3zeA;9 zRpm-9%G%m1Li=q^os6_o4n=O7K_E6GXw6ct=b}vTFROA{37mAWmX3gnFE;6qyZhbx z5Q1Rt~qd}#URd66Nt&!IkJop!I-Pt8JP%uaZY z1-27)#U*NXO4XKg9 z#B)Le%vq>J@G9Ny@U?C*Sczvq`py02c7}5!KiI0QWGH+fNqcINgBwts1#Bsa(S(E@ zlZJfa-8R4>8%Xpjvg z9sTyb9ILPWIbXp*q(dnw$!RWkBEui7Xs67i!Sn z$!9e3w=r?#14V8R5p5(|$!)I>6YV7yvdSz;_A^AzHLcBQ^Ho!J+n2|d{c3jejAIrw zF8>SsFp0YWc3DY!@6oaRu!Xi4U;h>3{?@@K!wDg04}zOU@yblU+hB*8S#piuU(uJ3T%G zIZe=gELCw(P!KkwUd6;%4##U4c%vb$_rVr&s7(R}`BR_SZ44$=K$5q^kzV(FBl6t| zB7{#tWLK_rfA@H+n!=IFp_u%0-1{-B;`ba${XZFqS^;KGVu*tB2ceV&3;lC{kv{cL zvr=CRZC3xYI5>K5ST2iYME4r8H_r<+tP3j7iJkddN?P-XaFx}PGm8(tP;GK2d1rWq ziv;Wcbsh!81E3)?==jb0?e;xlmBwr@clw8EhfXO(fp^bK;al3%H9JJ!L|VIvnccCf z9gh+6{a<$hY+}Ibi2<>+XD4tNEgO9aInXdIY<`4l_W6C#Qcf_)7l_9=Z@FuQC38yI 
z`S7O##KBzt&VpO9TIwH@q_=aR$zrnIHl_JWUTt;E962{;&T7=E(X3u|vdp1zN-adH zbzS=0LP|P~SwJ$^a_a&u&#rCWwS_^H^D{Rmw4bRe8kLTIQG@LD8@4zC(=y=0*4!-5 zl16keZs;{F69$thw(^^w1^{PL=I&rQwK(^+c{5H=TDUDFkFGa`LCtV0Wo`}014w?) zvuuEB5&WEJAM|s{UD+s2D!Udf)OqnKW#;}2!Z~ieR z!e(~J`lQMKhS7YjGlqkFvx{KLkoT3iREJ{~mixXF15-DkNw`~Fl~WG1r6J?D38%8S zj8x^e_m>*p8XbI@9(iM9Gj2fUP8Nrh!0`%J|qqs91wkm>vFr#sG zTyWsLh7i^(h(CCP16Z`#@*UO8v0;Uh$$Fq>_z?;RBognPDqs8A0>PVyhdVNd!O{J! z<3Bf{0`_^w`?HRb;qMSKDp)@$Rb1Q;;=WLWR3Vr?uZzshz9h@#B8j4vHX(AGhApdT(E)zJ&KS}0Am3Dfuy&Ez-be(qNs;VsJ ziD@9g3*uHyVs;sZFZ@?>jyuulS}TH=24CP6uS)Pul$cVL)k^Z+&%_5|DQuN{E+cB) z?l3b198@HzpfWuli^hIvKs{rHEn{|zV+Ih}Z`g#Qe{RK))a`D_Yp~|T?tZg*1;qoePfEohlvm5|>>muv5QkOXP$&HR^&!SBE^@?x%wCwSG(z!+;p z1{Pv6dkKt9_!oj4)+~DEaijFdSLM?q%3d}4TNG4%Sie)>Rjl};O|0BkyYzEJadkiI=T&D8_}vs z@#dp}lKFtsRpXCZ)Z1pmzx9kGK(325dawQ+&yQO*^<$-(*uQS0`COv1b_PEzr&C4p{uehyu1&A9S7_Gz?sRI z)%ZK_hL37*r#r;UQ#vlc3-b?Qd@ol%NO4s|H3UMI^=ftZ!qHX1e9sm}8Chm(ZAopb zTv<^`k#4KwQ7M&ZD9X#yromQPT~9+sEirZCBD#7%uiJwkmi3QCsY^Y|3_y`e$3jwpO$Hx2bQLig$Z<3GP;K*A!{=pG{eb`wZ9Bpq9bqe8d^RLIH;6H>(v50 z^DzTCl9X(T0pwt_c(3zx_vQs(*v*N5SNktHDJzG4%Np62+L0J6j{%4=?;1^KAUiqPPYg8r^MmdX6LaCWvD>C>-ls-2vrsUyF} z(ng^!<^ftR|L{LrF6oZN(iEE}@dhLkZzO!3R2bf%JG~4XQ?0nb*A1>SIgjPHQ$q1o z5(!I=F07){*bl$?xZmCv&}VdOG-C)i78uGaEc8ZB46L-E5tHQ?eb9e#peD_$+&qvAGi~NHYOI<3mAobizzhnt9 z@FIZ}^wMqN^YO-I3|emAuH3VAq%oxZ5{?JaYGT%dn%P^}<-z#)CnNyRY_HDYda=E& zQ#mrsAVT5S`|B5|RGSRF1W^3wH>kcg1aZ8^M@&NcIO7%x-C;reRAE4GeHLBkfBbLC4%#pwn)@6a_X|Eb_o$tu9v-<9)$FtUOt>RQF^kuhk_gSSS_n5d`I{;?6iLkH`)vZb(!h+#f?_*4@0>NO8 z`!81t5u^|hscV!_4AN6qy9Gi8oeEC+Hk!*;uHjM2%YnBBZ1t_$J1ai899m0Sl}YEM1`NWZ1zrP z$DAt#WAL({jQ{v9F>rBBUXHRbivE4J*%*P8a~l~emr$%}uxu+bS|-2ok&s{OWo}N~ zf}b*Gjs42#T=$yuYm)*S)DV^*#?nMFYgj z9D9t7^Mhy@oogMRBd8R6!nX{I8j|YhV4`|H1<`4fPEoBoMD^)-fk7VwZw-%K*V_Da zTYVM7KMiEFJa0jpSAr+}Xn))M$m)&I2oW-(|AKD}T**Q=ggtKV^y{=d}*hT!=aUoO5>`|+)Ay%Ru?`6=D0jM3CD9R9?S8-B%_OHDwx#^$Y3`i_ll@O!5do_Rc(OUW;#2#`t_wx0hg;;*)}Oy#(*m;t88eXOt`BV0!0~oy4eI4Mmo8>%7-he_a|J)Nyg# 
zL$UoRI+$1$6Vugqc$IzLs|YZ!@tRhZh^JgT7&BJ6?hFW=*bh)h|7Pt30DB7U^Xw3m zMUoI=rB8>_gG9U2qWR^eV`_Sjb8nSk;A|sIoq3zT7-p277df~7os9uJ1(3WJzsb2y zPYoJXY6n_^@=}JxC{{e85v9#A3g$bc!aVgeXeDrsM5KS|Fm*{2qECx^{g&}}s%;0o@?B_-6Jr-Rn!f8=7eE=c<_Kz_tlfke_z63ew1*%|@AR&Gndvgft( z1!23;Q{(Bu%)9-by8Rv=p~&Z{E35{-AVFVTuHw;%85IeF6&xAf9&Z#2yw}nN z>6W^Y6|_UuXWma;qFpxrd}CiZ#_D!53nErvY!n!s)okHx7n_6u`9IA}eB}rv1^gPs z(Tfx4?~O!Xg@1;Q@kd!qk%Epb;d9mmbFKg@e;-})ok`A&D=h>}2|(u_s)fN5h{~XDW1cob&SHT=$2a}i=xA}v>j&B=tosn?&LzfYB)8>ES zs$J8w`Zk}CR~PE#5}()`OY@i@A6<4ro30dv>Ifo9fzmG<^E|Sr1I#D72wG_FkGyVp z?Ofj%V$at~sW_1KbioL509L@fa@~K^#ANG#nS^1d_#Oss%E~=6N*((=a?IkzXs`lGb`QqLVzl>gk%Xe)TaB zrKHDl#unZgIx~Z@C=jrI&$Dmj${FYx^52$w0#}+|rVe0lhn_6{40Wb7o&S8o2(uyo z#Qfj`f}97u>4NOhqCnPg_8QV>6bbS%M%_pq<@ZU zbdKzQ^gy{5gl6L^jSY7FmV`tqZ#NmAXriT<}Ca-6fKPS6|>4FtusGIv!vG^+r zd8tgIP2@zYnB#^tY3ZC!yl#m(R+!2vp1MVq(OQCF^`R@&f@3w3rSpDCcBo)vW|2Ol zMPT=NJg+u}h!x{-LpPlI+F(JrYHK`Jt?GDd*L=R5I)F=rv#G(nv`X_ zR*QIBb0BezgYj-{P)V{|gLy|uuvj-c1-Au_{li}D7eQV>pY=2l3qf?c+*dKlwfiw* zYaK99Q9#1zJcGBjWir+u7o$EP;(Ldat+m@SD*3fidfD%Q!s3!eaH}|F`ihxeZrR5Z zNQe;}ak=qn3`;+3TcuBjTYQVJLP|@2aIsK3jz{BztCs)lx}_lXtcR43U(V+4h5FMB zjCv+MS?$0Xe;T70QLl7!0-dK5MqiN}LyLvAg9-Fw%wyOZ<|cxH97p3L``n$f~Y~@NymjD%|sRYGa<{=3i%_n9~$a z)FXi|j>7BBgb!H-Dlg2;*N#=MDP~|FN2PUC1F^Jbw|k`F|T!CAu%uK zuA)>X=0ys1Q-2xHQ)+(jxPNr@N!DWN8s@U$K3i<&9g(o$cbmr~VT6;kDW6dq;Z zl}Q`8^a{{LJORhq5DZRFJF^W!!Pr8?{9T38pX>UY7ZvR zz6+{Ld)$bOy~pjLjtsE*T79cCJbwMFc)PHUsq(r4PUehO`WFg?_HP2qr;Ki+Gz*rv zx?%8omaT+%O<@_C62X_h96fOz+!`(`umr9bilbb9>3M_u-jbBd*J`hxXu`_~mHc+U z%VF~3bV0;)noL)8S@}0GVg2t9@Rv0ycUGZ&36;I3{0i-ntt%jjh92;-Q!nT4+zk1B zUfG8i1tV&^2jMuVRVZip4rDSrsFe%o7XC0Szr8+E+KpWv%{we5kZ@|FqlfbOqMu~x z$dijD8`m!7O>IRL+;S=y8!VPo0T~Uwbodh6?My zZa%=Ry6uDq#RG4=hW4>xTcb6euibABQ(F@%J3vLd;?2rzJPbzj>r2}P@(;Gzi@x}P432TDspQd7zLEl`&{F8 z@wY~qq@6k$P5;UmS9(N*gYxAxl~sQ}*nYN&L4zC+n7YOJWWdv@9X1k7VE-D zUJa;AR7T@B_CH=T1QrjB52jt(C{h(?rZAeuA=GGJz$-HRusWI+G)1}?S@zBU8FVN= zfVU_*()_wWt+6CsLfOFg>Zg~JdQS>!zxdfYoLmh7jPtjiRNpfC#N~2ZDp>9aCfpDc 
z@D&LkE@jBgafxpkT*?SInK;P7dwEo)CEH^PbZpvx#X3}XyP^HZxGMD+SepL@A zmB#O}J>5R~EuKnHGn}OnnK%+;K<0@!fEu{Dyck>wZSQB8>LfZ|CyzjhTnZt3ue_f< z4j+o&yuylD`c}EWAEm>FrGcZX-*z(ot%B|!{?2jrmo-e-|4IynNtQ5}tJ&o!S;>yM zp%@~tpOyx_#;9k#5&??}Z_=NZXz2UW9**#3t6%}iF3u;%y>y$3We437L#p+vve|!SDAZA%q}2A1%4)Hc1*wI zY-+syUZLdj5pg-A`U{;1FCEwJ0nSdKL{QGPX`4*K@Vn*7MrRM?R+LrV8CE`!=pv>L z4?{5n@_*FD%AWOZx%JFa#>SBeF%K+)yml`21(7z@x}6@GL01IZ@=GZHzvaK*+SOj{ zqfzFX=nKF+PR)81Ae+-vXy4zUJjohV^U)=J<_$aGb3Tg{xBtBy!>owg{N9c5!`L+m zrTvnL2o$PJV)ZcF4}M9C&(%vEsv69yb#yZ}T9N}C1I1?R9>$?v6|9dt6O%4Tq3(Z> z7nY{kXJ&Tpi_RH&%Vft#em0gXEVohYyxpq+L=~E3)ygV>!0M+GQie= z-?@^-=^*)%DMVC+`@J5rXv+C=(fP^>`*ko%ix13q(O3ITJ-1C?Nxd0ZdNu-2%6)x2 zHb!_i0GI^nZ)r{g1B2EePvB&#^m6q{PWp(#0G6};m#aWL)x(F z*Bd1bCUk;Lz&IAzIyAg;}j4Fa7{z^sjoQC z337a!B_`-ocbSUFzAip$o40$StK~Iimov_3M~0N+=|{?aJvX~%w40`7r5KA^WAF`ZE`wqRoy9R{=(cdR&wSksifJFzyDV;UG#p8UXCer6Km|90sfEu z4pwAVUDZ<0gdrwhVonNV$l23KVaarb(-`|`?miU6se9k;NWlWP*# zidLx+tI103Z5?S(@BQ3lH_b!+4hW|Py%koith$MX1SRWupU@=X%l8|$Y0XqwCv