diff --git a/HelperTasks.yml b/HelperTasks.yml
index 45f488197..9764176bc 100644
--- a/HelperTasks.yml
+++ b/HelperTasks.yml
@@ -8,13 +8,73 @@ vars:
node_list_internal: "{{range $idx, $n := .nodes }}node{{add $n 1}},{{end}}"
node_identifiers: "{{ .node_list_internal | trimSuffix \",\" }}"
- solo_user_dir: "{{ env \"HOME\" }}/.solo"
+ solo_user_dir: "{{ .solo_home_override_dir | default (printf \"%s/.solo\" (env \"HOME\")) }}"
solo_cache_dir: "{{ .solo_user_dir }}/cache"
solo_logs_dir: "{{ .solo_user_dir }}/logs"
solo_keys_dir: "{{ .solo_cache_dir }}/keys"
solo_bin_dir: "{{ .solo_user_dir }}/bin"
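+  # timestamped marker files: tasks guarded by a "test -f" status on these files run at most once per "task" invocation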
+ run_build_file:
+ sh: (echo "/tmp/run-build-$(date +%Y%m%d%H%M%S)")
+ var_check_file:
+ sh: (echo "/tmp/var-check-$(date +%Y%m%d%H%M%S)")
+ minio_flag_file:
+ sh: (echo "/tmp/minio-flag-$(date +%Y%m%d%H%M%S)")
+
+ # TODO: test local build path
+  # TODO: make port forwards optional; they do not work in Alex's multiple-users-on-one-machine setup
+
+env:
+ SOLO_CLUSTER_SETUP_NAMESPACE: solo-setup
+ SOLO_CLUSTER_RELEASE_NAME: solo-cluster-setup
+ SOLO_CLUSTER_NAME: solo-cluster
+ MIRROR_RELEASE_NAME: mirror
tasks:
+ init:
+ cmds:
+ - task: "var:check"
+ - task: "run:build"
+
+ var:check:
+ silent: true
+ status:
+ - test -f {{ .var_check_file }}
+ requires:
+ vars:
+ - solo_user_dir
+ - solo_cache_dir
+ - solo_logs_dir
+ - solo_keys_dir
+ - solo_bin_dir
+ - nodes
+ - node_list_internal
+ - node_identifiers
+ - run_build_file
+ - SOLO_CHART_VERSION
+ - CONSENSUS_NODE_VERSION
+ - SOLO_NAMESPACE
+ - SOLO_CLUSTER_SETUP_NAMESPACE
+ - SOLO_CLUSTER_RELEASE_NAME
+ - SOLO_NETWORK_SIZE
+ - SOLO_CLUSTER_NAME
+ - MIRROR_RELEASE_NAME
+ cmds:
+ - echo "Checking variables..."
+ - echo "solo_user_dir={{ .solo_user_dir }}"
+ - echo "SOLO_HOME=${SOLO_HOME}"
+ - echo "SOLO_NETWORK_SIZE=${SOLO_NETWORK_SIZE}"
+ - echo "SOLO_CHART_VERSION=${SOLO_CHART_VERSION}"
+ - echo "CONSENSUS_NODE_VERSION=${CONSENSUS_NODE_VERSION}"
+ - echo "SOLO_NAMESPACE=${SOLO_NAMESPACE}"
+ - echo "nodes={{ .nodes }}"
+ - echo "node_identifiers={{ .node_identifiers }}"
+ - echo "VALUES_FLAG=${VALUES_FLAG}"
+ - echo "SETTINGS_FLAG=${SETTINGS_FLAG}"
+ - echo "LOG4J2_FLAG=${LOG4J2_FLAG}"
+ - echo "APPLICATION_PROPERTIES_FLAG=${APPLICATION_PROPERTIES_FLAG}"
+ - echo "LOCAL_BUILD_FLAG=${LOCAL_BUILD_FLAG}"
+ - touch {{ .var_check_file }}
+
readme:
silent: true
cmds:
@@ -31,12 +91,9 @@ tasks:
install:solo:
internal: true
- status:
- - command -v solo
cmds:
- - npm install -g @hashgraph/solo
- - cd ../..
- - npm link
+ - cd ..
+ - npm install
install:kubectl:darwin:
internal: true
@@ -61,6 +118,9 @@ tasks:
solo:init:
internal: true
+ silent: true
+ deps:
+ - task: "init"
status:
- test -f {{ .solo_bin_dir }}/helm
- test -f {{ .solo_cache_dir }}/profiles/custom-spec.yaml
@@ -72,8 +132,7 @@ tasks:
#- test "$(yq -r '.flags."node-ids"' < {{ .solo_user_dir }}/solo.yaml)" == "{{ .node_identifiers }}"
- test "$(jq -r '.flags."node-ids"' < {{ .solo_user_dir }}/solo.config)" == "{{ .node_identifiers }}"
cmds:
- - npm run build
- - solo init
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- init
solo:keys:
internal: true
@@ -85,47 +144,48 @@ tasks:
test -f {{ .solo_keys_dir }}/s-public-node${n}.pem
test -f {{ .solo_keys_dir }}/s-private-node${n}.pem
done
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo node keys --gossip-keys --tls-keys --node-aliases-unparsed {{.node_identifiers}}
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node keys --gossip-keys --tls-keys --node-aliases-unparsed {{.node_identifiers}} -q
solo:network:deploy:
internal: true
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo network deploy --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} --release-tag "${CONSENSUS_NODE_VERSION}" --solo-chart-version "${SOLO_CHART_VERSION}"
- - solo node setup --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} --release-tag "${CONSENSUS_NODE_VERSION}"
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- network deploy --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} --release-tag "${CONSENSUS_NODE_VERSION}" --solo-chart-version "${SOLO_CHART_VERSION}" ${VALUES_FLAG} ${SETTINGS_FLAG} ${LOG4J2_FLAG} ${APPLICATION_PROPERTIES_FLAG} -q
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node setup --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} --release-tag "${CONSENSUS_NODE_VERSION}" ${LOCAL_BUILD_FLAG} -q
solo:network:destroy:
internal: true
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo network destroy --namespace "${SOLO_NAMESPACE}" --delete-pvcs --delete-secrets --force
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- network destroy --namespace "${SOLO_NAMESPACE}" --delete-pvcs --delete-secrets --force -q
solo:node:start:
internal: true
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo node start --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} {{ .CLI_ARGS }}
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node start --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} -q {{ .CLI_ARGS }}
- kubectl port-forward -n "${SOLO_NAMESPACE}" svc/haproxy-node1-svc 50211:50211 &
- task: "sleep_after_port_forward"
solo:node:stop:
internal: true
ignore_error: true
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo node stop --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} {{ .CLI_ARGS }}
-
- solo:node:addresses:
- internal: true
- cmds:
- - kubectl get svc -n "${SOLO_NAMESPACE}" -l "solo.hedera.com/type=network-node-svc"
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node stop --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} -q {{ .CLI_ARGS }}
solo:relay:
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo relay deploy -n "${SOLO_NAMESPACE}" -i node1
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- relay deploy -n "${SOLO_NAMESPACE}" -i node1 -q
- echo "Enable port forwarding for Hedera JSON RPC Relay"
- kubectl port-forward -n "${SOLO_NAMESPACE}" svc/relay-node1-hedera-json-rpc-relay 7546:7546 &
- task: "sleep_after_port_forward"
@@ -133,9 +193,10 @@ tasks:
solo:destroy-relay:
status:
- helm list -n "${SOLO_NAMESPACE}" | grep -vqz relay-node1
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo relay destroy -n "${SOLO_NAMESPACE}" -i node1
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- relay destroy -n "${SOLO_NAMESPACE}" -i node1 -q
solo:cache:remove:
internal: true
@@ -163,11 +224,13 @@ tasks:
- kind get clusters | grep -q "${SOLO_CLUSTER_NAME}"
cmds:
- kind create cluster -n "${SOLO_CLUSTER_NAME}" --image "${KIND_IMAGE}"
+ - sleep 10 # wait for control plane to come up
cluster:setup:
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo cluster setup --cluster-setup-namespace "${SOLO_CLUSTER_SETUP_NAMESPACE}"
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- cluster setup --cluster-setup-namespace "${SOLO_CLUSTER_SETUP_NAMESPACE}" -q
cluster:destroy:
cmds:
@@ -175,10 +238,81 @@ tasks:
clean:port-forward:
cmds:
- - pkill -f "kubectl port-forward -n {{ .SOLO_NAMESPACE }}" || true
+      - pkill -U "${UID}" -f "kubectl port-forward -n {{ .SOLO_NAMESPACE }}" || true
sleep_after_port_forward:
cmds:
# somehow without the sleep, when port-forward is the last command of a series of tasks, port-forward
# prematurely killed when task is exiting
- sleep 4
+
+ run:build:
+ silent: true
+ status:
+ - test -f {{ .run_build_file }}
+ cmds:
+ - npm run build
+ - touch {{ .run_build_file }}
+
+ solo:cluster:minio:
+ internal: true
+ silent: true
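+    # detect whether a MinIO operator is already installed; write --minio or --no-minio to a temp flag file consumed by solo:cluster:setup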
+ cmds:
+ - |
+ if ! kubectl get svc -l app.kubernetes.io/instance=minio-operator --all-namespaces --no-headers | grep -q . ; then
+          echo "No services found with label app.kubernetes.io/instance=minio-operator"
+ echo "--minio" > {{ .minio_flag_file }}
+ else
+ echo "--no-minio" > {{ .minio_flag_file }}
+ fi
+
+ solo:cluster:setup:
+ silent: true
+ deps:
+ - task: "init"
+ - task: "solo:cluster:minio"
+ status:
+ - helm list --all-namespaces | grep -qz "${SOLO_CLUSTER_RELEASE_NAME}"
+ cmds:
+ - |
+ export MINIO_FLAG=$(cat {{ .minio_flag_file }})
+ SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- cluster setup --cluster-setup-namespace "${SOLO_CLUSTER_SETUP_NAMESPACE}" ${MINIO_FLAG} -q
+
+ solo:node:addresses:
+ internal: true
+ cmds:
+ - kubectl get svc -n "${SOLO_NAMESPACE}" -l "solo.hedera.com/type=network-node-svc" --output=go-template-file={{ .ip_list_template_file }}
+
+ start:
+ desc: solo node start
+ deps:
+ - task: "init"
+ cmds:
+ - task: "solo:node:start"
+
+ stop:
+ desc: solo node stop
+ deps:
+ - task: "init"
+ cmds:
+ - task: "solo:node:stop"
+
+ show:ips:
+ deps:
+ - task: "init"
+ cmds:
+ - task: "solo:node:addresses"
+
+ clean:cache:
+ desc: remove solo cache directory
+ deps:
+ - task: "init"
+ cmds:
+ - task: "solo:cache:remove"
+
+ clean:logs:
+    desc: remove solo logs directory
+ deps:
+ - task: "init"
+ cmds:
+ - task: "solo:logs:remove"
diff --git a/Taskfile.yml b/Taskfile.yml
index b26afd878..b3da6b4d5 100644
--- a/Taskfile.yml
+++ b/Taskfile.yml
@@ -1,102 +1,88 @@
version: 3
includes:
- helper: ./HelperTasks.yml
-dotenv:
- - .env
+ helper:
+ taskfile: ./HelperTasks.yml
+ flatten: true
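+    # flatten exposes the helper tasks without the "helper:" namespace prefix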
env:
- SOLO_CHART_VERSION: 0.34.0
- CONSENSUS_NODE_VERSION: v0.56.0
- SOLO_NAMESPACE: solo-e2e
- SOLO_CLUSTER_SETUP_NAMESPACE: solo-setup
- SOLO_CLUSTER_RELEASE_NAME: solo-cluster-setup
SOLO_NETWORK_SIZE: 2
- SOLO_CLUSTER_NAME: solo-cluster
- KIND_IMAGE: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72
- MIRROR_RELEASE_NAME: mirror
-
+ SOLO_NAMESPACE: solo-e2e
+ SOLO_CHART_VERSION: 0.36.3
+ CONSENSUS_NODE_VERSION: v0.57.2
+vars:
+ ip_list_template_file: "{{.ROOT_DIR}}/list-external-ips.gotemplate"
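+  # go-template consumed by solo:node:addresses (kubectl --output=go-template-file) to list external node IPs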
tasks:
default:
- desc: install Solo, deploy the network, set it up, and start it
+ desc: install Solo, create a kind cluster, deploy the network, set it up, and start it
+ deps:
+ - task: "init"
cmds:
- - task: "helper:install:solo"
+ - task: "install:solo"
- task: "install"
- task: "start"
default-with-mirror:
desc: in addition to the defaults, also deploy the mirror node
+ deps:
+ - task: "init"
cmds:
- task: "default"
- task: "solo:mirror-node"
default-with-relay:
desc: in addition to default-with-mirror, deploy the JSON RPC relay
+ deps:
+ - task: "init"
cmds:
- task: "default"
- task: "solo:mirror-node"
- - task: "helper:solo:relay"
+ - task: "solo:relay"
install:
desc: create the cluster, solo init, solo cluster create, solo node keys, solo network deploy
+ deps:
+ - task: "init"
cmds:
- - task: "helper:cluster:create"
- - task: "helper:solo:init"
- - task: "helper:cluster:setup"
- - task: "helper:solo:keys"
- - task: "helper:solo:network:deploy"
-
- start:
- desc: solo node start
- cmds:
- - task: "helper:solo:node:start"
-
- stop:
- desc: solo node stop
- cmds:
- - task: "helper:solo:node:stop"
+ - task: "cluster:create"
+ - task: "solo:init"
+ - task: "cluster:setup"
+ - task: "solo:keys"
+ - task: "solo:network:deploy"
solo:mirror-node:
desc: solo mirror-node deploy with port forward on explorer
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo mirror-node deploy --namespace "${SOLO_NAMESPACE}"
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- mirror-node deploy --namespace "${SOLO_NAMESPACE}" -q
- echo "Enable port forwarding for Hedera Explorer & Mirror Node Network"
- kubectl port-forward -n "${SOLO_NAMESPACE}" svc/hedera-explorer 8080:80 &
- kubectl port-forward svc/mirror-grpc -n "${SOLO_NAMESPACE}" 5600:5600 &
- - task: "helper:sleep_after_port_forward"
+ - task: "sleep_after_port_forward"
solo:destroy-mirror-node:
desc: solo mirror-node destroy
status:
- helm list -n "${SOLO_NAMESPACE}" | grep -vqz "${MIRROR_RELEASE_NAME}"
+ deps:
+ - task: "init"
cmds:
- - npm run build
- - solo mirror-node destroy --namespace "${SOLO_NAMESPACE}" --force || true
+ - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- mirror-node destroy --namespace "${SOLO_NAMESPACE}" --force -q || true
destroy:
desc: destroy relay, mirror-node, and network
+ deps:
+ - task: "init"
cmds:
- - task: "helper:solo:node:stop"
- - task: "helper:solo:network:destroy"
- - task: "solo:destroy-mirror-node"
- - task: "helper:solo:destroy-relay"
- - task: "helper:cluster:destroy"
+ - task: "cluster:destroy"
clean:
desc: destroy, then remove cache directory, logs directory, config, and port forwards
+ deps:
+ - task: "init"
cmds:
- task: "destroy"
- task: "clean:cache"
- task: "clean:logs"
- - task: "helper:solo:config:remove"
- - task: "helper:clean:port-forward"
-
- clean:cache:
- desc: remove solo cache directory
- cmds:
- - task: "helper:solo:cache:remove"
-
- clean:logs:
- desc: remove solo logs director
- cmds:
- - task: "helper:solo:logs:remove"
+ - task: "solo:config:remove"
+ - task: "clean:port-forward"
diff --git a/examples/Taskfile.yml b/examples/Taskfile.yml
new file mode 100644
index 000000000..7985c607b
--- /dev/null
+++ b/examples/Taskfile.yml
@@ -0,0 +1,40 @@
+version: 3
+includes:
+ helper:
+ taskfile: ../HelperTasks.yml
+ flatten: true
+vars:
+ ip_list_template_file: "{{.ROOT_DIR}}/list-external-ips.gotemplate"
+
+tasks:
+ default:
+ deps:
+ - task: "init"
+ cmds:
+ - task: "install:kubectl:darwin"
+ - task: "install:kubectl:linux"
+ - task: "install:solo"
+ - task: "install"
+ - task: "start"
+
+ install:
+    desc: solo init, solo cluster setup, solo node keys, solo network deploy
+ deps:
+ - task: "init"
+ cmds:
+ - task: "solo:init"
+ - task: "solo:cluster:setup"
+ - task: "solo:keys"
+ - task: "solo:network:deploy"
+
+ destroy:
+ deps:
+ - task: "init"
+ cmds:
+ - task: "solo:network:destroy"
+
+ clean:
+ cmds:
+ - task: "destroy"
+ - task: "clean:cache"
+ - task: "clean:logs"
diff --git a/examples/custom-network-config/Taskfile.yml b/examples/custom-network-config/Taskfile.yml
index 49e760e7a..607904b64 100644
--- a/examples/custom-network-config/Taskfile.yml
+++ b/examples/custom-network-config/Taskfile.yml
@@ -1,90 +1,17 @@
version: 3
includes:
- helper: ../../HelperTasks.yml
-dotenv:
- - .env
-
+ main:
+ taskfile: ../Taskfile.yml
+ flatten: true
env:
- SOLO_CHART_VERSION: 0.34.0
- CONSENSUS_NODE_VERSION: v0.56.0
- SOLO_NAMESPACE: solo-{{ env "USER" | replace "." "-" | trunc 63 | default "test" }}
- SOLO_CLUSTER_SETUP_NAMESPACE: solo-setup
- SOLO_CLUSTER_RELEASE_NAME: solo-cluster-setup
- SOLO_NETWORK_SIZE: 7
- SOLO_CLUSTER_NAME: solo-cluster
- MIRROR_RELEASE_NAME: mirror
-
-vars:
- solo_settings_file: "{{.ROOT_DIR}}/settings.txt"
- solo_values_file: "{{.ROOT_DIR}}/init-containers-values.yaml"
- ip_list_template_file: "{{.ROOT_DIR}}/list-external-ips.gotemplate"
- nodes:
- ref: until (env "SOLO_NETWORK_SIZE" | default .SOLO_NETWORK_SIZE | int)
- node_list_internal: "{{range $idx, $n := .nodes }}node{{add $n 1}},{{end}}"
- node_identifiers: "{{ .node_list_internal | trimSuffix \",\" }}"
- solo_user_dir: "{{ env \"HOME\" }}/.solo"
- solo_cache_dir: "{{ .solo_user_dir }}/cache"
- solo_logs_dir: "{{ .solo_user_dir }}/logs"
- solo_keys_dir: "{{ .solo_cache_dir }}/keys"
- solo_bin_dir: "{{ .solo_user_dir }}/bin"
-
-tasks:
- default:
- cmds:
- - task: "helper:install:kubectl:darwin"
- - task: "helper:install:kubectl:linux"
- - task: "helper:install:solo"
- - task: "install"
- - task: "start"
-
- install:
- cmds:
- - task: "helper:solo:init"
- - task: "helper:solo:keys"
- - task: "solo:network:deploy"
-
- start:
- cmds:
- - task: "helper:solo:node:start"
-
- stop:
- cmds:
- - task: "helper:solo:node:stop"
-
- show:ips:
- cmds:
- - task: "solo:node:addresses"
-
- destroy:
- cmds:
- - task: "helper:solo:node:stop"
- - task: "helper:solo:network:destroy"
-
- clean:
- cmds:
- - task: "destroy"
- - task: "clean:cache"
- - task: "clean:logs"
-
- clean:cache:
- cmds:
- - task: "helper:solo:cache:remove"
-
- clean:logs:
- cmds:
- - task: "helper:solo:logs:remove"
-
- # Do not use network:deploy from HelperTasks.yml since custom network need extra settings and values files
- solo:network:deploy:
- internal: true
- cmds:
- - npm run build
- - solo network deploy --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} --release-tag "${CONSENSUS_NODE_VERSION}" --solo-chart-version "${SOLO_CHART_VERSION}" --values-file {{ .solo_values_file }} --settings-txt {{ .solo_settings_file }}
- - solo node setup --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} --release-tag "${CONSENSUS_NODE_VERSION}"
-
- # Do not use node:addresses from HelperTasks.yml since we need to use template file to get output the list of IPs
- solo:node:addresses:
- internal: true
- cmds:
- - kubectl get svc -n "${SOLO_NAMESPACE}" -l "solo.hedera.com/type=network-node-svc" --output=go-template-file={{ .ip_list_template_file }}
-
+ SOLO_NETWORK_SIZE: 10
+ SOLO_NAMESPACE: solo-alex-kuzmin-n4
+ SOLO_CHART_VERSION: 0.36.3
+ CONSENSUS_NODE_VERSION: v0.57.2
+ VALUES_FLAG: "--values-file {{.USER_WORKING_DIR}}/init-containers-values.yaml"
+ SETTINGS_FLAG: "--settings-txt {{.USER_WORKING_DIR}}/settings.txt"
+ SOLO_HOME: "/Users/user/.solo-alex-kuzmin-n4"
+ LOG4J2_FLAG: "--log4j2-xml {{.USER_WORKING_DIR}}/log4j2.xml"
+ APPLICATION_PROPERTIES_FLAG: "--application-properties {{.USER_WORKING_DIR}}/application.properties"
+ # HEDERA_SERVICES_ROOT: "/Users/user/source/hedera-services"
+ # LOCAL_BUILD_FLAG: "--local-build \"{{.HEDERA_SERVICES_ROOT}}/hedera-node/data\""
diff --git a/examples/custom-network-config/application.properties b/examples/custom-network-config/application.properties
new file mode 100644
index 000000000..213d33929
--- /dev/null
+++ b/examples/custom-network-config/application.properties
@@ -0,0 +1,17 @@
+hedera.config.version=0
+ledger.id=0x01
+netty.mode=TEST
+contracts.chainId=298
+hedera.recordStream.logPeriod=1
+balances.exportPeriodSecs=400
+files.maxSizeKb=2048
+hedera.recordStream.compressFilesOnCreation=true
+balances.compressOnCreation=true
+contracts.maxNumWithHapiSigsAccess=0
+autoRenew.targetTypes=
+nodes.gossipFqdnRestricted=false
+hedera.profiles.active=TEST
+# TODO: workaround until the prepareUpgrade freeze recalculates the weight prior to writing the config.txt
+staking.periodMins=1
+nodes.updateAccountIdAllowed=true
+blockStream.streamMode=RECORDS
diff --git a/examples/custom-network-config/init-containers-values.yaml b/examples/custom-network-config/init-containers-values.yaml
index 271872bcb..657aa388c 100644
--- a/examples/custom-network-config/init-containers-values.yaml
+++ b/examples/custom-network-config/init-containers-values.yaml
@@ -78,6 +78,36 @@ hedera:
limits:
cpu: 24
memory: 256Gi
+ - name: node8
+ accountId: 0.0.10
+ root:
+ resources:
+ requests:
+ cpu: 18
+ memory: 256Gi
+ limits:
+ cpu: 24
+ memory: 256Gi
+ - name: node9
+ accountId: 0.0.11
+ root:
+ resources:
+ requests:
+ cpu: 18
+ memory: 256Gi
+ limits:
+ cpu: 24
+ memory: 256Gi
+ - name: node10
+ accountId: 0.0.12
+ root:
+ resources:
+ requests:
+ cpu: 18
+ memory: 256Gi
+ limits:
+ cpu: 24
+ memory: 256Gi
defaults:
envoyProxy:
loadBalancerEnabled: true
@@ -116,7 +146,7 @@ defaults:
memory: 256Gi
extraEnv:
- name: JAVA_OPTS
- value: "-XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ZAllocationSpikeTolerance=2 -XX:ConcGCThreads=14 -XX:ZMarkStackSpaceLimit=16g -XX:MaxDirectMemorySize=64g -XX:MetaspaceSize=100M -XX:+ZGenerational -Xlog:gc*:gc.log --add-opens java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED -Dio.netty.tryReflectionSetAccessible=true"
+ value: "-XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ZAllocationSpikeTolerance=2 -XX:ConcGCThreads=14 -XX:ZMarkStackSpaceLimit=16g -XX:MaxDirectMemorySize=128g -XX:MetaspaceSize=100M -XX:+ZGenerational -Xlog:gc*:gc.log --add-opens java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED -Dio.netty.tryReflectionSetAccessible=true"
- name: JAVA_HEAP_MIN
value: "32g"
- name: JAVA_HEAP_MAX
@@ -136,3 +166,23 @@ minio-server:
limits:
cpu: 0
memory: 0
+deployment:
+ podAnnotations: {}
+ podLabels: {}
+ nodeSelector:
+ solo.hashgraph.io/role: "consensus-node"
+ solo.hashgraph.io/owner: "alex.kuzmin"
+ solo.hashgraph.io/network-id: "4"
+ tolerations:
+ - key: "solo.hashgraph.io/role"
+ operator: "Equal"
+ value: "consensus-node"
+ effect: "NoSchedule"
+ - key: "solo.hashgraph.io/owner"
+ operator: "Equal"
+ value: "alex.kuzmin"
+ effect: "NoSchedule"
+ - key: "solo.hashgraph.io/network-id"
+ operator: "Equal"
+ value: "4"
+ effect: "NoSchedule"
diff --git a/examples/custom-network-config/log4j2.xml b/examples/custom-network-config/log4j2.xml
new file mode 100644
index 000000000..47fb2f712
--- /dev/null
+++ b/examples/custom-network-config/log4j2.xml
@@ -0,0 +1,378 @@
+<!-- log4j2 appender configuration; only the PatternLayout patterns survive in this copy of the diff: -->
+<!--   %d{yyyy-MM-dd HH:mm:ss.SSS} %-5p %-4L %c{1} - %m{nolookups}%n -->
+<!--   %d{yyyy-MM-dd HH:mm:ss.SSS} %-8sn %-5p %-16marker <%t> %c{1}: %msg{nolookups}%n -->
+<!--   %d{yyyy-MM-dd HH:mm:ss.SSS} - %m{nolookups}%n -->
diff --git a/examples/hashsphere-0001/Taskfile.yml b/examples/hashsphere-0001/Taskfile.yml
new file mode 100644
index 000000000..74a6a763e
--- /dev/null
+++ b/examples/hashsphere-0001/Taskfile.yml
@@ -0,0 +1,19 @@
+version: 3
+includes:
+ main:
+ taskfile: ../Taskfile.yml
+ flatten: true
+vars:
+ solo_home_override_dir: "/Users/user/.solo-jeromy-20241231"
+env:
+ SOLO_NETWORK_SIZE: 5
+ SOLO_NAMESPACE: solo-jeromy-20241231
+ SOLO_CHART_VERSION: 0.36.3
+ CONSENSUS_NODE_VERSION: v0.57.2
+ VALUES_FLAG: "--values-file {{.USER_WORKING_DIR}}/init-containers-values.yaml"
+ SETTINGS_FLAG: "--settings-txt {{.USER_WORKING_DIR}}/settings.txt"
+ SOLO_HOME: "{{.solo_home_override_dir}}"
+ # LOG4J2_FLAG: "--log4j2-xml {{.USER_WORKING_DIR}}/log4j2.xml"
+ # APPLICATION_PROPERTIES_FLAG: "--application-properties {{.USER_WORKING_DIR}}/application.properties"
+ # HEDERA_SERVICES_ROOT: "/Users/user/source/hedera-services"
+ # LOCAL_BUILD_FLAG: "--local-build \"{{.HEDERA_SERVICES_ROOT}}/hedera-node/data\""
diff --git a/examples/hashsphere-0001/init-containers-values.yaml b/examples/hashsphere-0001/init-containers-values.yaml
new file mode 100644
index 000000000..76e8437a8
--- /dev/null
+++ b/examples/hashsphere-0001/init-containers-values.yaml
@@ -0,0 +1,168 @@
+# hedera node configuration
+hedera:
+ initContainers:
+ - name: init-hedera-node
+ image: busybox:stable-musl
+ command: ["sh", "-c", "cp -r /etc /data-saved"]
+ volumeMounts:
+ - name: hgcapp-data-saved
+ mountPath: /data-saved
+ nodes:
+ - name: node1
+ accountId: 0.0.3
+ root:
+ resources:
+ requests:
+ cpu: 2
+ memory: 16Gi
+ limits:
+ cpu: 4
+ memory: 31Gi
+ - name: node2
+ accountId: 0.0.4
+ root:
+ resources:
+ requests:
+ cpu: 2
+ memory: 16Gi
+ limits:
+ cpu: 4
+ memory: 31Gi
+ - name: node3
+ accountId: 0.0.5
+ root:
+ resources:
+ requests:
+ cpu: 2
+ memory: 16Gi
+ limits:
+ cpu: 4
+ memory: 31Gi
+ - name: node4
+ accountId: 0.0.6
+ root:
+ resources:
+ requests:
+ cpu: 2
+ memory: 16Gi
+ limits:
+ cpu: 4
+ memory: 31Gi
+ - name: node5
+ accountId: 0.0.7
+ root:
+ resources:
+ requests:
+ cpu: 2
+ memory: 16Gi
+ limits:
+ cpu: 4
+ memory: 31Gi
+defaults:
+ envoyProxy:
+ loadBalancerEnabled: true
+ sidecars:
+ recordStreamUploader:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ limits:
+ cpu: 150m
+ memory: 200Mi
+ eventStreamUploader:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ limits:
+ cpu: 150m
+ memory: 200Mi
+ recordStreamSidecarUploader:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ limits:
+ cpu: 150m
+ memory: 200Mi
+ root:
+ resources:
+ requests:
+ cpu: 2
+ memory: 16Gi
+ limits:
+ cpu: 4
+ memory: 31Gi
+ extraEnv:
+ - name: JAVA_OPTS
+ value: "-XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ZAllocationSpikeTolerance=2 -XX:ConcGCThreads=4 -XX:MaxDirectMemorySize=4g -XX:MetaspaceSize=100M -XX:+ZGenerational -Xlog:gc*:gc.log --add-opens java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED -Dio.netty.tryReflectionSetAccessible=true"
+ - name: JAVA_HEAP_MIN
+ value: "16g"
+ - name: JAVA_HEAP_MAX
+ value: "19g"
+deployment:
+ podAnnotations: {}
+ podLabels: {}
+ nodeSelector:
+ solo.hashgraph.io/role: "consensus-node"
+ tolerations:
+ - key: "solo.hashgraph.io/role"
+ operator: "Equal"
+ value: "consensus-node"
+ effect: "NoSchedule"
+minio-server:
+ secrets:
+ # This secret has [accessKey, secretKey] and will be randomly generated by helm
+ existingSecret: minio-secrets
+ tenant:
+ buckets:
+ - name: solo-streams
+ - name: solo-backups
+ name: minio
+ pools:
+ - servers: 1
+ name: pool-1
+ volumesPerServer: 1
+ size: 512Gi
+ storageClassName: standard-rwo
+ nodeSelector: {}
+ configuration:
+ name: minio-secrets
+ certificate:
+ requestAutoCert: false
+ environment:
+ MINIO_BROWSER_LOGIN_ANIMATION: off # https://github.com/minio/console/issues/2539#issuecomment-1619211962
+haproxyDeployment:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: solo.hedera.com/type
+ operator: In
+ values:
+ - network-node
+ topologyKey: kubernetes.io/hostname
+envoyDeployment:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: solo.hedera.com/type
+ operator: In
+ values:
+ - network-node
+ topologyKey: kubernetes.io/hostname
+minioDeployment:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: solo.hedera.com/type
+ operator: In
+ values:
+ - network-node
+ topologyKey: kubernetes.io/hostname
diff --git a/examples/hashsphere-0001/nlg-values.yaml b/examples/hashsphere-0001/nlg-values.yaml
new file mode 100644
index 000000000..70b0fa9ba
--- /dev/null
+++ b/examples/hashsphere-0001/nlg-values.yaml
@@ -0,0 +1,47 @@
+replicas: 1
+
+resources:
+ limits:
+ memory: 32Gi
+ cpu: '32'
+ requests:
+ memory: 16Gi
+ cpu: '16'
+
+nodeSelector:
+ solo.hashgraph.io/role: "test-clients"
+tolerations:
+ - key: "solo.hashgraph.io/role"
+ operator: "Equal"
+ value: "test-clients"
+ effect: "NoSchedule"
+affinity: {}
+
+loadGenerator:
+ java:
+ maxMemory: '48g'
+ test:
+ className: com.hedera.benchmark.NftTransferLoadTest
+ args:
+ - -c
+ - "7"
+ - -a
+ - "1000"
+ - -T
+ - "10"
+ - -n
+ - "10"
+ - -S
+ - "hot"
+ - -p
+ - "50"
+ - -t
+ - "1m"
+ properties:
+ - '34.118.231.223\:50211=0.0.3'
+ - '34.118.238.41\:50211=0.0.4'
+ - '34.118.235.163\:50211=0.0.5'
+ - '34.118.233.134\:50211=0.0.6'
+ - '34.118.238.65\:50211=0.0.7'
+ - '34.118.230.205\:50211=0.0.8'
+ - '34.118.225.213\:50211=0.0.9'
diff --git a/examples/hashsphere-0001/settings.txt b/examples/hashsphere-0001/settings.txt
new file mode 100644
index 000000000..6f587e119
--- /dev/null
+++ b/examples/hashsphere-0001/settings.txt
@@ -0,0 +1,16 @@
+checkSignedStateFromDisk, 1
+csvFileName, MainNetStats
+doUpnp, false
+loadKeysFromPfxFiles, 0
+maxOutgoingSyncs, 1
+reconnect.active, 1
+reconnect.reconnectWindowSeconds, -1
+showInternalStats, 1
+state.saveStatePeriod, 300
+useLoopbackIp, false
+waitAtStartup, false
+state.mainClassNameOverride, com.hedera.services.ServicesMain
+maxEventQueueForCons, 1000
+merkleDb.hashesRamToDiskThreshold, 8388608
+event.creation.maxCreationRate, 20
+virtualMap.familyThrottleThreshold, 6000000000
diff --git a/examples/custom-network-config/list-external-ips.gotemplate b/examples/list-external-ips.gotemplate
similarity index 92%
rename from examples/custom-network-config/list-external-ips.gotemplate
rename to examples/list-external-ips.gotemplate
index 44d782b49..247d492c6 100644
--- a/examples/custom-network-config/list-external-ips.gotemplate
+++ b/examples/list-external-ips.gotemplate
@@ -3,4 +3,4 @@
{{- range .status.loadBalancer.ingress -}}
{{$name}} {{": "}} {{ .ip }} {{"\n"}}
{{- end -}}
-{{- end -}}
\ No newline at end of file
+{{- end -}}
diff --git a/examples/performance-tuning/HashSphere/Taskfile.yml b/examples/performance-tuning/HashSphere/Taskfile.yml
new file mode 100644
index 000000000..add52c6d9
--- /dev/null
+++ b/examples/performance-tuning/HashSphere/Taskfile.yml
@@ -0,0 +1,17 @@
+version: 3
+includes:
+ main:
+ taskfile: ../Taskfile.yml
+ flatten: true
+env:
+ SOLO_NETWORK_SIZE: 7
+ SOLO_NAMESPACE: solo-perf-hashsphere
+ SOLO_CHART_VERSION: 0.36.3
+ CONSENSUS_NODE_VERSION: v0.57.2
+ VALUES_FLAG: "--values-file {{.USER_WORKING_DIR}}/init-containers-values.yaml"
+ SETTINGS_FLAG: "--settings-txt {{.USER_WORKING_DIR}}/settings.txt"
+ SOLO_HOME: "/Users/user/.solo-perf-hashsphere"
+ # LOG4J2_FLAG: "--log4j2-xml {{.USER_WORKING_DIR}}/log4j2.xml"
+ # APPLICATION_PROPERTIES_FLAG: "--application-properties {{.USER_WORKING_DIR}}/application.properties"
+ # HEDERA_SERVICES_ROOT: "/Users/user/source/hedera-services"
+ # LOCAL_BUILD_FLAG: "--local-build \"{{.HEDERA_SERVICES_ROOT}}/hedera-node/data\""
diff --git a/examples/performance-tuning/HashSphere/settings.txt b/examples/performance-tuning/HashSphere/settings.txt
new file mode 100644
index 000000000..6f587e119
--- /dev/null
+++ b/examples/performance-tuning/HashSphere/settings.txt
@@ -0,0 +1,16 @@
+checkSignedStateFromDisk, 1
+csvFileName, MainNetStats
+doUpnp, false
+loadKeysFromPfxFiles, 0
+maxOutgoingSyncs, 1
+reconnect.active, 1
+reconnect.reconnectWindowSeconds, -1
+showInternalStats, 1
+state.saveStatePeriod, 300
+useLoopbackIp, false
+waitAtStartup, false
+state.mainClassNameOverride, com.hedera.services.ServicesMain
+maxEventQueueForCons, 1000
+merkleDb.hashesRamToDiskThreshold, 8388608
+event.creation.maxCreationRate, 20
+virtualMap.familyThrottleThreshold, 6000000000
diff --git a/examples/performance-tuning/Latitude/Taskfile.yml b/examples/performance-tuning/Latitude/Taskfile.yml
new file mode 100644
index 000000000..55ec6444c
--- /dev/null
+++ b/examples/performance-tuning/Latitude/Taskfile.yml
@@ -0,0 +1,17 @@
+version: 3
+includes:
+ main:
+ taskfile: ../Taskfile.yml
+ flatten: true
+env:
+ SOLO_NETWORK_SIZE: 10
+ SOLO_NAMESPACE: solo-perf-hashsphere-lat
+ SOLO_CHART_VERSION: 0.36.3
+ CONSENSUS_NODE_VERSION: v0.57.2
+ VALUES_FLAG: "--values-file {{.USER_WORKING_DIR}}/init-containers-values.yaml"
+ SETTINGS_FLAG: "--settings-txt {{.USER_WORKING_DIR}}/settings.txt"
+ SOLO_HOME: "/Users/user/.solo-perf-hashsphere-lat"
+ # LOG4J2_FLAG: "--log4j2-xml {{.USER_WORKING_DIR}}/log4j2.xml"
+ # APPLICATION_PROPERTIES_FLAG: "--application-properties {{.USER_WORKING_DIR}}/application.properties"
+ # HEDERA_SERVICES_ROOT: "/Users/user/source/hedera-services"
+ # LOCAL_BUILD_FLAG: "--local-build \"{{.HEDERA_SERVICES_ROOT}}/hedera-node/data\""
diff --git a/examples/performance-tuning/Latitude/settings.txt b/examples/performance-tuning/Latitude/settings.txt
new file mode 100644
index 000000000..6f587e119
--- /dev/null
+++ b/examples/performance-tuning/Latitude/settings.txt
@@ -0,0 +1,16 @@
+checkSignedStateFromDisk, 1
+csvFileName, MainNetStats
+doUpnp, false
+loadKeysFromPfxFiles, 0
+maxOutgoingSyncs, 1
+reconnect.active, 1
+reconnect.reconnectWindowSeconds, -1
+showInternalStats, 1
+state.saveStatePeriod, 300
+useLoopbackIp, false
+waitAtStartup, false
+state.mainClassNameOverride, com.hedera.services.ServicesMain
+maxEventQueueForCons, 1000
+merkleDb.hashesRamToDiskThreshold, 8388608
+event.creation.maxCreationRate, 20
+virtualMap.familyThrottleThreshold, 6000000000
diff --git a/list-external-ips.gotemplate b/list-external-ips.gotemplate
new file mode 100644
index 000000000..247d492c6
--- /dev/null
+++ b/list-external-ips.gotemplate
@@ -0,0 +1,6 @@
+{{- range .items -}}
+ {{ $name := .metadata.name }}
+ {{- range .status.loadBalancer.ingress -}}
+ {{$name}} {{": "}} {{ .ip }} {{"\n"}}
+ {{- end -}}
+{{- end -}}
diff --git a/package-lock.json b/package-lock.json
index c9f184ed6..aaeed7796 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -5592,15 +5592,6 @@
"url": "https://opencollective.com/eslint"
}
},
- "node_modules/eslint/node_modules/@eslint/js": {
- "version": "9.16.0",
- "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.16.0.tgz",
- "integrity": "sha512-tw2HxzQkrbeuvyj1tG2Yqq+0H9wGoI2IMk4EOsQeX+vmd75FtJAzf+gTA69WF+baUKRYQ3x2kbLE08js5OsTVg==",
- "dev": true,
- "engines": {
- "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
- }
- },
"node_modules/eslint/node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
diff --git a/src/commands/cluster.ts b/src/commands/cluster.ts
index 512f55bcc..ee9beb889 100644
--- a/src/commands/cluster.ts
+++ b/src/commands/cluster.ts
@@ -307,6 +307,7 @@ export class ClusterCommand extends BaseCommand {
flags.deployCertManagerCrds,
flags.deployMinio,
flags.deployPrometheusStack,
+ flags.quiet,
flags.soloChartVersion,
),
handler: (argv: any) => {
@@ -328,7 +329,8 @@ export class ClusterCommand extends BaseCommand {
.command({
command: 'reset',
desc: 'Uninstall shared components from cluster',
- builder: (y: any) => flags.setCommandFlags(y, flags.clusterName, flags.clusterSetupNamespace, flags.force),
+ builder: (y: any) =>
+ flags.setCommandFlags(y, flags.clusterName, flags.clusterSetupNamespace, flags.force, flags.quiet),
handler: (argv: any) => {
self.logger.debug("==== Running 'cluster reset' ===", {argv});
diff --git a/src/commands/deployment.ts b/src/commands/deployment.ts
index 67ac83a02..bb278e0c9 100644
--- a/src/commands/deployment.ts
+++ b/src/commands/deployment.ts
@@ -58,7 +58,7 @@ export class DeploymentCommand extends BaseCommand {
title: 'Initialize',
task: async (ctx, task): Promise> => {
self.configManager.update(argv);
- self.logger.debug('Loaded cached config', {config: self.configManager.config});
+ self.logger.debug('Updated config with argv', {config: self.configManager.config});
await self.configManager.executePrompt(task, DeploymentCommand.DEPLOY_FLAGS_LIST);
diff --git a/src/commands/flags.ts b/src/commands/flags.ts
index 23adfdfd9..bb6cc28dd 100644
--- a/src/commands/flags.ts
+++ b/src/commands/flags.ts
@@ -1623,15 +1623,18 @@ export class Flags {
Flags.clusterName,
Flags.clusterSetupNamespace,
Flags.context,
+ Flags.contextClusterUnparsed,
+ Flags.createAmount,
+ Flags.debugNodeAlias,
Flags.deletePvcs,
Flags.deleteSecrets,
Flags.deployCertManager,
Flags.deployCertManagerCrds,
Flags.deployHederaExplorer,
Flags.deployJsonRpcRelay,
- Flags.deploymentClusters,
Flags.deployMinio,
Flags.deployPrometheusStack,
+ Flags.deploymentClusters,
Flags.devMode,
Flags.ecdsaPrivateKey,
Flags.ed25519PrivateKey,
@@ -1639,56 +1642,54 @@ export class Flags {
Flags.enablePrometheusSvcMonitor,
Flags.enableTimeout,
Flags.endpointType,
- Flags.soloChartVersion,
- Flags.generateGossipKeys,
+ Flags.envoyIps,
Flags.generateEcdsaKey,
+ Flags.generateGossipKeys,
Flags.generateTlsKeys,
Flags.gossipEndpoints,
Flags.gossipPrivateKey,
Flags.gossipPublicKey,
Flags.grpcEndpoints,
+ Flags.grpcTlsCertificatePath,
+ Flags.grpcTlsKeyPath,
+ Flags.grpcWebTlsCertificatePath,
+ Flags.grpcWebTlsKeyPath,
+ Flags.haproxyIps,
Flags.hederaExplorerTlsHostName,
Flags.hederaExplorerTlsLoadBalancerIp,
+ Flags.hederaExplorerVersion,
Flags.inputDir,
- Flags.debugNodeAlias,
Flags.localBuildPath,
Flags.log4j2Xml,
+ Flags.mirrorNodeVersion,
Flags.namespace,
Flags.newAccountNumber,
Flags.newAdminKey,
- Flags.createAmount,
Flags.nodeAlias,
Flags.nodeAliasesUnparsed,
Flags.operatorId,
Flags.operatorKey,
Flags.outputDir,
Flags.persistentVolumeClaims,
+ Flags.pinger,
Flags.privateKey,
Flags.profileFile,
Flags.profileName,
- Flags.pinger,
+ Flags.quiet,
Flags.relayReleaseTag,
Flags.releaseTag,
Flags.replicaCount,
- Flags.stateFile,
Flags.setAlias,
Flags.settingTxt,
+ Flags.soloChartVersion,
Flags.stakeAmounts,
+ Flags.stateFile,
Flags.tlsClusterIssuerType,
Flags.tlsPrivateKey,
Flags.tlsPublicKey,
Flags.updateAccountKeys,
Flags.userEmailAddress,
Flags.valuesFile,
- Flags.mirrorNodeVersion,
- Flags.hederaExplorerVersion,
- Flags.grpcTlsCertificatePath,
- Flags.grpcWebTlsCertificatePath,
- Flags.grpcTlsKeyPath,
- Flags.grpcWebTlsKeyPath,
- Flags.contextClusterUnparsed,
- Flags.haproxyIps,
- Flags.envoyIps,
];
/** Resets the definition.disablePrompt for all flags */
@@ -1717,6 +1718,6 @@ export class Flags {
static readonly DEFAULT_FLAGS = {
requiredFlags: [],
requiredFlagsWithDisabledPrompt: [Flags.namespace, Flags.cacheDir, Flags.releaseTag],
- optionalFlags: [Flags.devMode],
+ optionalFlags: [Flags.devMode, Flags.quiet],
};
}
diff --git a/src/commands/network.ts b/src/commands/network.ts
index bfb3e1030..729cd4151 100644
--- a/src/commands/network.ts
+++ b/src/commands/network.ts
@@ -203,7 +203,7 @@ export class NetworkCommand extends BaseCommand {
async prepareConfig(task: any, argv: any) {
this.configManager.update(argv);
- this.logger.debug('Loaded cached config', {config: this.configManager.config});
+ this.logger.debug('Updated config with argv', {config: this.configManager.config});
// disable the prompts that we don't want to prompt the user for
flags.disablePrompts([
@@ -712,9 +712,10 @@ export class NetworkCommand extends BaseCommand {
y,
flags.deletePvcs,
flags.deleteSecrets,
+ flags.enableTimeout,
flags.force,
flags.namespace,
- flags.enableTimeout,
+ flags.quiet,
),
handler: (argv: any) => {
self.logger.debug("==== Running 'network destroy' ===");
diff --git a/src/commands/node/flags.ts b/src/commands/node/flags.ts
index 70204f31e..7ee772358 100644
--- a/src/commands/node/flags.ts
+++ b/src/commands/node/flags.ts
@@ -19,7 +19,7 @@ import {Flags as flags} from '../flags.js';
export const DEFAULT_FLAGS = {
requiredFlags: [],
requiredFlagsWithDisabledPrompt: [flags.namespace, flags.cacheDir, flags.releaseTag],
- optionalFlags: [flags.devMode],
+ optionalFlags: [flags.quiet, flags.devMode],
};
const COMMON_UPDATE_FLAGS_REQUIRED_FLAGS = [flags.cacheDir, flags.namespace, flags.releaseTag];
@@ -179,13 +179,13 @@ export const ADD_EXECUTE_FLAGS = {
export const LOGS_FLAGS = {
requiredFlags: [flags.namespace, flags.nodeAliasesUnparsed],
requiredFlagsWithDisabledPrompt: [],
- optionalFlags: [],
+ optionalFlags: [flags.quiet],
};
export const STATES_FLAGS = {
requiredFlags: [flags.namespace, flags.nodeAliasesUnparsed],
requiredFlagsWithDisabledPrompt: [],
- optionalFlags: [],
+ optionalFlags: [flags.quiet],
};
export const REFRESH_FLAGS = {
@@ -215,5 +215,5 @@ export const START_FLAGS = {
export const SETUP_FLAGS = {
requiredFlags: [flags.cacheDir, flags.namespace, flags.releaseTag],
requiredFlagsWithDisabledPrompt: [flags.app, flags.appConfig, flags.nodeAliasesUnparsed],
- optionalFlags: [flags.devMode, flags.localBuildPath],
+ optionalFlags: [flags.quiet, flags.devMode, flags.localBuildPath],
};
diff --git a/src/commands/node/tasks.ts b/src/commands/node/tasks.ts
index b9ca6e371..8cdbf27f9 100644
--- a/src/commands/node/tasks.ts
+++ b/src/commands/node/tasks.ts
@@ -403,8 +403,10 @@ export class NodeCommandTasks {
task.title = `${title} - status ${chalk.yellow(NodeStatusEnums[statusNumber])}, attempt: ${chalk.blueBright(`${attempt}/${maxAttempts}`)}`;
}
clearTimeout(timeoutId);
- } catch {
- // Catch all guard and fetch errors
+ } catch (e: Error | any) {
+ this.logger.debug(
+ `${title} : Error in checking node activeness: attempt: ${attempt}/${maxAttempts}: ${JSON.stringify(e)}`,
+ );
}
attempt++;
@@ -976,7 +978,7 @@ export class NodeCommandTasks {
}
checkAllNodesAreFrozen(nodeAliasesProperty: string) {
- return new Task('Check all nodes are ACTIVE', (ctx: any, task: ListrTaskWrapper) => {
+ return new Task('Check all nodes are FROZEN', (ctx: any, task: ListrTaskWrapper) => {
return this._checkNodeActivenessTask(ctx, task, ctx.config[nodeAliasesProperty], NodeStatusCodes.FREEZE_COMPLETE);
});
}
diff --git a/src/core/k8.ts b/src/core/k8.ts
index 2da693d6f..879628d69 100644
--- a/src/core/k8.ts
+++ b/src/core/k8.ts
@@ -1026,41 +1026,44 @@ export class K8 {
const check = async (resolve: (items: k8s.V1Pod[]) => void, reject: (reason?: any) => void) => {
// wait for the pod to be available with the given status and labels
- const resp = await this.kubeClient.listNamespacedPod(
- ns,
- // @ts-ignore
- false,
- false,
- undefined,
- undefined,
- labelSelector,
- podCount,
- undefined,
- undefined,
- undefined,
- Duration.ofMinutes(5).toMillis(),
- );
+ try {
+ const resp = await this.kubeClient.listNamespacedPod(
+ ns,
+ // @ts-ignore
+ false,
+ false,
+ undefined,
+ undefined,
+ labelSelector,
+ podCount,
+ undefined,
+ undefined,
+ undefined,
+ Duration.ofMinutes(5).toMillis(),
+ );
+ this.logger.debug(
+ `[attempt: ${attempts}/${maxAttempts}] ${resp.body?.items?.length}/${podCount} pod found [labelSelector: ${labelSelector}, namespace:${ns}]`,
+ );
+ if (resp.body?.items?.length === podCount) {
+ let phaseMatchCount = 0;
+ let predicateMatchCount = 0;
- this.logger.debug(
- `[attempt: ${attempts}/${maxAttempts}] ${resp.body?.items?.length}/${podCount} pod found [labelSelector: ${labelSelector}, namespace:${ns}]`,
- );
- if (resp.body?.items?.length === podCount) {
- let phaseMatchCount = 0;
- let predicateMatchCount = 0;
+ for (const item of resp.body.items) {
+ if (phases.includes(item.status?.phase)) {
+ phaseMatchCount++;
+ }
- for (const item of resp.body.items) {
- if (phases.includes(item.status?.phase)) {
- phaseMatchCount++;
+ if (podItemPredicate && podItemPredicate(item)) {
+ predicateMatchCount++;
+ }
}
- if (podItemPredicate && podItemPredicate(item)) {
- predicateMatchCount++;
+ if (phaseMatchCount === podCount && (!podItemPredicate || predicateMatchCount === podCount)) {
+ return resolve(resp.body.items);
}
}
-
- if (phaseMatchCount === podCount && (!podItemPredicate || predicateMatchCount === podCount)) {
- return resolve(resp.body.items);
- }
+ } catch (e: Error | any) {
+ this.logger.info('Error occurred while waiting for pods, retrying', e);
}
if (++attempts < maxAttempts) {
@@ -1573,7 +1576,7 @@ export class K8 {
const scriptName = 'support-zip.sh';
const sourcePath = path.join(constants.RESOURCES_DIR, scriptName); // script source path
await this.copyTo(podName, ROOT_CONTAINER, sourcePath, `${HEDERA_HAPI_PATH}`);
- await sleep(Duration.ofSeconds(1)); // wait for the script to sync to the file system
+ await sleep(Duration.ofSeconds(3)); // wait for the script to sync to the file system
await this.execContainer(podName, ROOT_CONTAINER, [
'bash',
'-c',
diff --git a/src/core/lease/interval_lease.ts b/src/core/lease/interval_lease.ts
index 143417d4d..b55829eed 100644
--- a/src/core/lease/interval_lease.ts
+++ b/src/core/lease/interval_lease.ts
@@ -23,6 +23,7 @@ import {sleep} from '../helpers.js';
import {Duration} from '../time/duration.js';
import type {Lease, LeaseRenewalService} from './lease.js';
import {StatusCodes} from 'http-status-codes';
+import chalk from 'chalk';
/**
* Concrete implementation of a Kubernetes based time-based mutually exclusive lock via the Coordination API.
@@ -219,6 +220,7 @@ export class IntervalLease implements Lease {
* @throws LeaseRelinquishmentError - If the lease is already acquired by another process or an error occurs during relinquishment.
*/
   async release(): Promise<void> {
+ this.client.logger.showUser(`${chalk.gray('releasing lease')}`);
const lease = await this.retrieveLease();
if (this.scheduleId) {
diff --git a/version.ts b/version.ts
index 0e7069c82..32f80ccb0 100644
--- a/version.ts
+++ b/version.ts
@@ -21,7 +21,7 @@
export const JAVA_VERSION = '21.0.1+12';
export const HELM_VERSION = 'v3.14.2';
-export const SOLO_CHART_VERSION = '0.36.1';
+export const SOLO_CHART_VERSION = '0.36.3';
export const HEDERA_PLATFORM_VERSION = 'v0.57.2';
export const MIRROR_NODE_VERSION = '0.118.1';
export const HEDERA_EXPLORER_VERSION = '0.2.1';