diff --git a/examples/custom-network-config/init-containers-values.yaml b/examples/custom-network-config/init-containers-values.yaml index 04832ec73..271872bcb 100644 --- a/examples/custom-network-config/init-containers-values.yaml +++ b/examples/custom-network-config/init-containers-values.yaml @@ -53,11 +53,11 @@ hedera: root: resources: requests: - cpu: 18000m - memory: 32000Mi + cpu: 18 + memory: 256Gi limits: - cpu: 24000m - memory: 64000Mi + cpu: 24 + memory: 256Gi - name: node6 accountId: 0.0.8 root: @@ -81,6 +81,31 @@ hedera: defaults: envoyProxy: loadBalancerEnabled: true + sidecars: + recordStreamUploader: + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 150m + memory: 200Mi + eventStreamUploader: + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 150m + memory: 200Mi + recordStreamSidecarUploader: + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 150m + memory: 200Mi root: resources: requests: diff --git a/examples/performance-tuning/HashSphere/init-containers-values.yaml b/examples/performance-tuning/HashSphere/init-containers-values.yaml new file mode 100644 index 000000000..807185aba --- /dev/null +++ b/examples/performance-tuning/HashSphere/init-containers-values.yaml @@ -0,0 +1,203 @@ +# hedera node configuration +hedera: + initContainers: + - name: init-hedera-node + image: busybox:stable-musl + command: ["sh", "-c", "cp -r /etc /data-saved"] + volumeMounts: + - name: hgcapp-data-saved + mountPath: /data-saved + nodes: + - name: node0 + accountId: 0.0.3 + root: + resources: + requests: + cpu: 2 + memory: 16Gi + limits: + cpu: 4 + memory: 31Gi + - name: node1 + accountId: 0.0.4 + root: + resources: + requests: + cpu: 2 + memory: 16Gi + limits: + cpu: 4 + memory: 31Gi + - name: node2 + accountId: 0.0.5 + root: + resources: + requests: + cpu: 2 + memory: 16Gi + limits: + cpu: 4 + memory: 31Gi + - name: node3 + accountId: 0.0.6 + root: + resources: + requests: + cpu: 2 + memory: 16Gi + limits: 
+ cpu: 4 + memory: 31Gi + - name: node4 + accountId: 0.0.7 + root: + resources: + requests: + cpu: 2 + memory: 16Gi + limits: + cpu: 4 + memory: 31Gi + - name: node5 + accountId: 0.0.8 + root: + resources: + requests: + cpu: 2 + memory: 16Gi + limits: + cpu: 4 + memory: 31Gi + - name: node6 + accountId: 0.0.9 + root: + resources: + requests: + cpu: 2 + memory: 16Gi + limits: + cpu: 4 + memory: 31Gi +defaults: + envoyProxy: + loadBalancerEnabled: true + sidecars: + recordStreamUploader: + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 150m + memory: 200Mi + eventStreamUploader: + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 150m + memory: 200Mi + recordStreamSidecarUploader: + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 150m + memory: 200Mi + root: + resources: + requests: + cpu: 2 + memory: 16Gi + limits: + cpu: 4 + memory: 31Gi + extraEnv: + - name: JAVA_OPTS + value: "-XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ZAllocationSpikeTolerance=2 -XX:ConcGCThreads=4 -XX:MaxDirectMemorySize=4g -XX:MetaspaceSize=100M -XX:+ZGenerational -Xlog:gc*:gc.log --add-opens java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED -Dio.netty.tryReflectionSetAccessible=true" + - name: JAVA_HEAP_MIN + value: "16g" + - name: JAVA_HEAP_MAX + value: "19g" +minio-server: + tenant: + pools: + - servers: 1 + name: pool-1 + volumesPerServer: 1 + size: 500Gi + storageClassName: local-path + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 0 + memory: 0 +deployment: + podAnnotations: {} + podLabels: {} + nodeSelector: + solo.hashgraph.io/role: "consensus-node" + tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" +minio-server: + secrets: + # This secret has [accessKey, secretKey] and will be randomly generated by helm + existingSecret: minio-secrets + tenant: + buckets: + - name: solo-streams + - name: solo-backups + 
name: minio + pools: + - servers: 1 + name: pool-1 + volumesPerServer: 1 + size: 512Gi + storageClassName: standard-rwo + nodeSelector: {} + configuration: + name: minio-secrets + certificate: + requestAutoCert: false + environment: + MINIO_BROWSER_LOGIN_ANIMATION: off # https://github.com/minio/console/issues/2539#issuecomment-1619211962 +haproxyDeployment: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: solo.hedera.com/type + operator: In + values: + - network-node + topologyKey: kubernetes.io/hostname +envoyDeployment: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: solo.hedera.com/type + operator: In + values: + - network-node + topologyKey: kubernetes.io/hostname +minioDeployment: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: solo.hedera.com/type + operator: In + values: + - network-node + topologyKey: kubernetes.io/hostname diff --git a/examples/performance-tuning/HashSphere/nlg-values.yaml b/examples/performance-tuning/HashSphere/nlg-values.yaml new file mode 100644 index 000000000..70b0fa9ba --- /dev/null +++ b/examples/performance-tuning/HashSphere/nlg-values.yaml @@ -0,0 +1,47 @@ +replicas: 1 + +resources: + limits: + memory: 32Gi + cpu: '32' + requests: + memory: 16Gi + cpu: '16' + +nodeSelector: + solo.hashgraph.io/role: "test-clients" +tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "test-clients" + effect: "NoSchedule" +affinity: {} + +loadGenerator: + java: + maxMemory: '48g' + test: + className: com.hedera.benchmark.NftTransferLoadTest + args: + - -c + - "7" + - -a + - "1000" + - -T + - "10" + - -n + - "10" + - -S + - "hot" + - -p + - "50" + - -t + - "1m" + properties: + - '34.118.231.223\:50211=0.0.3' + - '34.118.238.41\:50211=0.0.4' + - '34.118.235.163\:50211=0.0.5' + - 
'34.118.233.134\:50211=0.0.6' + - '34.118.238.65\:50211=0.0.7' + - '34.118.230.205\:50211=0.0.8' + - '34.118.225.213\:50211=0.0.9' diff --git a/examples/performance-tuning/Latitude/init-containers-values.yaml b/examples/performance-tuning/Latitude/init-containers-values.yaml new file mode 100644 index 000000000..669354c96 --- /dev/null +++ b/examples/performance-tuning/Latitude/init-containers-values.yaml @@ -0,0 +1,163 @@ +# hedera node configuration +hedera: + initContainers: + - name: init-hedera-node + image: busybox:stable-musl + command: ["sh", "-c", "cp -r /etc /data-saved"] + volumeMounts: + - name: hgcapp-data-saved + mountPath: /data-saved + nodes: + - name: node0 + accountId: 0.0.3 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + - name: node1 + accountId: 0.0.4 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + - name: node2 + accountId: 0.0.5 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + - name: node3 + accountId: 0.0.6 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + - name: node4 + accountId: 0.0.7 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + - name: node5 + accountId: 0.0.8 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + - name: node6 + accountId: 0.0.9 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + - name: node7 + accountId: 0.0.10 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + - name: node8 + accountId: 0.0.11 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + - name: node9 + accountId: 0.0.12 + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi +defaults: + envoyProxy: + 
loadBalancerEnabled: true + root: + resources: + requests: + cpu: 18 + memory: 256Gi + limits: + cpu: 24 + memory: 256Gi + extraEnv: + - name: JAVA_OPTS + value: "-XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ZAllocationSpikeTolerance=2 -XX:ConcGCThreads=14 -XX:ZMarkStackSpaceLimit=16g -XX:MaxDirectMemorySize=64g -XX:MetaspaceSize=100M -XX:+ZGenerational -Xlog:gc*:gc.log --add-opens java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED -Dio.netty.tryReflectionSetAccessible=true" + - name: JAVA_HEAP_MIN + value: "32g" + - name: JAVA_HEAP_MAX + value: "118g" +minio-server: + tenant: + pools: + - servers: 1 + name: pool-1 + volumesPerServer: 1 + size: 500Gi + storageClassName: local-path + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 0 + memory: 0 +deployment: + podAnnotations: {} + podLabels: {} + nodeSelector: + solo.hashgraph.io/role: "consensus-node" + solo.hashgraph.io/owner: "alex.kuzmin" + solo.hashgraph.io/network-id: "1" + tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" + - key: "solo.hashgraph.io/owner" + operator: "Equal" + value: "alex.kuzmin" + effect: "NoSchedule" + - key: "solo.hashgraph.io/network-id" + operator: "Equal" + value: "1" + effect: "NoSchedule" diff --git a/examples/performance-tuning/Latitude/nlg-values.yaml b/examples/performance-tuning/Latitude/nlg-values.yaml new file mode 100644 index 000000000..bbd5be199 --- /dev/null +++ b/examples/performance-tuning/Latitude/nlg-values.yaml @@ -0,0 +1,43 @@ +replicas: 1 + +resources: + limits: + memory: 50Gi + requests: + memory: 48Gi + +nodeSelector: {} +tolerations: [] +affinity: {} + +loadGenerator: + java: + maxMemory: '48g' + test: + className: com.hedera.benchmark.NftTransferLoadTest + args: + - -c + - "32" + - -a + - "1000" + - -T + - "10" + - -n + - "100" + - -S + - "hot" + - -p + - "50" + - -t + - "1m" + properties: + - '10.44.14.166\:50211=0.0.3' + - 
'10.44.14.163\:50211=0.0.4' + - '10.44.14.164\:50211=0.0.5' + - '10.44.14.161\:50211=0.0.6' + - '10.44.14.165\:50211=0.0.7' + - '10.44.14.169\:50211=0.0.8' + - '10.44.14.167\:50211=0.0.9' + - '10.44.14.170\:50211=0.0.10' + - '10.44.14.168\:50211=0.0.11' + - '10.44.14.142\:50211=0.0.12' diff --git a/examples/performance-tuning/README.md b/examples/performance-tuning/README.md new file mode 100644 index 000000000..80a4e56af --- /dev/null +++ b/examples/performance-tuning/README.md @@ -0,0 +1,22 @@ +# The usage of resources in Solo +## Modify Taskfile.yml, task "network:deploy" +Add "--values-file init-containers-values.yaml" + +Example: + +> solo:network:deploy: +> internal: true +> cmds: +> - npm run solo-test -- network deploy --namespace "\${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} --solo-chart-version "\${SOLO_CHART_VERSION}" --settings-txt settings.txt --log4j2-xml log4j2.xml --values-file init-containers-values.yaml --application-properties application.properties + + +## Provided examples for Consensus nodes: +* HashSphere/init-containers-values.yaml (HashSphere on Google Cloud, for 4-core/32GB, 7-node) +* Latitude/init-containers-values.yaml (Latitude, 128GB, 10-node) + +## and corresponding NetworkLoadGenerator templates: + +* HashSphere/nlg-values.yaml +* Latitude/nlg-values.yaml +Start as follows: +> helm upgrade --install nlg oci://swirldslabs.jfrog.io/load-generator-helm-release-local/network-load-generator --version 0.2.1 --values nlg-values.yaml -n solo-hashsphere1