diff --git a/.github/workflows/flow-build-application.yaml b/.github/workflows/flow-build-application.yaml index b020c8b0e..5ec7c6c38 100644 --- a/.github/workflows/flow-build-application.yaml +++ b/.github/workflows/flow-build-application.yaml @@ -141,6 +141,30 @@ jobs: coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} + e2e-node-update-tests: + name: E2E Tests + uses: ./.github/workflows/zxc-e2e-test.yaml + needs: + - env-vars + - code-style + with: + custom-job-label: Node Update + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-update-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-update-test-subdir }} + coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-update-coverage-report }} + + e2e-node-delete-tests: + name: E2E Tests + uses: ./.github/workflows/zxc-e2e-test.yaml + needs: + - env-vars + - code-style + with: + custom-job-label: Node Delete + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-delete-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-delete-test-subdir }} + coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-delete-coverage-report }} + e2e-relay-tests: name: E2E Tests if: ${{ github.event_name == 'push' || github.event.inputs.enable-e2e-tests == 'true' }} @@ -166,6 +190,8 @@ jobs: - e2e-node-pfx-kill-tests - e2e-node-local-build-tests - e2e-node-add-tests + - e2e-node-update-tests + - e2e-node-delete-tests - e2e-relay-tests if: ${{ (github.event_name == 'push' || github.event.inputs.enable-unit-tests == 'true' || github.event.inputs.enable-e2e-tests == 'true') && !failure() && !cancelled() }} with: @@ -180,6 +206,8 @@ jobs: e2e-node-pfx-kill-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} e2e-node-local-build-test-subdir: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} e2e-node-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} + e2e-node-update-test-subdir: ${{ needs.env-vars.outputs.e2e-node-update-test-subdir }} + e2e-node-delete-test-subdir: ${{ needs.env-vars.outputs.e2e-node-delete-test-subdir }} e2e-relay-test-subdir: ${{ needs.env-vars.outputs.e2e-relay-test-subdir }} e2e-coverage-report: ${{ needs.env-vars.outputs.e2e-coverage-report }} e2e-mirror-node-coverage-report: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} @@ -187,6 +215,8 @@ jobs: e2e-node-pfx-kill-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} e2e-node-local-build-coverage-report: ${{ needs.env-vars.outputs.e2e-node-local-build-coverage-report }} e2e-node-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} + e2e-node-update-coverage-report: ${{ needs.env-vars.outputs.e2e-node-update-coverage-report }} + e2e-node-delete-coverage-report: ${{ needs.env-vars.outputs.e2e-node-delete-coverage-report }} e2e-relay-coverage-report: ${{ needs.env-vars.outputs.e2e-relay-coverage-report }} secrets: snyk-token: ${{ secrets.SNYK_TOKEN }} diff --git a/.github/workflows/flow-pull-request-checks.yaml b/.github/workflows/flow-pull-request-checks.yaml index 9d96a6595..14940cbe9 100644 --- a/.github/workflows/flow-pull-request-checks.yaml +++ b/.github/workflows/flow-pull-request-checks.yaml @@ -126,6 +126,30 @@ jobs: coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} + 
e2e-node-update-tests: + name: E2E Tests + uses: ./.github/workflows/zxc-e2e-test.yaml + needs: + - env-vars + - code-style + with: + custom-job-label: Node Update + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-update-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-update-test-subdir }} + coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-update-coverage-report }} + + e2e-node-delete-tests: + name: E2E Tests + uses: ./.github/workflows/zxc-e2e-test.yaml + needs: + - env-vars + - code-style + with: + custom-job-label: Node Delete + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-delete-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-delete-test-subdir }} + coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-delete-coverage-report }} + e2e-relay-tests: name: E2E Tests if: ${{ !cancelled() && always() }} @@ -151,6 +175,8 @@ jobs: - e2e-node-pfx-kill-tests - e2e-node-local-build-tests - e2e-node-add-tests + - e2e-node-update-tests + - e2e-node-delete-tests - e2e-relay-tests if: ${{ github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name }} with: @@ -163,6 +189,8 @@ jobs: e2e-node-pfx-kill-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} e2e-node-local-build-test-subdir: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} e2e-node-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} + e2e-node-update-test-subdir: ${{ needs.env-vars.outputs.e2e-node-update-test-subdir }} + e2e-node-delete-test-subdir: ${{ needs.env-vars.outputs.e2e-node-delete-test-subdir }} e2e-relay-test-subdir: ${{ needs.env-vars.outputs.e2e-relay-test-subdir }} e2e-coverage-report: ${{ needs.env-vars.outputs.e2e-coverage-report }} e2e-mirror-node-coverage-report: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} @@ -170,6 +198,8 @@ jobs: e2e-node-pfx-kill-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} e2e-node-local-build-coverage-report: ${{ needs.env-vars.outputs.e2e-node-local-build-coverage-report }} e2e-node-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} + e2e-node-update-coverage-report: ${{ needs.env-vars.outputs.e2e-node-update-coverage-report }} + e2e-node-delete-coverage-report: ${{ needs.env-vars.outputs.e2e-node-delete-coverage-report }} e2e-relay-coverage-report: ${{ needs.env-vars.outputs.e2e-relay-coverage-report }} secrets: codecov-token: ${{ secrets.CODECOV_TOKEN }} @@ -186,6 +216,8 @@ jobs: - e2e-node-pfx-kill-tests - e2e-node-local-build-tests - e2e-node-add-tests + - e2e-node-update-tests + - e2e-node-delete-tests - e2e-relay-tests if: ${{ github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name }} with: @@ -198,6 +230,8 @@ jobs: e2e-node-pfx-kill-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} e2e-node-local-build-test-subdir: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} e2e-node-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} + e2e-node-update-test-subdir: ${{ needs.env-vars.outputs.e2e-node-update-test-subdir }} + e2e-node-delete-test-subdir: ${{ needs.env-vars.outputs.e2e-node-delete-test-subdir }} e2e-relay-test-subdir: ${{ needs.env-vars.outputs.e2e-relay-test-subdir }} e2e-coverage-report: ${{ needs.env-vars.outputs.e2e-coverage-report }} e2e-mirror-node-coverage-report: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} @@ 
-205,6 +239,8 @@ jobs: e2e-node-pfx-kill-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} e2e-node-local-build-coverage-report: ${{ needs.env-vars.outputs.e2e-node-local-build-coverage-report }} e2e-node-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} + e2e-node-update-coverage-report: ${{ needs.env-vars.outputs.e2e-node-update-coverage-report }} + e2e-node-delete-coverage-report: ${{ needs.env-vars.outputs.e2e-node-delete-coverage-report }} e2e-relay-coverage-report: ${{ needs.env-vars.outputs.e2e-relay-coverage-report }} secrets: codacy-project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} @@ -221,6 +257,8 @@ jobs: # - e2e-node-pfx-kill-tests # - e2e-node-local-build-tests # - e2e-node-add-tests +# - e2e-node-update-tests +# - e2e-node-delete-tests # - e2e-relay-tests # if: ${{ github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name && github.actor != 'dependabot[bot]' }} # with: @@ -232,6 +270,8 @@ jobs: # e2e-node-pfx-kill-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} # e2e-node-local-build-test-subdir: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} # e2e-node-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} +# e2e-node-update-test-subdir: ${{ needs.env-vars.outputs.e2e-node-update-test-subdir }} +# e2e-node-delete-test-subdir: ${{ needs.env-vars.outputs.e2e-node-delete-test-subdir }} # e2e-relay-test-subdir: ${{ needs.env-vars.outputs.e2e-relay-test-subdir }} # e2e-coverage-report: ${{ needs.env-vars.outputs.e2e-coverage-report }} # e2e-mirror-node-coverage-report: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} @@ -239,6 +279,8 @@ jobs: # e2e-node-pfx-kill-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} # e2e-node-local-build-coverage-report: ${{ needs.env-vars.outputs.e2e-node-local-build-coverage-report }} # e2e-node-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} +# e2e-node-update-coverage-report: ${{ needs.env-vars.outputs.e2e-node-update-coverage-report }} +# e2e-node-delete-coverage-report: ${{ needs.env-vars.outputs.e2e-node-delete-coverage-report }} # e2e-relay-coverage-report: ${{ needs.env-vars.outputs.e2e-relay-coverage-report }} # secrets: # snyk-token: ${{ secrets.SNYK_TOKEN }} diff --git a/.github/workflows/zxc-code-analysis.yaml b/.github/workflows/zxc-code-analysis.yaml index 4324834f3..d04147aa1 100644 --- a/.github/workflows/zxc-code-analysis.yaml +++ b/.github/workflows/zxc-code-analysis.yaml @@ -85,6 +85,16 @@ on: type: string required: false default: "e2e-node-add" + e2e-node-update-test-subdir: + description: "E2E Node Update Test Subdirectory:" + type: string + required: false + default: "e2e-node-update" + e2e-node-delete-test-subdir: + description: "E2E Node Delete Test Subdirectory:" + type: string + required: false + default: "e2e-node-delete" e2e-relay-test-subdir: description: "E2E Relay Test Subdirectory:" type: string @@ -120,6 +130,16 @@ on: type: string required: false default: "E2E Node Add Tests Coverage Report" + e2e-node-update-coverage-report: + description: "E2E Node Update Coverage Report:" + type: string + required: false + default: "E2E Node Update Tests Coverage Report" + e2e-node-delete-coverage-report: + description: "E2E Node Delete Coverage Report:" + type: string + required: false + default: "E2E Node Delete Tests Coverage Report" e2e-relay-coverage-report: description: "E2E Relay Coverage 
Report:" type: string @@ -224,6 +244,20 @@ jobs: name: ${{ inputs.e2e-node-add-coverage-report }} path: 'coverage/${{ inputs.e2e-node-add-test-subdir }}' + - name: Download E2E Update Coverage Report + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + if: ${{ (inputs.enable-codecov-analysis || inputs.enable-codacy-coverage) && inputs.enable-e2e-coverage-report && !cancelled() && !failure() }} + with: + name: ${{ inputs.e2e-node-update-coverage-report }} + path: 'coverage/${{ inputs.e2e-node-update-test-subdir }}' + + - name: Download E2E Delete Coverage Report + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + if: ${{ (inputs.enable-codecov-analysis || inputs.enable-codacy-coverage) && inputs.enable-e2e-coverage-report && !cancelled() && !failure() }} + with: + name: ${{ inputs.e2e-node-delete-coverage-report }} + path: 'coverage/${{ inputs.e2e-node-delete-test-subdir }}' + - name: Publish To Codecov uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0 if: ${{ inputs.enable-codecov-analysis && !cancelled() && !failure() }} diff --git a/.github/workflows/zxc-e2e-test.yaml b/.github/workflows/zxc-e2e-test.yaml index a37d1447e..c701ea6a1 100644 --- a/.github/workflows/zxc-e2e-test.yaml +++ b/.github/workflows/zxc-e2e-test.yaml @@ -163,7 +163,7 @@ jobs: if: ${{ runner.os == 'linux' && (inputs.npm-test-script == 'test-e2e-node-local-build' || inputs.npm-test-script == 'test-e2e-node-add') && !cancelled() && !failure() }} run: | cd .. - git clone https://github.com/hashgraph/hedera-services.git + git clone https://github.com/hashgraph/hedera-services.git --branch v0.53.2 cd hedera-services ls -ltr ${{ env.CG_EXEC }} ./gradlew assemble --stacktrace --info diff --git a/.github/workflows/zxc-env-vars.yaml b/.github/workflows/zxc-env-vars.yaml index e0801f9b5..191832f52 100644 --- a/.github/workflows/zxc-env-vars.yaml +++ b/.github/workflows/zxc-env-vars.yaml @@ -44,6 +44,12 @@ on: e2e-node-add-test-subdir: description: "E2E Node Add Test Subdirectory" value: ${{ jobs.env-vars.outputs.e2e_node_add_test_subdir }} + e2e-node-update-test-subdir: + description: "E2E Node Update Test Subdirectory" + value: ${{ jobs.env-vars.outputs.e2e_node_update_test_subdir }} + e2e-node-delete-test-subdir: + description: "E2E Node Delete Test Subdirectory" + value: ${{ jobs.env-vars.outputs.e2e_node_delete_test_subdir }} e2e-relay-test-subdir: description: "E2E Relay Test Subdirectory" value: ${{ jobs.env-vars.outputs.e2e_relay_test_subdir }} @@ -65,6 +71,12 @@ on: e2e-node-add-coverage-report: description: "E2E Node Add Tests Coverage Report" value: ${{ jobs.env-vars.outputs.e2e_node_add_coverage_report }} + e2e-node-update-coverage-report: + description: "E2E Node Update Tests Coverage Report" + value: ${{ jobs.env-vars.outputs.e2e_node_update_coverage_report }} + e2e-node-delete-coverage-report: + description: "E2E Node Delete Tests Coverage Report" + value: ${{ jobs.env-vars.outputs.e2e_node_delete_coverage_report }} e2e-relay-coverage-report: description: "E2E Relay Tests Coverage Report" value: ${{ jobs.env-vars.outputs.e2e_relay_coverage_report }} @@ -84,6 +96,8 @@ jobs: e2e_node_pfx_kill_test_subdir: e2e-node-pfx-kill e2e_node_local_build_test_subdir: e2e-node-local-build e2e_node_add_test_subdir: e2e-node-add + e2e_node_update_test_subdir: e2e-node-update + e2e_node_delete_test_subdir: e2e-node-delete e2e_relay_test_subdir: e2e-relay e2e_coverage_report: "E2E Tests Coverage Report" e2e_mirror_node_coverage_report:
"E2E Mirror Node Tests Coverage Report" @@ -91,6 +105,8 @@ jobs: e2e_node_pfx_kill_coverage_report: "E2E Node PFX Kill Tests Coverage Report" e2e_node_local_build_coverage_report: "E2E Node Local Build Tests Coverage Report" e2e_node_add_coverage_report: "E2E Node Add Tests Coverage Report" + e2e_node_update_coverage_report: "E2E Node Update Tests Coverage Report" + e2e_node_delete_coverage_report: "E2E Node Delete Tests Coverage Report" e2e_relay_coverage_report: "E2E Relay Tests Coverage Report" steps: - run: echo "Exposing environment variables to reusable workflows" diff --git a/package.json b/package.json index 7b5cb79d5..95ddb0589 100644 --- a/package.json +++ b/package.json @@ -18,7 +18,9 @@ "test-e2e-node-pem-stop": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node PEM Stop Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-pem-stop.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-pem-stop' --testRegex=\".*\\/e2e\\/commands\\/node_pem_stop\\.test\\.mjs\"", "test-e2e-node-pfx-kill": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node PFX Kill Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-pfx-kill.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-pfx-kill' --testRegex=\".*\\/e2e\\/commands\\/node_pfx_kill\\.test\\.mjs\"", "test-e2e-node-local-build": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node Local Custom Build' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-local-build.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-local-build' --testRegex=\".*\\/e2e\\/commands\\/node-local.*\\.test\\.mjs\"", - "test-e2e-node-add": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node Add Custom Build' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-add.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-add' --testRegex=\".*\\/e2e\\/commands\\/node-add.*\\.test\\.mjs\"", + "test-e2e-node-add": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node Add' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-add.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-add' --testRegex=\".*\\/e2e\\/commands\\/node-add.*\\.test\\.mjs\"", + "test-e2e-node-update": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node Update' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-update.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-update' --testRegex=\".*\\/e2e\\/commands\\/node-update.*\\.test\\.mjs\"", + "test-e2e-node-delete": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node Delete' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-delete.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-delete' --testRegex=\".*\\/e2e\\/commands\\/node-delete.*\\.test\\.mjs\"", "test-e2e-relay": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Relay Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e-relay.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-relay' --testRegex=\".*\\/e2e\\/commands\\/relay\\.test\\.mjs\"", "merge-clean": "rm -rf .nyc_output && mkdir .nyc_output && rm -rf coverage/lcov-report && rm -rf coverage/solo && rm coverage/*.*", "merge-e2e": "nyc merge ./coverage/e2e/ .nyc_output/coverage.json", 
diff --git a/resources/templates/application.properties b/resources/templates/application.properties index 7a0bfc822..ae8714f90 100644 --- a/resources/templates/application.properties +++ b/resources/templates/application.properties @@ -13,3 +13,5 @@ nodes.gossipFqdnRestricted=false hedera.profiles.active=TEST # TODO: this is a workaround until prepareUpgrade freeze will recalculate the weight prior to writing the config.txt staking.periodMins=1 +nodes.updateAccountIdAllowed=true + diff --git a/src/commands/flags.mjs b/src/commands/flags.mjs index d445afaea..511d49b2f 100644 --- a/src/commands/flags.mjs +++ b/src/commands/flags.mjs @@ -538,6 +538,72 @@ export const localBuildPath = { } } +/** @type {CommandFlag} **/ +export const newAccountNumber = { + constName: 'newAccountNumber', + name: 'new-account-number', + definition: { + describe: 'new account number for node update transaction', + defaultValue: '', + type: 'string' + } +} + +/** @type {CommandFlag} **/ +export const newAdminKey = { + constName: 'newAdminKey', + name: 'new-admin-key', + definition: { + describe: 'new admin key for the Hedera account', + defaultValue: '', + type: 'string' + } +} + +/** @type {CommandFlag} **/ +export const gossipPublicKey = { + constName: 'gossipPublicKey', + name: 'gossip-public-key', + definition: { + describe: 'path and file name of the public key for signing gossip in PEM key format to be used', + defaultValue: '', + type: 'string' + } +} + +/** @type {CommandFlag} **/ +export const gossipPrivateKey = { + constName: 'gossipPrivateKey', + name: 'gossip-private-key', + definition: { + describe: 'path and file name of the private key for signing gossip in PEM key format to be used', + defaultValue: '', + type: 'string' + } +} + +/** @type {CommandFlag} **/ +export const tlsPublicKey = { + constName: 'tlsPublicKey', + name: 'tls-public-key', + definition: { + describe: 'path and file name of the public TLS key to be used', + defaultValue: '', + type: 'string' + } +} + +/** @type {CommandFlag} **/ +export const tlsPrivateKey = { + constName: 'tlsPrivateKey', + name: 'tls-private-key', + definition: { + describe: 'path and file name of the private TLS key to be used', + defaultValue: '', + type: 'string' + } +} + /** @type {CommandFlag} **/ export const log4j2Xml = { constName: 'log4j2Xml', @@ -700,6 +766,8 @@ export const allFlags = [ generateGossipKeys, generateTlsKeys, gossipEndpoints, + gossipPrivateKey, + gossipPublicKey, grpcEndpoints, hederaExplorerTlsHostName, hederaExplorerTlsLoadBalancerIp, @@ -707,6 +775,8 @@ export const allFlags = [ localBuildPath, log4j2Xml, namespace, + newAccountNumber, + newAdminKey, nodeID, nodeIDs, operatorId, @@ -721,6 +791,8 @@ export const allFlags = [ setAlias, settingTxt, tlsClusterIssuerType, + tlsPrivateKey, + tlsPublicKey, updateAccountKeys, valuesFile ] diff --git a/src/commands/node.mjs b/src/commands/node.mjs index 95694624c..3a3dc0b27 100644 --- a/src/commands/node.mjs +++ b/src/commands/node.mjs @@ -24,7 +24,7 @@ import * as helpers from '../core/helpers.mjs' import { getNodeAccountMap, getNodeLogs, - getTmpDir, + getTmpDir, renameAndCopyFile, sleep, validatePath } from '../core/helpers.mjs' @@ -41,8 +41,10 @@ import { FileUpdateTransaction, FreezeTransaction, FreezeType, - NodeCreateTransaction, PrivateKey, + NodeCreateTransaction, + NodeUpdateTransaction, + NodeDeleteTransaction, ServiceEndpoint, Timestamp } from '@hashgraph/sdk' @@ -154,6 +156,53 @@ export class NodeCommand extends BaseCommand { ] } + static get DELETE_CONFIGS_NAME () { + return 
'deleteConfigs' + } + + static get DELETE_FLAGS_LIST () { + return [ + flags.app, + flags.cacheDir, + flags.chartDirectory, + flags.devMode, + flags.endpointType, + flags.keyFormat, + flags.localBuildPath, + flags.namespace, + flags.nodeID, + flags.releaseTag + ] + } + + static get UPDATE_CONFIGS_NAME () { + return 'updateConfigs' + } + + static get UPDATE_FLAGS_LIST () { + return [ + flags.app, + flags.cacheDir, + flags.chartDirectory, + flags.devMode, + flags.endpointType, + flags.fstChartVersion, + flags.gossipEndpoints, + flags.gossipPrivateKey, + flags.gossipPublicKey, + flags.grpcEndpoints, + flags.keyFormat, + flags.localBuildPath, + flags.namespace, + flags.newAccountNumber, + flags.newAdminKey, + flags.nodeID, + flags.releaseTag, + flags.tlsPrivateKey, + flags.tlsPublicKey + ] + } + /** * stops and closes the port forwards * @returns {Promise} @@ -578,6 +627,15 @@ export class NodeCommand extends BaseCommand { }) } + async loadPermCertificate (certFullPath) { + const certPem = fs.readFileSync(certFullPath).toString() + const decodedDers = x509.PemConverter.decode(certPem) + if (!decodedDers || decodedDers.length === 0) { + throw new FullstackTestingError('unable to load perm key: ' + certFullPath) + } + return (new Uint8Array(decodedDers[0])) + } + async prepareUpgradeZip (stagingDir) { // we build a mock upgrade.zip file as we really don't need to upgrade the network // also the platform zip file is ~80Mb in size requiring a lot of transactions since the max @@ -1633,12 +1691,7 @@ export class NodeCommand extends BaseCommand { const config = /** @type {NodeAddConfigClass} **/ ctx.config const signingCertFile = Templates.renderGossipPemPublicKeyFile(constants.SIGNING_KEY_PREFIX, config.nodeId) const signingCertFullPath = path.join(config.keysDir, signingCertFile) - const signingCertPem = fs.readFileSync(signingCertFullPath).toString() - const decodedDers = x509.PemConverter.decode(signingCertPem) - if (!decodedDers || decodedDers.length === 0) { - throw new FullstackTestingError('unable to decode public key: ' + signingCertFile) - } - ctx.signingCertDer = new Uint8Array(decodedDers[0]) + ctx.signingCertDer = await this.loadPermCertificate(signingCertFullPath) } }, { @@ -1647,12 +1700,7 @@ export class NodeCommand extends BaseCommand { const config = /** @type {NodeAddConfigClass} **/ ctx.config const tlsCertFile = Templates.renderTLSPemPublicKeyFile(config.nodeId) const tlsCertFullPath = path.join(config.keysDir, tlsCertFile) - const tlsCertPem = fs.readFileSync(tlsCertFullPath).toString() - const tlsCertDers = x509.PemConverter.decode(tlsCertPem) - if (!tlsCertDers || tlsCertDers.length === 0) { - throw new FullstackTestingError('unable to decode tls cert: ' + tlsCertFullPath) - } - const tlsCertDer = new Uint8Array(tlsCertDers[0]) + const tlsCertDer = await this.loadPermCertificate(tlsCertFullPath) ctx.tlsCertHash = crypto.createHash('sha384').update(tlsCertDer).digest() } }, @@ -2067,8 +2115,8 @@ export class NodeCommand extends BaseCommand { try { await tasks.run() } catch (e) { - self.logger.error(`Error in setting up nodes: ${e.message}`, e) - throw new FullstackTestingError(`Error in setting up nodes: ${e.message}`, e) + self.logger.error(`Error in adding nodes: ${e.message}`, e) + throw new FullstackTestingError(`Error in adding nodes: ${e.message}`, e) } finally { await self.close() } @@ -2316,8 +2364,1063 @@ export class NodeCommand extends BaseCommand { }) } }) + .command({ + command: 'update', + desc: 'Update a node with a specific version of Hedera platform', + 
builder: y => flags.setCommandFlags(y, ...NodeCommand.UPDATE_FLAGS_LIST), + handler: argv => { + nodeCmd.logger.debug('==== Running \'node update\' ===') + nodeCmd.logger.debug(argv) + + nodeCmd.update(argv).then(r => { + nodeCmd.logger.debug('==== Finished running `node update`====') + if (!r) process.exit(1) + }).catch(err => { + nodeCmd.logger.showUserError(err) + process.exit(1) + }) + } + }) + .command({ + command: 'delete', + desc: 'Delete a node with a specific version of Hedera platform', + builder: y => flags.setCommandFlags(y, ...NodeCommand.DELETE_FLAGS_LIST), + handler: argv => { + nodeCmd.logger.debug('==== Running \'node delete\' ===') + nodeCmd.logger.debug(argv) + + nodeCmd.delete(argv).then(r => { + nodeCmd.logger.debug('==== Finished running `node delete`====') + if (!r) process.exit(1) + }).catch(err => { + nodeCmd.logger.showUserError(err) + process.exit(1) + }) + } + }) .demandCommand(1, 'Select a node command') } } } + + async update (argv) { + const self = this + + const tasks = new Listr([ + { + title: 'Initialize', + task: async (ctx, task) => { + self.configManager.update(argv) + + // disable the prompts that we don't want to prompt the user for + prompts.disablePrompts([ + flags.app, + flags.chartDirectory, + flags.devMode, + flags.endpointType, + flags.force, + flags.fstChartVersion, + flags.gossipEndpoints, + flags.gossipPrivateKey, + flags.gossipPublicKey, + flags.grpcEndpoints, + flags.localBuildPath, + flags.newAccountNumber, + flags.newAdminKey, + flags.tlsPrivateKey, + flags.tlsPublicKey + ]) + + await prompts.execute(task, self.configManager, NodeCommand.UPDATE_FLAGS_LIST) + + /** + * @typedef {Object} NodeUpdateConfigClass + * -- flags -- + * @property {string} app + * @property {string} cacheDir + * @property {string} chartDirectory + * @property {boolean} devMode + * @property {string} endpointType + * @property {string} fstChartVersion + * @property {string} gossipEndpoints + * @property {string} gossipPrivateKey + * @property {string} gossipPublicKey + * @property {string} grpcEndpoints + * @property {string} keyFormat + * @property {string} localBuildPath + * @property {string} namespace + * @property {string} newAccountNumber + * @property {string} newAdminKey + * @property {string} nodeId + * @property {string} releaseTag + * @property {string} tlsPrivateKey + * @property {string} tlsPublicKey + * -- extra args -- + * @property {PrivateKey} adminKey + * @property {string[]} allNodeIds + * @property {string} chartPath + * @property {string[]} existingNodeIds + * @property {string} freezeAdminPrivateKey + * @property {string} keysDir + * @property {Object} nodeClient + * @property {Object} podNames + * @property {Map} serviceMap + * @property {string} stagingDir + * @property {string} stagingKeysDir + * @property {PrivateKey} treasuryKey + * -- methods -- + * @property {getUnusedConfigs} getUnusedConfigs + */ + /** + * @callback getUnusedConfigs + * @returns {string[]} + */ + + // create a config object for subsequent steps + const config = /** @type {NodeUpdateConfigClass} **/ this.getConfig(NodeCommand.UPDATE_CONFIGS_NAME, NodeCommand.UPDATE_FLAGS_LIST, + [ + 'allNodeIds', + 'existingNodeIds', + 'freezeAdminPrivateKey', + 'keysDir', + 'nodeClient', + 'podNames', + 'serviceMap', + 'stagingDir', + 'stagingKeysDir', + 'treasuryKey' + ]) + + config.curDate = new Date() + config.existingNodeIds = [] + + await self.initializeSetup(config, self.k8) + + // set config in the context for later tasks to use + ctx.config = config + + ctx.config.chartPath = 
await self.prepareChartPath(ctx.config.chartDirectory, + constants.FULLSTACK_TESTING_CHART, constants.FULLSTACK_DEPLOYMENT_CHART) + + // initialize Node Client with existing network nodes prior to adding the new node which isn't functioning, yet + ctx.config.nodeClient = await this.accountManager.loadNodeClient(ctx.config.namespace) + + const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace) + config.freezeAdminPrivateKey = accountKeys.privateKey + + const treasuryAccount = await this.accountManager.getTreasuryAccountKeys(config.namespace) + const treasuryAccountPrivateKey = treasuryAccount.privateKey + config.treasuryKey = PrivateKey.fromStringED25519(treasuryAccountPrivateKey) + + self.logger.debug('Initialized config', { config }) + } + }, + { + title: 'Identify existing network nodes', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + config.serviceMap = await self.accountManager.getNodeServiceMap( + config.namespace) + for (/** @type {NetworkNodeServices} **/ const networkNodeServices of config.serviceMap.values()) { + config.existingNodeIds.push(networkNodeServices.nodeName) + } + + return self.taskCheckNetworkNodePods(ctx, task, config.existingNodeIds) + } + }, + { + title: 'Prepare gossip endpoints', + task: (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + let endpoints = [] + if (!config.gossipEndpoints) { + if (config.endpointType !== constants.ENDPOINT_TYPE_FQDN) { + throw new FullstackTestingError(`--gossip-endpoints must be set if --endpoint-type is: ${constants.ENDPOINT_TYPE_IP}`) + } + + endpoints = [ + `${Templates.renderFullyQualifiedNetworkPodName(config.namespace, config.nodeId)}:${constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT}`, + `${Templates.renderFullyQualifiedNetworkSvcName(config.namespace, config.nodeId)}:${constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT}` + ] + } else { + endpoints = helpers.splitFlagInput(config.gossipEndpoints) + } + + ctx.gossipEndpoints = this.prepareEndpoints(config.endpointType, endpoints, constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT) + } + }, + { + title: 'Prepare grpc service endpoints', + task: (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + let endpoints = [] + + if (!config.grpcEndpoints) { + if (config.endpointType !== constants.ENDPOINT_TYPE_FQDN) { + throw new FullstackTestingError(`--grpc-endpoints must be set if --endpoint-type is: ${constants.ENDPOINT_TYPE_IP}`) + } + + endpoints = [ + `${Templates.renderFullyQualifiedNetworkSvcName(config.namespace, config.nodeId)}:${constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT}` + ] + } else { + endpoints = helpers.splitFlagInput(config.grpcEndpoints) + } + + ctx.grpcServiceEndpoints = this.prepareEndpoints(config.endpointType, endpoints, constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT) + } + }, + { + title: 'Load node admin key', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + config.adminKey = PrivateKey.fromStringED25519(constants.GENESIS_KEY) + } + }, + { + title: 'Prepare upgrade zip file for node upgrade process', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + ctx.upgradeZipFile = await this.prepareUpgradeZip(config.stagingDir) + ctx.upgradeZipHash = await this.uploadUpgradeZip(ctx.upgradeZipFile, config.nodeClient) + } + }, + { + title: 'Check existing nodes staked amount', + task: async (ctx, task) => { + const config = /** 
@type {NodeUpdateConfigClass} **/ ctx.config + const accountMap = getNodeAccountMap(config.existingNodeIds) + for (const nodeId of config.existingNodeIds) { + const accountId = accountMap.get(nodeId) + await this.accountManager.transferAmount(constants.TREASURY_ACCOUNT_ID, accountId, 1) + } + } + }, + { + title: 'Send node update transaction', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + + const nodeId = Templates.nodeNumberFromNodeId(config.nodeId) - 1 + self.logger.info(`nodeId: ${nodeId}`) + self.logger.info(`config.newAccountNumber: ${config.newAccountNumber}`) + + try { + const nodeUpdateTx = await new NodeUpdateTransaction() + .setNodeId(nodeId) + + if (config.tlsPublicKey && config.tlsPrivateKey) { + self.logger.info(`config.tlsPublicKey: ${config.tlsPublicKey}`) + const tlsCertDer = await this.loadPermCertificate(config.tlsPublicKey) + const tlsCertHash = crypto.createHash('sha384').update(tlsCertDer).digest() + nodeUpdateTx.setCertificateHash(tlsCertHash) + + const publicKeyFile = Templates.renderTLSPemPublicKeyFile(config.nodeId) + const privateKeyFile = Templates.renderTLSPemPrivateKeyFile(config.nodeId) + renameAndCopyFile(config.tlsPublicKey, publicKeyFile, config.keysDir) + renameAndCopyFile(config.tlsPrivateKey, privateKeyFile, config.keysDir) + } + + if (config.gossipPublicKey && config.gossipPrivateKey) { + self.logger.info(`config.gossipPublicKey: ${config.gossipPublicKey}`) + const signingCertDer = await this.loadPermCertificate(config.gossipPublicKey) + nodeUpdateTx.setGossipCaCertificate(signingCertDer) + + const publicKeyFile = Templates.renderGossipPemPublicKeyFile(constants.SIGNING_KEY_PREFIX, config.nodeId) + const privateKeyFile = Templates.renderGossipPemPrivateKeyFile(constants.SIGNING_KEY_PREFIX, config.nodeId) + renameAndCopyFile(config.gossipPublicKey, publicKeyFile, config.keysDir) + renameAndCopyFile(config.gossipPrivateKey, privateKeyFile, config.keysDir) + } + + if (config.newAccountNumber) { + nodeUpdateTx.setAccountId(config.newAccountNumber) + } + + let parsedNewKey + if (config.newAdminKey) { + parsedNewKey = PrivateKey.fromStringED25519(config.newAdminKey) + nodeUpdateTx.setAdminKey(parsedNewKey.publicKey) + } + await nodeUpdateTx.freezeWith(config.nodeClient) + + // config.adminKey contains the original key, needed to sign the transaction + if (config.newAdminKey) { + await nodeUpdateTx.sign(parsedNewKey) + } + const signedTx = await nodeUpdateTx.sign(config.adminKey) + const txResp = await signedTx.execute(config.nodeClient) + const nodeUpdateReceipt = await txResp.getReceipt(config.nodeClient) + this.logger.debug(`NodeUpdateReceipt: ${nodeUpdateReceipt.toString()}`) + } catch (e) { + this.logger.error(`Error updating node to network: ${e.message}`, e) + this.logger.error(e.stack) + throw new FullstackTestingError(`Error updating node to network: ${e.message}`, e) + } + } + }, + { + title: 'Send prepare upgrade transaction', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + await this.prepareUpgradeNetworkNodes(config.freezeAdminPrivateKey, ctx.upgradeZipHash, config.nodeClient) + } + }, + { + title: 'Send freeze upgrade transaction', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + await this.freezeUpgradeNetworkNodes(config.freezeAdminPrivateKey, ctx.upgradeZipHash, config.nodeClient) + } + }, + { + title: 'Download generated files from an existing node', + task: async (ctx, task) => { + const 
config = /** @type {NodeUpdateConfigClass} **/ ctx.config + const node1FullyQualifiedPodName = Templates.renderNetworkPodName(config.existingNodeIds[0]) + + // copy the config.txt file from the node1 upgrade directory + await self.k8.copyFrom(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/config.txt`, config.stagingDir) + + const signedKeyFiles = (await self.k8.listDir(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current`)).filter(file => file.name.startsWith(constants.SIGNING_KEY_PREFIX)) + for (const signedKeyFile of signedKeyFiles) { + await self.k8.execContainer(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, ['bash', '-c', `[[ ! -f "${constants.HEDERA_HAPI_PATH}/data/keys/${signedKeyFile.name}" ]] || cp ${constants.HEDERA_HAPI_PATH}/data/keys/${signedKeyFile.name} ${constants.HEDERA_HAPI_PATH}/data/keys/${signedKeyFile.name}.old`]) + await self.k8.copyFrom(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/${signedKeyFile.name}`, `${config.keysDir}`) + } + } + }, + { + title: 'Check network nodes are frozen', + task: (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + const subTasks = [] + for (const nodeId of config.existingNodeIds) { + subTasks.push({ + title: `Check node: ${chalk.yellow(nodeId)}`, + task: () => self.checkNetworkNodeState(nodeId, 100, 'FREEZE_COMPLETE') + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Get node logs and configs', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + await helpers.getNodeLogs(self.k8, config.namespace) + } + }, + { + title: 'Update chart to use new configMap due to account number change', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + const index = config.existingNodeIds.length + const nodeId = Templates.nodeNumberFromNodeId(config.nodeId) - 1 + let valuesArg = '' + for (let i = 0; i < index; i++) { + if (i !== nodeId) { + valuesArg += ` --set "hedera.nodes[${i}].accountId=${config.serviceMap.get(config.existingNodeIds[i]).accountId}" --set "hedera.nodes[${i}].name=${config.existingNodeIds[i]}"` + } else { + // use new account number for this node id + valuesArg += ` --set "hedera.nodes[${i}].accountId=${config.newAccountNumber}" --set "hedera.nodes[${i}].name=${config.existingNodeIds[i]}"` + } + } + this.profileValuesFile = await self.profileManager.prepareValuesForNodeAdd( + path.join(config.stagingDir, 'config.txt'), + path.join(config.stagingDir, 'templates', 'application.properties')) + if (this.profileValuesFile) { + valuesArg += this.prepareValuesFiles(this.profileValuesFile) + } + + await self.chartManager.upgrade( + config.namespace, + constants.FULLSTACK_DEPLOYMENT_CHART, + config.chartPath, + valuesArg, + config.fstChartVersion + ) + } + }, + { + title: 'Kill nodes to pick up updated configMaps', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + // the updated node will have a new pod ID if its account ID changed which is a label + config.serviceMap = await self.accountManager.getNodeServiceMap( + config.namespace) + for (const /** @type {NetworkNodeServices} **/ service of config.serviceMap.values()) { + await 
self.k8.kubeClient.deleteNamespacedPod(service.nodePodName, config.namespace, undefined, undefined, 1) + } + // again, the pod names will change after the pods are killed + config.serviceMap = await self.accountManager.getNodeServiceMap( + config.namespace) + config.podNames = {} + for (const service of config.serviceMap.values()) { + config.podNames[service.nodeName] = service.nodePodName + } + } + }, + { + title: 'Check node pods are running', + task: + async (ctx, task) => { + const subTasks = [] + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + ctx.config.allNodeIds = ctx.config.existingNodeIds + // nodes + for (const nodeId of config.allNodeIds) { + subTasks.push({ + title: `Check Node: ${chalk.yellow(nodeId)}`, + task: () => + self.k8.waitForPods([constants.POD_PHASE_RUNNING], [ + 'fullstack.hedera.com/type=network-node', + `fullstack.hedera.com/node-name=${nodeId}` + ], 1, 60 * 15, 1000) // timeout 15 minutes + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, // no need to run concurrently since if one node is up, the rest should be up by then + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Prepare staging directory', + task: async (ctx, parentTask) => { + const subTasks = [ + { + title: 'Copy Gossip keys to staging', + task: async (ctx, _) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + + await this.copyGossipKeysToStaging(config.keyFormat, config.keysDir, config.stagingKeysDir, config.allNodeIds) + } + }, + { + title: 'Copy gRPC TLS keys to staging', + task: async (ctx, _) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + for (const nodeId of config.allNodeIds) { + const tlsKeyFiles = self.keyManager.prepareTLSKeyFilePaths(nodeId, config.keysDir) + await self._copyNodeKeys(tlsKeyFiles, config.stagingKeysDir) + } + } + } + ] + + return parentTask.newListr(subTasks, { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION + }) + } + }, + { + title: 'Fetch platform software into network nodes', + task: + async (ctx, task) => { + // without this sleep, copy software from local build to container sometimes fail + await sleep(15000) + ctx.config.allNodeIds = ctx.config.existingNodeIds + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + return self.fetchLocalOrReleasedPlatformSoftware(config.allNodeIds, config.podNames, config.releaseTag, task, config.localBuildPath) + } + }, + { + title: 'Setup new network node', + task: async (ctx, parentTask) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + + const subTasks = [] + for (const nodeId of config.allNodeIds) { + const podName = config.podNames[nodeId] + subTasks.push({ + title: `Node: ${chalk.yellow(nodeId)}`, + task: () => + self.platformInstaller.taskInstall(podName, config.stagingDir, config.allNodeIds, config.keyFormat, config.force) + }) + } + + // set up the sub-tasks + return parentTask.newListr(subTasks, { + concurrent: true, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION + }) + } + }, + { + title: 'Start network nodes', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + const subTasks = [] + // ctx.config.allNodeIds = ctx.config.existingNodeIds + self.startNodes(config.podNames, config.allNodeIds, subTasks) + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: true, + rendererOptions: { + collapseSubtasks: false, + timer: 
constants.LISTR_DEFAULT_RENDERER_TIMER_OPTION + } + }) + } + }, + { + title: 'Check all nodes are ACTIVE', + task: async (ctx, task) => { + const subTasks = [] + // sleep for 30 seconds to give time for the logs to roll over to prevent capturing an invalid "ACTIVE" string + await sleep(30000) + for (const nodeId of ctx.config.allNodeIds) { + subTasks.push({ + title: `Check node: ${chalk.yellow(nodeId)}`, + task: () => self.checkNetworkNodeState(nodeId, 200) + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Check all node proxies are ACTIVE', + // this is more reliable than checking the nodes logs for ACTIVE, as the + // logs will have a lot of white noise from being behind + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + const subTasks = [] + for (const nodeId of config.allNodeIds) { + subTasks.push({ + title: `Check proxy for node: ${chalk.yellow(nodeId)}`, + task: async () => await self.k8.waitForPodReady( + [`app=haproxy-${nodeId}`, 'fullstack.hedera.com/type=haproxy'], + 1, 300, 2000) + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Trigger stake weight calculate', + task: async (ctx, task) => { + const config = /** @type {NodeUpdateConfigClass} **/ ctx.config + self.logger.info('sleep 60 seconds for the handler to be able to trigger the network node stake weight recalculate') + await sleep(60000) + const accountMap = getNodeAccountMap(config.allNodeIds) + // update map with current account ids + accountMap.set(config.nodeId, config.newAccountNumber) + + // update _nodeClient with the new service map since one of the account number has changed + await this.accountManager.refreshNodeClient(config.namespace) + + // send some write transactions to invoke the handler that will trigger the stake weight recalculate + for (const nodeId of config.allNodeIds) { + const accountId = accountMap.get(nodeId) + config.nodeClient.setOperator(TREASURY_ACCOUNT_ID, config.treasuryKey) + self.logger.info(`Sending 1 tinybar to account: ${accountId}`) + await this.accountManager.transferAmount(constants.TREASURY_ACCOUNT_ID, accountId, 1) + } + } + }, + { + title: 'Finalize', + task: (ctx, _) => { + // reset flags so that keys are not regenerated later + self.configManager.setFlag(flags.generateGossipKeys, false) + self.configManager.setFlag(flags.generateTlsKeys, false) + self.configManager.persist() + } + } + ], { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION + }) + + try { + await tasks.run() + } catch (e) { + self.logger.error(`Error in updating nodes: ${e.message}`, e) + this.logger.error(e.stack) + throw new FullstackTestingError(`Error in updating nodes: ${e.message}`, e) + } finally { + await self.close() + } + + return true + } + + async delete (argv) { + const self = this + + const tasks = new Listr([ + { + title: 'Initialize', + task: async (ctx, task) => { + self.configManager.update(argv) + + // disable the prompts that we don't want to prompt the user for + prompts.disablePrompts([ + flags.app, + flags.chainId, + flags.chartDirectory, + flags.devMode, + flags.endpointType, + flags.force, + flags.fstChartVersion, + flags.localBuildPath + ]) + + await prompts.execute(task, self.configManager, NodeCommand.DELETE_FLAGS_LIST) + + /** + * @typedef {Object} NodeDeleteConfigClass 
+ * -- flags -- + * @property {string} app + * @property {string} cacheDir + * @property {string} charDirectory + * @property {boolean} devMode + * @property {string} endpointType + * @property {string} fstChartVersion + * @property {string} keyFormat + * @property {string} localBuildPath + * @property {string} namespace + * @property {string} nodeId + * @property {string} releaseTag + * -- extra args -- + * @property {PrivateKey} adminKey + * @property {string[]} allNodeIds + * @property {string} chartPath + * @property {string[]} existingNodeIds + * @property {string} freezeAdminPrivateKey + * @property {string} keysDir + * @property {Object} nodeClient + * @property {Object} podNames + * @property {Map} serviceMap + * @property {string} stagingDir + * @property {string} stagingKeysDir + * @property {PrivateKey} treasuryKey + * -- methods -- + * @property {getUnusedConfigs} getUnusedConfigs + */ + /** + * @callback getUnusedConfigs + * @returns {string[]} + */ + + // create a config object for subsequent steps + const config = /** @type {NodeDeleteConfigClass} **/ this.getConfig(NodeCommand.DELETE_CONFIGS_NAME, NodeCommand.DELETE_FLAGS_LIST, + [ + 'adminKey', + 'allNodeIds', + 'existingNodeIds', + 'freezeAdminPrivateKey', + 'keysDir', + 'nodeClient', + 'podNames', + 'serviceMap', + 'stagingDir', + 'stagingKeysDir', + 'treasuryKey' + ]) + + config.curDate = new Date() + config.existingNodeIds = [] + + await self.initializeSetup(config, self.k8) + + // set config in the context for later tasks to use + ctx.config = config + + ctx.config.chartPath = await self.prepareChartPath(ctx.config.chartDirectory, + constants.FULLSTACK_TESTING_CHART, constants.FULLSTACK_DEPLOYMENT_CHART) + + // initialize Node Client with existing network nodes prior to adding the new node which isn't functioning, yet + ctx.config.nodeClient = await this.accountManager.loadNodeClient(ctx.config.namespace) + + const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace) + config.freezeAdminPrivateKey = accountKeys.privateKey + + const treasuryAccount = await this.accountManager.getTreasuryAccountKeys(config.namespace) + const treasuryAccountPrivateKey = treasuryAccount.privateKey + config.treasuryKey = PrivateKey.fromStringED25519(treasuryAccountPrivateKey) + + self.logger.debug('Initialized config', { config }) + } + }, + { + title: 'Identify existing network nodes', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + config.serviceMap = await self.accountManager.getNodeServiceMap( + config.namespace) + for (/** @type {NetworkNodeServices} **/ const networkNodeServices of config.serviceMap.values()) { + config.existingNodeIds.push(networkNodeServices.nodeName) + } + + return self.taskCheckNetworkNodePods(ctx, task, config.existingNodeIds) + } + }, + { + title: 'Load node admin key', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + config.adminKey = PrivateKey.fromStringED25519(constants.GENESIS_KEY) + } + }, + { + title: 'Prepare upgrade zip file for node upgrade process', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + ctx.upgradeZipFile = await this.prepareUpgradeZip(config.stagingDir) + ctx.upgradeZipHash = await this.uploadUpgradeZip(ctx.upgradeZipFile, config.nodeClient) + } + }, + { + title: 'Check existing nodes staked amount', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ 
ctx.config + const accountMap = getNodeAccountMap(config.existingNodeIds) + for (const nodeId of config.existingNodeIds) { + const accountId = accountMap.get(nodeId) + await this.accountManager.transferAmount(constants.TREASURY_ACCOUNT_ID, accountId, 1) + } + } + }, + { + title: 'Send node delete transaction', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + + try { + const accountMap = getNodeAccountMap(config.existingNodeIds) + const deleteAccountId = accountMap.get(ctx.config.nodeId) + this.logger.debug(`Deleting node: ${ctx.config.nodeId} with account: ${deleteAccountId}`) + const nodeId = Templates.nodeNumberFromNodeId(ctx.config.nodeId) - 1 + const nodeDeleteTx = await new NodeDeleteTransaction() + .setNodeId(nodeId) + .freezeWith(config.nodeClient) + + const signedTx = await nodeDeleteTx.sign(config.adminKey) + const txResp = await signedTx.execute(config.nodeClient) + const nodeUpdateReceipt = await txResp.getReceipt(config.nodeClient) + this.logger.debug(`NodeUpdateReceipt: ${nodeUpdateReceipt.toString()}`) + } catch (e) { + this.logger.error(`Error deleting node from network: ${e.message}`, e) + throw new FullstackTestingError(`Error deleting node from network: ${e.message}`, e) + } + } + }, + { + title: 'Send prepare upgrade transaction', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + await this.prepareUpgradeNetworkNodes(config.freezeAdminPrivateKey, ctx.upgradeZipHash, config.nodeClient) + } + }, + { + title: 'Send freeze upgrade transaction', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + await this.freezeUpgradeNetworkNodes(config.freezeAdminPrivateKey, ctx.upgradeZipHash, config.nodeClient) + } + }, + { + title: 'Check network nodes are frozen', + task: (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + const subTasks = [] + for (const nodeId of config.existingNodeIds) { + subTasks.push({ + title: `Check node: ${chalk.yellow(nodeId)}`, + task: () => self.checkNetworkNodeState(nodeId, 100, 'FREEZE_COMPLETE') + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Download new config.txt', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + const node1FullyQualifiedPodName = Templates.renderNetworkPodName(config.existingNodeIds[0]) + + // copy the config.txt file from the node1 upgrade directory + await self.k8.copyFrom(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/config.txt`, config.stagingDir) + } + }, + { + title: 'Get node logs and configs', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + await helpers.getNodeLogs(self.k8, config.namespace) + } + }, + { + title: 'Update chart to use new configMap', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + const index = config.existingNodeIds.length + let valuesArg = '' + for (let i = 0; i < index; i++) { + valuesArg += ` --set "hedera.nodes[${i}].accountId=${config.serviceMap.get(config.existingNodeIds[i]).accountId}" --set "hedera.nodes[${i}].name=${config.existingNodeIds[i]}"` + } + + this.profileValuesFile = await self.profileManager.prepareValuesForNodeAdd( + path.join(config.stagingDir, 'config.txt'), + 
path.join(config.stagingDir, 'templates', 'application.properties')) + if (this.profileValuesFile) { + valuesArg += this.prepareValuesFiles(this.profileValuesFile) + } + + await self.chartManager.upgrade( + config.namespace, + constants.FULLSTACK_DEPLOYMENT_CHART, + config.chartPath, + valuesArg, + config.fstChartVersion + ) + } + }, + { + title: 'Kill nodes to pick up updated configMaps', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + for (const /** @type {NetworkNodeServices} **/ service of config.serviceMap.values()) { + await self.k8.kubeClient.deleteNamespacedPod(service.nodePodName, config.namespace, undefined, undefined, 1) + } + } + }, + { + title: 'Check node pods are running', + task: + async (ctx, task) => { + await sleep(20000) + const subTasks = [] + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + + // remove nodeId from existingNodeIds + ctx.config.allNodeIds = ctx.config.existingNodeIds.filter(nodeId => nodeId !== ctx.config.nodeId) + + // nodes + for (const nodeId of config.allNodeIds) { + subTasks.push({ + title: `Check Node: ${chalk.yellow(nodeId)}`, + task: () => + self.k8.waitForPods([constants.POD_PHASE_RUNNING], [ + 'fullstack.hedera.com/type=network-node', + `fullstack.hedera.com/node-name=${nodeId}` + ], 1, 60 * 15, 1000) // timeout 15 minutes + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, // no need to run concurrently since if one node is up, the rest should be up by then + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Prepare staging directory', + task: async (ctx, parentTask) => { + const subTasks = [ + { + title: 'Copy Gossip keys to staging', + task: async (ctx, _) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + + await this.copyGossipKeysToStaging(config.keyFormat, config.keysDir, config.stagingKeysDir, config.allNodeIds) + } + }, + { + title: 'Copy gRPC TLS keys to staging', + task: async (ctx, _) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + for (const nodeId of config.allNodeIds) { + const tlsKeyFiles = self.keyManager.prepareTLSKeyFilePaths(nodeId, config.keysDir) + await self._copyNodeKeys(tlsKeyFiles, config.stagingKeysDir) + } + } + } + ] + + return parentTask.newListr(subTasks, { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION + }) + } + }, + { + title: 'Fetch platform software into all network nodes', + task: + async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + config.serviceMap = await self.accountManager.getNodeServiceMap( + config.namespace) + config.podNames[config.nodeId] = config.serviceMap.get( + config.nodeId).nodePodName + return self.fetchLocalOrReleasedPlatformSoftware(config.allNodeIds, config.podNames, config.releaseTag, task, config.localBuildPath) + } + }, + { + title: 'Setup new network node', + task: async (ctx, parentTask) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + + // remove nodeId from existingNodeIds + ctx.config.allNodeIds = ctx.config.existingNodeIds.filter(nodeId => nodeId !== ctx.config.nodeId) + + const subTasks = [] + for (const nodeId of config.allNodeIds) { + const podName = config.podNames[nodeId] + subTasks.push({ + title: `Node: ${chalk.yellow(nodeId)}`, + task: () => + self.platformInstaller.taskInstall(podName, config.stagingDir, config.allNodeIds, config.keyFormat, config.force) + }) + } + + // set up the sub-tasks + 
return parentTask.newListr(subTasks, { + concurrent: true, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION + }) + } + }, + { + title: 'Start network nodes', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + const subTasks = [] + + self.startNodes(config.podNames, config.allNodeIds, subTasks) + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: true, + rendererOptions: { + collapseSubtasks: false, + timer: constants.LISTR_DEFAULT_RENDERER_TIMER_OPTION + } + }) + } + }, + { + title: 'Check all nodes are ACTIVE', + task: async (ctx, task) => { + const subTasks = [] + // sleep for 30 seconds to give time for the logs to roll over to prevent capturing an invalid "ACTIVE" string + await sleep(30000) + for (const nodeId of ctx.config.allNodeIds) { + subTasks.push({ + title: `Check node: ${chalk.yellow(nodeId)}`, + task: () => self.checkNetworkNodeState(nodeId, 200) + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Check all node proxies are ACTIVE', + // this is more reliable than checking the nodes logs for ACTIVE, as the + // logs will have a lot of white noise from being behind + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + const subTasks = [] + for (const nodeId of config.allNodeIds) { + subTasks.push({ + title: `Check proxy for node: ${chalk.yellow(nodeId)}`, + task: async () => await self.k8.waitForPodReady( + [`app=haproxy-${nodeId}`, 'fullstack.hedera.com/type=haproxy'], + 1, 300, 2000) + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Trigger stake weight calculate', + task: async (ctx, task) => { + const config = /** @type {NodeDeleteConfigClass} **/ ctx.config + // sleep 60 seconds for the handler to be able to trigger the network node stake weight recalculate + await sleep(60000) + const accountMap = getNodeAccountMap(config.allNodeIds) + // send some write transactions to invoke the handler that will trigger the stake weight recalculate + for (const nodeId of config.allNodeIds) { + const accountId = accountMap.get(nodeId) + config.nodeClient.setOperator(TREASURY_ACCOUNT_ID, config.treasuryKey) + await this.accountManager.transferAmount(constants.TREASURY_ACCOUNT_ID, accountId, 1) + } + } + }, + { + title: 'Finalize', + task: (ctx, _) => { + // reset flags so that keys are not regenerated later + self.configManager.setFlag(flags.generateGossipKeys, false) + self.configManager.setFlag(flags.generateTlsKeys, false) + self.configManager.persist() + } + } + ], { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION + }) + + try { + await tasks.run() + } catch (e) { + self.logger.error(`Error in deleting nodes: ${e.message}`, e) + throw new FullstackTestingError(`Error in deleting nodes: ${e.message}`, e) + } finally { + await self.close() + } + + return true + } } diff --git a/src/core/account_manager.mjs b/src/core/account_manager.mjs index dec20a943..848e36d28 100644 --- a/src/core/account_manager.mjs +++ b/src/core/account_manager.mjs @@ -155,16 +155,25 @@ export class AccountManager { */ async loadNodeClient (namespace) { if (!this._nodeClient || this._nodeClient.isClientShutDown) { - const treasuryAccountInfo = await this.getTreasuryAccountKeys(namespace) - const networkNodeServicesMap 
= await this.getNodeServiceMap(namespace) - - this._nodeClient = await this._getNodeClient(namespace, - networkNodeServicesMap, treasuryAccountInfo.accountId, treasuryAccountInfo.privateKey) + await this.refreshNodeClient(namespace) } return this._nodeClient } + /** + * loads and initializes the Node Client + * @param namespace the namespace of the network + * @returns {Promise<void>} + */ + async refreshNodeClient (namespace) { + const treasuryAccountInfo = await this.getTreasuryAccountKeys(namespace) + const networkNodeServicesMap = await this.getNodeServiceMap(namespace) + + this._nodeClient = await this._getNodeClient(namespace, + networkNodeServicesMap, treasuryAccountInfo.accountId, treasuryAccountInfo.privateKey) + } + /** * if the load balancer IP is not set, then we should use the local host port forward * @param {NetworkNodeServices} networkNodeServices @@ -218,7 +227,7 @@ export class AccountManager { const port = networkNodeService.haProxyGrpcPort const targetPort = usePortForward ? localPort : port - if (usePortForward) { + if (usePortForward && this._portForwards.length < networkNodeServicesMap.size) { this._portForwards.push(await this.k8.portForward(networkNodeService.haProxyPodName, localPort, port)) } diff --git a/src/core/helpers.mjs b/src/core/helpers.mjs index 2cf402f07..023382cee 100644 --- a/src/core/helpers.mjs +++ b/src/core/helpers.mjs @@ -266,3 +266,23 @@ export function parseIpAddressToUint8Array (ipAddress) { return uint8Array } + +/** + * If the basename of the src file does not match the expected basename, rename it first, then copy it to the destination directory + * @param srcFilePath path to the source file + * @param expectedBaseName the basename the file is expected to have + * @param destDir the destination directory + */ +export function renameAndCopyFile (srcFilePath, expectedBaseName, destDir) { + const srcDir = path.dirname(srcFilePath) + if (path.basename(srcFilePath) !== expectedBaseName) { + fs.renameSync(srcFilePath, path.join(srcDir, expectedBaseName)) + } + // copy the file (now carrying the expected basename, e.g. a public or private key) to the destination directory + try { + fs.copyFileSync(path.join(srcDir, expectedBaseName), path.join(destDir, expectedBaseName)) + } catch (e) { + // throw synchronously so the caller sees the failure; a callback-based fs.copyFile would swallow a throw from its callback + throw new FullstackTestingError(`Error copying file: ${e.message}`, e) + } +} diff --git a/src/core/profile_manager.mjs b/src/core/profile_manager.mjs index 3d3b258d4..3ab1a0752 100644 --- a/src/core/profile_manager.mjs +++ b/src/core/profile_manager.mjs @@ -435,7 +435,7 @@ export class ProfileManager { const account = nodeAccountMap.get(nodeID) if (releaseVersion.minor >= 40) { - configLines.push(`address, ${nodeSeq}, ${nodeName}, ${nodeName}, ${nodeStakeAmount}, ${internalIP}, ${internalPort}, ${externalIP}, ${externalPort}, ${account}`) + configLines.push(`address, ${nodeSeq}, ${nodeSeq}, ${nodeName}, ${nodeStakeAmount}, ${internalIP}, ${internalPort}, ${externalIP}, ${externalPort}, ${account}`) } else { configLines.push(`address, ${nodeSeq}, ${nodeName}, ${nodeStakeAmount}, ${internalIP}, ${internalPort}, ${externalIP}, ${externalPort}, ${account}`) } diff --git a/test/e2e/commands/node-delete.test.mjs b/test/e2e/commands/node-delete.test.mjs new file mode 100644 index 000000000..af4441a92 --- /dev/null +++ b/test/e2e/commands/node-delete.test.mjs @@ -0,0 +1,88 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @jest-environment steps + */ +import { afterAll, describe, expect, it } from '@jest/globals' +import { flags } from '../../../src/commands/index.mjs' +import { constants } from '../../../src/core/index.mjs' +import { + accountCreationShouldSucceed, + balanceQueryShouldSucceed, + bootstrapNetwork, + getDefaultArgv, + HEDERA_PLATFORM_VERSION_TAG +} from '../../test_util.js' +import { getNodeLogs, getTmpDir } from '../../../src/core/helpers.mjs' +import { NodeCommand } from '../../../src/commands/node.mjs' +import { HEDERA_HAPI_PATH, ROOT_CONTAINER } from '../../../src/core/constants.mjs' +import fs from 'fs' + +describe('Node delete', () => { + const namespace = 'node-delete' + const nodeId = 'node1' + const argv = getDefaultArgv() + argv[flags.keyFormat.name] = constants.KEY_FORMAT_PEM + argv[flags.nodeIDs.name] = 'node1,node2,node3,node4' + argv[flags.nodeID.name] = nodeId + argv[flags.generateGossipKeys.name] = true + argv[flags.generateTlsKeys.name] = true + argv[flags.persistentVolumeClaims.name] = true + argv[flags.keyFormat.name] = constants.KEY_FORMAT_PEM + // set the env variable SOLO_FST_CHARTS_DIR if the developer wants to use local FST charts + argv[flags.chartDirectory.name] = process.env.SOLO_FST_CHARTS_DIR ? process.env.SOLO_FST_CHARTS_DIR : undefined + argv[flags.releaseTag.name] = HEDERA_PLATFORM_VERSION_TAG + argv[flags.namespace.name] = namespace + const bootstrapResp = bootstrapNetwork(namespace, argv) + const nodeCmd = bootstrapResp.cmd.nodeCmd + const accountCmd = bootstrapResp.cmd.accountCmd + const k8 = bootstrapResp.opts.k8 + + afterAll(async () => { + await getNodeLogs(k8, namespace) + // await k8.deleteNamespace(namespace) + }, 600000) + + it('should succeed with init command', async () => { + const status = await accountCmd.init(argv) + expect(status).toBeTruthy() + }, 450000) + + it('should delete a node from the network successfully', async () => { + await nodeCmd.delete(argv) + expect(nodeCmd.getUnusedConfigs(NodeCommand.DELETE_CONFIGS_NAME)).toEqual([ + flags.app.constName, + flags.devMode.constName, + flags.endpointType.constName + ]) + + await nodeCmd.accountManager.close() + }, 600000) + + balanceQueryShouldSucceed(nodeCmd.accountManager, nodeCmd, namespace) + + accountCreationShouldSucceed(nodeCmd.accountManager, nodeCmd, namespace) + + it('config.txt should no longer contain the removed node id', async () => { + // read config.txt from the first network node; it should no longer contain the value of nodeId + const pods = await k8.getPodsByLabel(['fullstack.hedera.com/type=network-node']) + const podName = pods[0].metadata.name + const tmpDir = getTmpDir() + await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir) + const configTxt = fs.readFileSync(`${tmpDir}/config.txt`, 'utf8') + console.log('config.txt:', configTxt) + expect(configTxt).not.toContain(nodeId) + }, 600000) +}) diff --git a/test/e2e/commands/node-update.test.mjs b/test/e2e/commands/node-update.test.mjs new file mode 100644 index 000000000..9d5ec0c6c --- /dev/null +++ b/test/e2e/commands/node-update.test.mjs @@ -0,0 +1,133 @@ +/**
+ * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @jest-environment steps + */ +import { afterAll, describe, expect, it } from '@jest/globals' +import { flags } from '../../../src/commands/index.mjs' +import { constants } from '../../../src/core/index.mjs' +import { + // accountCreationShouldSucceed, + // balanceQueryShouldSucceed, + bootstrapNetwork, + getDefaultArgv, getNodeIdsPrivateKeysHash, getTmpDir, + HEDERA_PLATFORM_VERSION_TAG +} from '../../test_util.js' +import { getNodeLogs } from '../../../src/core/helpers.mjs' +import { NodeCommand } from '../../../src/commands/node.mjs' +import { HEDERA_HAPI_PATH, ROOT_CONTAINER } from '../../../src/core/constants.mjs' +import fs from 'fs' + +describe('Node update', () => { + const defaultTimeout = 120000 + const namespace = 'node-update' + const updateNodeId = 'node2' + const newAccountId = '0.0.7' + const argv = getDefaultArgv() + argv[flags.keyFormat.name] = constants.KEY_FORMAT_PEM + argv[flags.nodeIDs.name] = 'node1,node2,node3' + argv[flags.nodeID.name] = updateNodeId + + argv[flags.newAccountNumber.name] = newAccountId + argv[flags.newAdminKey.name] = '302e020100300506032b6570042204200cde8d512569610f184b8b399e91e46899805c6171f7c2b8666d2a417bcc66c2' + + argv[flags.generateGossipKeys.name] = true + argv[flags.generateTlsKeys.name] = true + argv[flags.keyFormat.name] = constants.KEY_FORMAT_PEM + // set the env variable SOLO_FST_CHARTS_DIR if the developer wants to use local FST charts + argv[flags.chartDirectory.name] = process.env.SOLO_FST_CHARTS_DIR ?
process.env.SOLO_FST_CHARTS_DIR : undefined + argv[flags.releaseTag.name] = HEDERA_PLATFORM_VERSION_TAG + argv[flags.namespace.name] = namespace + argv[flags.persistentVolumeClaims.name] = true + const bootstrapResp = bootstrapNetwork(namespace, argv) + const nodeCmd = bootstrapResp.cmd.nodeCmd + const accountCmd = bootstrapResp.cmd.accountCmd + const k8 = bootstrapResp.opts.k8 + let existingServiceMap + let existingNodeIdsPrivateKeysHash + + afterAll(async () => { + await getNodeLogs(k8, namespace) + await k8.deleteNamespace(namespace) + }, 600000) + + it('cache current version of private keys', async () => { + existingServiceMap = await nodeCmd.accountManager.getNodeServiceMap(namespace) + existingNodeIdsPrivateKeysHash = await getNodeIdsPrivateKeysHash(existingServiceMap, namespace, constants.KEY_FORMAT_PEM, k8, getTmpDir()) + }, defaultTimeout) + + it('should succeed with init command', async () => { + const status = await accountCmd.init(argv) + expect(status).toBeTruthy() + }, 450000) + + it.skip('should update properties of an existing node successfully', async () => { + // generate gossip and tls keys for the updated node + const tmpDir = getTmpDir() + + const signingKey = await nodeCmd.keyManager.generateSigningKey(updateNodeId) + const signingKeyFiles = await nodeCmd.keyManager.storeSigningKey(updateNodeId, signingKey, tmpDir) + nodeCmd.logger.debug(`generated test gossip signing keys for node ${updateNodeId} : ${signingKeyFiles.certificateFile}`) + argv[flags.gossipPublicKey.name] = signingKeyFiles.certificateFile + argv[flags.gossipPrivateKey.name] = signingKeyFiles.privateKeyFile + + const tlsKey = await nodeCmd.keyManager.generateGrpcTLSKey(updateNodeId) + const tlsKeyFiles = await nodeCmd.keyManager.storeTLSKey(updateNodeId, tlsKey, tmpDir) + nodeCmd.logger.debug(`generated test TLS keys for node ${updateNodeId} : ${tlsKeyFiles.certificateFile}`) + argv[flags.tlsPublicKey.name] = tlsKeyFiles.certificateFile + argv[flags.tlsPrivateKey.name] = tlsKeyFiles.privateKeyFile + + await nodeCmd.update(argv) + expect(nodeCmd.getUnusedConfigs(NodeCommand.UPDATE_CONFIGS_NAME)).toEqual([ + flags.app.constName, + flags.devMode.constName + ]) + await nodeCmd.accountManager.close() + }, 1800000) + + // balanceQueryShouldSucceed(nodeCmd.accountManager, nodeCmd, namespace) + // + // accountCreationShouldSucceed(nodeCmd.accountManager, nodeCmd, namespace) + + it.skip('signing key and tls key should not match previous ones', async () => { + const currentNodeIdsPrivateKeysHash = await getNodeIdsPrivateKeysHash(existingServiceMap, namespace, constants.KEY_FORMAT_PEM, k8, getTmpDir()) + + for (const [nodeId, existingKeyHashMap] of existingNodeIdsPrivateKeysHash.entries()) { + const currentNodeKeyHashMap = currentNodeIdsPrivateKeysHash.get(nodeId) + + for (const [keyFileName, existingKeyHash] of existingKeyHashMap.entries()) { + if (nodeId === updateNodeId && + (keyFileName.startsWith(constants.SIGNING_KEY_PREFIX) || keyFileName.startsWith('hedera'))) { + expect(`${nodeId}:${keyFileName}:${currentNodeKeyHashMap.get(keyFileName)}`).not.toEqual( + `${nodeId}:${keyFileName}:${existingKeyHash}`) + } else { + expect(`${nodeId}:${keyFileName}:${currentNodeKeyHashMap.get(keyFileName)}`).toEqual( + `${nodeId}:${keyFileName}:${existingKeyHash}`) + } + } + } + }, defaultTimeout) + + it.skip('config.txt should be changed with new account id', async () => { + // read config.txt from the first network node; it should now contain the value of newAccountId + const pods = await
k8.getPodsByLabel(['fullstack.hedera.com/type=network-node']) + const podName = pods[0].metadata.name + const tmpDir = getTmpDir() + await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir) + const configTxt = fs.readFileSync(`${tmpDir}/config.txt`, 'utf8') + console.log('config.txt:', configTxt) + expect(configTxt).toContain(newAccountId) + }, 600000) +}) diff --git a/test/test_util.js b/test/test_util.js index ee9dbd4e9..196591301 100644 --- a/test/test_util.js +++ b/test/test_util.js @@ -54,7 +54,7 @@ import { AccountCommand } from '../src/commands/account.mjs' export const testLogger = logging.NewLogger('debug', true) export const TEST_CLUSTER = 'solo-e2e' -export const HEDERA_PLATFORM_VERSION_TAG = 'v0.53.0-release-0.53.xff7c43d' +export const HEDERA_PLATFORM_VERSION_TAG = 'v0.53.2' export function getTestCacheDir (testName) { const baseDir = 'test/data/tmp' diff --git a/version.mjs b/version.mjs index 75b7c406f..4881aedf4 100644 --- a/version.mjs +++ b/version.mjs @@ -22,4 +22,4 @@ export const JAVA_VERSION = '21.0.1+12' export const HELM_VERSION = 'v3.14.2' export const FST_CHART_VERSION = 'v0.29.1' -export const HEDERA_PLATFORM_VERSION = 'v0.53.0-release-0.53.xff7c43d' +export const HEDERA_PLATFORM_VERSION = 'v0.53.2'
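Note (not part of the patch): the new AccountManager.refreshNodeClient() introduced above always rebuilds the cached gRPC client from the current service map, whereas loadNodeClient() reuses the cached client unless it was shut down. A minimal, hypothetical usage sketch, assuming an AccountManager instance and a namespace string from the calling command's context:

// illustrative only; reconnectAfterTopologyChange is not a function in this patch
async function reconnectAfterTopologyChange (accountManager, namespace) {
  // rebuild the cached node client so it no longer points at endpoints of a deleted or updated node
  await accountManager.refreshNodeClient(namespace)
  // loadNodeClient() now returns the freshly initialized client
  return accountManager.loadNodeClient(namespace)
}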