diff --git a/.eslintrc.yml b/.eslintrc.yml index fd12b9363..87dca16cf 100644 --- a/.eslintrc.yml +++ b/.eslintrc.yml @@ -1,6 +1,7 @@ env: browser: true es2021: true + jest: true extends: standard parserOptions: ecmaVersion: latest @@ -30,4 +31,4 @@ overrides: - files: ["*.mjs"] parserOptions: ecmaVersion: latest - sourceType: module \ No newline at end of file + sourceType: module diff --git a/jest.config.mjs b/jest.config.mjs index 341553719..5b2bbd9b4 100644 --- a/jest.config.mjs +++ b/jest.config.mjs @@ -19,7 +19,8 @@ const config = { moduleFileExtensions: ['js', 'mjs'], verbose: true, reporters: [['default', { summaryThreshold: 1 }], 'jest-junit'], - testSequencer: './test/testSequencer.mjs' + testSequencer: './test/testSequencer.mjs', + setupFilesAfterEnv: ['jest-expect-message'] } export default config diff --git a/package-lock.json b/package-lock.json index 7c239e5a5..4f43a8800 100644 --- a/package-lock.json +++ b/package-lock.json @@ -55,6 +55,7 @@ "eslint-plugin-promise": "^6.6.0", "jest": "^29.7.0", "jest-environment-steps": "^1.1.1", + "jest-expect-message": "^1.1.3", "jest-junit": "^16.0.0", "nyc": "^17.0.0", "remark-cli": "^12.0.1", @@ -6941,6 +6942,12 @@ "jest": "^27.0.3 || ^28.1.0 || ^29.0.0" } }, + "node_modules/jest-expect-message": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/jest-expect-message/-/jest-expect-message-1.1.3.tgz", + "integrity": "sha512-bTK77T4P+zto+XepAX3low8XVQxDgaEqh3jSTQOG8qvPpD69LsIdyJTa+RmnJh3HNSzJng62/44RPPc7OIlFxg==", + "dev": true + }, "node_modules/jest-get-type": { "version": "29.6.3", "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", diff --git a/package.json b/package.json index 29bf59a21..df37f8cdb 100644 --- a/package.json +++ b/package.json @@ -76,6 +76,7 @@ "eslint-plugin-promise": "^6.6.0", "jest": "^29.7.0", "jest-environment-steps": "^1.1.1", + "jest-expect-message": "^1.1.3", "jest-junit": "^16.0.0", "nyc": "^17.0.0", "remark-cli": "^12.0.1", diff --git a/resources/templates/bootstrap.properties b/resources/templates/bootstrap.properties index 74d317b61..9e85f2be2 100644 --- a/resources/templates/bootstrap.properties +++ b/resources/templates/bootstrap.properties @@ -1,5 +1,7 @@ +# TODO: bootstrap.properties is scheduled to go away, will need to delete this and related code ledger.id=0x01 netty.mode=DEV +# TODO: if chain ID is passed in as a flag, this is not getting updated contracts.chainId=298 hedera.recordStream.logPeriod=1 balances.exportPeriodSecs=400 diff --git a/src/commands/base.mjs b/src/commands/base.mjs index d7377c488..e6b41b199 100644 --- a/src/commands/base.mjs +++ b/src/commands/base.mjs @@ -57,7 +57,7 @@ export class BaseCommand extends ShellRunner { super(opts.logger) this.helm = opts.helm - this.k8 = opts.k8 + this.k8 = /** @type {K8} **/ opts.k8 this.chartManager = opts.chartManager this.configManager = opts.configManager this.depManager = opts.depManager diff --git a/src/commands/flags.mjs b/src/commands/flags.mjs index e8de5b036..847eb9150 100644 --- a/src/commands/flags.mjs +++ b/src/commands/flags.mjs @@ -310,7 +310,7 @@ export const chainId = { name: 'ledger-id', definition: { describe: 'Ledger ID (a.k.a. 
Chain ID)', - defaultValue: '298', // Ref: https://github.com/hashgraph/hedera-json-rpc-relay#configuration + defaultValue: constants.HEDERA_CHAIN_ID, // Ref: https://github.com/hashgraph/hedera-json-rpc-relay#configuration alias: 'l', type: 'string' } @@ -511,7 +511,7 @@ export const app = { name: 'app', definition: { describe: 'Testing app name', - defaultValue: '', + defaultValue: constants.HEDERA_APP_NAME, type: 'string' } } @@ -656,6 +656,17 @@ export const endpointType = { } } +/** @type {CommandFlag} **/ +export const persistentVolumeClaims = { + constName: 'persistentVolumeClaims', + name: 'pvcs', + definition: { + describe: 'Enable persistent volume claims to store data outside the pod, required for node add', + defaultValue: false, + type: 'boolean' + } +} + /** @type {CommandFlag[]} **/ export const allFlags = [ accountId, @@ -700,6 +711,7 @@ export const allFlags = [ nodeIDs, operatorId, operatorKey, + persistentVolumeClaims, privateKey, profileFile, profileName, diff --git a/src/commands/network.mjs b/src/commands/network.mjs index 3c57a127c..e1db35598 100644 --- a/src/commands/network.mjs +++ b/src/commands/network.mjs @@ -40,7 +40,12 @@ export class NetworkCommand extends BaseCommand { static get DEPLOY_FLAGS_LIST () { return [ + flags.apiPermissionProperties, + flags.app, flags.applicationEnv, + flags.applicationProperties, + flags.bootstrapProperties, + flags.chainId, flags.chartDirectory, flags.deployHederaExplorer, flags.deployMirrorNode, @@ -49,11 +54,14 @@ export class NetworkCommand extends BaseCommand { flags.fstChartVersion, flags.hederaExplorerTlsHostName, flags.hederaExplorerTlsLoadBalancerIp, + flags.log4j2Xml, flags.namespace, flags.nodeIDs, + flags.persistentVolumeClaims, flags.profileFile, flags.profileName, flags.releaseTag, + flags.settingTxt, flags.tlsClusterIssuerType, flags.valuesFile ] @@ -99,7 +107,7 @@ export class NetworkCommand extends BaseCommand { } const profileName = this.configManager.getFlag(flags.profileName) - this.profileValuesFile = await this.profileManager.prepareValuesForFstChart(profileName, config.applicationEnv) + this.profileValuesFile = await this.profileManager.prepareValuesForFstChart(profileName) if (this.profileValuesFile) { valuesArg += this.prepareValuesFiles(this.profileValuesFile) } @@ -118,6 +126,8 @@ export class NetworkCommand extends BaseCommand { valuesArg += ` --set "defaults.root.image.repository=${rootImage}"` } + valuesArg += ` --set "defaults.volumeClaims.enabled=${config.persistentVolumeClaims}"` + this.logger.debug('Prepared helm chart values', { valuesArg }) return valuesArg } @@ -128,10 +138,20 @@ export class NetworkCommand extends BaseCommand { // disable the prompts that we don't want to prompt the user for prompts.disablePrompts([ + flags.apiPermissionProperties, + flags.app, flags.applicationEnv, + flags.applicationProperties, + flags.bootstrapProperties, + flags.chainId, flags.deployHederaExplorer, flags.deployMirrorNode, - flags.hederaExplorerTlsLoadBalancerIp + flags.hederaExplorerTlsLoadBalancerIp, + flags.log4j2Xml, + flags.persistentVolumeClaims, + flags.profileName, + flags.profileFile, + flags.settingTxt ]) await prompts.execute(task, this.configManager, NetworkCommand.DEPLOY_FLAGS_LIST) @@ -150,6 +170,7 @@ export class NetworkCommand extends BaseCommand { * @property {string} hederaExplorerTlsLoadBalancerIp * @property {string} namespace * @property {string} nodeIDs + * @property {string} persistentVolumeClaims * @property {string} profileFile * @property {string} profileName * @property {string} 
releaseTag @@ -197,22 +218,23 @@ export class NetworkCommand extends BaseCommand { { title: 'Initialize', task: async (ctx, task) => { - ctx.config = await self.prepareConfig(task, argv) + ctx.config = /** @type {NetworkDeployConfigClass} **/ await self.prepareConfig(task, argv) } }, { title: `Install chart '${constants.FULLSTACK_DEPLOYMENT_CHART}'`, task: async (ctx, _) => { - if (await self.chartManager.isChartInstalled(ctx.config.namespace, constants.FULLSTACK_DEPLOYMENT_CHART)) { - await self.chartManager.uninstall(ctx.config.namespace, constants.FULLSTACK_DEPLOYMENT_CHART) + const config = /** @type {NetworkDeployConfigClass} **/ ctx.config + if (await self.chartManager.isChartInstalled(config.namespace, constants.FULLSTACK_DEPLOYMENT_CHART)) { + await self.chartManager.uninstall(config.namespace, constants.FULLSTACK_DEPLOYMENT_CHART) } await this.chartManager.install( - ctx.config.namespace, + config.namespace, constants.FULLSTACK_DEPLOYMENT_CHART, - ctx.config.chartPath, - ctx.config.fstChartVersion, - ctx.config.valuesArg) + config.chartPath, + config.fstChartVersion, + config.valuesArg) } }, { @@ -220,9 +242,10 @@ export class NetworkCommand extends BaseCommand { task: async (ctx, task) => { const subTasks = [] + const config = /** @type {NetworkDeployConfigClass} **/ ctx.config // nodes - for (const nodeId of ctx.config.nodeIds) { + for (const nodeId of config.nodeIds) { subTasks.push({ title: `Check Node: ${chalk.yellow(nodeId)}`, task: () => @@ -247,9 +270,10 @@ export class NetworkCommand extends BaseCommand { task: async (ctx, task) => { const subTasks = [] + const config = /** @type {NetworkDeployConfigClass} **/ ctx.config // HAProxy - for (const nodeId of ctx.config.nodeIds) { + for (const nodeId of config.nodeIds) { subTasks.push({ title: `Check HAProxy for: ${chalk.yellow(nodeId)}`, task: () => @@ -260,7 +284,7 @@ export class NetworkCommand extends BaseCommand { } // Envoy Proxy - for (const nodeId of ctx.config.nodeIds) { + for (const nodeId of config.nodeIds) { subTasks.push({ title: `Check Envoy Proxy for: ${chalk.yellow(nodeId)}`, task: () => @@ -419,12 +443,13 @@ export class NetworkCommand extends BaseCommand { { title: `Upgrade chart '${constants.FULLSTACK_DEPLOYMENT_CHART}'`, task: async (ctx, _) => { + const config = ctx.config await this.chartManager.upgrade( - ctx.config.namespace, + config.namespace, constants.FULLSTACK_DEPLOYMENT_CHART, - ctx.config.chartPath, - ctx.config.valuesArg, - ctx.config.fstChartVersion + config.chartPath, + config.valuesArg, + config.fstChartVersion ) } }, diff --git a/src/commands/node.mjs b/src/commands/node.mjs index 8433e6dad..166a48df8 100644 --- a/src/commands/node.mjs +++ b/src/commands/node.mjs @@ -17,12 +17,17 @@ import * as x509 from '@peculiar/x509' import chalk from 'chalk' import * as fs from 'fs' -import { readFile, writeFile } from 'fs/promises' import { Listr } from 'listr2' import path from 'path' import { FullstackTestingError, IllegalArgumentError } from '../core/errors.mjs' import * as helpers from '../core/helpers.mjs' -import { getNodeAccountMap, getNodeLogs, getTmpDir, sleep, validatePath } from '../core/helpers.mjs' +import { + getNodeAccountMap, + getNodeLogs, + getTmpDir, + sleep, + validatePath +} from '../core/helpers.mjs' import { constants, Templates, Zippy } from '../core/index.mjs' import { BaseCommand } from './base.mjs' import * as flags from './flags.mjs' @@ -30,16 +35,16 @@ import * as prompts from './prompts.mjs' import { AccountBalanceQuery, + AccountId, AccountUpdateTransaction, - 
FileUpdateTransaction, FileAppendTransaction, + FileUpdateTransaction, FreezeTransaction, FreezeType, - ServiceEndpoint, - Timestamp, + NodeCreateTransaction, PrivateKey, - AccountId, - NodeCreateTransaction + ServiceEndpoint, + Timestamp } from '@hashgraph/sdk' import * as crypto from 'crypto' import { @@ -60,12 +65,14 @@ export class NodeCommand extends BaseCommand { if (!opts || !opts.keyManager) throw new IllegalArgumentError('An instance of core/KeyManager is required', opts.keyManager) if (!opts || !opts.accountManager) throw new IllegalArgumentError('An instance of core/AccountManager is required', opts.accountManager) if (!opts || !opts.keytoolDepManager) throw new IllegalArgumentError('An instance of KeytoolDependencyManager is required', opts.keytoolDepManager) + if (!opts || !opts.profileManager) throw new IllegalArgumentError('An instance of ProfileManager is required', opts.profileManager) this.downloader = opts.downloader this.platformInstaller = opts.platformInstaller this.keyManager = opts.keyManager this.accountManager = opts.accountManager this.keytoolDepManager = opts.keytoolDepManager + this.profileManager = opts.profileManager this._portForwards = [] } @@ -75,24 +82,17 @@ export class NodeCommand extends BaseCommand { static get SETUP_FLAGS_LIST () { return [ - flags.apiPermissionProperties, - flags.app, flags.appConfig, - flags.applicationProperties, - flags.bootstrapProperties, flags.cacheDir, - flags.chainId, flags.devMode, flags.force, flags.generateGossipKeys, flags.generateTlsKeys, flags.keyFormat, flags.localBuildPath, - flags.log4j2Xml, flags.namespace, flags.nodeIDs, - flags.releaseTag, - flags.settingTxt + flags.releaseTag ] } @@ -117,11 +117,11 @@ export class NodeCommand extends BaseCommand { static get REFRESH_FLAGS_LIST () { return [ - flags.app, flags.cacheDir, flags.devMode, flags.force, flags.keyFormat, + flags.localBuildPath, flags.namespace, flags.nodeIDs, flags.releaseTag @@ -134,9 +134,7 @@ export class NodeCommand extends BaseCommand { static get ADD_FLAGS_LIST () { return [ - flags.apiPermissionProperties, - flags.applicationProperties, - flags.bootstrapProperties, + flags.app, flags.cacheDir, flags.chainId, flags.chartDirectory, @@ -149,11 +147,10 @@ export class NodeCommand extends BaseCommand { flags.gossipEndpoints, flags.grpcEndpoints, flags.keyFormat, - flags.log4j2Xml, + flags.localBuildPath, flags.namespace, flags.nodeID, - flags.releaseTag, - flags.settingTxt + flags.releaseTag ] } @@ -261,7 +258,8 @@ export class NodeCommand extends BaseCommand { if (output && output.indexOf('Terminating Netty') < 0 && // make sure we are not at the beginning of a restart (output.indexOf(`Now current platform status = ${status}`) > 0 || output.indexOf(`Platform Status Change ${status}`) > 0 || - output.indexOf(`is ${status}`) > 0)) { // 'is ACTIVE' is for newer versions, first seen in v0.49.0 + output.indexOf(`is ${status}`) > 0 || + output.indexOf(`"newStatus":"${status}"`) > 0)) { this.logger.debug(`Node ${nodeId} is ${status} [ attempt: ${attempt}/${maxAttempt}]`) isActive = true break @@ -475,12 +473,13 @@ export class NodeCommand extends BaseCommand { } } - async initializeSetup (config, configManager, k8) { + async initializeSetup (config, k8) { // compute other config parameters - config.releasePrefix = Templates.prepareReleasePrefix(config.releaseTag) - config.buildZipFile = `${config.cacheDir}/${config.releasePrefix}/build-${config.releaseTag}.zip` config.keysDir = path.join(validatePath(config.cacheDir), 'keys') - config.stagingDir = 
Templates.renderStagingDir(configManager, flags) + config.stagingDir = Templates.renderStagingDir( + config.cacheDir, + config.releaseTag + ) config.stagingKeysDir = path.join(validatePath(config.stagingDir), 'keys') if (!await k8.hasNamespace(config.namespace)) { @@ -550,9 +549,8 @@ export class NodeCommand extends BaseCommand { }) } - fetchLocalOrReleasedPlatformSoftware (nodeIds, podNames, releaseTag, task) { + fetchLocalOrReleasedPlatformSoftware (nodeIds, podNames, releaseTag, task, localBuildPath) { const self = this - const localBuildPath = self.configManager.getFlag(flags.localBuildPath) if (localBuildPath !== '') { return self.uploadPlatformSoftware(nodeIds, podNames, task, localBuildPath) } else { @@ -693,16 +691,10 @@ export class NodeCommand extends BaseCommand { // disable the prompts that we don't want to prompt the user for prompts.disablePrompts([ - flags.apiPermissionProperties, - flags.app, flags.appConfig, - flags.applicationProperties, - flags.bootstrapProperties, flags.devMode, flags.force, - flags.localBuildPath, - flags.log4j2Xml, - flags.settingTxt + flags.localBuildPath ]) await prompts.execute(task, self.configManager, NodeCommand.SETUP_FLAGS_LIST) @@ -710,31 +702,23 @@ export class NodeCommand extends BaseCommand { /** * @typedef {Object} NodeSetupConfigClass * -- flags -- - * @property {string} apiPermissionProperties * @property {string} app * @property {string} appConfig - * @property {string} applicationProperties - * @property {string} bootstrapProperties * @property {string} cacheDir - * @property {string} chainId * @property {boolean} devMode * @property {boolean} force * @property {boolean} generateGossipKeys * @property {boolean} generateTlsKeys * @property {string} keyFormat * @property {string} localBuildPath - * @property {string} log4j2Xml * @property {string} namespace * @property {string} nodeIDs * @property {string} releaseTag - * @property {string} settingTxt * -- extra args -- - * @property {string} buildZipFile * @property {Date} curDate * @property {string} keysDir * @property {string[]} nodeIds - * @property {Object} podNames - * @property {string} releasePrefix + * @property {string[]} podNames * @property {string} stagingDir * @property {string} stagingKeysDir * -- methods -- @@ -748,12 +732,10 @@ export class NodeCommand extends BaseCommand { // create a config object for subsequent steps const config = /** @type {NodeSetupConfigClass} **/ this.getConfig(NodeCommand.SETUP_CONFIGS_NAME, NodeCommand.SETUP_FLAGS_LIST, [ - 'buildZipFile', 'curDate', 'keysDir', 'nodeIds', 'podNames', - 'releasePrefix', 'stagingDir', 'stagingKeysDir' ]) @@ -761,7 +743,7 @@ export class NodeCommand extends BaseCommand { config.nodeIds = helpers.parseNodeIds(config.nodeIDs) config.curDate = new Date() - await self.initializeSetup(config, self.configManager, self.k8) + await self.initializeSetup(config, self.k8) // set config in the context for later tasks to use ctx.config = config @@ -776,7 +758,8 @@ export class NodeCommand extends BaseCommand { { title: 'Generate Gossip keys', task: async (ctx, parentTask) => { - const config = ctx.config + const config = /** @type {NodeSetupConfigClass} **/ ctx.config + const subTasks = self._nodeGossipKeysTaskList(config.keyFormat, config.nodeIds, config.keysDir, config.curDate) // set up the sub-tasks return parentTask.newListr(subTasks, { @@ -808,53 +791,23 @@ export class NodeCommand extends BaseCommand { { title: 'Prepare staging directory', task: async (ctx, parentTask) => { - const config = ctx.config const subTasks = [ 
- { - title: 'Copy configuration files', - task: () => { - for (const flag of flags.nodeConfigFileFlags.values()) { - const filePath = self.configManager.getFlag(flag) - if (!filePath) { - throw new FullstackTestingError(`Configuration file path is missing for: ${flag.name}`) - } - - const fileName = path.basename(filePath) - const destPath = `${config.stagingDir}/templates/${fileName}` - self.logger.debug(`Copying configuration file to staging: ${filePath} -> ${destPath}`) - - fs.cpSync(filePath, destPath, { force: true }) - } - } - }, { title: 'Copy Gossip keys to staging', task: async (ctx, _) => { - await this.copyGossipKeysToStaging(ctx.config.keyFormat, ctx.config.keysDir, ctx.config.stagingKeysDir, ctx.config.nodeIds) + const config = /** @type {NodeSetupConfigClass} **/ ctx.config + await this.copyGossipKeysToStaging(config.keyFormat, config.keysDir, config.stagingKeysDir, ctx.config.nodeIds) } }, { title: 'Copy gRPC TLS keys to staging', task: async (ctx, _) => { for (const nodeId of ctx.config.nodeIds) { + const config = /** @type {NodeSetupConfigClass} **/ ctx.config const tlsKeyFiles = self.keyManager.prepareTLSKeyFilePaths(nodeId, config.keysDir) await self._copyNodeKeys(tlsKeyFiles, config.stagingKeysDir) } } - }, - { - title: 'Prepare config.txt for the network', - task: async (ctx, _) => { - const configTxtPath = `${ctx.config.stagingDir}/config.txt` - const template = `${constants.RESOURCES_DIR}/templates/config.template` - await self.platformInstaller.prepareConfigTxt( - ctx.config.nodeIds, - configTxtPath, - ctx.config.releaseTag, - ctx.config.chainId, - template, - ctx.config.app || undefined) - } } ] @@ -869,7 +822,7 @@ export class NodeCommand extends BaseCommand { task: async (ctx, task) => { const config = /** @type {NodeSetupConfigClass} **/ ctx.config - return self.fetchLocalOrReleasedPlatformSoftware(config.nodeIds, config.podNames, config.releaseTag, task) + return self.fetchLocalOrReleasedPlatformSoftware(config.nodeIds, config.podNames, config.releaseTag, task, config.localBuildPath) } }, { @@ -883,7 +836,6 @@ export class NodeCommand extends BaseCommand { task: () => self.platformInstaller.taskInstall( podName, - ctx.config.buildZipFile, ctx.config.stagingDir, ctx.config.nodeIds, ctx.config.keyFormat, @@ -941,7 +893,10 @@ export class NodeCommand extends BaseCommand { nodeIds: helpers.parseNodeIds(self.configManager.getFlag(flags.nodeIDs)) } - ctx.config.stagingDir = Templates.renderStagingDir(self.configManager, flags) + ctx.config.stagingDir = Templates.renderStagingDir( + self.configManager.getFlag(flags.cacheDir), + self.configManager.getFlag(flags.releaseTag) + ) if (!await self.k8.hasNamespace(ctx.config.namespace)) { throw new FullstackTestingError(`namespace ${ctx.config.namespace} does not exist`) @@ -973,7 +928,7 @@ export class NodeCommand extends BaseCommand { task: (ctx, task) => { const subTasks = [] for (const nodeId of ctx.config.nodeIds) { - if (self.configManager.getFlag(flags.app) !== '') { + if (self.configManager.getFlag(flags.app) !== '' && self.configManager.getFlag(flags.app) !== constants.HEDERA_APP_NAME) { subTasks.push({ title: `Check node: ${chalk.yellow(nodeId)}`, task: () => self.checkNetworkNodeState(nodeId, 100, 'ACTIVE', 'output/swirlds.log') @@ -1016,7 +971,7 @@ export class NodeCommand extends BaseCommand { } }) }, - skip: (ctx, _) => self.configManager.getFlag(flags.app) !== '' + skip: (ctx, _) => self.configManager.getFlag(flags.app) !== '' && self.configManager.getFlag(flags.app) !== constants.HEDERA_APP_NAME }, { title: 
'Add node stakes', @@ -1238,7 +1193,6 @@ export class NodeCommand extends BaseCommand { self.configManager.update(argv) // disable the prompts that we don't want to prompt the user for prompts.disablePrompts([ - flags.app, flags.devMode, flags.force ]) @@ -1248,20 +1202,18 @@ export class NodeCommand extends BaseCommand { /** * @typedef {Object} NodeRefreshConfigClass * -- flags -- - * @property {string} app * @property {string} cacheDir * @property {boolean} devMode * @property {boolean} force * @property {string} keyFormat + * @property {string} localBuildPath * @property {string} namespace * @property {string} nodeIDs * @property {string} releaseTag * -- extra args -- - * @property {string} buildZipFile * @property {string} keysDir * @property {string[]} nodeIds * @property {Object} podNames - * @property {string} releasePrefix * @property {string} stagingDir * @property {string} stagingKeysDir * -- methods -- @@ -1275,18 +1227,16 @@ export class NodeCommand extends BaseCommand { // create a config object for subsequent steps ctx.config = /** @type {NodeRefreshConfigClass} **/ this.getConfig(NodeCommand.REFRESH_CONFIGS_NAME, NodeCommand.REFRESH_FLAGS_LIST, [ - 'buildZipFile', 'keysDir', 'nodeIds', 'podNames', - 'releasePrefix', 'stagingDir', 'stagingKeysDir' ]) ctx.config.nodeIds = helpers.parseNodeIds(ctx.config.nodeIDs) - await self.initializeSetup(ctx.config, self.configManager, self.k8) + await self.initializeSetup(ctx.config, self.k8) self.logger.debug('Initialized config', ctx.config) } @@ -1322,7 +1272,8 @@ export class NodeCommand extends BaseCommand { title: 'Fetch platform software into network nodes', task: async (ctx, task) => { - return self.fetchLocalOrReleasedPlatformSoftware(ctx.config.nodeIds, ctx.config.podNames, ctx.config.releaseTag, task) + const config = /** @type {NodeRefreshConfigClass} **/ ctx.config + return self.fetchLocalOrReleasedPlatformSoftware(config.nodeIds, config.podNames, config.releaseTag, task, config.localBuildPath) } }, { @@ -1342,8 +1293,7 @@ export class NodeCommand extends BaseCommand { subTasks.push({ title: `Node: ${chalk.yellow(nodeId)}`, task: () => - self.platformInstaller.taskInstall(podName, config.buildZipFile, - config.stagingDir, nodeList, config.keyFormat, config.force) + self.platformInstaller.taskInstall(podName, config.stagingDir, nodeList, config.keyFormat, config.force) }) } @@ -1384,7 +1334,7 @@ export class NodeCommand extends BaseCommand { task: (ctx, task) => { const subTasks = [] for (const nodeId of ctx.config.nodeIds) { - if (ctx.config.app !== '') { + if (ctx.config.app !== '' && ctx.config.app !== constants.HEDERA_APP_NAME) { subTasks.push({ title: `Check node: ${chalk.yellow(nodeId)}`, task: () => self.checkNetworkNodeState(nodeId, 100, 'ACTIVE', 'output/swirlds.log') @@ -1495,18 +1445,15 @@ export class NodeCommand extends BaseCommand { // disable the prompts that we don't want to prompt the user for prompts.disablePrompts([ - flags.apiPermissionProperties, - flags.applicationProperties, - flags.bootstrapProperties, + flags.app, + flags.chainId, flags.chartDirectory, flags.devMode, flags.endpointType, flags.force, flags.fstChartVersion, flags.gossipEndpoints, - flags.grpcEndpoints, - flags.log4j2Xml, - flags.settingTxt + flags.grpcEndpoints ]) await prompts.execute(task, self.configManager, NodeCommand.ADD_FLAGS_LIST) @@ -1514,9 +1461,7 @@ export class NodeCommand extends BaseCommand { /** * @typedef {Object} NodeAddConfigClass * -- flags -- - * @property {string} apiPermissionProperties - * @property {string} 
applicationProperties - * @property {string} bootstrapProperties + * @property {string} app * @property {string} cacheDir * @property {string} chainId * @property {string} chartDirectory @@ -1529,15 +1474,13 @@ export class NodeCommand extends BaseCommand { * @property {string} gossipEndpoints * @property {string} grpcEndpoints * @property {string} keyFormat - * @property {string} log4j2Xml + * @property {string} localBuildPath * @property {string} namespace * @property {string} nodeId * @property {string} releaseTag - * @property {string} settingTxt * -- extra args -- * @property {PrivateKey} adminKey * @property {string[]} allNodeIds - * @property {string} buildZipFile * @property {string} chartPath * @property {Date} curDate * @property {string[]} existingNodeIds @@ -1545,9 +1488,7 @@ export class NodeCommand extends BaseCommand { * @property {string} keysDir * @property {string} lastStateZipPath * @property {Object} nodeClient - * @property {string[]} nodeIds * @property {Object} podNames - * @property {string} releasePrefix * @property {Map} serviceMap * @property {PrivateKey} treasuryKey * @property {string} stagingDir @@ -1565,7 +1506,6 @@ export class NodeCommand extends BaseCommand { [ 'adminKey', 'allNodeIds', - 'buildZipFile', 'chartPath', 'curDate', 'existingNodeIds', @@ -1573,9 +1513,7 @@ export class NodeCommand extends BaseCommand { 'keysDir', 'lastStateZipPath', 'nodeClient', - 'nodeIds', 'podNames', - 'releasePrefix', 'serviceMap', 'stagingDir', 'stagingKeysDir', @@ -1584,13 +1522,12 @@ export class NodeCommand extends BaseCommand { config.curDate = new Date() config.existingNodeIds = [] - config.nodeIds = [config.nodeId] if (config.keyFormat !== constants.KEY_FORMAT_PEM) { throw new FullstackTestingError('key type cannot be PFX') } - await self.initializeSetup(config, self.configManager, self.k8) + await self.initializeSetup(config, self.k8) // set config in the context for later tasks to use ctx.config = config @@ -1611,25 +1548,35 @@ export class NodeCommand extends BaseCommand { self.logger.debug('Initialized config', { config }) } }, + { + title: 'Check that PVCs are enabled', + task: async (ctx, task) => { + if (!self.configManager.getFlag(flags.persistentVolumeClaims)) { + throw new FullstackTestingError('PVCs are not enabled. 
Please enable PVCs before adding a node') + } + } + }, { title: 'Identify existing network nodes', task: async (ctx, task) => { - ctx.config.serviceMap = await self.accountManager.getNodeServiceMap( - ctx.config.namespace) - for (/** @type {NetworkNodeServices} **/ const networkNodeServices of ctx.config.serviceMap.values()) { - ctx.config.existingNodeIds.push(networkNodeServices.nodeName) + const config = /** @type {NodeAddConfigClass} **/ ctx.config + config.serviceMap = await self.accountManager.getNodeServiceMap( + config.namespace) + for (/** @type {NetworkNodeServices} **/ const networkNodeServices of config.serviceMap.values()) { + config.existingNodeIds.push(networkNodeServices.nodeName) } - return self.taskCheckNetworkNodePods(ctx, task, ctx.config.existingNodeIds) + return self.taskCheckNetworkNodePods(ctx, task, config.existingNodeIds) } }, { title: 'Determine new node account number', task: (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config const values = { hedera: { nodes: [] } } let maxNum = 0 - for (/** @type {NetworkNodeServices} **/ const networkNodeServices of ctx.config.serviceMap.values()) { + for (/** @type {NetworkNodeServices} **/ const networkNodeServices of config.serviceMap.values()) { values.hedera.nodes.push({ accountId: networkNodeServices.accountId, name: networkNodeServices.nodeName @@ -1642,14 +1589,14 @@ export class NodeCommand extends BaseCommand { ctx.maxNum = maxNum ctx.newNode = { accountId: `${constants.HEDERA_NODE_ACCOUNT_ID_START.realm}.${constants.HEDERA_NODE_ACCOUNT_ID_START.shard}.${++maxNum}`, - name: ctx.config.nodeId + name: config.nodeId } } }, { title: 'Generate Gossip key', task: async (ctx, parentTask) => { - const config = ctx.config + const config = /** @type {NodeAddConfigClass} **/ ctx.config const subTasks = self._nodeGossipKeysTaskList(config.keyFormat, [config.nodeId], config.keysDir, config.curDate, config.allNodeIds) // set up the sub-tasks return parentTask.newListr(subTasks, { @@ -1665,7 +1612,7 @@ export class NodeCommand extends BaseCommand { { title: 'Generate gRPC TLS key', task: async (ctx, parentTask) => { - const config = ctx.config + const config = /** @type {NodeAddConfigClass} **/ ctx.config const subTasks = self._nodeTlsKeyTaskList([config.nodeId], config.keysDir, config.curDate) // set up the sub-tasks return parentTask.newListr(subTasks, { @@ -1851,6 +1798,13 @@ export class NodeCommand extends BaseCommand { }) } }, + { + title: 'Get node logs and configs', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + await helpers.getNodeLogs(self.k8, config.namespace) + } + }, { title: 'Deploy new network node', task: async (ctx, task) => { @@ -1862,6 +1816,13 @@ export class NodeCommand extends BaseCommand { } valuesArg += ` --set "hedera.nodes[${index}].accountId=${ctx.newNode.accountId}" --set "hedera.nodes[${index}].name=${ctx.newNode.name}"` + this.profileValuesFile = await self.profileManager.prepareValuesForNodeAdd( + path.join(config.stagingDir, 'config.txt'), + path.join(config.stagingDir, 'templates', 'application.properties')) + if (this.profileValuesFile) { + valuesArg += this.prepareValuesFiles(this.profileValuesFile) + } + await self.chartManager.upgrade( config.namespace, constants.FULLSTACK_DEPLOYMENT_CHART, @@ -1874,34 +1835,46 @@ export class NodeCommand extends BaseCommand { } }, { - title: 'Check new network node pod is running', + title: 'Kill nodes to pick up updated configMaps', task: async (ctx, task) => { const config = /** 
@type {NodeAddConfigClass} **/ ctx.config - config.podNames[config.nodeId] = await this.checkNetworkNodePod(config.namespace, config.nodeId) + for (const /** @type {NetworkNodeServices} **/ service of config.serviceMap.values()) { + await self.k8.kubeClient.deleteNamespacedPod(service.nodePodName, config.namespace, undefined, undefined, 1) + } } }, + { + title: 'Check node pods are running', + task: + async (ctx, task) => { + const subTasks = [] + const config = /** @type {NodeAddConfigClass} **/ ctx.config + + // nodes + for (const nodeId of config.allNodeIds) { + subTasks.push({ + title: `Check Node: ${chalk.yellow(nodeId)}`, + task: () => + self.k8.waitForPods([constants.POD_PHASE_RUNNING], [ + 'fullstack.hedera.com/type=network-node', + `fullstack.hedera.com/node-name=${nodeId}` + ], 1, 60 * 15, 1000) // timeout 15 minutes + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, // no need to run concurrently since if one node is up, the rest should be up by then + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, { title: 'Prepare staging directory', task: async (ctx, parentTask) => { - const config = /** @type {NodeAddConfigClass} **/ ctx.config const subTasks = [ - { - title: 'Copy configuration files', - task: () => { - for (const flag of flags.nodeConfigFileFlags.values()) { - const filePath = self.configManager.getFlag(flag) - if (!filePath) { - throw new FullstackTestingError(`Configuration file path is missing for: ${flag.name}`) - } - - const fileName = path.basename(filePath) - const destPath = `${config.stagingDir}/templates/${fileName}` - self.logger.debug(`Copying configuration file to staging: ${filePath} -> ${destPath}`) - - fs.cpSync(filePath, destPath, { force: true }) - } - } - }, { title: 'Copy Gossip keys to staging', task: async (ctx, _) => { @@ -1929,11 +1902,15 @@ export class NodeCommand extends BaseCommand { } }, { - title: 'Fetch platform software into new network node', + title: 'Fetch platform software into all network nodes', task: async (ctx, task) => { const config = /** @type {NodeAddConfigClass} **/ ctx.config - return self.fetchLocalOrReleasedPlatformSoftware(config.nodeIds, config.podNames, config.releaseTag, task) + config.serviceMap = await self.accountManager.getNodeServiceMap( + config.namespace) + config.podNames[config.nodeId] = config.serviceMap.get( + config.nodeId).nodePodName + return self.fetchLocalOrReleasedPlatformSoftware(config.allNodeIds, config.podNames, config.releaseTag, task, config.localBuildPath) } }, { @@ -1968,16 +1945,13 @@ export class NodeCommand extends BaseCommand { task: async (ctx, parentTask) => { const config = /** @type {NodeAddConfigClass} **/ ctx.config - // modify application.properties to trick Hedera Services into receiving an updated address book - await self.bumpHederaConfigVersion(`${config.stagingDir}/templates/application.properties`) - const subTasks = [] for (const nodeId of config.allNodeIds) { const podName = config.podNames[nodeId] subTasks.push({ title: `Node: ${chalk.yellow(nodeId)}`, task: () => - self.platformInstaller.taskInstall(podName, config.buildZipFile, config.stagingDir, config.allNodeIds, config.keyFormat, config.force) + self.platformInstaller.taskInstall(podName, config.stagingDir, config.allNodeIds, config.keyFormat, config.force) }) } @@ -1990,7 +1964,7 @@ export class NodeCommand extends BaseCommand { }, { title: 'Start network nodes', - task: (ctx, task) => { + task: async (ctx, task) => { const config = /** @type {NodeAddConfigClass} 
**/ ctx.config const subTasks = [] self.startNodes(config.podNames, config.allNodeIds, subTasks) @@ -2063,7 +2037,7 @@ export class NodeCommand extends BaseCommand { title: 'Trigger stake weight calculate', task: async (ctx, task) => { const config = /** @type {NodeAddConfigClass} **/ ctx.config - // sleep 60 seconds for the handler to be able to trigger the network node stake weight recalculate + self.logger.info('sleep 60 seconds for the handler to be able to trigger the network node stake weight recalculate') await sleep(60000) const accountMap = getNodeAccountMap(config.allNodeIds) // send some write transactions to invoke the handler that will trigger the stake weight recalculate @@ -2344,18 +2318,4 @@ export class NodeCommand extends BaseCommand { } } } - - async bumpHederaConfigVersion (configTxtPath) { - const lines = (await readFile(configTxtPath, 'utf-8')).split('\n') - - for (const line of lines) { - if (line.startsWith('hedera.config.version=')) { - const version = parseInt(line.split('=')[1]) + 1 - lines[lines.indexOf(line)] = `hedera.config.version=${version}` - break - } - } - - await writeFile(configTxtPath, lines.join('\n')) - } } diff --git a/src/commands/prompts.mjs b/src/commands/prompts.mjs index 81c8f4a8e..0e5b2f9f9 100644 --- a/src/commands/prompts.mjs +++ b/src/commands/prompts.mjs @@ -444,6 +444,14 @@ export async function promptEndpointType (task, input) { flags.endpointType.name) } +export async function promptPersistentVolumeClaims (task, input) { + return await promptToggle(task, input, + flags.persistentVolumeClaims.definition.defaultValue, + 'Would you like to enable persistent volume claims to store data outside the pod? ', + null, + flags.persistentVolumeClaims.name) +} + export function getPromptMap () { return new Map() .set(flags.accountId.name, promptAccountId) @@ -472,6 +480,7 @@ export function getPromptMap () { .set(flags.nodeIDs.name, promptNodeIds) .set(flags.operatorId.name, promptOperatorId) .set(flags.operatorKey.name, promptOperatorKey) + .set(flags.persistentVolumeClaims.name, promptPersistentVolumeClaims) .set(flags.privateKey.name, promptPrivateKey) .set(flags.profileFile.name, promptProfileFile) .set(flags.profileName.name, promptProfile) diff --git a/src/commands/relay.mjs b/src/commands/relay.mjs index 34bf35734..f19cd3673 100644 --- a/src/commands/relay.mjs +++ b/src/commands/relay.mjs @@ -162,7 +162,11 @@ export class RelayCommand extends BaseCommand { * @property {number} replicaCount * @property {string} valuesFile * -- extra args -- + * @property {string} chartPath + * @property {boolean} isChartInstalled * @property {string[]} nodeIds + * @property {string} releaseName + * @property {string} valuesArg * -- methods -- * @property {getUnusedConfigs} getUnusedConfigs */ @@ -176,8 +180,8 @@ export class RelayCommand extends BaseCommand { ['nodeIds']) ctx.config.nodeIds = helpers.parseNodeIds(ctx.config.nodeIDs) - ctx.releaseName = self.prepareReleaseName(ctx.config.nodeIds) - ctx.isChartInstalled = await self.chartManager.isChartInstalled(ctx.config.namespace, ctx.releaseName) + ctx.config.releaseName = self.prepareReleaseName(ctx.config.nodeIds) + ctx.config.isChartInstalled = await self.chartManager.isChartInstalled(ctx.config.namespace, ctx.releaseName) self.logger.debug('Initialized config', { config: ctx.config }) } @@ -185,32 +189,30 @@ export class RelayCommand extends BaseCommand { { title: 'Prepare chart values', task: async (ctx, _) => { - ctx.chartPath = await self.prepareChartPath(ctx.config.chartDirectory, 
constants.JSON_RPC_RELAY_CHART, constants.JSON_RPC_RELAY_CHART) - ctx.valuesArg = await self.prepareValuesArg( - ctx.config.valuesFile, - ctx.config.nodeIds, - ctx.config.chainId, - ctx.config.relayReleaseTag, - ctx.config.replicaCount, - ctx.config.operatorId, - ctx.config.operatorKey, - ctx.config.namespace + const config = /** @type {RelayDeployConfigClass} **/ ctx.config + config.chartPath = await self.prepareChartPath(config.chartDirectory, constants.JSON_RPC_RELAY_CHART, constants.JSON_RPC_RELAY_CHART) + config.valuesArg = await self.prepareValuesArg( + config.valuesFile, + config.nodeIds, + config.chainId, + config.relayReleaseTag, + config.replicaCount, + config.operatorId, + config.operatorKey, + config.namespace ) } }, { title: 'Deploy JSON RPC Relay', task: async (ctx, _) => { - const namespace = ctx.config.namespace - const releaseName = ctx.releaseName - const chartPath = ctx.chartPath - const valuesArg = ctx.valuesArg + const config = /** @type {RelayDeployConfigClass} **/ ctx.config - await self.chartManager.install(namespace, releaseName, chartPath, '', valuesArg) + await self.chartManager.install(config.namespace, config.releaseName, config.chartPath, '', config.valuesArg) await self.k8.waitForPods([constants.POD_PHASE_RUNNING], [ 'app=hedera-json-rpc-relay', - `app.kubernetes.io/instance=${releaseName}` + `app.kubernetes.io/instance=${config.releaseName}` ], 1, 900, 1000) // reset nodeID @@ -221,14 +223,14 @@ export class RelayCommand extends BaseCommand { { title: 'Check relay is ready', task: async (ctx, _) => { - const releaseName = ctx.releaseName + const config = /** @type {RelayDeployConfigClass} **/ ctx.config try { await self.k8.waitForPodReady([ 'app=hedera-json-rpc-relay', - `app.kubernetes.io/instance=${releaseName}` + `app.kubernetes.io/instance=${config.releaseName}` ], 1, 100, 2000) } catch (e) { - throw new FullstackTestingError(`Relay ${releaseName} is not ready: ${e.message}`, e) + throw new FullstackTestingError(`Relay ${config.releaseName} is not ready: ${e.message}`, e) } } } @@ -270,8 +272,8 @@ export class RelayCommand extends BaseCommand { nodeIds: helpers.parseNodeIds(self.configManager.getFlag(flags.nodeIDs)) } - ctx.releaseName = this.prepareReleaseName(ctx.config.nodeIds) - ctx.isChartInstalled = await this.chartManager.isChartInstalled(ctx.config.namespace, ctx.releaseName) + ctx.config.releaseName = this.prepareReleaseName(ctx.config.nodeIds) + ctx.config.isChartInstalled = await this.chartManager.isChartInstalled(ctx.config.namespace, ctx.config.releaseName) self.logger.debug('Initialized config', { config: ctx.config }) } @@ -279,12 +281,11 @@ export class RelayCommand extends BaseCommand { { title: 'Destroy JSON RPC Relay', task: async (ctx, _) => { - const namespace = ctx.config.namespace - const releaseName = ctx.releaseName + const config = ctx.config - await this.chartManager.uninstall(namespace, releaseName) + await this.chartManager.uninstall(config.namespace, config.releaseName) - this.logger.showList('Destroyed Relays', await self.chartManager.getInstalledCharts(namespace)) + this.logger.showList('Destroyed Relays', await self.chartManager.getInstalledCharts(config.namespace)) // reset nodeID self.configManager.setFlag(flags.nodeIDs, '') diff --git a/src/core/account_manager.mjs b/src/core/account_manager.mjs index 07ad3c68b..b1c13989e 100644 --- a/src/core/account_manager.mjs +++ b/src/core/account_manager.mjs @@ -303,6 +303,15 @@ export class AccountManager { serviceBuilder.withHaProxyPodName(podList.body.items[0].metadata.name) } 
+ // get the pod name of the network node + const pods = await this.k8.getPodsByLabel(['fullstack.hedera.com/type=network-node']) + for (const pod of pods) { + const podName = pod.metadata.name + const nodeName = pod.metadata.labels['fullstack.hedera.com/node-name'] + const serviceBuilder = /** @type {NetworkNodeServicesBuilder} **/ serviceBuilderMap.get(nodeName) + serviceBuilder.withNodePodName(podName) + } + /** @type {Map} **/ const serviceMap = new Map() for (const networkNodeServicesBuilder of serviceBuilderMap.values()) { @@ -624,7 +633,7 @@ export class AccountManager { // ensure serviceEndpoint.ipAddressV4 value for all nodes in the addressBook is a 4 bytes array instead of string // See: https://github.com/hashgraph/hedera-protobufs/blob/main/services/basic_types.proto#L1309 - // TODO: with v0.53 will mirror node no longer need this and we can remove @hashgraph/proto? + // TODO: with v0.53 will mirror node no longer need this and we can remove @hashgraph/proto: https://github.com/hashgraph/solo/issues/493 const addressBook = HashgraphProto.proto.NodeAddressBook.decode(addressBookBytes) const hasAlphaRegEx = /[a-zA-Z]+/ let modified = false diff --git a/src/core/helpers.mjs b/src/core/helpers.mjs index ee36e2928..2cf402f07 100644 --- a/src/core/helpers.mjs +++ b/src/core/helpers.mjs @@ -225,9 +225,13 @@ export async function getNodeLogs (k8, namespace) { } } -// a function generate map between the nodeId and their account ids +/** + * Create a map of node IDs to account IDs + * @param nodeIDs an array of the node IDs + * @returns {Map} the map of node IDs to account IDs + */ export function getNodeAccountMap (nodeIDs) { - const accountMap = new Map() + const accountMap = /** @type {Map} **/ new Map() const realm = constants.HEDERA_NODE_ACCOUNT_ID_START.realm const shard = constants.HEDERA_NODE_ACCOUNT_ID_START.shard let accountId = constants.HEDERA_NODE_ACCOUNT_ID_START.num diff --git a/src/core/network_node_services.mjs b/src/core/network_node_services.mjs index cb642b3e7..9f2b76a3b 100644 --- a/src/core/network_node_services.mjs +++ b/src/core/network_node_services.mjs @@ -17,6 +17,7 @@ export class NetworkNodeServices { constructor (builder) { this.nodeName = builder.nodeName + this.nodePodName = builder.nodePodName this.haProxyName = builder.haProxyName this.haProxyLoadBalancerIp = builder.haProxyLoadBalancerIp this.haProxyClusterIp = builder.haProxyClusterIp @@ -87,6 +88,11 @@ export class NetworkNodeServicesBuilder { return this } + withNodePodName (nodePodName) { + this.nodePodName = nodePodName + return this + } + withNodeServiceName (nodeServiceName) { this.nodeServiceName = nodeServiceName return this diff --git a/src/core/platform_installer.mjs b/src/core/platform_installer.mjs index 72391990e..2bd4bb3ef 100644 --- a/src/core/platform_installer.mjs +++ b/src/core/platform_installer.mjs @@ -18,7 +18,6 @@ import * as fs from 'fs' import * as os from 'os' import { Listr } from 'listr2' import * as path from 'path' -import * as semver from 'semver' import { FullstackTestingError, IllegalArgumentError, MissingArgumentError } from './errors.mjs' import { constants } from './index.mjs' import { Templates } from './templates.mjs' @@ -177,35 +176,6 @@ export class PlatformInstaller { } } - async copyPlatformConfigFiles (podName, stagingDir) { - const self = this - - if (!podName) throw new MissingArgumentError('podName is required') - if (!stagingDir) throw new MissingArgumentError('stagingDir is required') - - try { - const srcFilesSet1 = [ - `${stagingDir}/config.txt`, - 
`${stagingDir}/templates/log4j2.xml`, - `${stagingDir}/templates/settings.txt` - ] - - const fileList1 = await self.copyFiles(podName, srcFilesSet1, constants.HEDERA_HAPI_PATH) - - const srcFilesSet2 = [ - `${stagingDir}/templates/api-permission.properties`, - `${stagingDir}/templates/application.properties`, - `${stagingDir}/templates/bootstrap.properties` - ] - - const fileList2 = await self.copyFiles(podName, srcFilesSet2, `${constants.HEDERA_HAPI_PATH}/data/config`) - - return fileList1.concat(fileList2) - } catch (e) { - throw new FullstackTestingError(`failed to copy config files to pod '${podName}': ${e.message}`, e) - } - } - async copyTLSKeys (podName, stagingDir) { if (!podName) throw new MissingArgumentError('podName is required') if (!stagingDir) throw new MissingArgumentError('stagingDir is required') @@ -268,70 +238,6 @@ export class PlatformInstaller { } } - /** - * Prepares config.txt file for the node - * @param nodeIDs node IDs - * @param destPath path where config.txt should be written - * @param releaseTag release tag e.g. v0.42.0 - * @param template path to the config.template file - * @param chainId chain ID (298 for local network) - * @param appName the app name to be used in the config.txt (optional, defaults to HederaNode.jar) - * @returns {Promise} - */ - async prepareConfigTxt (nodeIDs, destPath, releaseTag, chainId = constants.HEDERA_CHAIN_ID, template = `${constants.RESOURCES_DIR}/templates/config.template`, appName = constants.HEDERA_APP_NAME) { - if (!nodeIDs || nodeIDs.length === 0) throw new MissingArgumentError('list of node IDs is required') - if (!destPath) throw new MissingArgumentError('destPath is required') - if (!template) throw new MissingArgumentError('config templatePath is required') - if (!releaseTag) throw new MissingArgumentError('release tag is required') - - if (!fs.existsSync(path.dirname(destPath))) throw new IllegalArgumentError(`destPath does not exist: ${destPath}`, destPath) - if (!fs.existsSync(template)) throw new IllegalArgumentError(`config templatePath does not exist: ${template}`, destPath) - - // init variables - const internalPort = constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT - const externalPort = constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT - const nodeStakeAmount = constants.HEDERA_NODE_DEFAULT_STAKE_AMOUNT - - const releaseVersion = semver.parse(releaseTag, { includePrerelease: true }) - - try { - const networkNodeServicesMap = await this.accountManager.getNodeServiceMap(this._getNamespace()) - /** @type {string[]} */ - const configLines = [] - configLines.push(`swirld, ${chainId}`) - configLines.push(`app, ${appName}`) - - let nodeSeq = 0 - for (const nodeId of nodeIDs) { - const networkNodeServices = networkNodeServicesMap.get(nodeId) - const nodeName = nodeId - const nodeNickName = nodeId - - const internalIP = Templates.renderFullyQualifiedNetworkPodName(this._getNamespace(), nodeId) - const externalIP = Templates.renderFullyQualifiedNetworkSvcName(this._getNamespace(), nodeId) - - const account = networkNodeServices.accountId - if (releaseVersion.minor >= 40) { - configLines.push(`address, ${nodeSeq}, ${nodeNickName}, ${nodeName}, ${nodeStakeAmount}, ${internalIP}, ${internalPort}, ${externalIP}, ${externalPort}, ${account}`) - } else { - configLines.push(`address, ${nodeSeq}, ${nodeName}, ${nodeStakeAmount}, ${internalIP}, ${internalPort}, ${externalIP}, ${externalPort}, ${account}`) - } - - nodeSeq += 1 - } - - if (releaseVersion.minor >= 41) { - configLines.push(`nextNodeId, ${nodeSeq}`) - } - - 
fs.writeFileSync(destPath, configLines.join('\n')) - - return configLines - } catch (e) { - throw new FullstackTestingError('failed to generate config.txt', e) - } - } - /** * Return a list of task to perform node installation * @@ -342,20 +248,15 @@ export class PlatformInstaller { * ${staging}/keys/a-.crt: agreement cert for a node * ${staging}/keys/hedera-.key: gRPC TLS key for a node * ${staging}/keys/hedera-.crt: gRPC TLS cert for a node - * ${staging}/properties: contains all properties files - * ${staging}/log4j2.xml: LOG4J file - * ${staging}/settings.txt: settings.txt file for the network - * ${staging}/config.txt: config.txt file for the network * * @param podName name of the pod - * @param buildZipFile path to the platform build.zip file * @param stagingDir staging directory path * @param nodeIds list of node ids * @param keyFormat key format (pfx or pem) * @param force force flag * @returns {Listr} */ - taskInstall (podName, buildZipFile, stagingDir, nodeIds, keyFormat = constants.KEY_FORMAT_PEM, force = false) { + taskInstall (podName, stagingDir, nodeIds, keyFormat = constants.KEY_FORMAT_PEM, force = false) { const self = this return new Listr([ { @@ -368,11 +269,6 @@ export class PlatformInstaller { task: (_, task) => self.copyTLSKeys(podName, stagingDir, keyFormat) }, - { - title: 'Copy configuration files', - task: (_, task) => - self.copyPlatformConfigFiles(podName, stagingDir) - }, { title: 'Set file permissions', task: (_, task) => diff --git a/src/core/profile_manager.mjs b/src/core/profile_manager.mjs index 3931ff18a..3d3b258d4 100644 --- a/src/core/profile_manager.mjs +++ b/src/core/profile_manager.mjs @@ -19,9 +19,11 @@ import path from 'path' import { FullstackTestingError, IllegalArgumentError, MissingArgumentError } from './errors.mjs' import * as yaml from 'js-yaml' import { flags } from '../commands/index.mjs' -import { constants, helpers } from './index.mjs' +import { constants, helpers, Templates } from './index.mjs' import dot from 'dot-object' import { getNodeAccountMap } from './helpers.mjs' +import * as semver from 'semver' +import { readFile, writeFile } from 'fs/promises' const consensusSidecars = [ 'recordStreamUploader', 'eventStreamUploader', 'backupUploader', 'accountBalanceUploader', 'otelCollector'] @@ -158,6 +160,46 @@ export class ProfileManager { this._setChartItems(`hedera.nodes.${nodeIndex}`, profile.consensus, yamlRoot) } + const stagingDir = Templates.renderStagingDir( + this.configManager.getFlag(flags.cacheDir), + this.configManager.getFlag(flags.releaseTag) + ) + + if (!fs.existsSync(stagingDir)) { + fs.mkdirSync(stagingDir, { recursive: true }) + } + + const configTxtPath = this.prepareConfigTxt( + this.configManager.getFlag(flags.namespace), + accountMap, + stagingDir, + this.configManager.getFlag(flags.releaseTag), + this.configManager.getFlag(flags.app), + this.configManager.getFlag(flags.chainId)) + + for (const flag of flags.nodeConfigFileFlags.values()) { + const filePath = this.configManager.getFlag(flag) + if (!filePath) { + throw new FullstackTestingError(`Configuration file path is missing for: ${flag.name}`) + } + + const fileName = path.basename(filePath) + const destPath = path.join(stagingDir, 'templates', fileName) + this.logger.debug(`Copying configuration file to staging: ${filePath} -> ${destPath}`) + + fs.cpSync(filePath, destPath, { force: true }) + } + + this._setFileContentsAsValue('hedera.configMaps.configTxt', configTxtPath, yamlRoot) + this._setFileContentsAsValue('hedera.configMaps.log4j2Xml', 
path.join(stagingDir, 'templates', 'log4j2.xml'), yamlRoot) + this._setFileContentsAsValue('hedera.configMaps.settingsTxt', path.join(stagingDir, 'templates', 'settings.txt'), yamlRoot) + this._setFileContentsAsValue('hedera.configMaps.applicationProperties', path.join(stagingDir, 'templates', 'application.properties'), yamlRoot) + this._setFileContentsAsValue('hedera.configMaps.apiPermissionsProperties', path.join(stagingDir, 'templates', 'api-permission.properties'), yamlRoot) + this._setFileContentsAsValue('hedera.configMaps.bootstrapProperties', path.join(stagingDir, 'templates', 'bootstrap.properties'), yamlRoot) + if (this.configManager.getFlag(flags.applicationEnv)) { + this._setFileContentsAsValue('hedera.configMaps.applicationEnv', this.configManager.getFlag(flags.applicationEnv), yamlRoot) + } + if (profile.consensus) { // set default for consensus pod this._setChartItems('defaults.root', profile.consensus.root, yamlRoot) @@ -211,10 +253,9 @@ export class ProfileManager { /** * Prepare a values file for FST Helm chart * @param {string} profileName resource profile name - * @param {string} applicationEnvFilePath path to the application.env file * @return {Promise} return the full path to the values file */ - prepareValuesForFstChart (profileName, applicationEnvFilePath = '') { + prepareValuesForFstChart (profileName) { if (!profileName) throw new MissingArgumentError('profileName is required') const profile = this.getProfile(profileName) @@ -228,12 +269,41 @@ export class ProfileManager { this.resourcesForEnvoyProxyPod(profile, yamlRoot) this.resourcesForMinioTenantPod(profile, yamlRoot) - if (applicationEnvFilePath) { - this._setFileContentsAsValue('hedera.configMaps.applicationEnv', applicationEnvFilePath, yamlRoot) + // write the yaml + const cachedValuesFile = path.join(this.cacheDir, `fst-${profileName}.yaml`) + return new Promise((resolve, reject) => { + fs.writeFile(cachedValuesFile, yaml.dump(yamlRoot), (err) => { + if (err) { + reject(err) + } + + resolve(cachedValuesFile) + }) + }) + } + + async bumpHederaConfigVersion (applicationPropertiesPath) { + const lines = (await readFile(applicationPropertiesPath, 'utf-8')).split('\n') + + for (const line of lines) { + if (line.startsWith('hedera.config.version=')) { + const version = parseInt(line.split('=')[1]) + 1 + lines[lines.indexOf(line)] = `hedera.config.version=${version}` + break + } } + await writeFile(applicationPropertiesPath, lines.join('\n')) + } + + async prepareValuesForNodeAdd (configTxtPath, applicationPropertiesPath) { + const yamlRoot = {} + this._setFileContentsAsValue('hedera.configMaps.configTxt', configTxtPath, yamlRoot) + await this.bumpHederaConfigVersion(applicationPropertiesPath) + this._setFileContentsAsValue('hedera.configMaps.applicationProperties', applicationPropertiesPath, yamlRoot) + // write the yaml - const cachedValuesFile = path.join(this.cacheDir, `fst-${profileName}.yaml`) + const cachedValuesFile = path.join(this.cacheDir, 'fst-node-add.yaml') return new Promise((resolve, reject) => { fs.writeFile(cachedValuesFile, yaml.dump(yamlRoot), (err) => { if (err) { @@ -323,4 +393,66 @@ export class ProfileManager { const fileContents = fs.readFileSync(valueFilePath, 'utf8') this._setValue(itemPath, fileContents, yamlRoot) } + + /** + * Prepares config.txt file for the node + * @param {string} namespace namespace where the network is deployed + * @param {Map} nodeAccountMap the map of node IDs to account IDs + * @param {string} destPath path to the destination directory to write the config.txt 
file + * @param {string} releaseTag release tag e.g. v0.42.0 + * @param {string} appName the app name (default: HederaNode.jar) + * @param {string} chainId chain ID (298 for local network) + * @param {string} template path to the config.template file + * @returns {string} the config.txt file path + */ + prepareConfigTxt (namespace, nodeAccountMap, destPath, releaseTag, appName = constants.HEDERA_APP_NAME, chainId = constants.HEDERA_CHAIN_ID, template = path.join(constants.RESOURCES_DIR, 'templates', 'config.template')) { + if (!nodeAccountMap || nodeAccountMap.size === 0) throw new MissingArgumentError('nodeAccountMap the map of node IDs to account IDs is required') + if (!template) throw new MissingArgumentError('config templatePath is required') + if (!releaseTag) throw new MissingArgumentError('release tag is required') + + if (!fs.existsSync(destPath)) throw new IllegalArgumentError(`config destPath does not exist: ${destPath}`, destPath) + if (!fs.existsSync(template)) throw new IllegalArgumentError(`config templatePath does not exist: ${template}`, template) + + // init variables + const internalPort = constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT + const externalPort = constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT + const nodeStakeAmount = constants.HEDERA_NODE_DEFAULT_STAKE_AMOUNT + + const releaseVersion = semver.parse(releaseTag, { includePrerelease: true }) + + try { + /** @type {string[]} */ + const configLines = fs.readFileSync(template, 'utf-8').split('\n') + configLines.push(`swirld, ${chainId}`) + configLines.push(`app, ${appName}`) + + let nodeSeq = 0 + for (const nodeID of nodeAccountMap.keys()) { + const nodeName = nodeID + + const internalIP = Templates.renderFullyQualifiedNetworkPodName(namespace, nodeName) + const externalIP = Templates.renderFullyQualifiedNetworkSvcName(namespace, nodeName) + + const account = nodeAccountMap.get(nodeID) + if (releaseVersion.minor >= 40) { + configLines.push(`address, ${nodeSeq}, ${nodeName}, ${nodeName}, ${nodeStakeAmount}, ${internalIP}, ${internalPort}, ${externalIP}, ${externalPort}, ${account}`) + } else { + configLines.push(`address, ${nodeSeq}, ${nodeName}, ${nodeStakeAmount}, ${internalIP}, ${internalPort}, ${externalIP}, ${externalPort}, ${account}`) + } + + nodeSeq += 1 + } + + if (releaseVersion.minor >= 41) { + configLines.push(`nextNodeId, ${nodeSeq}`) + } + + const configFilePath = path.join(destPath, 'config.txt') + fs.writeFileSync(configFilePath, configLines.join('\n')) + + return configFilePath + } catch (e) { + throw new FullstackTestingError('failed to generate config.txt', e) + } + } } diff --git a/src/core/templates.mjs b/src/core/templates.mjs index 7170dc288..114501f0c 100644 --- a/src/core/templates.mjs +++ b/src/core/templates.mjs @@ -124,14 +124,11 @@ export class Templates { return new x509.Name(`CN=${nodeId},ST=${state},L=${locality},O=${org},OU=${orgUnit},C=${country}`) } - static renderStagingDir (configManager, flags) { - if (!configManager) throw new MissingArgumentError('configManager is required') - const cacheDir = configManager.getFlag(flags.cacheDir) + static renderStagingDir (cacheDir, releaseTag) { if (!cacheDir) { throw new IllegalArgumentError('cacheDir cannot be empty') } - const releaseTag = configManager.getFlag(flags.releaseTag) if (!releaseTag) { throw new IllegalArgumentError('releaseTag cannot be empty') } diff --git a/test/e2e/commands/network.test.mjs b/test/e2e/commands/network.test.mjs index 68e1b95a5..34d171ee0 100644 --- a/test/e2e/commands/network.test.mjs +++ 
diff --git a/test/e2e/commands/network.test.mjs b/test/e2e/commands/network.test.mjs
index 68e1b95a5..34d171ee0 100644
--- a/test/e2e/commands/network.test.mjs
+++ b/test/e2e/commands/network.test.mjs
@@ -66,6 +67,7 @@
 describe('NetworkCommand', () => {
   const networkCmd = bootstrapResp.cmd.networkCmd
   const clusterCmd = bootstrapResp.cmd.clusterCmd
+  const initCmd = bootstrapResp.cmd.initCmd
 
   afterAll(async () => {
     await getNodeLogs(k8, namespace)
@@ -74,6 +75,7 @@
   }, 180000)
 
   beforeAll(async () => {
+    await initCmd.init(argv)
     await clusterCmd.setup(argv)
     fs.mkdirSync(applicationEnvParentDirectory, { recursive: true })
     fs.writeFileSync(applicationEnvFilePath, applicationEnvFileContents)
@@ -92,12 +94,20 @@
       networkCmd.logger.showList('PVCs', pvcs)
 
       expect(networkCmd.getUnusedConfigs(NetworkCommand.DEPLOY_CONFIGS_NAME)).toEqual([
+        flags.apiPermissionProperties.constName,
+        flags.app.constName,
+        flags.applicationEnv.constName,
+        flags.applicationProperties.constName,
+        flags.bootstrapProperties.constName,
+        flags.chainId.constName,
         flags.deployHederaExplorer.constName,
         flags.deployMirrorNode.constName,
         flags.hederaExplorerTlsHostName.constName,
         flags.hederaExplorerTlsLoadBalancerIp.constName,
+        flags.log4j2Xml.constName,
         flags.profileFile.constName,
         flags.profileName.constName,
+        flags.settingTxt.constName,
         flags.tlsClusterIssuerType.constName
       ])
     } catch (e) {
diff --git a/test/e2e/commands/node-add.test.mjs b/test/e2e/commands/node-add.test.mjs
index 1c7715be2..2765de628 100644
--- a/test/e2e/commands/node-add.test.mjs
+++ b/test/e2e/commands/node-add.test.mjs
@@ -15,14 +15,15 @@
  *
  * @jest-environment steps
  */
-import { afterAll, beforeAll, describe, expect, it } from '@jest/globals'
 import { flags } from '../../../src/commands/index.mjs'
 import { constants } from '../../../src/core/index.mjs'
 import {
   accountCreationShouldSucceed,
   balanceQueryShouldSucceed,
   bootstrapNetwork,
-  getDefaultArgv, getNodeIdsPrivateKeysHash, getTestConfigManager, getTmpDir,
+  getDefaultArgv,
+  getNodeIdsPrivateKeysHash,
+  getTmpDir,
   HEDERA_PLATFORM_VERSION_TAG
 } from '../../test_util.js'
 import { getNodeLogs } from '../../../src/core/helpers.mjs'
@@ -43,6 +44,7 @@ describe('Node add', () => {
   argv[flags.chartDirectory.name] = process.env.SOLO_FST_CHARTS_DIR ?
process.env.SOLO_FST_CHARTS_DIR : undefined argv[flags.releaseTag.name] = HEDERA_PLATFORM_VERSION_TAG argv[flags.namespace.name] = namespace + argv[flags.persistentVolumeClaims.name] = true const bootstrapResp = bootstrapNetwork(namespace, argv) const nodeCmd = bootstrapResp.cmd.nodeCmd const accountCmd = bootstrapResp.cmd.accountCmd @@ -50,18 +52,16 @@ describe('Node add', () => { let existingServiceMap let existingNodeIdsPrivateKeysHash - beforeAll(async () => { - const configManager = getTestConfigManager(`${namespace}-solo.config`) - configManager.update(argv, true) - existingServiceMap = await nodeCmd.accountManager.getNodeServiceMap(namespace) - existingNodeIdsPrivateKeysHash = await getNodeIdsPrivateKeysHash(existingServiceMap, namespace, constants.KEY_FORMAT_PEM, k8, getTmpDir()) - }, defaultTimeout) - afterAll(async () => { await getNodeLogs(k8, namespace) await k8.deleteNamespace(namespace) }, 600000) + it('cache current version of private keys', async () => { + existingServiceMap = await nodeCmd.accountManager.getNodeServiceMap(namespace) + existingNodeIdsPrivateKeysHash = await getNodeIdsPrivateKeysHash(existingServiceMap, namespace, constants.KEY_FORMAT_PEM, k8, getTmpDir()) + }, defaultTimeout) + it('should succeed with init command', async () => { const status = await accountCmd.init(argv) expect(status).toBeTruthy() @@ -70,13 +70,9 @@ describe('Node add', () => { it('should add a new node to the network successfully', async () => { await nodeCmd.add(argv) expect(nodeCmd.getUnusedConfigs(NodeCommand.ADD_CONFIGS_NAME)).toEqual([ - flags.apiPermissionProperties.constName, - flags.applicationProperties.constName, - flags.bootstrapProperties.constName, + flags.app.constName, flags.chainId.constName, - flags.devMode.constName, - flags.log4j2Xml.constName, - flags.settingTxt.constName + flags.devMode.constName ]) await nodeCmd.accountManager.close() }, 600000) diff --git a/test/e2e/core/account_manager.test.mjs b/test/e2e/core/account_manager.test.mjs index d7c79ce60..72a5989b4 100644 --- a/test/e2e/core/account_manager.test.mjs +++ b/test/e2e/core/account_manager.test.mjs @@ -14,7 +14,6 @@ * limitations under the License. 
* */ -import { afterAll, describe, expect, it } from '@jest/globals' import { flags } from '../../../src/commands/index.mjs' import { bootstrapNetwork, @@ -43,6 +42,7 @@ describe('AccountManager', () => { }, 180000) it('should be able to stop port forwards', async () => { + await accountManager.close() expect.assertions(4) const localHost = '127.0.0.1' @@ -50,22 +50,22 @@ describe('AccountManager', () => { const podPort = 9090 const localPort = 19090 - expect(accountManager._portForwards.length).toStrictEqual(0) + expect(accountManager._portForwards.length, 'starting accountManager port forwards lengths should be zero').toStrictEqual(0) // ports should be opened accountManager._portForwards.push(await k8.portForward(podName, localPort, podPort)) const status = await k8.testConnection(localHost, localPort) - expect(status).toBeTruthy() + expect(status, 'test connection status should be true').toBeTruthy() // ports should be closed await accountManager.close() try { await k8.testConnection(localHost, localPort) } catch (e) { - expect(e.message.includes(`failed to connect to '${localHost}:${localPort}'`)).toBeTruthy() + expect(e.message.includes(`failed to connect to '${localHost}:${localPort}'`), 'expect failed test connection').toBeTruthy() } - expect(accountManager._portForwards.length).toStrictEqual(0) + expect(accountManager._portForwards.length, 'expect that the closed account manager should have no port forwards').toStrictEqual(0) }) it('should be able to load a new client', async () => { diff --git a/test/e2e/core/chart_manager.test.mjs b/test/e2e/core/chart_manager.test.mjs index 29e33eb8e..bf7af267f 100644 --- a/test/e2e/core/chart_manager.test.mjs +++ b/test/e2e/core/chart_manager.test.mjs @@ -14,31 +14,23 @@ * limitations under the License. 
* */ -import { beforeAll, describe, expect, it } from '@jest/globals' -import { flags } from '../../../src/commands/index.mjs' -import { ChartManager, ConfigManager, Helm, constants } from '../../../src/core/index.mjs' +import { describe, expect, it } from '@jest/globals' +import { ChartManager, Helm, constants } from '../../../src/core/index.mjs' import { testLogger } from '../../test_util.js' describe('ChartManager', () => { const helm = new Helm(testLogger) const chartManager = new ChartManager(helm, testLogger) - const configManager = new ConfigManager(testLogger) - const argv = [] - - beforeAll(() => { - argv[flags.namespace.name] = constants.FULLSTACK_SETUP_NAMESPACE - configManager.update(argv) - }) it('should be able to list installed charts', async () => { - const ns = configManager.getFlag(flags.namespace) + const ns = constants.FULLSTACK_SETUP_NAMESPACE expect(ns).not.toBeNull() const list = await chartManager.getInstalledCharts(ns) expect(list.length).not.toBe(0) }) it('should be able to check if a chart is installed', async () => { - const ns = configManager.getFlag(flags.namespace) + const ns = constants.FULLSTACK_SETUP_NAMESPACE expect(ns).not.toBeNull() const isInstalled = await chartManager.isChartInstalled(ns, constants.FULLSTACK_CLUSTER_SETUP_CHART) expect(isInstalled).toBeTruthy() diff --git a/test/e2e/core/platform_installer_e2e.test.mjs b/test/e2e/core/platform_installer_e2e.test.mjs index adc502573..a54ffe843 100644 --- a/test/e2e/core/platform_installer_e2e.test.mjs +++ b/test/e2e/core/platform_installer_e2e.test.mjs @@ -36,6 +36,8 @@ const defaultTimeout = 20000 describe('PackageInstallerE2E', () => { const namespace = 'pkg-installer-e2e' const argv = getDefaultArgv() + const testCacheDir = getTestCacheDir() + argv[flags.cacheDir.name] = testCacheDir argv[flags.namespace.name] = namespace argv[flags.nodeIDs.name] = 'node0' argv[flags.clusterName.name] = TEST_CLUSTER @@ -47,7 +49,6 @@ describe('PackageInstallerE2E', () => { const accountManager = bootstrapResp.opts.accountManager const configManager = bootstrapResp.opts.configManager const installer = bootstrapResp.opts.platformInstaller - const testCacheDir = getTestCacheDir() const podName = 'network-node0-0' const packageVersion = 'v0.42.5' @@ -98,33 +99,6 @@ describe('PackageInstallerE2E', () => { }, 60000) }) - describe('prepareConfigTxt', () => { - it('should succeed in generating config.txt', async () => { - const tmpDir = getTmpDir() - const configPath = `${tmpDir}/config.txt` - const nodeIDs = ['node0'] - const chainId = '299' - - const configLines = await installer.prepareConfigTxt(nodeIDs, configPath, packageVersion, chainId) - - // verify format is correct - expect(configLines.length).toBe(4) - expect(configLines[0]).toBe(`swirld, ${chainId}`) - expect(configLines[1]).toBe(`app, ${constants.HEDERA_APP_NAME}`) - expect(configLines[2]).toContain(`address, 0, node0, node0, ${constants.HEDERA_NODE_DEFAULT_STAKE_AMOUNT}`) - expect(configLines[3]).toBe('nextNodeId, 1') - - // verify the file exists - expect(fs.existsSync(configPath)).toBeTruthy() - const fileContents = fs.readFileSync(configPath).toString() - - // verify file content matches - expect(fileContents).toBe(configLines.join('\n')) - - fs.rmSync(tmpDir, { recursive: true }) - }, defaultTimeout) - }) - describe('copyGossipKeys', () => { it('should succeed to copy legacy pfx gossip keys for node0', async () => { const podName = 'network-node0-0' @@ -178,30 +152,4 @@ describe('PackageInstallerE2E', () => { fs.rmSync(tmpDir, { recursive: true }) }, 
defaultTimeout) }) - - describe('copyPlatformConfigFiles', () => { - it('should succeed to copy platform config files for node0', async () => { - const podName = 'network-node0-0' - await k8.execContainer(podName, constants.ROOT_CONTAINER, ['bash', '-c', `rm -f ${constants.HEDERA_HAPI_PATH}/*.txt`]) - await k8.execContainer(podName, constants.ROOT_CONTAINER, ['bash', '-c', `rm -f ${constants.HEDERA_HAPI_PATH}/*.xml`]) - await k8.execContainer(podName, constants.ROOT_CONTAINER, ['bash', '-c', `rm -f ${constants.HEDERA_HAPI_PATH}/data/config/*.properties`]) - - const tmpDir = getTmpDir() - const nodeIDs = ['node0'] - const releaseTag = 'v0.42.0' - - fs.cpSync(`${constants.RESOURCES_DIR}/templates`, `${tmpDir}/templates`, { recursive: true }) - await installer.prepareConfigTxt(nodeIDs, `${tmpDir}/config.txt`, releaseTag, constants.HEDERA_CHAIN_ID, `${tmpDir}/templates/config.template`) - - const fileList = await installer.copyPlatformConfigFiles(podName, tmpDir) - expect(fileList.length).toBeGreaterThanOrEqual(6) - expect(fileList).toContain(`${constants.HEDERA_HAPI_PATH}/config.txt`) - expect(fileList).toContain(`${constants.HEDERA_HAPI_PATH}/log4j2.xml`) - expect(fileList).toContain(`${constants.HEDERA_HAPI_PATH}/settings.txt`) - expect(fileList).toContain(`${constants.HEDERA_HAPI_PATH}/data/config/api-permission.properties`) - expect(fileList).toContain(`${constants.HEDERA_HAPI_PATH}/data/config/application.properties`) - expect(fileList).toContain(`${constants.HEDERA_HAPI_PATH}/data/config/bootstrap.properties`) - fs.rmSync(tmpDir, { recursive: true }) - }, defaultTimeout) - }) }) diff --git a/test/test_util.js b/test/test_util.js index 10816f608..ee9dbd4e9 100644 --- a/test/test_util.js +++ b/test/test_util.js @@ -225,12 +225,20 @@ export function bootstrapNetwork (testName, argv, await networkCmd.deploy(argv) expect(networkCmd.getUnusedConfigs(NetworkCommand.DEPLOY_CONFIGS_NAME)).toEqual([ + flags.apiPermissionProperties.constName, + flags.app.constName, + flags.applicationEnv.constName, + flags.applicationProperties.constName, + flags.bootstrapProperties.constName, + flags.chainId.constName, flags.deployHederaExplorer.constName, flags.deployMirrorNode.constName, flags.hederaExplorerTlsHostName.constName, flags.hederaExplorerTlsLoadBalancerIp.constName, + flags.log4j2Xml.constName, flags.profileFile.constName, flags.profileName.constName, + flags.settingTxt.constName, flags.tlsClusterIssuerType.constName ]) }, 180000) @@ -238,18 +246,17 @@ export function bootstrapNetwork (testName, argv, if (startNodes) { it('should succeed with node setup command', async () => { expect.assertions(2) + // cache this, because `solo node setup.finalize()` will reset it to false + const generateGossipKeys = bootstrapResp.opts.configManager.getFlag(flags.generateGossipKeys) try { await expect(nodeCmd.setup(argv)).resolves.toBeTruthy() - expect(nodeCmd.getUnusedConfigs(NodeCommand.SETUP_CONFIGS_NAME)).toEqual([ - flags.apiPermissionProperties.constName, - flags.appConfig.constName, - flags.applicationProperties.constName, - flags.bootstrapProperties.constName, - flags.devMode.constName, - flags.localBuildPath.constName, - flags.log4j2Xml.constName, - flags.settingTxt.constName - ]) + const expectedUnusedConfigs = [] + expectedUnusedConfigs.push(flags.appConfig.constName) + expectedUnusedConfigs.push(flags.devMode.constName) + if (!generateGossipKeys) { + expectedUnusedConfigs.push('curDate') + } + expect(nodeCmd.getUnusedConfigs(NodeCommand.SETUP_CONFIGS_NAME)).toEqual(expectedUnusedConfigs) } catch (e) { 
nodeCmd.logger.showUserError(e) expect(e).toBeNull() diff --git a/test/unit/core/platform_installer.test.mjs b/test/unit/core/platform_installer.test.mjs index 6f2ba75ed..c027f47e7 100644 --- a/test/unit/core/platform_installer.test.mjs +++ b/test/unit/core/platform_installer.test.mjs @@ -103,24 +103,6 @@ describe('PackageInstaller', () => { }) }) - describe('prepareConfigTxt', () => { - it('should fail for missing nodeIDs', async () => { - await expect(installer.prepareConfigTxt([], './test', '0.42.0')).rejects.toThrow(MissingArgumentError) - }) - - it('should fail for missing destPath', async () => { - await expect(installer.prepareConfigTxt(['node0'], '', '0.42.0')).rejects.toThrow(MissingArgumentError) - }) - - it('should fail for missing release tag', async () => { - await expect(installer.prepareConfigTxt(['node0'], `${os.tmpdir()}/config.txt`, '')).rejects.toThrow(MissingArgumentError) - }) - - it('should fail for invalid destPath', async () => { - await expect(installer.prepareConfigTxt(['node0'], '/INVALID/config.txt', '0.42.0')).rejects.toThrow(IllegalArgumentError) - }) - }) - describe('copyGossipKeys', () => { it('should fail for missing podName', async () => { await expect(installer.copyGossipKeys('', os.tmpdir())).rejects.toThrow(MissingArgumentError) diff --git a/test/unit/core/profile_manager.test.mjs b/test/unit/core/profile_manager.test.mjs index a01edabac..b4a6a3c73 100644 --- a/test/unit/core/profile_manager.test.mjs +++ b/test/unit/core/profile_manager.test.mjs @@ -19,8 +19,13 @@ import fs from 'fs' import * as yaml from 'js-yaml' import path from 'path' import { flags } from '../../../src/commands/index.mjs' -import { ConfigManager, ProfileManager } from '../../../src/core/index.mjs' -import { getTmpDir, testLogger } from '../../test_util.js' +import { + ConfigManager, + constants, + ProfileManager +} from '../../../src/core/index.mjs' +import { getTestCacheDir, getTmpDir, testLogger } from '../../test_util.js' +import * as version from '../../../version.mjs' const tmpDir = getTmpDir() const configFile = path.join(tmpDir, 'resource-manager.config') @@ -28,6 +33,14 @@ const configManager = new ConfigManager(testLogger, configFile) const profileManager = new ProfileManager(testLogger, configManager, tmpDir) configManager.setFlag(flags.nodeIDs, 'node0,node1,node3') const testProfileFile = path.join('test', 'data', 'test-profiles.yaml') +configManager.setFlag(flags.cacheDir, getTestCacheDir('ProfileManager')) +configManager.setFlag(flags.releaseTag, version.HEDERA_PLATFORM_VERSION) +const cacheDir = configManager.getFlag(flags.cacheDir) +configManager.setFlag(flags.apiPermissionProperties, path.join(cacheDir, 'templates', 'api-permission.properties')) +configManager.setFlag(flags.applicationProperties, path.join(cacheDir, 'templates', 'application.properties')) +configManager.setFlag(flags.bootstrapProperties, path.join(cacheDir, 'templates', 'bootstrap.properties')) +configManager.setFlag(flags.log4j2Xml, path.join(cacheDir, 'templates', 'log4j2.xml')) +configManager.setFlag(flags.settingTxt, path.join(cacheDir, 'templates', 'settings.txt')) describe('ProfileManager', () => { afterAll(() => { @@ -62,6 +75,20 @@ describe('ProfileManager', () => { ])('determine chart values for a profile', (input) => { it(`should determine FST chart values [profile = ${input.profileName}]`, async () => { configManager.setFlag(flags.profileFile, input.profileFile) + + const resources = ['templates', 'profiles'] + for (const dirName of resources) { + const srcDir = 
path.resolve(path.join(constants.RESOURCES_DIR, dirName)) + if (!fs.existsSync(srcDir)) continue + + const destDir = path.resolve(path.join(cacheDir, dirName)) + if (!fs.existsSync(destDir)) { + fs.mkdirSync(destDir, { recursive: true }) + } + + fs.cpSync(srcDir, destDir, { recursive: true }) + } + profileManager.loadProfiles(true) const valuesFile = await profileManager.prepareValuesForFstChart(input.profileName) expect(valuesFile).not.toBeNull() @@ -93,6 +120,8 @@ describe('ProfileManager', () => { it(`should determine mirror-node chart values [profile = ${input.profileName}]`, async () => { configManager.setFlag(flags.profileFile, input.profileFile) + configManager.setFlag(flags.cacheDir, getTestCacheDir('ProfileManager')) + configManager.setFlag(flags.releaseTag, version.HEDERA_PLATFORM_VERSION) profileManager.loadProfiles(true) const valuesFile = await profileManager.prepareValuesForMirrorNodeChart(input.profileName) expect(fs.existsSync(valuesFile)).toBeTruthy() @@ -126,12 +155,87 @@ describe('ProfileManager', () => { it('prepareValuesForFstChart should set the value of a key to the contents of a file', async () => { configManager.setFlag(flags.profileFile, testProfileFile) + // profileManager.loadProfiles(true) const file = path.join(tmpDir, '_setFileContentsAsValue.txt') const fileContents = '# row 1\n# row 2\n# row 3' fs.writeFileSync(file, fileContents) - const cachedValuesFile = await profileManager.prepareValuesForFstChart('test', file) + configManager.setFlag(flags.applicationEnv, file) + const cachedValuesFile = await profileManager.prepareValuesForFstChart('test') const valuesYaml = yaml.load(fs.readFileSync(cachedValuesFile).toString()) expect(valuesYaml.hedera.configMaps.applicationEnv).toEqual(fileContents) }) + + describe('prepareConfigText', () => { + it('should write and return the path to the config.txt file', () => { + const nodeAccountMap = /** @type {Map} */ new Map() + nodeAccountMap.set('node0', '0.0.3') + nodeAccountMap.set('node1', '0.0.4') + nodeAccountMap.set('node2', '0.0.5') + const destPath = path.join(tmpDir, 'staging') + fs.mkdirSync(destPath, { recursive: true }) + const namespace = 'test-namespace' + profileManager.prepareConfigTxt(namespace, nodeAccountMap, destPath, version.HEDERA_PLATFORM_VERSION) + + // expect that the config.txt file was created and exists + const configFile = path.join(destPath, 'config.txt') + expect(fs.existsSync(configFile)).toBeTruthy() + + const configText = fs.readFileSync(configFile).toString() + + // expect that the config.txt file contains the namespace + expect(configText.includes(namespace)).toBeTruthy() + // expect that the config.txt file contains the node account IDs + expect(configText.includes('0.0.3')).toBeTruthy() + expect(configText.includes('0.0.4')).toBeTruthy() + expect(configText.includes('0.0.5')).toBeTruthy() + // expect the config.txt file to contain the node IDs + expect(configText.includes('node0')).toBeTruthy() + expect(configText.includes('node1')).toBeTruthy() + expect(configText.includes('node2')).toBeTruthy() + }) + + it('should fail when no nodeIDs', () => { + const nodeAccountMap = /** @type {Map} */ new Map() + expect(() => profileManager.prepareConfigTxt('', nodeAccountMap, '', version.HEDERA_PLATFORM_VERSION)).toThrow('nodeAccountMap the map of node IDs to account IDs is required') + }) + + it('should fail when an invalid template path is provided', () => { + const nodeAccountMap = /** @type {Map} */ new Map() + nodeAccountMap.set('node0', '0.0.3') + expect(() => 
profileManager.prepareConfigTxt('', nodeAccountMap, '', version.HEDERA_PLATFORM_VERSION, constants.HEDERA_APP_NAME, constants.HEDERA_CHAIN_ID, '')).toThrow('config templatePath is required') + }) + + it('should fail when no releaseTag is provided', () => { + const nodeAccountMap = /** @type {Map} */ new Map() + nodeAccountMap.set('node0', '0.0.3') + expect(() => profileManager.prepareConfigTxt('', nodeAccountMap, '', undefined)).toThrow('release tag is required') + }) + + it('should fail when destPath does not exist', () => { + expect.assertions(2) + const nodeAccountMap = /** @type {Map} */ new Map() + nodeAccountMap.set('node0', '0.0.3') + const destPath = path.join(tmpDir, 'missing-directory') + try { + profileManager.prepareConfigTxt('', nodeAccountMap, destPath, version.HEDERA_PLATFORM_VERSION) + } catch (e) { + expect(e.message).toContain('config destPath does not exist') + expect(e.message).toContain(destPath) + } + }) + + it('should fail when template path does not exist', () => { + expect.assertions(2) + const nodeAccountMap = /** @type {Map} */ new Map() + nodeAccountMap.set('node0', '0.0.3') + const destPath = path.join(tmpDir, 'staging') + try { + profileManager.prepareConfigTxt('', nodeAccountMap, destPath, version.HEDERA_PLATFORM_VERSION, constants.HEDERA_APP_NAME, constants.HEDERA_CHAIN_ID, 'INVALID') + } catch (e) { + expect(e.message).toContain('config templatePath does not exist') + expect(e.message).toContain('INVALID') + } + }) + }) })
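For reference, the new unit tests only assert on substrings, so it may help to spell out what prepareConfigTxt actually emits. Based on the logic in the profile_manager.mjs hunk (template contents first, one address line per node, and a nextNodeId line for platform minor versions >= 41), the three-node map used in the tests would produce roughly the following; the stake amount, gossip ports and fully-qualified pod/service host names are placeholders resolved from constants and Templates at runtime, and 298 / HederaNode.jar are the documented chain-ID and app-name defaults:

```
# ...lines copied from resources/templates/config.template...
swirld, 298
app, HederaNode.jar
address, 0, node0, node0, <stake>, <node0 pod FQDN>, <internal gossip port>, <node0 svc FQDN>, <external gossip port>, 0.0.3
address, 1, node1, node1, <stake>, <node1 pod FQDN>, <internal gossip port>, <node1 svc FQDN>, <external gossip port>, 0.0.4
address, 2, node2, node2, <stake>, <node2 pod FQDN>, <internal gossip port>, <node2 svc FQDN>, <external gossip port>, 0.0.5
nextNodeId, 3
```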