
Commit 0690cff

draft of node refresh
Signed-off-by: Jeromy Cannon <[email protected]>
1 parent d5d53c5 commit 0690cff

File tree

1 file changed: +236 −5 lines changed


src/commands/node.mjs

Lines changed: 236 additions & 5 deletions
@@ -93,6 +93,7 @@ export class NodeCommand extends BaseCommand {
           break
         }
       } catch (e) {
+        this.logger.error(`error in checking if log file is accessible: ${e.message}`, e)
       } // ignore errors

       await sleep(1000)
@@ -565,20 +566,20 @@ export class NodeCommand extends BaseCommand {
         self.configManager.update(argv)
         await prompts.execute(task, self.configManager, [
           flags.namespace,
-          flags.chartDirectory,
+          flags.chartDirectory, // TODO still needed?
           flags.nodeIDs
         ])

         ctx.config = {
           namespace: self.configManager.getFlag(flags.namespace),
-          chartDir: self.configManager.getFlag(flags.chartDirectory),
-          fstChartVersion: self.configManager.getFlag(flags.fstChartVersion),
+          chartDir: self.configManager.getFlag(flags.chartDirectory), // TODO still needed?
+          fstChartVersion: self.configManager.getFlag(flags.fstChartVersion), // TODO still needed?
           nodeIds: helpers.parseNodeIDs(self.configManager.getFlag(flags.nodeIDs)),
           applicationEnv: self.configManager.getFlag(flags.applicationEnv),
           cacheDir: self.configManager.getFlag(flags.cacheDir)
         }

-        ctx.config.chartPath = await self.prepareChartPath(ctx.config.chartDir,
+        ctx.config.chartPath = await self.prepareChartPath(ctx.config.chartDir, // TODO still needed?
           constants.FULLSTACK_TESTING_CHART, constants.FULLSTACK_DEPLOYMENT_CHART)

         ctx.config.stagingDir = Templates.renderStagingDir(self.configManager, flags)
@@ -587,7 +588,7 @@ export class NodeCommand extends BaseCommand {
           throw new FullstackTestingError(`namespace ${ctx.config.namespace} does not exist`)
         }

-        await self.accountManager.loadNodeClient(ctx.config.namespace)
+        await self.accountManager.loadNodeClient(ctx.config.namespace) // TODO is this still needed?
        }
      },
      {
@@ -875,6 +876,211 @@ export class NodeCommand extends BaseCommand {
     return true
   }

+  async refresh (argv) {
+    const self = this
+
+    const tasks = new Listr([
+      {
+        title: 'Initialize',
+        task: async (ctx, task) => {
+          self.configManager.update(argv)
+          await prompts.execute(task, self.configManager, [
+            flags.namespace,
+            flags.nodeIDs,
+            flags.releaseTag,
+            flags.cacheDir,
+            flags.chainId,
+            flags.keyFormat
+          ]) // TODO verify we need all of these
+
+          const config = {
+            namespace: self.configManager.getFlag(flags.namespace),
+            nodeIds: helpers.parseNodeIDs(self.configManager.getFlag(flags.nodeIDs)),
+            releaseTag: self.configManager.getFlag(flags.releaseTag),
+            cacheDir: self.configManager.getFlag(flags.cacheDir),
+            force: self.configManager.getFlag(flags.force),
+            chainId: self.configManager.getFlag(flags.chainId),
+            applicationEnv: self.configManager.getFlag(flags.applicationEnv),
+            devMode: self.configManager.getFlag(flags.devMode),
+            curDate: new Date()
+          }
+
+          // compute other config parameters
+          // TODO DRY
+          config.releasePrefix = Templates.prepareReleasePrefix(config.releaseTag)
+          config.buildZipFile = `${config.cacheDir}/${config.releasePrefix}/build-${config.releaseTag}.zip`
+          config.keysDir = path.join(config.cacheDir, 'keys')
+          config.stagingDir = Templates.renderStagingDir(self.configManager, flags)
+          config.stagingKeysDir = path.join(config.stagingDir, 'keys')
+
+          if (!await this.k8.hasNamespace(config.namespace)) {
+            throw new FullstackTestingError(`namespace ${config.namespace} does not exist`)
+          }
+
+          // prepare staging keys directory
+          if (!fs.existsSync(config.stagingKeysDir)) {
+            fs.mkdirSync(config.stagingKeysDir, { recursive: true })
+          }
+
+          // create cached keys dir if it does not exist yet
+          if (!fs.existsSync(config.keysDir)) {
+            fs.mkdirSync(config.keysDir)
+          }
+
+          // set config in the context for later tasks to use
+          ctx.config = config
+
+          self.logger.debug('Initialized config', { config })
+        }
+      },
+      {
+        title: 'Identify network pods',
+        task: (ctx, task) => self.taskCheckNetworkNodePods(ctx, task)
+      },
+      { // TODO DRY
+        title: 'Fetch platform software into network nodes',
+        task:
+          async (ctx, task) => {
+            const config = ctx.config
+
+            const subTasks = []
+            for (const nodeId of ctx.config.nodeIds) {
+              const podName = ctx.config.podNames[nodeId]
+              subTasks.push({
+                title: `Node: ${chalk.yellow(nodeId)}`,
+                task: () =>
+                  self.plaformInstaller.fetchPlatform(podName, config.releaseTag)
+              })
+            }
+
+            // set up the sub-tasks
+            return task.newListr(subTasks, {
+              concurrent: true, // since we download in the container directly, we want this to be in parallel across all nodes
+              rendererOptions: {
+                collapseSubtasks: false
+              }
+            })
+          }
+      },
+      { // TODO DRY
+        title: 'Setup network nodes',
+        task: async (ctx, parentTask) => {
+          const config = ctx.config
+
+          const subTasks = []
+          for (const nodeId of config.nodeIds) {
+            const podName = config.podNames[nodeId]
+            subTasks.push({
+              title: `Node: ${chalk.yellow(nodeId)}`,
+              task: () =>
+                self.plaformInstaller.taskInstall(podName, config.buildZipFile, config.stagingDir, config.nodeIds, config.keyFormat, config.force)
+            })
+          }
+
+          // set up the sub-tasks
+          return parentTask.newListr(subTasks, {
+            concurrent: true,
+            rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION
+          })
+        }
+      },
+      {
+        title: 'Finalize',
+        task: (ctx, _) => {
+          // reset flags so that keys are not regenerated later
+          self.configManager.setFlag(flags.generateGossipKeys, false)
+          self.configManager.setFlag(flags.generateTlsKeys, false)
+          self.configManager.persist()
+        }
+      },
+      {
+        title: 'Starting nodes',
+        task: (ctx, task) => {
+          const subTasks = []
+          for (const nodeId of ctx.config.nodeIds) {
+            const podName = ctx.config.podNames[nodeId]
+            subTasks.push({
+              title: `Start node: ${chalk.yellow(nodeId)}`,
+              task: async () => {
+                await self.k8.execContainer(podName, constants.ROOT_CONTAINER, ['bash', '-c', `rm -f ${constants.HEDERA_HAPI_PATH}/logs/*`])
+
+                // copy application.env file if required
+                if (ctx.config.applicationEnv) {
+                  const stagingDir = Templates.renderStagingDir(self.configManager, flags)
+                  const applicationEnvFile = path.join(stagingDir, 'application.env')
+                  fs.cpSync(ctx.config.applicationEnv, applicationEnvFile)
+                  await self.k8.copyTo(podName, constants.ROOT_CONTAINER, applicationEnvFile, `${constants.HEDERA_HAPI_PATH}`)
+                }
+
+                await self.k8.execContainer(podName, constants.ROOT_CONTAINER, ['systemctl', 'restart', 'network-node'])
+              }
+            })
+          }
+
+          // set up the sub-tasks
+          return task.newListr(subTasks, {
+            concurrent: true,
+            rendererOptions: {
+              collapseSubtasks: false,
+              timer: constants.LISTR_DEFAULT_RENDERER_TIMER_OPTION
+            }
+          })
+        }
+      },
+      {
+        title: 'Check nodes are ACTIVE',
+        task: (ctx, task) => {
+          const subTasks = []
+          for (const nodeId of ctx.config.nodeIds) {
+            subTasks.push({
+              title: `Check node: ${chalk.yellow(nodeId)}`,
+              task: () => self.checkNetworkNodeStarted(nodeId)
+            })
+          }
+
+          // set up the sub-tasks
+          return task.newListr(subTasks, {
+            concurrent: false,
+            rendererOptions: {
+              collapseSubtasks: false
+            }
+          })
+        }
+      },
+      {
+        title: 'Check node proxies are ACTIVE',
+        task: async (ctx, parentTask) => {
+          const subTasks = []
+          let localPort = constants.LOCAL_NODE_PROXY_START_PORT
+          for (const nodeId of ctx.config.nodeIds) {
+            subTasks.push({
+              title: `Check proxy for node: ${chalk.yellow(nodeId)}`,
+              task: async () => await self.checkNetworkNodeProxyUp(nodeId, localPort++)
+            })
+          }
+
+          // set up the sub-tasks
+          return parentTask.newListr(subTasks, {
+            concurrent: false,
+            rendererOptions: {
+              collapseSubtasks: false
+            }
+          })
+        }
+      }], {
+      concurrent: false,
+      rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION
+    })
+
+    try {
+      await tasks.run()
+    } catch (e) {
+      throw new FullstackTestingError(`Error in refreshing nodes: ${e.message}`, e)
+    }
+
+    return true
+  }
+
   /**
    * Return Yargs command definition for 'node' command
    * @param nodeCmd an instance of NodeCommand
@@ -981,6 +1187,31 @@ export class NodeCommand extends BaseCommand {
           })
         }
       })
+      .command({
+        command: 'refresh',
+        desc: 'Refresh a node',
+        builder: y => flags.setCommandFlags(y,
+          flags.namespace,
+          flags.nodeIDs,
+          flags.releaseTag,
+          flags.cacheDir,
+          flags.chainId,
+          flags.applicationEnv,
+          flags.keyFormat
+        ),
+        handler: argv => {
+          nodeCmd.logger.debug('==== Running \'node refresh\' ===')
+          nodeCmd.logger.debug(argv)
+
+          nodeCmd.refresh(argv).then(r => {
+            nodeCmd.logger.debug('==== Finished running `node refresh`====')
+            if (!r) process.exit(1)
+          }).catch(err => {
+            nodeCmd.logger.showUserError(err)
+            process.exit(1)
+          })
+        }
+      })
       .demandCommand(1, 'Select a node command')
   }
 }
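
Note: the fetch, setup, and start steps in the new refresh method each hand-build the same per-node Listr sub-task list, which is what the // TODO DRY markers in the diff point at. Below is a minimal sketch of how that fan-out could be factored into a shared helper, assuming listr2's task.newListr API and chalk (both already used in this file); the helper name and signature are hypothetical and not part of this commit.

import chalk from 'chalk'

// Hypothetical helper (not in this commit): build a per-node sub-task list that
// runs fn(nodeId, podName) for every node, mirroring the fan-out repeated in the diff.
function newPerNodeSubTasks (task, config, fn, { concurrent = true } = {}) {
  const subTasks = config.nodeIds.map(nodeId => ({
    title: `Node: ${chalk.yellow(nodeId)}`,
    task: () => fn(nodeId, config.podNames[nodeId])
  }))

  // same renderer options as the hand-rolled versions in the diff
  return task.newListr(subTasks, {
    concurrent,
    rendererOptions: { collapseSubtasks: false }
  })
}

// Example usage for the 'Fetch platform software into network nodes' step:
// task: (ctx, task) => newPerNodeSubTasks(task, ctx.config,
//   (nodeId, podName) => self.plaformInstaller.fetchPlatform(podName, ctx.config.releaseTag))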
