Skip to content

Commit 3dd7b9f

Browse files
committed
jobs/build: factor out functions to upload OSTree and trigger jobs
Factor out a function to trigger the release job since we'll be calling it from another location in a future patch. Transform the closures for uploading the OSTree and triggering the multi-arch jobs into separate functions. Having them as closures makes it look more complex than it really is. While we're here, consistently wrap the stages inside their gating if conditions rather than the other way around. We already did this for the "Publish" stage but not for the other stages involved here. We could pass fewer variables if we made `basearch` and `newBuildID` script globals, but because they're mutable, I'd like to avoid that to keep it easier to follow. While we're here, update the outdated comment near where we trigger the release job.
1 parent e1fdff8 commit 3dd7b9f

File tree

1 file changed

+66
-66
lines changed

1 file changed

+66
-66
lines changed

jobs/build.Jenkinsfile

Lines changed: 66 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -297,56 +297,10 @@ lock(resource: "build-${params.STREAM}") {
297297
skipSecureBoot: pipecfg.hotfix?.skip_secureboot_tests_hack)
298298
}
299299

300-
// Define a closure for what to do when we want to fork off
301-
// the multi-arch builds.
302-
archive_ostree_and_fork_mArch_jobs = {
303-
// If we are uploading results then let's do an early archive
304-
// of just the OSTree. This has the desired side effect of
305-
// reserving our build ID before we fork off multi-arch builds.
306-
stage('Archive OSTree') {
307-
if (uploading) {
308-
def acl = pipecfg.s3.acl ?: 'public-read'
309-
// run with --force here in case the previous run of the
310-
// pipeline died in between buildupload and bump_builds_json()
311-
pipeutils.shwrapWithAWSBuildUploadCredentials("""
312-
cosa buildupload --force --skip-builds-json --artifact=ostree \
313-
s3 --aws-config-file=\${AWS_BUILD_UPLOAD_CONFIG} \
314-
--acl=${acl} ${s3_stream_dir}/builds
315-
""")
316-
pipeutils.bump_builds_json(
317-
params.STREAM,
318-
newBuildID,
319-
basearch,
320-
s3_stream_dir,
321-
acl)
322-
}
323-
}
324-
325-
stage('Fork Multi-Arch Builds') {
326-
if (uploading) {
327-
for (arch in additional_arches) {
328-
// We pass in FORCE=true here since if we got this far we know
329-
// we want to do a build even if the code tells us that there
330-
// are no apparent changes since the previous commit.
331-
build job: 'build-arch', wait: false, parameters: [
332-
booleanParam(name: 'FORCE', value: true),
333-
booleanParam(name: 'ALLOW_KOLA_UPGRADE_FAILURE', value: params.ALLOW_KOLA_UPGRADE_FAILURE),
334-
string(name: 'SRC_CONFIG_COMMIT', value: src_config_commit),
335-
string(name: 'COREOS_ASSEMBLER_IMAGE', value: cosa_img),
336-
string(name: 'STREAM', value: params.STREAM),
337-
string(name: 'VERSION', value: newBuildID),
338-
string(name: 'ARCH', value: arch),
339-
string(name: 'PIPECFG_HOTFIX_REPO', value: params.PIPECFG_HOTFIX_REPO),
340-
string(name: 'PIPECFG_HOTFIX_REF', value: params.PIPECFG_HOTFIX_REF)
341-
]
342-
}
343-
}
344-
}
345-
}
346-
347300
// If desired let's go ahead and archive+fork the multi-arch jobs
348-
if (params.EARLY_ARCH_JOBS) {
349-
archive_ostree_and_fork_mArch_jobs.call()
301+
if (params.EARLY_ARCH_JOBS && uploading) {
302+
archive_ostree(newBuildID, basearch, s3_stream_dir)
303+
run_multiarch_jobs(additional_arches, src_config_commit, newBuildID, cosa_img)
350304
}
351305

352306
// Build the remaining artifacts
@@ -382,8 +336,9 @@ lock(resource: "build-${params.STREAM}") {
382336

383337
// If we didn't do an early archive and start multi-arch
384338
// jobs let's go ahead and do those pieces now
385-
if (!params.EARLY_ARCH_JOBS) {
386-
archive_ostree_and_fork_mArch_jobs.call()
339+
if (!params.EARLY_ARCH_JOBS && uploading) {
340+
archive_ostree(newBuildID, basearch, s3_stream_dir)
341+
run_multiarch_jobs(additional_arches, src_config_commit, newBuildID, cosa_img)
387342
}
388343

389344
stage('Archive') {
@@ -444,22 +399,8 @@ lock(resource: "build-${params.STREAM}") {
444399

445400
// For now, we auto-release all non-production streams builds. That
446401
// way, we can e.g. test testing-devel AMIs easily.
447-
//
448-
// Since we are only running this stage for non-production (i.e. mechanical
449-
// and development) builds we'll default to not doing AWS AMI replication.
450-
// We'll also default to allowing failures for additional architectures.
451402
if (uploading && stream_info.type != "production") {
452-
stage('Publish') {
453-
build job: 'release', wait: false, parameters: [
454-
string(name: 'STREAM', value: params.STREAM),
455-
string(name: 'ADDITIONAL_ARCHES', value: params.ADDITIONAL_ARCHES),
456-
string(name: 'VERSION', value: newBuildID),
457-
booleanParam(name: 'ALLOW_MISSING_ARCHES', value: true),
458-
booleanParam(name: 'CLOUD_REPLICATION', value: params.CLOUD_REPLICATION),
459-
string(name: 'PIPECFG_HOTFIX_REPO', value: params.PIPECFG_HOTFIX_REPO),
460-
string(name: 'PIPECFG_HOTFIX_REF', value: params.PIPECFG_HOTFIX_REF)
461-
]
462-
}
403+
run_release_job(newBuildID)
463404
}
464405

465406
currentBuild.result = 'SUCCESS'
@@ -502,3 +443,62 @@ lock(resource: "build-${params.STREAM}") {
502443
""")
503444
}
504445
}}}} // finally, cosaPod, timeout, and locks finish here
446+
447+
// This does an early archive of just the OSTree. This has the desired side
448+
// effect of reserving our build ID before we fork off multi-arch builds.
449+
def archive_ostree(version, basearch, s3_stream_dir) {
450+
stage('Archive OSTree') {
451+
def acl = pipecfg.s3.acl ?: 'public-read'
452+
// run with --force here in case the previous run of the
453+
// pipeline died in between buildupload and bump_builds_json()
454+
pipeutils.shwrapWithAWSBuildUploadCredentials("""
455+
cosa buildupload --force --skip-builds-json --artifact=ostree \
456+
s3 --aws-config-file=\${AWS_BUILD_UPLOAD_CONFIG} \
457+
--acl=${acl} ${s3_stream_dir}/builds
458+
""")
459+
pipeutils.bump_builds_json(
460+
params.STREAM,
461+
version,
462+
basearch,
463+
s3_stream_dir,
464+
acl)
465+
}
466+
}
467+
468+
def run_multiarch_jobs(arches, src_commit, version, cosa_img) {
469+
stage('Fork Multi-Arch Builds') {
470+
for (arch in arches) {
471+
// We pass in FORCE=true here since if we got this far we know
472+
// we want to do a build even if the code tells us that there
473+
// are no apparent changes since the previous commit.
474+
build job: 'build-arch', wait: false, parameters: [
475+
booleanParam(name: 'FORCE', value: true),
476+
booleanParam(name: 'ALLOW_KOLA_UPGRADE_FAILURE', value: params.ALLOW_KOLA_UPGRADE_FAILURE),
477+
string(name: 'SRC_CONFIG_COMMIT', value: src_commit),
478+
string(name: 'COREOS_ASSEMBLER_IMAGE', value: cosa_img),
479+
string(name: 'STREAM', value: params.STREAM),
480+
string(name: 'VERSION', value: version),
481+
string(name: 'ARCH', value: arch),
482+
string(name: 'PIPECFG_HOTFIX_REPO', value: params.PIPECFG_HOTFIX_REPO),
483+
string(name: 'PIPECFG_HOTFIX_REF', value: params.PIPECFG_HOTFIX_REF)
484+
]
485+
}
486+
}
487+
}
488+
489+
def run_release_job(buildID) {
490+
stage('Publish') {
491+
// Since we are only running this stage for non-production (i.e.
492+
// mechanical and development) builds we'll default to allowing failures
493+
// for additional architectures.
494+
build job: 'release', wait: false, parameters: [
495+
string(name: 'STREAM', value: params.STREAM),
496+
string(name: 'ADDITIONAL_ARCHES', value: params.ADDITIONAL_ARCHES),
497+
string(name: 'VERSION', value: buildID),
498+
booleanParam(name: 'ALLOW_MISSING_ARCHES', value: true),
499+
booleanParam(name: 'CLOUD_REPLICATION', value: params.CLOUD_REPLICATION),
500+
string(name: 'PIPECFG_HOTFIX_REPO', value: params.PIPECFG_HOTFIX_REPO),
501+
string(name: 'PIPECFG_HOTFIX_REF', value: params.PIPECFG_HOTFIX_REF)
502+
]
503+
}
504+
}

0 commit comments

Comments
 (0)