WIP: RHCOS pipeline changes #652

Closed · wants to merge 16 commits · changes from all commits

8 changes: 8 additions & 0 deletions HACKING.md

@@ -546,6 +546,14 @@ value from the `manifests/pipeline.yaml` OpenShift template. This is
 currently as designed (see
 [#65](https://github.com/coreos/fedora-coreos-pipeline/issues/65)).
 
+If you update the Jenkins config in `jenkins/config`, then you need to update
+it in the cluster too:
+
+```
+oc delete configmap/jenkins-casc-cfg
+oc create configmap jenkins-casc-cfg --from-file=jenkins/config
+```
+
 ### Nuking everything
 
 One can leverage Kubernetes labels to delete all objects
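The delete/create pair in the new HACKING.md section leaves a brief window in which the configmap does not exist. A single-step alternative, as a sketch, assuming a logged-in `oc` session and a client new enough for `--dry-run=client`:

```
# Regenerate the configmap manifest locally and apply it in one step;
# same configmap name and source directory as the snippet above.
oc create configmap jenkins-casc-cfg \
    --from-file=jenkins/config \
    --dry-run=client -o yaml | oc apply -f -
```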
138 changes: 118 additions & 20 deletions config.yaml

@@ -1,31 +1,129 @@
 streams:
-  stable:
+  master:
     type: production
-  testing:
+    variants: []
+    branch: master
+    cosa_image: "coreos-assembler:main"
+  "4.11":
     type: production
-  next:
+    branch: release-4.11
+    cosa_image: "coreos-assembler:rhcos-4.11"
+    skip_artifacts:
+      s390x:
+        - ibmcloud
+  "4.10":
     type: production
-  testing-devel:
-    type: development
-    default: true
-  next-devel: # do not touch; line managed by `next-devel/manage.py`
-    type: development # do not touch; line managed by `next-devel/manage.py`
-  rawhide:
-    type: mechanical
-  # branched:
-  #   type: mechanical
-  # bodhi-updates:
-  #   type: mechanical
-  # bodhi-updates-testing:
-  #   type: mechanical
-
-additional_arches: [aarch64, ppc64le, s390x]
+    branch: release-4.10
+    cosa_image: "coreos-assembler:rhcos-4.10"
+    skip_artifacts:
+      aarch64:
+        - azure
+      ppc64le:
+        - powervs
+      s390x:
+        - ibmcloud
+  "4.9":
+    type: production
+    branch: release-4.9
+    cosa_image: "coreos-assembler:rhcos-4.9"
+    skip_artifacts:
+      aarch64:
+        - azure
+      ppc64le:
+        - powervs
+      s390x:
+        - ibmcloud
+      x86_64:
+        - aliyun
+        - nutanix
+  "4.8":
+    type: production
+    branch: release-4.8
+    cosa_image: "coreos-assembler:rhcos-4.8"
+    skip_artifacts:
+      aarch64:
+        - azure
+        - openstack
+      ppc64le:
+        - powervs
+      s390x:
+        - ibmcloud
+      x86_64:
+        - aliyun
+        - azurestack
+        - nutanix
+  "4.7":
+    type: production
+    branch: release-4.7
+    cosa_image: "coreos-assembler:rhcos-4.7"
+    skip_artifacts:
+      ppc64le:
+        - powervs
+      s390x:
+        - ibmcloud
+      x86_64:
+        - aliyun
+        - azurestack
+        - nutanix
+  "4.6":
+    type: production
+    branch: release-4.6
+    cosa_image: "coreos-assembler:rhcos-4.6"
+    skip_artifacts:
+      ppc64le:
+        - powervs
+      s390x:
+        - ibmcloud
+      x86_64:
+        - aliyun
+        - azurestack
+        - ibmcloud
+        - nutanix
+# additional_arches: [aarch64, ppc64le, s390x]
+additional_arches: [ppc64le]
 
 source_config:
-  url: https://github.com/coreos/fedora-coreos-config
+  url: https://github.com/openshift/os
 
+
+default_artifacts:
+  aarch64:
+    - aws
+    - azure
+    - live
+    - metal
+    - metal4k
+    - openstack
+  ppc64le:
+    - live
+    - metal
+    - metal4k
+    - openstack
+    - powervs
+  s390x:
+    - dasd
+    - ibmcloud
+    - live
+    - metal
+    - metal4k
+    - openstack
+  x86_64:
+    - aliyun
+    - aws
+    - azure
+    - azurestack
+    - gcp
+    - ibmcloud
+    - live
+    - nutanix
+    - metal
+    - metal4k
+    - openstack
+    - vmware
+
 # remove to disable S3 uploads
-s3_bucket: fcos-builds
+# s3_bucket: rhcos-ci
+# path: releases/4.11-devel
 
 registry_repos:
   oscontainer: quay.io/fedora/fedora-coreos
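A note on semantics: `skip_artifacts` presumably subtracts from the new top-level `default_artifacts` map on a per-arch basis. Under that assumption, the effective artifact list for one stream can be checked from the shell, for example with mikefarah's yq v4 (both the tool choice and the subtraction semantics are assumptions, not something this PR states):

```
# Hypothetical check: effective x86_64 artifacts for the "4.8" stream,
# computed as default_artifacts minus that stream's skip_artifacts.
yq '.default_artifacts.x86_64[]' config.yaml | sort > /tmp/defaults
yq '.streams."4.8".skip_artifacts.x86_64[]' config.yaml | sort > /tmp/skips
comm -23 /tmp/defaults /tmp/skips   # lines only in defaults = what gets built
```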
2 changes: 1 addition & 1 deletion jenkins/config/seed.yaml

@@ -23,7 +23,7 @@ jobs:
 
         node {
             // XXX: hack, should put this in coreos-ci-lib
-            sh("curl -LO https://raw.githubusercontent.com/coreos/fedora-coreos-pipeline/main/utils.groovy")
+            sh("curl -LO https://raw.githubusercontent.com/travier/fedora-coreos-pipeline/rhcos/utils.groovy")
             def pipeutils = load("utils.groovy")
             def jenkinscfg = pipeutils.load_jenkins_config()
             def url = jenkinscfg["jenkins-jobs-url"]
107 changes: 61 additions & 46 deletions jobs/build.Jenkinsfile

@@ -37,8 +37,9 @@ properties([
     booleanParam(name: 'FORCE',
                  defaultValue: false,
                  description: 'Whether to force a rebuild'),
+    // Never use minimal for now
     booleanParam(name: 'MINIMAL',
-                 defaultValue: (official ? false : true),
+                 defaultValue: false,
                  description: 'Whether to only build the OSTree and qemu images'),
     booleanParam(name: 'ALLOW_KOLA_UPGRADE_FAILURE',
                  defaultValue: false,
@@ -48,7 +49,7 @@ properties([
                  description: 'Force AWS AMI replication for non-production'),
     string(name: 'COREOS_ASSEMBLER_IMAGE',
            description: 'Override coreos-assembler image to use',
-           defaultValue: "coreos-assembler:main",
+           defaultValue: "",
            trim: true),
     booleanParam(name: 'KOLA_RUN_SLEEP',
                  defaultValue: false,
@@ -75,7 +76,7 @@ def strict_build_param = stream_info.type == "mechanical" ? "" : "--strict"
 // here; we can look into making them configurable through the template if
 // developers really need to tweak them (note that in the default minimal devel
 // workflow, only the qemu image is built).
-def cosa_memory_request_mb = 6.5 * 1024 as Integer
+def cosa_memory_request_mb = 10 * 1024 as Integer
 
 // Now that we've established the memory constraint based on xz above, derive
 // kola parallelism from that. We leave 512M for overhead and VMs are 1G each
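The bump from 6.5G to 10G feeds straight into the kola parallelism derived just below this hunk (512M reserved for overhead, 1G per test VM, per the surrounding comments). A back-of-envelope sketch, assuming that formula is unchanged:

```
# Derived kola parallelism at the new memory request (assumed formula:
# subtract 512M of overhead, then divide by 1G per VM).
cosa_memory_request_mb=$((10 * 1024))               # 10240
echo $(( (cosa_memory_request_mb - 512) / 1024 ))   # 9 VMs (was 6 at 6.5G)
```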
@@ -139,7 +140,6 @@ lock(resource: "build-${params.STREAM}") {
                     mkdir -p \$(dirname ${cache_img})
                     ln -s ${cache_img} cache/cache.qcow2
                 """)
-
                 // If the cache img is larger than 7G, then nuke it. Otherwise
                 // it'll just keep growing and we'll hit ENOSPC. It'll get rebuilt.
                 shwrap("""
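The cache-nuking step itself is truncated in this view; the shape of the check is presumably something like the following (a sketch only: the 7G threshold comes from the comment above, and the exact commands are an assumption, not the file's actual code):

```
# Hypothetical sketch of the cache cleanup described above: drop the
# cache image once it exceeds ~7G so it gets rebuilt instead of ENOSPC.
if [ "$(du -m cache/cache.qcow2 | cut -f1)" -gt 7168 ]; then
    rm -f cache/cache.qcow2
fi
```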
@@ -321,55 +321,68 @@
 
             // Kola QEMU tests
             parallelruns['Kola:QEMU'] = {
-                // remove 1 for upgrade test
-                def n = ncpus - 1
-                shwrap("""
-                    cosa kola run --rerun --parallel ${n} --no-test-exit-error --denylist-test basic --tag '!reprovision'
-                    cosa shell -- tar -c --xz tmp/kola/ > kola-run.tar.xz
-                    cosa shell -- cat tmp/kola/reports/report.json > report.json
-                """)
-                archiveArtifacts "kola-run.tar.xz"
-                if (!pipeutils.checkKolaSuccess("report.json")) {
-                    error('Kola:QEMU')
-                }
-                shwrap("""
-                    cosa shell -- rm -rf tmp/kola
-                    cosa kola run --rerun --no-test-exit-error --tag reprovision
-                    cosa shell -- tar -c --xz tmp/kola/ > kola-run-reprovision.tar.xz
-                    cosa shell -- cat tmp/kola/reports/report.json > report.json
-                """)
-                archiveArtifacts "kola-run-reprovision.tar.xz"
-                if (!pipeutils.checkKolaSuccess("report.json")) {
-                    error('Kola:QEMU')
-                }
-            }
-
-            // Kola QEMU Upgrade tests
-            parallelruns['Kola:QEMU Upgrade'] = {
-                // If upgrades are broken `cosa kola --upgrades` might
-                // fail to even find the previous image so we wrap this
-                // in a try/catch so ALLOW_KOLA_UPGRADE_FAILURE can work.
-                try {
-                    shwrap("""
-                        cosa kola --rerun --upgrades --no-test-exit-error
-                        cosa shell -- tar -c --xz tmp/kola-upgrade/ > kola-run-upgrade.tar.xz
-                        cosa shell -- cat tmp/kola-upgrade/reports/report.json > report.json
-                    """)
-                    archiveArtifacts "kola-run-upgrade.tar.xz"
-                    if (!pipeutils.checkKolaSuccess("report.json")) {
-                        error('Kola:QEMU Upgrade')
-                    }
-                } catch(e) {
-                    if (params.ALLOW_KOLA_UPGRADE_FAILURE) {
-                        warnError(message: 'Upgrade Failed') {
-                            error(e.getMessage())
-                        }
-                    } else {
-                        throw e
-                    }
-                }
-            }
+                if (params.STREAM == "master") {
+                    // remove 1 for upgrade test
+                    def n = ncpus - 1
+                    shwrap("""
+                        cosa kola run --rerun --parallel ${n} --no-test-exit-error --denylist-test basic --tag '!reprovision'
+                        cosa shell -- tar -c --xz tmp/kola/ > kola-run.tar.xz
+                        cosa shell -- cat tmp/kola/reports/report.json > report.json
+                    """)
+                    archiveArtifacts "kola-run.tar.xz"
+                    if (!pipeutils.checkKolaSuccess("report.json")) {
+                        error('Kola:QEMU')
+                    }
+                    shwrap("""
+                        cosa shell -- rm -rf tmp/kola
+                        cosa kola run --rerun --no-test-exit-error --tag reprovision
+                        cosa shell -- tar -c --xz tmp/kola/ > kola-run-reprovision.tar.xz
+                        cosa shell -- cat tmp/kola/reports/report.json > report.json
+                    """)
+                    archiveArtifacts "kola-run-reprovision.tar.xz"
+                    if (!pipeutils.checkKolaSuccess("report.json")) {
+                        error('Kola:QEMU')
+                    }
+                } else {
+                    shwrap("""
+                        cosa shell -- tar -c --xz tmp/kola/ > kola-run.tar.xz
+                        cosa shell -- cat tmp/kola/reports/report.json > report.json
+                    """)
+                    archiveArtifacts "kola-run.tar.xz"
+                    if (!pipeutils.checkKolaSuccess("report.json")) {
+                        error('Kola:QEMU')
+                    }
+                }
+            }
+
+            // XXX: hack: need to dig into RHCOS upgrade tests
+            //
+            //// Kola QEMU Upgrade tests
+            //parallelruns['Kola:QEMU Upgrade'] = {
+            //    // If upgrades are broken `cosa kola --upgrades` might
+            //    // fail to even find the previous image so we wrap this
+            //    // in a try/catch so ALLOW_KOLA_UPGRADE_FAILURE can work.
+            //    try {
+            //        shwrap("""
+            //            cosa kola --rerun --upgrades --no-test-exit-error
+            //            cosa shell -- tar -c --xz tmp/kola-upgrade/ > kola-run-upgrade.tar.xz
+            //            cosa shell -- cat tmp/kola-upgrade/reports/report.json > report.json
+            //        """)
+            //        archiveArtifacts "kola-run-upgrade.tar.xz"
+            //        if (!pipeutils.checkKolaSuccess("report.json")) {
+            //            error('Kola:QEMU Upgrade')
+            //        }
+            //    } catch(e) {
+            //        if (params.ALLOW_KOLA_UPGRADE_FAILURE) {
+            //            warnError(message: 'Upgrade Failed') {
+            //                error(e.getMessage())
+            //            }
+            //        } else {
+            //            throw e
+            //        }
+            //    }
+            //}
 
             // process this batch
             parallel parallelruns
 
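Both branches of the reworked `Kola:QEMU` closure gate on `pipeutils.checkKolaSuccess("report.json")`. To eyeball a run by hand from the cosa pod, something along these lines should work (hedged: the `report.json` schema belongs to kola, and the top-level `result` field is an assumption here):

```
# Pull the overall verdict out of kola's report for a quick manual check.
cosa shell -- cat tmp/kola/reports/report.json | jq -r '.result'
```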
@@ -450,7 +463,9 @@
                 shwrap("cosa kola testiso -S --output-dir tmp/kola-testiso-metal")
             }
             parallelruns['metal4k'] = {
-                shwrap("cosa kola testiso -SP --qemu-native-4k --qemu-multipath --output-dir tmp/kola-testiso-metal4k")
+                // just run the iso-install scenario to sanity-check the metal4k media
+                // and also use it to test multipath
+                shwrap("kola testiso -S --qemu-native-4k --qemu-multipath --scenarios iso-install --output-dir tmp/kola-testiso-metal4k")
             }
             parallelruns['uefi'] = {
                 shwrap("cosa shell -- mkdir -p tmp/kola-testiso-uefi")
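For reproducing the metal4k job locally, the same invocation can be run through cosa directly, spelled with the `cosa` prefix that the neighboring testiso jobs use:

```
# Mirror of the metal4k job above: single iso-install scenario on
# 4k-native media, with multipath enabled.
cosa kola testiso -S --qemu-native-4k --qemu-multipath \
    --scenarios iso-install --output-dir tmp/kola-testiso-metal4k
```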
40 changes: 20 additions & 20 deletions manifests/jenkins.yaml

@@ -27,18 +27,18 @@ objects:
       to:
         kind: Service
         name: ${JENKINS_SERVICE_NAME}
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    metadata:
-      name: ${JENKINS_SERVICE_NAME}
-    spec:
-      accessModes:
-        - ReadWriteOnce
-      resources:
-        requests:
-          storage: ${VOLUME_CAPACITY}
-      # DELTA: support specifying storage class
-      storageClassName: "${STORAGE_CLASS_NAME}"
+  # - apiVersion: v1
+  #   kind: PersistentVolumeClaim
+  #   metadata:
+  #     name: ${JENKINS_SERVICE_NAME}
+  #   spec:
+  #     accessModes:
+  #       - ReadWriteOnce
+  #     resources:
+  #       requests:
+  #         storage: ${VOLUME_CAPACITY}
+  #     # DELTA: support specifying storage class
+  #     storageClassName: "${STORAGE_CLASS_NAME}"
   - apiVersion: v1
     kind: DeploymentConfig
     metadata:
@@ -128,20 +128,20 @@
             privileged: false
           terminationMessagePath: /dev/termination-log
           volumeMounts:
-          - mountPath: /var/lib/jenkins
-            name: ${JENKINS_SERVICE_NAME}-data
-          # DELTA: mount c-as-c config map
+          # - mountPath: /var/lib/jenkins
+          #   name: ${JENKINS_SERVICE_NAME}-data
+          # # DELTA: mount c-as-c config map
           - name: ${JENKINS_SERVICE_NAME}-casc-cfg
            mountPath: /var/lib/jenkins/configuration-as-code
            readOnly: true
         dnsPolicy: ClusterFirst
         restartPolicy: Always
         serviceAccountName: ${JENKINS_SERVICE_NAME}
-        volumes:
-        - name: ${JENKINS_SERVICE_NAME}-data
-          persistentVolumeClaim:
-            claimName: ${JENKINS_SERVICE_NAME}
-        # DELTA: add a configmap -- it's defined in pipeline.yaml
+        # volumes:
+        # - name: ${JENKINS_SERVICE_NAME}-data
+        #   persistentVolumeClaim:
+        #     claimName: ${JENKINS_SERVICE_NAME}
+        # # DELTA: add a configmap -- it's defined in pipeline.yaml
         - name: ${JENKINS_SERVICE_NAME}-casc-cfg
           configMap:
             name: jenkins-casc-cfg
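With the PVC and its data volume commented out, Jenkins state becomes ephemeral across pod restarts and only the configuration-as-code configmap remains mounted. A quick smoke test that the mount landed (assuming `JENKINS_SERVICE_NAME` kept its default of `jenkins`):

```
# Confirm the casc configmap is visible inside the running Jenkins pod;
# "dc/jenkins" is an assumption based on the template defaults.
oc rsh dc/jenkins ls /var/lib/jenkins/configuration-as-code
```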