diff --git a/cmd/coreos-assembler.go b/cmd/coreos-assembler.go
index d7f14a7976..2824864fa5 100644
--- a/cmd/coreos-assembler.go
+++ b/cmd/coreos-assembler.go
@@ -13,8 +13,8 @@ import (
 // commands we'd expect to use in the local dev path
 var buildCommands = []string{"init", "fetch", "build", "run", "prune", "clean", "list"}
-var advancedBuildCommands = []string{"buildfetch", "buildupload", "oc-adm-release", "push-container", "upload-oscontainer", "buildextend-extensions"}
-var buildextendCommands = []string{"aliyun", "applehv", "aws", "azure", "digitalocean", "exoscale", "extensions", "extensions-container", "gcp", "hashlist-experimental", "hyperv", "ibmcloud", "kubevirt", "legacy-oscontainer", "live", "metal", "metal4k", "nutanix", "openstack", "qemu", "secex", "virtualbox", "vmware", "vultr"}
+var advancedBuildCommands = []string{"buildfetch", "buildupload", "oc-adm-release", "push-container"}
+var buildextendCommands = []string{"aliyun", "applehv", "aws", "azure", "digitalocean", "exoscale", "extensions-container", "gcp", "hashlist-experimental", "hyperv", "ibmcloud", "kubevirt", "live", "metal", "metal4k", "nutanix", "openstack", "qemu", "secex", "virtualbox", "vmware", "vultr"}
 var utilityCommands = []string{"aws-replicate", "compress", "copy-container", "koji-upload", "kola", "push-container-manifest", "remote-build-container", "remote-prune", "remote-session", "sign", "tag", "update-variant"}
 var otherCommands = []string{"shell", "meta"}
diff --git a/docs/cosa.md b/docs/cosa.md
index 8cabf8587e..c61ecc00f9 100644
--- a/docs/cosa.md
+++ b/docs/cosa.md
@@ -46,7 +46,6 @@ other platforms or cloud providers:
 | [buildextend-{aliyun,aws,azure,digitalocean,exoscale,gcp,vultr}](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-ore-wrapper) | Generate artifacts for the given platforms
 | [buildextend-{azurestack,ibmcloud,openstack,vmware}](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-artifact-disk) | Generate artifacts for the given platforms
 | [{aliyun,aws}-replicate](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-ore-wrapper) | Replicate images on the platforms (AMIs for AWS)
-| [buildextend-legacy-oscontainer](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-buildextend-legacy-oscontainer) | Create an oscontainer in legacy format (i.e. not OSTree-native)
 
 ## Misc commands
 
@@ -71,4 +70,3 @@ Those less commonly used commands are listed here:
 | [supermin-shell](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-supermin-shell) | Get a supermin shell
 | [tag](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-tag) | Operate on the tags in `builds.json`
 | [test-coreos-installer](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-test-coreos-installer) | Automate an end-to-end run of coreos-installer with the metal image
-| [upload-oscontainer](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-upload-oscontainer) | Upload an oscontainer (historical wrapper for `cosa oscontainer`)
diff --git a/src/buildextend-legacy-oscontainer.py b/src/buildextend-legacy-oscontainer.py
deleted file mode 100755
index 92d28c8ee3..0000000000
--- a/src/buildextend-legacy-oscontainer.py
+++ /dev/null
@@ -1,308 +0,0 @@
-#!/usr/bin/env python3
-# NOTE: PYTHONUNBUFFERED is set in the entrypoint for unbuffered output
-#
-# An "oscontainer" is an ostree (archive) repository stuck inside
-# a Docker/OCI container at /srv/repo.
For more information, -# see https://github.com/openshift/pivot -# -# This command manipulates those images. - -# This file was forked from src/oscontainer-deprecated-legacy-format.py and is -# now only used to build the legacy oscontainer via supermin. - -import gi - -gi.require_version('OSTree', '1.0') -gi.require_version('RpmOstree', '1.0') - -from gi.repository import GLib, Gio, OSTree, RpmOstree - -import argparse -import json -import os -import shutil -import subprocess -from cosalib import cmdlib -from cosalib.buildah import ( - buildah_base_args -) - -OSCONTAINER_COMMIT_LABEL = 'com.coreos.ostree-commit' - - -def run_get_json(args): - return json.loads(subprocess.check_output(args)) - - -def run_get_string(args): - return subprocess.check_output(args, encoding='UTF-8').strip() - - -def find_commit_from_oscontainer(repo): - """Given an ostree repo, find exactly one commit object in it""" - o = subprocess.check_output(['find', repo + '/objects', '-name', '*.commit'], encoding='UTF-8').strip().split('\n') - if len(o) > 1: - raise SystemExit(f"Multiple commit objects found in {repo}") - d, n = os.path.split(o[0]) - return os.path.basename(d) + n.split('.')[0] - - -# Given a container reference, pull the latest version, then extract the ostree -# repo a new directory dest/repo. -def oscontainer_extract(containers_storage, tmpdir, src, dest, - tls_verify=True, ref=None, cert_dir="", - authfile=""): - dest = os.path.realpath(dest) - subprocess.check_call(["ostree", "--repo=" + dest, "refs"]) - - # FIXME: Today we use skopeo in a hacky way for this. What we - # really want is the equivalent of `oc image extract` as part of - # podman or skopeo. - cmd = ['skopeo'] - # See similar message in oscontainer_build. - if tmpdir is not None: - os.environ['TMPDIR'] = tmpdir - - if not tls_verify: - cmd.append('--tls-verify=false') - - if authfile != "": - cmd.append("--authfile={}".format(authfile)) - if cert_dir != "": - cmd.append("--cert-dir={}".format(cert_dir)) - tmp_tarball = tmpdir + '/container.tar' - cmd += ['copy', "docker://" + src, 'docker-archive://' + tmp_tarball] - cmdlib.runcmd(cmd) - cmdlib.runcmd(['tar', 'xf', tmp_tarball], cwd=tmpdir) - os.unlink(tmp_tarball) - # This is a brutal hack to extract all the layers; we don't even bother with ordering - # because we know we're not removing anything in higher layers. - subprocess.check_call(['find', '-name', '*.tar', '-exec', 'tar', 'xUf', '{}', ';'], cwd=tmpdir) - # Some files/directories aren't writable, and this will cause permission errors - subprocess.check_call(['find', '!', '-perm', '-u+w', '-exec', 'chmod', 'u+w', '{}', ';'], cwd=tmpdir) - - repo = tmpdir + '/srv/repo' - commit = find_commit_from_oscontainer(repo) - print(f"commit: {commit}") - cmdlib.runcmd(["ostree", "--repo=" + dest, "pull-local", repo, commit]) - if ref is not None: - cmdlib.runcmd([ - "ostree", "--repo=" + dest, "refs", '--create=' + ref, commit]) - - -# Given an OSTree repository at src (and exactly one ref) generate an -# oscontainer with it. 
-def oscontainer_build(containers_storage, tmpdir, src, ref, image_name_and_tag, - base_image, tls_verify=True, pushformat=None, - add_directories=[], cert_dir="", authfile="", - output=None, display_name=None, labeled_pkgs=[]): - r = OSTree.Repo.new(Gio.File.new_for_path(src)) - r.open(None) - - [_, rev] = r.resolve_rev(ref, True) - if ref != rev: - print("Resolved {} = {}".format(ref, rev)) - [_, ostree_commit, _] = r.load_commit(rev) - ostree_commitmeta = ostree_commit.get_child_value(0) - versionv = ostree_commitmeta.lookup_value( - "version", GLib.VariantType.new("s")) - if versionv: - ostree_version = versionv.get_string() - else: - ostree_version = None - - buildah_base_argv = buildah_base_args(None) - - # In general, we just stick with the default tmpdir set up. But if a - # workdir is provided, then we want to be sure that all the heavy I/O work - # that happens stays in there since e.g. we might be inside a tiny supermin - # appliance. - if tmpdir is not None: - os.environ['TMPDIR'] = tmpdir - - bid = run_get_string(buildah_base_argv + ['from', base_image]) - mnt = run_get_string(buildah_base_argv + ['mount', bid]) - try: - dest_repo = os.path.join(mnt, 'srv/repo') - subprocess.check_call(['mkdir', '-p', dest_repo]) - subprocess.check_call([ - "ostree", "--repo=" + dest_repo, "init", "--mode=archive"]) - # Note that oscontainers don't have refs; we also disable fsync - # because the repo will be put into a container image and the build - # process should handle its own fsync (or choose not to). - print("Copying ostree commit into container: {} ...".format(rev)) - cmdlib.runcmd(["ostree", "--repo=" + dest_repo, "pull-local", "--disable-fsync", src, rev]) - - for d in add_directories: - with os.scandir(d) as it: - for entry in it: - dest = os.path.join(mnt, entry.name) - subprocess.check_call(['/usr/lib/coreos-assembler/cp-reflink', entry.path, dest]) - print(f"Copied in content from: {d}") - - # We use /noentry to trick `podman create` into not erroring out - # on a container with no cmd/entrypoint. It won't actually be run. 
- config = ['--entrypoint', '["/noentry"]', - '-l', OSCONTAINER_COMMIT_LABEL + '=' + rev] - if ostree_version is not None: - config += ['-l', 'version=' + ostree_version] - - base_pkgs = RpmOstree.db_query_all(r, rev, None) - for pkg in base_pkgs: - name = pkg.get_name() - if name in labeled_pkgs: - config += ['-l', f"com.coreos.rpm.{name}={pkg.get_evr()}.{pkg.get_arch()}"] - - # Generate pkglist.txt in to the oscontainer at / - pkg_list_dest = os.path.join(mnt, 'pkglist.txt') - # should already be sorted, but just re-sort to be sure - nevras = sorted([pkg.get_nevra() for pkg in base_pkgs]) - with open(pkg_list_dest, 'w') as f: - for nevra in nevras: - f.write(nevra) - f.write('\n') - - meta = {} - builddir = None - if os.path.isfile('builds/builds.json'): - with open('builds/builds.json') as fb: - builds = json.load(fb)['builds'] - latest_build = builds[0]['id'] - arch = cmdlib.get_basearch() - builddir = f"builds/{latest_build}/{arch}" - metapath = f"{builddir}/meta.json" - with open(metapath) as f: - meta = json.load(f) - rhcos_commit = meta['coreos-assembler.container-config-git']['commit'] - imagegit = meta.get('coreos-assembler.container-image-git') - if imagegit is not None: - cosa_commit = imagegit['commit'] - config += ['-l', f"com.coreos.coreos-assembler-commit={cosa_commit}"] - config += ['-l', f"com.coreos.redhat-coreos-commit={rhcos_commit}"] - - if 'extensions' in meta: - tarball = os.path.abspath(os.path.join(builddir, meta['extensions']['path'])) - dest_dir = os.path.join(mnt, 'extensions') - os.makedirs(dest_dir, exist_ok=True) - cmdlib.runcmd(["tar", "-xf", tarball, "--no-same-owner"], cwd=dest_dir) - - with open(os.path.join(dest_dir, 'extensions.json')) as f: - extensions = json.load(f) - - extensions_label = ';'.join([ext for (ext, obj) in extensions['extensions'].items() - if obj.get('kind', 'os-extension') == 'os-extension']) - config += ['-l', f"com.coreos.os-extensions={extensions_label}"] - - for pkgname in meta['extensions']['manifest']: - if pkgname in labeled_pkgs: - evra = meta['extensions']['manifest'][pkgname] - config += ['-l', f"com.coreos.rpm.{pkgname}={evra}"] - - if display_name is not None: - config += ['-l', 'io.openshift.build.version-display-names=machine-os=' + display_name, - '-l', 'io.openshift.build.versions=machine-os=' + ostree_version] - cmdlib.runcmd(buildah_base_argv + ['config'] + config + [bid]) - print("Committing container...") - iid = run_get_string(buildah_base_argv + ['commit', bid, image_name_and_tag]) - print("{} {}".format(image_name_and_tag, iid)) - finally: - subprocess.call(buildah_base_argv + ['umount', bid], stdout=subprocess.DEVNULL) - subprocess.call(buildah_base_argv + ['rm', bid], stdout=subprocess.DEVNULL) - - print("Saving container to oci-archive") - podCmd = buildah_base_argv + ['push'] - - if pushformat is not None: - podCmd.append(f'--format={pushformat}') - - # Historically upload-oscontainer would require --name which was in our - # pipeline a repository URL. Going forward buildextend-legacy-oscontainer - # just creates an oci-archive and a url is not a valid name/tag combination. 
- if '/' in image_name_and_tag: - image_name_and_tag = image_name_and_tag.rsplit('/', 1)[1] - - podCmd.append(image_name_and_tag) - - podCmd.append(f'oci-archive:{output}') - - cmdlib.runcmd(podCmd) - - -def main(): - # Parse args and dispatch - parser = argparse.ArgumentParser() - parser.add_argument("--workdir", help="Temporary working directory") - parser.add_argument("--disable-tls-verify", - help="Disable TLS for pushes and pulls", - default=(True if os.environ.get("DISABLE_TLS_VERIFICATION", False) else False), - action="store_true") - parser.add_argument("--cert-dir", help="Extra certificate directories", - default=os.environ.get("OSCONTAINER_CERT_DIR", '')) - parser.add_argument("--authfile", help="Path to authentication file", - action="store", - default=os.environ.get("REGISTRY_AUTH_FILE", '')) - subparsers = parser.add_subparsers(dest='action') - parser_extract = subparsers.add_parser( - 'extract', help='Extract an oscontainer') - parser_extract.add_argument("src", help="Image reference") - parser_extract.add_argument("dest", help="Destination directory") - parser_extract.add_argument("--ref", help="Also set an ostree ref") - parser_build = subparsers.add_parser('build', help='Build an oscontainer') - parser_build.add_argument( - "--from", - help="Base image (default 'scratch')", - default='scratch') - parser_build.add_argument("src", help="OSTree repository") - parser_build.add_argument("rev", help="OSTree ref (or revision)") - parser_build.add_argument("name", help="Image name") - parser_build.add_argument("--display-name", help="Name used for an OpenShift component") - parser_build.add_argument("--add-directory", help="Copy in all content from referenced directory DIR", - metavar='DIR', action='append', default=[]) - parser_build.add_argument("--labeled-packages", help="Packages whose NEVRAs are included as labels on the image") - # For now we forcibly override to v2s2 https://bugzilla.redhat.com/show_bug.cgi?id=2058421 - parser_build.add_argument("--format", help="Pass through push format to buildah", default="v2s2") - parser.add_argument("--output", required=True, help="Write image as OCI archive to file") - args = parser.parse_args() - - labeled_pkgs = [] - if args.labeled_packages is not None: - labeled_pkgs = args.labeled_packages.split() - - containers_storage = None - tmpdir = None - if args.workdir is not None: - containers_storage = os.path.join(args.workdir, 'containers-storage') - if os.path.exists(containers_storage): - shutil.rmtree(containers_storage) - tmpdir = os.path.join(args.workdir, 'tmp') - if os.path.exists(tmpdir): - shutil.rmtree(tmpdir) - os.makedirs(tmpdir) - - try: - if args.action == 'extract': - oscontainer_extract( - containers_storage, tmpdir, args.src, args.dest, - tls_verify=not args.disable_tls_verify, - cert_dir=args.cert_dir, - ref=args.ref, - authfile=args.authfile) - elif args.action == 'build': - oscontainer_build( - containers_storage, tmpdir, args.src, args.rev, args.name, - getattr(args, 'from'), - display_name=args.display_name, - output=args.output, - add_directories=args.add_directory, - pushformat=args.format, - tls_verify=not args.disable_tls_verify, - cert_dir=args.cert_dir, - authfile=args.authfile, - labeled_pkgs=labeled_pkgs) - finally: - if containers_storage is not None and os.path.isdir(containers_storage): - shutil.rmtree(containers_storage) - - -if __name__ == '__main__': - main() diff --git a/src/buildextend-legacy-oscontainer.sh b/src/buildextend-legacy-oscontainer.sh deleted file mode 100755 index 
a1822cc898..0000000000 --- a/src/buildextend-legacy-oscontainer.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail -# shellcheck source=src/cmdlib.sh -. /usr/lib/coreos-assembler/cmdlib.sh - -# Start VM and call buildah -final_outfile=$(realpath "$1"); shift -IMAGE_TYPE=legacy-oscontainer -prepare_build -tmp_outfile=${tmp_builddir}/legacy-oscontainer.ociarchive -runvm -- /usr/lib/coreos-assembler/buildextend-legacy-oscontainer.py \ - --output "${tmp_outfile}" "$@" -/usr/lib/coreos-assembler/finalize-artifact "${tmp_outfile}" "${final_outfile}" diff --git a/src/cmd-buildextend-extensions b/src/cmd-buildextend-extensions deleted file mode 100755 index 8764a1fb83..0000000000 --- a/src/cmd-buildextend-extensions +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/python3 -u - -import argparse -import json -import os -import shutil -import sys - -import createrepo_c as cr - -sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) - -from cosalib import cmdlib -from cosalib.builds import Builds -from cosalib.meta import GenericBuildMeta - - -def main(): - args = parse_args() - - workdir = os.path.abspath(os.getcwd()) - - builds = Builds() - builddir = builds.get_build_dir(args.build) - buildmeta = GenericBuildMeta(workdir=workdir, build=args.build) - - if 'extensions' in buildmeta and not args.force: - print(f"Extensions already exist: {buildmeta['extensions']['path']}") - print("Use --force to force a rebuild") - return - - init_config = 'src/config.json' - if os.path.exists(init_config): - with open(init_config, encoding='utf-8') as f: - init_cfg = json.loads(f.read()) - variant = init_cfg["coreos-assembler.config-variant"] - treefile_src = f"src/config/manifest-{variant}.yaml" - extensions_src = f"src/config/extensions-{variant}.yaml" - else: - treefile_src = 'src/config/manifest.yaml' - extensions_src = 'src/config/extensions.yaml' - - if not os.path.exists(extensions_src): - raise Exception(f"Missing {extensions_src}") - - commit = buildmeta['ostree-commit'] - cmdlib.import_ostree_commit(workdir, builddir, buildmeta) - - tmpworkdir = prepare_tmpworkdir() - changed = run_rpmostree(tmpworkdir, commit, treefile_src, extensions_src) - if not changed: - # For now, rpm-ostree will always detect a change because we don't seed - # state from the previous build, so we won't hit this. Need to rework - # how change detection is wired in `cmd-build` to do this properly. 
- return - - outputdir = f"{tmpworkdir}/output" - with open(f'{outputdir}/.rpm-ostree-state-chksum', encoding='utf-8') as f: - rpm_ostree_state_chksum = f.read() - - pkglist = create_yumrepo(outputdir) - extensions_tarball = create_tarball(buildmeta, outputdir, tmpworkdir) - extensions_tarball_base = os.path.basename(extensions_tarball) - - buildmeta['extensions'] = { - "path": extensions_tarball_base, - "sha256": cmdlib.sha256sum_file(extensions_tarball), - "rpm-ostree-state": rpm_ostree_state_chksum, - "manifest": pkglist, - } - - cmdlib.rm_allow_noent(f'{builddir}/{extensions_tarball_base}') - shutil.move(extensions_tarball, builddir) - buildmeta.write(artifact_name='extensions') - - shutil.rmtree(tmpworkdir) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("--build", help="Build ID", default='latest') - parser.add_argument("--force", help="Force rebuild", action='store_true') - return parser.parse_args() - - -def prepare_tmpworkdir(): - tmpworkdir = 'tmp/extensions' - if os.path.exists(tmpworkdir): - shutil.rmtree(tmpworkdir) - os.mkdir(tmpworkdir) - configdir = 'src/config' - for f in os.listdir(configdir): - if os.path.isfile(f"{configdir}/{f}") and f.endswith('.repo'): - shutil.copyfile(f"{configdir}/{f}", f"{tmpworkdir}/{f}") - yumreposdir = 'src/yumrepos' - if os.path.exists(yumreposdir): - for f in os.listdir(yumreposdir): - if os.path.isfile(f"{yumreposdir}/{f}") and f.endswith('.repo'): - shutil.copyfile(f"{yumreposdir}/{f}", f"{tmpworkdir}/{f}") - return tmpworkdir - - -def run_rpmostree(workdir, commit, treefile, extensions): - cmdlib.cmdlib_sh(f''' - cat > "{workdir}/manifest-override.yaml" < 0: - epoch = f'{pkg.epoch}:' - pkglist[pkg.name] = f'{epoch}{pkg.version}-{pkg.release}.{pkg.arch}' - - for record in repomd.records: - if record.type == 'primary': - primary_xml = os.path.join(repodir, record.location_href) - cr.xml_parse_primary(primary_xml, do_files=False, pkgcb=cb) - break - - if len(pkglist) == 0: - raise Exception("No RPMs found in output dir") - return pkglist - - -def create_tarball(buildmeta, srcdir, destdir): - destdir = os.path.abspath(destdir) - basearch = buildmeta['coreos-assembler.basearch'] - tarfile = f'{destdir}/{buildmeta["name"]}-{buildmeta["buildid"]}-extensions.{basearch}.tar' - cmdlib.runcmd(['tar', '-cf', tarfile, '.'], cwd=srcdir) - return tarfile - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/src/cmd-buildextend-legacy-oscontainer b/src/cmd-buildextend-legacy-oscontainer deleted file mode 100755 index c59921a6b8..0000000000 --- a/src/cmd-buildextend-legacy-oscontainer +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/python3 -u -# Upload an oscontainer. This is a wrapper for -# `cosa oscontainer` that just for historical reasons -# used to live downstream in the redhat-coreos pipeline. -# In the future we should just have one `cosa oscontainer` -# command. 
- -import argparse -import yaml -import os -import shutil -import subprocess -import sys -from cosalib.cmdlib import sha256sum_file - -cosa_dir = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, cosa_dir) - -from cosalib import cmdlib -from cosalib.builds import Builds -from cosalib.meta import GenericBuildMeta - - -parser = argparse.ArgumentParser() -parser.add_argument("--arch-tag", help="append arch name to push tag", - action='store_true') -parser.add_argument("--from", help="Base image", default='scratch', - dest='from_image') -parser.add_argument("--format", help="Format to use for push") -parser.add_argument("--build", help="Build ID") -parser.add_argument("--add-directory", help="Copy in all content from referenced directory DIR", - metavar='DIR', action='append', default=[]) - -args = parser.parse_args() - -builds = Builds() -if not args.build: - args.build = builds.get_latest() -arch = cmdlib.get_basearch() -build_path = f"builds/{args.build}/{arch}" - -meta = GenericBuildMeta(build=args.build) - -name = meta['name'] + '-' + meta['buildid'] + '-legacy-oscontainer.' + arch + '.ociarchive' - -# for backcompat, we auto-build extensions if they're missing -if os.path.exists('src/config/extensions.yaml'): - if 'extensions' not in meta: - cmdlib.runcmd(['coreos-assembler', 'buildextend-extensions']) - meta = GenericBuildMeta(build=args.build) - assert 'extensions' in meta - -configdir = os.path.abspath('src/config') -oscconfigpath = f'{configdir}/oscontainer.yaml' -# XXX: fold oscontainer.yaml handling into oscontainer.py -configyaml = {} -if os.path.exists(oscconfigpath): - with open(oscconfigpath) as f: - configyaml = yaml.safe_load(f) - -if 'base' in configyaml: - args.from_image = configyaml['base'] - -print(f"Building legacy-oscontainer for build {args.build}") -ostree_commit = meta['ostree-commit'] - -tmprepo = "{}/tmp/repo".format(os.getcwd()) -# if tmprepo is not a directory, but is unexpectedly a file, -# just nuke it -if not os.path.isdir(tmprepo) and os.path.exists(tmprepo): - os.remove(tmprepo) - -# if tmprepo is not a directory and not a file, recreate from -# the tarfile -if not os.path.exists(tmprepo): - os.makedirs(tmprepo, exist_ok=True) - ostree_commit_tar = meta['images']['ostree']['path'] - subprocess.check_call(['tar', '-xf', - f'{build_path}/{ostree_commit_tar}', - '-C', tmprepo]) - -tmp_osreleasedir = 'tmp/usrlib-osrelease' -subprocess.check_call(['rm', '-rf', tmp_osreleasedir]) -cmdlib.runcmd(['/usr/bin/ostree', 'checkout', '--repo', tmprepo, - '--user-mode', '--subpath=/usr/lib/os-release', ostree_commit, - tmp_osreleasedir]) -display_name = None -with open(os.path.join(tmp_osreleasedir, "os-release")) as f: - display_name = subprocess.check_output(['/bin/sh', '-c', 'set -euo pipefail; . /proc/self/fd/0 && echo $NAME'], stdin=f, encoding='UTF-8').strip() -if display_name == "": - raise SystemExit(f"Failed to find NAME= in /usr/lib/os-release in commit {ostree_commit}") -shutil.rmtree(tmp_osreleasedir) - -osc_name_and_tag = f"{name}:{args.build}" -if args.arch_tag: - arch = meta.get("coreos-assembler.basearch", cmdlib.get_basearch) - osc_name_and_tag = f"{name}:{args.build}-{arch}" - -# TODO: Use labels for the build hash and avoid pulling the oscontainer -# every time we want to poll. 
-# TODO: Remove --from -print("Entering vm to build oscontainer for build: {}".format(args.build)) - -oci_archive = f"{build_path}/{name}" -cosa_argv = (['/usr/lib/coreos-assembler/buildextend-legacy-oscontainer.sh', oci_archive, 'build', f'--from={args.from_image}']) -for d in args.add_directory: - cosa_argv.append(f'--add-directory="{d}"') -cosa_argv.append(f'--display-name={display_name}') -if 'labeled-packages' in configyaml: - pkgs = ' '.join(configyaml['labeled-packages']) - cosa_argv.append(f'--labeled-packages="{pkgs}"') -if args.format is not None: - cosa_argv.append(f'--format={args.format}') -subprocess.check_call(cosa_argv + [tmprepo, meta['ostree-commit'], osc_name_and_tag]) - -# Inject the oscontainer with SHA256 into the build metadata -meta['images']['legacy-oscontainer'] = {'path': name, - 'sha256': sha256sum_file(oci_archive), - 'size': os.path.getsize(oci_archive), - "skip-compression": True} -meta.write(artifact_name='legacy-oscontainer') -print("Updated meta.json") diff --git a/src/cmd-push-container b/src/cmd-push-container index ba1ddec687..6a6d3429fa 100755 --- a/src/cmd-push-container +++ b/src/cmd-push-container @@ -1,8 +1,5 @@ #!/usr/bin/python3 -u -# Upload the container to a registry. Note this -# is distinct from `upload-oscontainer` which -# only applies to (hopefully soon only older) -# versions of RHCOS but not FCOS. +# Upload the container to a registry. import argparse import json diff --git a/src/cmd-upload-oscontainer b/src/cmd-upload-oscontainer deleted file mode 120000 index c494cf5236..0000000000 --- a/src/cmd-upload-oscontainer +++ /dev/null @@ -1 +0,0 @@ -cmd-upload-oscontainer-deprecated-legacy-format \ No newline at end of file diff --git a/src/cmd-upload-oscontainer-deprecated-legacy-format b/src/cmd-upload-oscontainer-deprecated-legacy-format deleted file mode 100755 index 98e90c3f95..0000000000 --- a/src/cmd-upload-oscontainer-deprecated-legacy-format +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/python3 -u -# Upload an oscontainer. This is a wrapper for -# `cosa oscontainer` that just for historical reasons -# used to live downstream in the redhat-coreos pipeline. -# In the future we should just have one `cosa oscontainer` -# command. 
- -import argparse -import json -import yaml -import os -import shutil -import subprocess -import sys - -cosa_dir = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, cosa_dir) - -from cosalib import cmdlib - -parser = argparse.ArgumentParser() -parser.add_argument("--arch-tag", help="append arch name to push tag", - action='store_true') -parser.add_argument("--name", help="oscontainer name", - action='store', required=True) -parser.add_argument("--from", help="Base image", default='scratch', - dest='from_image') -parser.add_argument("--format", help="Format to use for push") -parser.add_argument("--add-directory", help="Copy in all content from referenced directory DIR", - metavar='DIR', action='append', default=[]) - -args = parser.parse_args() - -with open('builds/builds.json') as f: - builds = json.load(f)['builds'] -if len(builds) == 0: - cmdlib.fatal("No builds found") -latest_build = builds[0]['id'] -arch = cmdlib.get_basearch() -latest_build_path = f"builds/{latest_build}/{arch}" - -metapath = f"{latest_build_path}/meta.json" -with open(metapath) as f: - meta = json.load(f) - -# for backcompat, we auto-build extensions if they're missing -if os.path.exists('src/config/extensions.yaml'): - if 'extensions' not in meta: - cmdlib.runcmd(['coreos-assembler', 'buildextend-extensions']) - with open(metapath) as f: - meta = json.load(f) - assert 'extensions' in meta - -configdir = os.path.abspath('src/config') -oscconfigpath = f'{configdir}/oscontainer.yaml' -# XXX: fold oscontainer.yaml handling into oscontainer.py -configyaml = {} -if os.path.exists(oscconfigpath): - with open(oscconfigpath) as f: - configyaml = yaml.safe_load(f) - -if 'base' in configyaml: - args.from_image = configyaml['base'] - -print("Preparing to upload oscontainer for build: {}".format(latest_build)) -ostree_commit = meta['ostree-commit'] - -tmprepo = "{}/tmp/repo".format(os.getcwd()) -# if tmprepo is not a directory, but is unexpectedly a file, -# just nuke it -if not os.path.isdir(tmprepo) and os.path.exists(tmprepo): - os.remove(tmprepo) - -# if tmprepo is not a directory and not a file, recreate from -# the tarfile -if not os.path.exists(tmprepo): - os.makedirs(tmprepo, exist_ok=True) - ostree_commit_tar = meta['images']['ostree']['path'] - subprocess.check_call(['tar', '-xf', - f'{latest_build_path}/{ostree_commit_tar}', - '-C', tmprepo]) - -tmp_osreleasedir = 'tmp/usrlib-osrelease' -subprocess.check_call(['rm', '-rf', tmp_osreleasedir]) -cmdlib.runcmd(['/usr/bin/ostree', 'checkout', '--repo', tmprepo, - '--user-mode', '--subpath=/usr/lib/os-release', ostree_commit, - tmp_osreleasedir]) -display_name = None -with open(os.path.join(tmp_osreleasedir, "os-release")) as f: - display_name = subprocess.check_output(['/bin/sh', '-c', 'set -euo pipefail; . /proc/self/fd/0 && echo $NAME'], stdin=f, encoding='UTF-8').strip() -if display_name == "": - raise SystemExit(f"Failed to find NAME= in /usr/lib/os-release in commit {ostree_commit}") -shutil.rmtree(tmp_osreleasedir) - -osc_name_and_tag = f"{args.name}:{latest_build}" -if args.arch_tag: - arch = meta.get("coreos-assembler.basearch", cmdlib.get_basearch) - osc_name_and_tag = f"{args.name}:{latest_build}-{arch}" - -# TODO: Use labels for the build hash and avoid pulling the oscontainer -# every time we want to poll. 
-# TODO: Remove --from -digestfile = "tmp/oscontainer-digest" -# We need to pass the auth file from the unpriv user to the root process -cosa_argv = ['sudo', '--preserve-env=container,DISABLE_TLS_VERIFICATION,SSL_CERT_DIR,SSL_CERT_FILE,REGISTRY_AUTH_FILE,OSCONTAINER_CERT_DIR'] -authfile = os.environ.get("REGISTRY_AUTH_FILE", os.path.expanduser('~/.docker/config.json')) -if not os.path.isfile(authfile): - raise SystemExit(f"Missing {authfile}") -os.environ['REGISTRY_AUTH_FILE'] = authfile -cosa_argv.extend(['/usr/lib/coreos-assembler/oscontainer.py', '--workdir=./tmp', 'build', f"--from={args.from_image}"]) -for d in args.add_directory: - cosa_argv.append(f"--add-directory={d}") -cosa_argv.append(f"--display-name={display_name}") -if 'labeled-packages' in configyaml: - pkgs = ' '.join(configyaml['labeled-packages']) - cosa_argv.append(f"--labeled-packages={pkgs}") -if args.format is not None: - cosa_argv.append(f'--format={args.format}') -subprocess.check_call(cosa_argv + - [f'--digestfile={digestfile}', - '--push', tmprepo, - meta['ostree-commit'], - osc_name_and_tag]) - -with open(digestfile) as f: - osc_digest = f.read().strip() - -# Inject the oscontainer with SHA256 into the build metadata -meta['oscontainer'] = {'image': args.name, - 'digest': osc_digest} -metapath_new = f"{metapath}.new" -with open(metapath_new, 'w') as f: - json.dump(meta, f, sort_keys=True) -shutil.move(metapath_new, metapath) diff --git a/src/oscontainer-deprecated-legacy-format.py b/src/oscontainer-deprecated-legacy-format.py deleted file mode 100755 index bcb56ee0c8..0000000000 --- a/src/oscontainer-deprecated-legacy-format.py +++ /dev/null @@ -1,325 +0,0 @@ -#!/usr/bin/env python3 -# NOTE: PYTHONUNBUFFERED is set in the entrypoint for unbuffered output -# -# An "oscontainer" is an ostree (archive) repository stuck inside -# a Docker/OCI container at /srv/repo. For more information, -# see https://github.com/openshift/pivot -# -# This command manipulates those images. - -import gi - -gi.require_version('OSTree', '1.0') -gi.require_version('RpmOstree', '1.0') - -from gi.repository import GLib, Gio, OSTree, RpmOstree - -import argparse -import json -import os -import shutil -import subprocess -from cosalib import cmdlib -from cosalib.buildah import ( - buildah_base_args -) - -OSCONTAINER_COMMIT_LABEL = 'com.coreos.ostree-commit' - - -def run_get_json(args): - return json.loads(subprocess.check_output(args)) - - -def run_get_string(args): - return subprocess.check_output(args, encoding='UTF-8').strip() - - -def find_commit_from_oscontainer(repo): - """Given an ostree repo, find exactly one commit object in it""" - o = subprocess.check_output(['find', repo + '/objects', '-name', '*.commit'], encoding='UTF-8').strip().split('\n') - if len(o) > 1: - raise SystemExit(f"Multiple commit objects found in {repo}") - d, n = os.path.split(o[0]) - return os.path.basename(d) + n.split('.')[0] - - -# Given a container reference, pull the latest version, then extract the ostree -# repo a new directory dest/repo. -def oscontainer_extract(containers_storage, tmpdir, src, dest, - tls_verify=True, ref=None, cert_dir="", - authfile=""): - dest = os.path.realpath(dest) - subprocess.check_call(["ostree", "--repo=" + dest, "refs"]) - - # FIXME: Today we use skopeo in a hacky way for this. What we - # really want is the equivalent of `oc image extract` as part of - # podman or skopeo. - cmd = ['skopeo'] - # See similar message in oscontainer_build. 
- if tmpdir is not None: - os.environ['TMPDIR'] = tmpdir - - if not tls_verify: - cmd.append('--tls-verify=false') - - if authfile != "": - cmd.append("--authfile={}".format(authfile)) - if cert_dir != "": - cmd.append("--cert-dir={}".format(cert_dir)) - tmp_tarball = tmpdir + '/container.tar' - cmd += ['copy', "docker://" + src, 'docker-archive://' + tmp_tarball] - cmdlib.runcmd(cmd) - cmdlib.runcmd(['tar', 'xf', tmp_tarball], cwd=tmpdir) - os.unlink(tmp_tarball) - # This is a brutal hack to extract all the layers; we don't even bother with ordering - # because we know we're not removing anything in higher layers. - subprocess.check_call(['find', '-name', '*.tar', '-exec', 'tar', 'xUf', '{}', ';'], cwd=tmpdir) - # Some files/directories aren't writable, and this will cause permission errors - subprocess.check_call(['find', '!', '-perm', '-u+w', '-exec', 'chmod', 'u+w', '{}', ';'], cwd=tmpdir) - - repo = tmpdir + '/srv/repo' - commit = find_commit_from_oscontainer(repo) - print(f"commit: {commit}") - cmdlib.runcmd(["ostree", "--repo=" + dest, "pull-local", repo, commit]) - if ref is not None: - cmdlib.runcmd([ - "ostree", "--repo=" + dest, "refs", '--create=' + ref, commit]) - - -# Given an OSTree repository at src (and exactly one ref) generate an -# oscontainer with it. -def oscontainer_build(containers_storage, tmpdir, src, ref, image_name_and_tag, - base_image, push=False, tls_verify=True, pushformat=None, - add_directories=[], cert_dir="", authfile="", digestfile=None, - display_name=None, labeled_pkgs=[]): - r = OSTree.Repo.new(Gio.File.new_for_path(src)) - r.open(None) - - [_, rev] = r.resolve_rev(ref, True) - if ref != rev: - print("Resolved {} = {}".format(ref, rev)) - [_, ostree_commit, _] = r.load_commit(rev) - ostree_commitmeta = ostree_commit.get_child_value(0) - versionv = ostree_commitmeta.lookup_value( - "version", GLib.VariantType.new("s")) - if versionv: - ostree_version = versionv.get_string() - else: - ostree_version = None - - buildah_base_argv = buildah_base_args(containers_storage) - - # In general, we just stick with the default tmpdir set up. But if a - # workdir is provided, then we want to be sure that all the heavy I/O work - # that happens stays in there since e.g. we might be inside a tiny supermin - # appliance. - if tmpdir is not None: - os.environ['TMPDIR'] = tmpdir - - bid = run_get_string(buildah_base_argv + ['from', base_image]) - mnt = run_get_string(buildah_base_argv + ['mount', bid]) - try: - dest_repo = os.path.join(mnt, 'srv/repo') - subprocess.check_call(['mkdir', '-p', dest_repo]) - subprocess.check_call([ - "ostree", "--repo=" + dest_repo, "init", "--mode=archive"]) - # Note that oscontainers don't have refs; we also disable fsync - # because the repo will be put into a container image and the build - # process should handle its own fsync (or choose not to). - print("Copying ostree commit into container: {} ...".format(rev)) - cmdlib.runcmd(["ostree", "--repo=" + dest_repo, "pull-local", "--disable-fsync", src, rev]) - - for d in add_directories: - with os.scandir(d) as it: - for entry in it: - dest = os.path.join(mnt, entry.name) - subprocess.check_call(['/usr/lib/coreos-assembler/cp-reflink', entry.path, dest]) - print(f"Copied in content from: {d}") - - # We use /noentry to trick `podman create` into not erroring out - # on a container with no cmd/entrypoint. It won't actually be run. 
- config = ['--entrypoint', '["/noentry"]', - '-l', OSCONTAINER_COMMIT_LABEL + '=' + rev] - if ostree_version is not None: - config += ['-l', 'version=' + ostree_version] - - base_pkgs = RpmOstree.db_query_all(r, rev, None) - for pkg in base_pkgs: - name = pkg.get_name() - if name in labeled_pkgs: - config += ['-l', f"com.coreos.rpm.{name}={pkg.get_evr()}.{pkg.get_arch()}"] - - # Generate pkglist.txt in to the oscontainer at / - pkg_list_dest = os.path.join(mnt, 'pkglist.txt') - # should already be sorted, but just re-sort to be sure - nevras = sorted([pkg.get_nevra() for pkg in base_pkgs]) - with open(pkg_list_dest, 'w') as f: - for nevra in nevras: - f.write(nevra) - f.write('\n') - - meta = {} - builddir = None - if os.path.isfile('builds/builds.json'): - with open('builds/builds.json') as fb: - builds = json.load(fb)['builds'] - latest_build = builds[0]['id'] - arch = cmdlib.get_basearch() - builddir = f"builds/{latest_build}/{arch}" - metapath = f"{builddir}/meta.json" - with open(metapath) as f: - meta = json.load(f) - rhcos_commit = meta['coreos-assembler.container-config-git']['commit'] - imagegit = meta.get('coreos-assembler.container-image-git') - if imagegit is not None: - cosa_commit = imagegit['commit'] - config += ['-l', f"com.coreos.coreos-assembler-commit={cosa_commit}"] - config += ['-l', f"com.coreos.redhat-coreos-commit={rhcos_commit}"] - - if 'extensions' in meta: - tarball = os.path.abspath(os.path.join(builddir, meta['extensions']['path'])) - dest_dir = os.path.join(mnt, 'extensions') - os.makedirs(dest_dir, exist_ok=True) - cmdlib.runcmd(["tar", "-xf", tarball], cwd=dest_dir) - - with open(os.path.join(dest_dir, 'extensions.json')) as f: - extensions = json.load(f) - - extensions_label = ';'.join([ext for (ext, obj) in extensions['extensions'].items() - if obj.get('kind', 'os-extension') == 'os-extension']) - config += ['-l', f"com.coreos.os-extensions={extensions_label}"] - - for pkgname in meta['extensions']['manifest']: - if pkgname in labeled_pkgs: - evra = meta['extensions']['manifest'][pkgname] - config += ['-l', f"com.coreos.rpm.{pkgname}={evra}"] - - if display_name is not None: - config += ['-l', 'io.openshift.build.version-display-names=machine-os=' + display_name, - '-l', 'io.openshift.build.versions=machine-os=' + ostree_version] - cmdlib.runcmd(buildah_base_argv + ['config'] + config + [bid]) - print("Committing container...") - iid = run_get_string(buildah_base_argv + ['commit', bid, image_name_and_tag]) - print("{} {}".format(image_name_and_tag, iid)) - finally: - subprocess.call(buildah_base_argv + ['umount', bid], stdout=subprocess.DEVNULL) - subprocess.call(buildah_base_argv + ['rm', bid], stdout=subprocess.DEVNULL) - - if push: - print("Pushing container") - podCmd = buildah_base_argv + ['push'] - if not tls_verify: - tls_arg = '--tls-verify=false' - else: - tls_arg = '--tls-verify' - podCmd.append(tls_arg) - - if authfile != "": - podCmd.append("--authfile={}".format(authfile)) - - if cert_dir != "": - podCmd.append("--cert-dir={}".format(cert_dir)) - - if digestfile is not None: - podCmd.append(f'--digestfile={digestfile}') - - if pushformat is not None: - podCmd.append(f'--format={pushformat}') - - podCmd.append(image_name_and_tag) - - cmdlib.runcmd(podCmd) - elif digestfile is not None: - inspect = run_get_json(buildah_base_argv + ['inspect', image_name_and_tag])[0] - with open(digestfile, 'w') as f: - f.write(inspect['Digest']) - - -def main(): - # Parse args and dispatch - parser = argparse.ArgumentParser() - parser.add_argument("--workdir", 
help="Temporary working directory") - parser.add_argument("--disable-tls-verify", - help="Disable TLS for pushes and pulls", - default=(True if os.environ.get("DISABLE_TLS_VERIFICATION", False) else False), - action="store_true") - parser.add_argument("--cert-dir", help="Extra certificate directories", - default=os.environ.get("OSCONTAINER_CERT_DIR", '')) - parser.add_argument("--authfile", help="Path to authentication file", - action="store", - default=os.environ.get("REGISTRY_AUTH_FILE", '')) - subparsers = parser.add_subparsers(dest='action') - parser_extract = subparsers.add_parser( - 'extract', help='Extract an oscontainer') - parser_extract.add_argument("src", help="Image reference") - parser_extract.add_argument("dest", help="Destination directory") - parser_extract.add_argument("--ref", help="Also set an ostree ref") - parser_build = subparsers.add_parser('build', help='Build an oscontainer') - parser_build.add_argument( - "--from", - help="Base image (default 'scratch')", - default='scratch') - parser_build.add_argument("src", help="OSTree repository") - parser_build.add_argument("rev", help="OSTree ref (or revision)") - parser_build.add_argument("name", help="Image name") - parser_build.add_argument("--display-name", help="Name used for an OpenShift component") - parser_build.add_argument("--add-directory", help="Copy in all content from referenced directory DIR", - metavar='DIR', action='append', default=[]) - parser_build.add_argument("--labeled-packages", help="Packages whose NEVRAs are included as labels on the image") - # For now we forcibly override to v2s2 https://bugzilla.redhat.com/show_bug.cgi?id=2058421 - parser_build.add_argument("--format", help="Pass through push format to buildah", default="v2s2") - parser_build.add_argument( - "--digestfile", - help="Write image digest to file", - action='store', - metavar='FILE') - parser_build.add_argument( - "--push", - help="Push to registry", - action='store_true') - args = parser.parse_args() - - labeled_pkgs = [] - if args.labeled_packages is not None: - labeled_pkgs = args.labeled_packages.split() - - containers_storage = None - tmpdir = None - if args.workdir is not None: - containers_storage = os.path.join(args.workdir, 'containers-storage') - if os.path.exists(containers_storage): - shutil.rmtree(containers_storage) - tmpdir = os.path.join(args.workdir, 'tmp') - if os.path.exists(tmpdir): - shutil.rmtree(tmpdir) - os.makedirs(tmpdir) - - try: - if args.action == 'extract': - oscontainer_extract( - containers_storage, tmpdir, args.src, args.dest, - tls_verify=not args.disable_tls_verify, - cert_dir=args.cert_dir, - ref=args.ref, - authfile=args.authfile) - elif args.action == 'build': - oscontainer_build( - containers_storage, tmpdir, args.src, args.rev, args.name, - getattr(args, 'from'), - display_name=args.display_name, - digestfile=args.digestfile, - add_directories=args.add_directory, - push=args.push, - pushformat=args.format, - tls_verify=not args.disable_tls_verify, - cert_dir=args.cert_dir, - authfile=args.authfile, - labeled_pkgs=labeled_pkgs) - finally: - if containers_storage is not None and os.path.isdir(containers_storage): - shutil.rmtree(containers_storage) - - -if __name__ == '__main__': - main() diff --git a/src/oscontainer.py b/src/oscontainer.py deleted file mode 120000 index 06a195ec0e..0000000000 --- a/src/oscontainer.py +++ /dev/null @@ -1 +0,0 @@ -oscontainer-deprecated-legacy-format.py \ No newline at end of file