Skip to content

Commit

Permalink
[kubevirt] Hackish minimal rhcos auto-detection
Browse files Browse the repository at this point in the history
Discover the rhcos container image from the release payload based on the
checksum. The openstack mirror and the release payload use different
names for the images. The checksum is used to cross-reference the
release payload version with the quay.io/containerdisks/rhcos version.

Check for containerDisks or DataVolumeTemplates named
`containervolume` whose container source contains the `{rhcos:version}`
placeholder, and replace it with the discovered rhcos container version.

As an example: `quay.io/containerdisks/rhcos:{rhcos:version}` would be
modified to `quay.io/containerdisks/rhcos:mytag`, if `mytag` were
discovered in the release payload.

Signed-off-by: Roman Mohr <rmohr@redhat.com>
  • Loading branch information
rmohr committed Feb 10, 2022
1 parent 1f3f30b commit b9511a6
Show file tree
Hide file tree
Showing 6 changed files with 105 additions and 13 deletions.
1 change: 1 addition & 0 deletions api/v1alpha1/nodepool_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
const (
NodePoolValidReleaseImageConditionType = "ValidReleaseImage"
NodePoolValidAMIConditionType = "ValidAMI"
NodePoolValidRHCOSImageConditionType = "ValidRHCOSImage"
NodePoolConfigValidConfigConditionType = "ValidConfig"
NodePoolUpdateManagementEnabledConditionType = "UpdateManagementEnabled"
NodePoolAutoscalingEnabledConditionType = "AutoscalingEnabled"
Expand Down
1 change: 1 addition & 0 deletions cmd/cluster/core/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@ type KubevirtPlatformCreateOptions struct {
Memory string
Cores uint32
ContainerDiskImage string
ContainerDiskRepo string
}

type AWSPlatformOptions struct {
Expand Down
11 changes: 9 additions & 2 deletions cmd/cluster/kubevirt/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ func NewCreateCommand(opts *core.CreateOptions) *cobra.Command {
cmd.Flags().StringVar(&opts.KubevirtPlatform.Memory, "memory", opts.KubevirtPlatform.Memory, "The amount of memory which is visible inside the Guest OS (type BinarySI, e.g. 5Gi, 100Mi)")
cmd.Flags().Uint32Var(&opts.KubevirtPlatform.Cores, "cores", opts.KubevirtPlatform.Cores, "The number of cores inside the vmi, Must be a value greater or equal 1")
cmd.Flags().StringVar(&opts.KubevirtPlatform.ContainerDiskImage, "containerdisk", opts.KubevirtPlatform.ContainerDiskImage, "A reference to docker image with the embedded disk to be used to create the machines")
cmd.Flags().StringVar(&opts.KubevirtPlatform.ContainerDiskRepo, "containerdisk-repository", opts.KubevirtPlatform.ContainerDiskRepo, "A reference to docker image registry with the embedded disk to be used to create the machines, the tag will be auto-discovered")

cmd.RunE = func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
Expand Down Expand Up @@ -64,8 +65,14 @@ func applyPlatformSpecificsValues(ctx context.Context, exampleOptions *apifixtur
// As long as there is no official container image
// The image must be provided by user
// Otherwise it must fail
if opts.KubevirtPlatform.ContainerDiskImage == "" {
return errors.New("the container disk image for the Kubevirt machine must be provided by user (\"--containerdisk\" flag)")
if opts.KubevirtPlatform.ContainerDiskImage != "" && opts.KubevirtPlatform.ContainerDiskRepo != "" {
return errors.New("only one of \"--containerdisk\" and \"--containerdisk-repository\" can be specified")
}
if opts.KubevirtPlatform.ContainerDiskImage == "" && opts.KubevirtPlatform.ContainerDiskRepo == "" {
return errors.New("the container disk image for the Kubevirt machine must be provided by user (\"--containerdisk\" or \"--containerdisk-repository\" flag)")
}
if opts.KubevirtPlatform.ContainerDiskRepo != "" {
opts.KubevirtPlatform.ContainerDiskImage = opts.KubevirtPlatform.ContainerDiskRepo + ":{rhcos:version}"
}
}

Expand Down
16 changes: 16 additions & 0 deletions cmd/nodepool/kubevirt/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package kubevirt

import (
"context"
"errors"

"github.com/spf13/cobra"
apiresource "k8s.io/apimachinery/pkg/api/resource"
Expand All @@ -17,6 +18,7 @@ type KubevirtPlatformCreateOptions struct {
Memory string
Cores uint32
ContainerDiskImage string
ContainerDiskRepo string
}

func NewCreateCommand(coreOpts *core.CreateNodePoolOptions) *cobra.Command {
Expand All @@ -34,6 +36,7 @@ func NewCreateCommand(coreOpts *core.CreateNodePoolOptions) *cobra.Command {
cmd.Flags().StringVar(&platformOpts.Memory, "memory", platformOpts.Memory, "The amount of memory which is visible inside the Guest OS (type BinarySI, e.g. 5Gi, 100Mi)")
cmd.Flags().Uint32Var(&platformOpts.Cores, "cores", platformOpts.Cores, "The number of cores inside the vmi, Must be a value greater or equal 1")
cmd.Flags().StringVar(&platformOpts.ContainerDiskImage, "containerdisk", platformOpts.ContainerDiskImage, "A reference to docker image with the embedded disk to be used to create the machines")
cmd.Flags().StringVar(&platformOpts.ContainerDiskRepo, "containerdisk-repository", platformOpts.ContainerDiskRepo, "A reference to docker image registry with the embedded disk to be used to create the machines, the tag will be auto-discovered")

// TODO (nargaman): replace with official container image, after RFE-2501 is completed
// As long as there is no official container image
Expand All @@ -47,6 +50,19 @@ func NewCreateCommand(coreOpts *core.CreateNodePoolOptions) *cobra.Command {
}

func (o *KubevirtPlatformCreateOptions) UpdateNodePool(_ context.Context, nodePool *hyperv1.NodePool, _ *hyperv1.HostedCluster, _ crclient.Client) error {
// TODO (nargaman): replace with official container image, after RFE-2501 is completed
// As long as there is no official container image
// The image must be provided by user
// Otherwise it must fail
if o.ContainerDiskImage != "" && o.ContainerDiskRepo != "" {
return errors.New("only one of \"--containerdisk\" and \"--containerdisk-repository\" can be specified")
}
if o.ContainerDiskImage == "" && o.ContainerDiskRepo == "" {
return errors.New("the container disk image for the Kubevirt machine must be provided by user (\"--containerdisk\" or \"containerdisk-repository\" flag)")
}
if o.ContainerDiskRepo != "" {
o.ContainerDiskImage = o.ContainerDiskRepo + ":{rhcos:version}"
}
runAlways := kubevirtv1.RunStrategyAlways
guestQuantity := apiresource.MustParse(o.Memory)
nodePool.Spec.Platform.Kubevirt = &hyperv1.KubevirtNodePoolPlatform{
Expand Down
25 changes: 23 additions & 2 deletions hypershift-operator/controllers/nodepool/kubevirt.go
Original file line number Diff line number Diff line change
@@ -1,15 +1,36 @@
package nodepool

import (
"strings"

hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
)

func kubevirtMachineTemplateSpec(nodePool *hyperv1.NodePool) *capikubevirt.KubevirtMachineTemplateSpec {
func kubevirtMachineTemplateSpec(nodePool *hyperv1.NodePool, containerDiskChecksum string) *capikubevirt.KubevirtMachineTemplateSpec {
template := nodePool.Spec.Platform.Kubevirt.NodeTemplate.DeepCopy()
dataVolumeTemplates := template.Spec.DataVolumeTemplates
for i, dv := range dataVolumeTemplates {
if dv.Name == "containervolume" {
if dv.Spec.Source != nil && dv.Spec.Source.Registry != nil &&
dv.Spec.Source.Registry.URL != nil && strings.Contains(*dv.Spec.Source.Registry.URL, "{rhcos:version}") {
image := strings.ReplaceAll(*dv.Spec.Source.Registry.URL, "{rhcos:version}", containerDiskChecksum)
dataVolumeTemplates[i].Spec.Source.Registry.URL = &image
}
}
}
volumes := template.Spec.Template.Spec.Volumes
for i, volume := range volumes {
if volume.Name == "containervolume" {
if volume.ContainerDisk != nil && strings.Contains(volume.ContainerDisk.Image, "{rhcos:version}") {
volumes[i].ContainerDisk.Image = strings.ReplaceAll(volume.ContainerDisk.Image, "{rhcos:version}", containerDiskChecksum)
}
}
}
return &capikubevirt.KubevirtMachineTemplateSpec{
Template: capikubevirt.KubevirtMachineTemplateResource{
Spec: capikubevirt.KubevirtMachineSpec{
VirtualMachineTemplate: *nodePool.Spec.Platform.Kubevirt.NodeTemplate,
VirtualMachineTemplate: *template,
},
},
}
Expand Down
64 changes: 55 additions & 9 deletions hypershift-operator/controllers/nodepool/nodepool_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,8 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho

// Validate platform specific input.
var ami string
if nodePool.Spec.Platform.Type == hyperv1.AWSPlatform {
switch nodePool.Spec.Platform.Type {
case hyperv1.AWSPlatform:
if hcluster.Spec.Platform.AWS == nil {
return ctrl.Result{}, fmt.Errorf("the HostedCluster for this NodePool has no .Spec.Platform.AWS, this is unsupported")
}
Expand All @@ -359,14 +360,33 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
})
return ctrl.Result{}, fmt.Errorf("couldn't discover an AMI for release image: %w", err)
}
meta.SetStatusCondition(&nodePool.Status.Conditions, metav1.Condition{
Type: hyperv1.NodePoolValidAMIConditionType,
Status: metav1.ConditionTrue,
Reason: hyperv1.NodePoolAsExpectedConditionReason,
Message: fmt.Sprintf("Bootstrap AMI is %q", ami),
ObservedGeneration: nodePool.Generation,
})
case hyperv1.KubevirtPlatform:
ami, err = getContainerDiskChecksum(releaseImage)
if err != nil {
meta.SetStatusCondition(&nodePool.Status.Conditions, metav1.Condition{
Type: hyperv1.NodePoolValidRHCOSImageConditionType,
Status: metav1.ConditionFalse,
Reason: hyperv1.NodePoolValidationFailedConditionReason,
Message: fmt.Sprintf("Couldn't discover any openstack rhcos checksum for release image %q: %s", nodePool.Spec.Release.Image, err.Error()),
ObservedGeneration: nodePool.Generation,
})
return ctrl.Result{}, fmt.Errorf("couldn't discover any openstack rhcos checksum for release image: %w", err)
}
meta.SetStatusCondition(&nodePool.Status.Conditions, metav1.Condition{
Type: hyperv1.NodePoolValidRHCOSImageConditionType,
Status: metav1.ConditionTrue,
Reason: hyperv1.NodePoolAsExpectedConditionReason,
Message: fmt.Sprintf("Bootstrap openstack RHCOS image checksum is %q", ami),
ObservedGeneration: nodePool.Generation,
})
}
meta.SetStatusCondition(&nodePool.Status.Conditions, metav1.Condition{
Type: hyperv1.NodePoolValidAMIConditionType,
Status: metav1.ConditionTrue,
Reason: hyperv1.NodePoolAsExpectedConditionReason,
Message: fmt.Sprintf("Bootstrap AMI is %q", ami),
ObservedGeneration: nodePool.Generation,
})

// Validate config input.
// 3 generic core config resoures: fips, ssh and haproxy.
Expand Down Expand Up @@ -807,6 +827,10 @@ func getAMI(nodePool *hyperv1.NodePool, region string, releaseImage *releaseinfo
return defaultNodePoolAMI(region, releaseImage)
}

// getContainerDiskChecksum returns the RHCOS openstack disk checksum recorded
// in the release image's stream metadata. It is a thin indirection over
// defaultContainerDiskChecksum, mirroring the getAMI helper above.
func getContainerDiskChecksum(releaseImage *releaseinfo.ReleaseImage) (string, error) {
	return defaultContainerDiskChecksum(releaseImage)
}

func ignConfig(encodedCACert, encodedToken, endpoint string) ignitionapi.Config {
return ignitionapi.Config{
Ignition: ignitionapi.Ignition{
Expand Down Expand Up @@ -990,6 +1014,28 @@ func validateAutoscaling(nodePool *hyperv1.NodePool) error {
return nil
}

// defaultContainerDiskChecksum extracts the SHA256 checksum of the openstack
// qcow2.gz RHCOS disk artifact from the release image's CoreOS stream
// metadata. The checksum is used to cross-reference the release payload
// version with the matching quay.io/containerdisks/rhcos tag.
//
// It returns an error when any level of the metadata lookup fails: the
// architecture entry, the openstack artifact, the qcow2.gz format, or the
// disk entry itself.
func defaultContainerDiskChecksum(releaseImage *releaseinfo.ReleaseImage) (string, error) {
	// TODO: The architecture should be specified from the API
	const arch = "x86_64"
	archMeta, foundArch := releaseImage.StreamMetadata.Architectures[arch]
	if !foundArch {
		// NOTE: the original message reported "x64_64"; use the actual lookup key.
		return "", fmt.Errorf("couldn't find OS metadata for architecture %q", arch)
	}
	openStack, exists := archMeta.Artifacts["openstack"]
	if !exists {
		return "", fmt.Errorf("couldn't find OS metadata for openstack")
	}
	artifact, exists := openStack.Formats["qcow2.gz"]
	if !exists {
		return "", fmt.Errorf("couldn't find OS metadata for openstack artifact %v", "qcow2.gz")
	}
	disk, exists := artifact["disk"]
	if !exists {
		return "", fmt.Errorf("couldn't find OS metadata for the openstack disk")
	}

	return disk.SHA256, nil
}

func defaultNodePoolAMI(region string, releaseImage *releaseinfo.ReleaseImage) (string, error) {
// TODO: The architecture should be specified from the API
arch, foundArch := releaseImage.StreamMetadata.Architectures["x86_64"]
Expand Down Expand Up @@ -1319,7 +1365,7 @@ func machineTemplateBuilders(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.
}
case hyperv1.KubevirtPlatform:
template = &capikubevirt.KubevirtMachineTemplate{}
machineTemplateSpec = kubevirtMachineTemplateSpec(nodePool)
machineTemplateSpec = kubevirtMachineTemplateSpec(nodePool, ami)
mutateTemplate = func(object client.Object) error {
o, _ := object.(*capikubevirt.KubevirtMachineTemplate)
o.Spec = *machineTemplateSpec.(*capikubevirt.KubevirtMachineTemplateSpec)
Expand Down

0 comments on commit b9511a6

Please sign in to comment.