[kubevirt] unify template and make an http datavolume import the default
Prepares the hypershift CLI to auto-detect the required rhcos version
from the cluster release payload. Since this is not known at cluster
creation time, template parameters are introduced:
`{rhcos:openstack:checksum}` for container disks, and
`{rhcos:openstack:url}` for datavolume imports.

The hypershift CLI is extended in the following ways:

 * By default, the hypershift CLI now adds a datavolume which
   HTTP-imports the rhcos image that is auto-detected later on.
 * `--containerdisk-repository` can be specified to let the cluster
   figure out the right rhcos version when containerdisks should be
   used, e.g. `--containerdisk-repository=quay.io/containerdisks/rhcos`
   (see the sketch after this list).
 * `--root-volume-size` and `--root-volume-storage-class` can be used to
   customize the datavolume.
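
A minimal sketch of the expansion (mirroring the logic this commit adds
to both create commands; the repository value is illustrative):

```go
package main

import "fmt"

// Placeholder that the cluster later replaces with the detected rhcos version.
const rhcosOpenStackChecksumParameter = "{rhcos:openstack:checksum}"

func main() {
	containerDiskRepo := "quay.io/containerdisks/rhcos"
	// The CLI pins only the repository; the tag stays symbolic until the
	// release payload is inspected in-cluster.
	image := fmt.Sprintf("%v:%v", containerDiskRepo, rhcosOpenStackChecksumParameter)
	fmt.Println(image) // quay.io/containerdisks/rhcos:{rhcos:openstack:checksum}
}
```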

Finally, the nodepool-create and cluster-create commands now share the
same kubevirt template to remove duplication.

Signed-off-by: Roman Mohr <rmohr@redhat.com>
rmohr committed Mar 8, 2022
1 parent ca08c87 commit fc8a723
Showing 9 changed files with 218 additions and 123 deletions.
64 changes: 4 additions & 60 deletions api/fixtures/example.go
@@ -4,15 +4,11 @@ import (
"crypto/rand"
"fmt"

hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"

hyperv1 "github.com/openshift/hypershift/api/v1alpha1"

apiresource "k8s.io/apimachinery/pkg/api/resource"
kubevirtv1 "kubevirt.io/api/core/v1"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
)

@@ -86,6 +82,8 @@ type ExampleKubevirtOptions struct {
Memory string
Cores uint32
Image string
RootVolumeSize uint32
RootVolumeStorageClass string
}

type ExampleAWSOptionsZones struct {
@@ -488,61 +486,7 @@ web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
}
case hyperv1.KubevirtPlatform:
nodePool := defaultNodePool(cluster.Name)
runAlways := kubevirtv1.RunStrategyAlways
guestQuantity := apiresource.MustParse(o.Kubevirt.Memory)
nodePool.Spec.Platform.Kubevirt = &hyperv1.KubevirtNodePoolPlatform{
NodeTemplate: &capikubevirt.VirtualMachineTemplateSpec{
Spec: kubevirtv1.VirtualMachineSpec{
RunStrategy: &runAlways,
Template: &kubevirtv1.VirtualMachineInstanceTemplateSpec{
Spec: kubevirtv1.VirtualMachineInstanceSpec{
Domain: kubevirtv1.DomainSpec{
CPU: &kubevirtv1.CPU{Cores: o.Kubevirt.Cores},
Memory: &kubevirtv1.Memory{Guest: &guestQuantity},
Devices: kubevirtv1.Devices{
Disks: []kubevirtv1.Disk{
{
Name: "containervolume",
DiskDevice: kubevirtv1.DiskDevice{
Disk: &kubevirtv1.DiskTarget{
Bus: "virtio",
},
},
},
},
Interfaces: []kubevirtv1.Interface{
{
Name: "default",
InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{
Bridge: &kubevirtv1.InterfaceBridge{},
},
},
},
},
},
Volumes: []kubevirtv1.Volume{
{
Name: "containervolume",
VolumeSource: kubevirtv1.VolumeSource{
ContainerDisk: &kubevirtv1.ContainerDiskSource{
Image: o.Kubevirt.Image,
},
},
},
},
Networks: []kubevirtv1.Network{
{
Name: "default",
NetworkSource: kubevirtv1.NetworkSource{
Pod: &kubevirtv1.PodNetwork{},
},
},
},
},
},
},
},
}
nodePool.Spec.Platform.Kubevirt = ExampleKubeVirtTemplate(o.Kubevirt)
nodePools = append(nodePools, nodePool)
case hyperv1.NonePlatform, hyperv1.AgentPlatform:
nodePools = append(nodePools, defaultNodePool(cluster.Name))
123 changes: 123 additions & 0 deletions api/fixtures/example_kubevirt.go
@@ -0,0 +1,123 @@
package fixtures

import (
"fmt"

hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
"github.com/openshift/hypershift/cmd/util"
corev1 "k8s.io/api/core/v1"
apiresource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubevirtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
)

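// ExampleKubeVirtTemplate renders the node pool platform spec shared by the
// cluster-create and nodepool-create commands. If o.Image is set, the root
// disk is a containerdisk; otherwise a datavolume importing the rhcos URL
// placeholder is generated.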
func ExampleKubeVirtTemplate(o *ExampleKubevirtOptions) *hyperv1.KubevirtNodePoolPlatform {
runAlways := kubevirtv1.RunStrategyAlways
guestQuantity := apiresource.MustParse(o.Memory)
exampleTemplate := &hyperv1.KubevirtNodePoolPlatform{
NodeTemplate: &capikubevirt.VirtualMachineTemplateSpec{
Spec: kubevirtv1.VirtualMachineSpec{
RunStrategy: &runAlways,
Template: &kubevirtv1.VirtualMachineInstanceTemplateSpec{
Spec: kubevirtv1.VirtualMachineInstanceSpec{
Domain: kubevirtv1.DomainSpec{
CPU: &kubevirtv1.CPU{Cores: o.Cores},
Memory: &kubevirtv1.Memory{Guest: &guestQuantity},
Devices: kubevirtv1.Devices{
Interfaces: []kubevirtv1.Interface{
{
Name: "default",
InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{
Bridge: &kubevirtv1.InterfaceBridge{},
},
},
},
},
},
Networks: []kubevirtv1.Network{
{
Name: "default",
NetworkSource: kubevirtv1.NetworkSource{
Pod: &kubevirtv1.PodNetwork{},
},
},
},
},
},
},
},
}
exampleTemplate.NodeTemplate.Spec.Template.Spec.Domain.Devices.Disks = []kubevirtv1.Disk{
{
Name: util.RHCOSMagicVolumeName,
DiskDevice: kubevirtv1.DiskDevice{
Disk: &kubevirtv1.DiskTarget{
Bus: "virtio",
},
},
},
}
if o.Image != "" {
exampleTemplate.NodeTemplate.Spec.Template.Spec.Volumes = []kubevirtv1.Volume{
{
Name: util.RHCOSMagicVolumeName,
VolumeSource: kubevirtv1.VolumeSource{
ContainerDisk: &kubevirtv1.ContainerDiskSource{
Image: o.Image,
},
},
},
}
} else {
dataVolume := defaultDataVolume()
setDataVolumeDefaults(&dataVolume, o)
exampleTemplate.NodeTemplate.Spec.DataVolumeTemplates = []kubevirtv1.DataVolumeTemplateSpec{dataVolume}
exampleTemplate.NodeTemplate.Spec.Template.Spec.Volumes = []kubevirtv1.Volume{
{
Name: util.RHCOSMagicVolumeName,
VolumeSource: kubevirtv1.VolumeSource{
DataVolume: &kubevirtv1.DataVolumeSource{
Name: util.RHCOSMagicVolumeName,
},
},
},
}
}
return exampleTemplate
}

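// defaultDataVolume returns the "rhcos" datavolume template, sourcing its
// content from the {rhcos:openstack:url} placeholder that is resolved once
// the release payload is known.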
func defaultDataVolume() kubevirtv1.DataVolumeTemplateSpec {
return kubevirtv1.DataVolumeTemplateSpec{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: util.RHCOSMagicVolumeName,
},
Spec: v1beta1.DataVolumeSpec{
Source: &v1beta1.DataVolumeSource{
HTTP: &v1beta1.DataVolumeSourceHTTP{URL: util.RHCOSOpenStackURLParam},
},
},
}
}
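
// setDataVolumeDefaults applies the default 16Gi storage request and the
// requested storage class, then applies an explicit --root-volume-size
// override if one was given.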
func setDataVolumeDefaults(spec *kubevirtv1.DataVolumeTemplateSpec, o *ExampleKubevirtOptions) {
if spec.Spec.Storage == nil {
spec.Spec.Storage = &v1beta1.StorageSpec{
Resources: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]apiresource.Quantity{
corev1.ResourceStorage: util.KubeVirtVolumeDefaultSize,
},
},
StorageClassName: &o.RootVolumeStorageClass,
}
}
if o.RootVolumeSize != 0 {
size := apiresource.MustParse(fmt.Sprintf("%vGi", o.RootVolumeSize))
spec.Spec.Storage.Resources = corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]apiresource.Quantity{
corev1.ResourceStorage: size,
},
}
}
}
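
A hedged usage sketch of the shared template (values are illustrative):
leaving `Image` empty selects the new datavolume default, just as the
nodepool command below does.

```go
package main

import (
	"fmt"

	"github.com/openshift/hypershift/api/fixtures"
)

func main() {
	// With Image unset, the template falls back to a datavolume whose
	// source is the {rhcos:openstack:url} placeholder.
	platform := fixtures.ExampleKubeVirtTemplate(&fixtures.ExampleKubevirtOptions{
		Memory:         "8Gi", // illustrative values
		Cores:          4,
		RootVolumeSize: 32, // rendered as a 32Gi storage request
	})
	fmt.Println(platform.NodeTemplate.Spec.DataVolumeTemplates[0].Spec.Source.HTTP.URL)
	// -> {rhcos:openstack:url}
}
```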
1 change: 1 addition & 0 deletions api/v1alpha1/nodepool_types.go
@@ -10,6 +10,7 @@ import (
const (
NodePoolValidReleaseImageConditionType = "ValidReleaseImage"
NodePoolValidAMIConditionType = "ValidAMI"
NodePoolValidRHCOSImageConditionType = "ValidRHCOSImage"
NodePoolConfigValidConfigConditionType = "ValidConfig"
NodePoolUpdateManagementEnabledConditionType = "UpdateManagementEnabled"
NodePoolAutoscalingEnabledConditionType = "AutoscalingEnabled"
3 changes: 3 additions & 0 deletions cmd/cluster/core/create.go
@@ -78,6 +78,9 @@ type KubevirtPlatformCreateOptions struct {
Memory string
Cores uint32
ContainerDiskImage string
ContainerDiskRepo string
RootVolumeSize int64
RootVolumeStorageClass string
}

type AWSPlatformOptions struct {
11 changes: 9 additions & 2 deletions cmd/cluster/kubevirt/create.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"

"github.com/openshift/hypershift/cmd/util"
"github.com/spf13/cobra"

apifixtures "github.com/openshift/hypershift/api/fixtures"
@@ -33,8 +34,11 @@ func NewCreateCommand(opts *core.CreateOptions) *cobra.Command {

cmd.Flags().StringVar(&opts.KubevirtPlatform.Memory, "memory", opts.KubevirtPlatform.Memory, "The amount of memory which is visible inside the Guest OS (type BinarySI, e.g. 5Gi, 100Mi)")
cmd.Flags().Uint32Var(&opts.KubevirtPlatform.Cores, "cores", opts.KubevirtPlatform.Cores, "The number of cores inside the vmi, Must be a value greater or equal 1")
cmd.Flags().StringVar(&opts.KubevirtPlatform.RootVolumeStorageClass, "root-volume-storage-class", opts.KubevirtPlatform.RootVolumeStorageClass, "The storage class to use for machines in the NodePool")
cmd.Flags().Int64Var(&opts.KubevirtPlatform.RootVolumeSize, "root-volume-size", opts.KubevirtPlatform.RootVolumeSize, "The size of the root volume for machines in the NodePool in Gi")
cmd.Flags().StringVar(&opts.KubevirtPlatform.ContainerDiskImage, "containerdisk", opts.KubevirtPlatform.ContainerDiskImage, "A reference to docker image with the embedded disk to be used to create the machines")
cmd.Flags().StringVar(&opts.KubevirtPlatform.ServicePublishingStrategy, "service-publishing-strategy", opts.KubevirtPlatform.ServicePublishingStrategy, fmt.Sprintf("Define how to expose the cluster services. Supported options: %s (Use LoadBalancer and Route to expose services), %s (Select a random node to expose service access through)", IngressServicePublishingStrategy, NodePortServicePublishingStrategy))
cmd.Flags().StringVar(&opts.KubevirtPlatform.ContainerDiskRepo, "containerdisk-repository", opts.KubevirtPlatform.ContainerDiskRepo, "A reference to docker image registry with the embedded disk to be used to create the machines, the tag will be auto-discovered")

cmd.RunE = func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
@@ -76,8 +80,11 @@ func applyPlatformSpecificsValues(ctx context.Context, exampleOptions *apifixtur
// As long as there is no official container image
// The image must be provided by user
// Otherwise it must fail
if opts.KubevirtPlatform.ContainerDiskImage == "" {
return errors.New("the container disk image for the Kubevirt machine must be provided by user (\"--containerdisk\" flag)")
if (opts.KubevirtPlatform.ContainerDiskImage != "" && opts.KubevirtPlatform.ContainerDiskRepo != "") && (opts.KubevirtPlatform.RootVolumeSize > 0 || opts.KubevirtPlatform.RootVolumeStorageClass != "") {
return errors.New("container disk options (\"--containerdisk*\" flag) can not be used together with customized root volume options (\"--root-volume-*\" flags)")
}
if opts.KubevirtPlatform.ContainerDiskRepo != "" {
opts.KubevirtPlatform.ContainerDiskImage = fmt.Sprintf("%v:%v", opts.KubevirtPlatform.ContainerDiskRepo, util.RHCOSOpenStackChecksumParameter)
}
}

89 changes: 29 additions & 60 deletions cmd/nodepool/kubevirt/create.go
@@ -2,21 +2,25 @@ package kubevirt

import (
"context"
"errors"
"fmt"

"github.com/openshift/hypershift/api/fixtures"
"github.com/openshift/hypershift/cmd/util"
"github.com/spf13/cobra"
apiresource "k8s.io/apimachinery/pkg/api/resource"
kubevirtv1 "kubevirt.io/api/core/v1"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
crclient "sigs.k8s.io/controller-runtime/pkg/client"

hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
"github.com/openshift/hypershift/cmd/nodepool/core"
)

type KubevirtPlatformCreateOptions struct {
Memory string
Cores uint32
ContainerDiskImage string
Memory string
Cores uint32
ContainerDiskImage string
ContainerDiskRepo string
RootVolumeSize uint32
RootVolumeStorageClass string
}

func NewCreateCommand(coreOpts *core.CreateNodePoolOptions) *cobra.Command {
@@ -33,7 +37,10 @@ func NewCreateCommand(coreOpts *core.CreateNodePoolOptions) *cobra.Command {

cmd.Flags().StringVar(&platformOpts.Memory, "memory", platformOpts.Memory, "The amount of memory which is visible inside the Guest OS (type BinarySI, e.g. 5Gi, 100Mi)")
cmd.Flags().Uint32Var(&platformOpts.Cores, "cores", platformOpts.Cores, "The number of cores inside the vmi, Must be a value greater or equal 1")
cmd.Flags().StringVar(&platformOpts.RootVolumeStorageClass, "root-volume-storage-class", platformOpts.RootVolumeStorageClass, "The storage class to use for machines in the NodePool")
cmd.Flags().Uint32Var(&platformOpts.RootVolumeSize, "root-volume-size", platformOpts.RootVolumeSize, "The size of the root volume for machines in the NodePool in Gi")
cmd.Flags().StringVar(&platformOpts.ContainerDiskImage, "containerdisk", platformOpts.ContainerDiskImage, "A reference to docker image with the embedded disk to be used to create the machines")
cmd.Flags().StringVar(&platformOpts.ContainerDiskRepo, "containerdisk-repository", platformOpts.ContainerDiskRepo, "A reference to docker image registry with the embedded disk to be used to create the machines, the tag will be auto-discovered")

// TODO (nargaman): replace with official container image, after RFE-2501 is completed
// As long as there is no official container image
@@ -47,61 +54,23 @@ }
}

func (o *KubevirtPlatformCreateOptions) UpdateNodePool(_ context.Context, nodePool *hyperv1.NodePool, _ *hyperv1.HostedCluster, _ crclient.Client) error {
runAlways := kubevirtv1.RunStrategyAlways
guestQuantity := apiresource.MustParse(o.Memory)
nodePool.Spec.Platform.Kubevirt = &hyperv1.KubevirtNodePoolPlatform{
NodeTemplate: &capikubevirt.VirtualMachineTemplateSpec{
Spec: kubevirtv1.VirtualMachineSpec{
RunStrategy: &runAlways,
Template: &kubevirtv1.VirtualMachineInstanceTemplateSpec{
Spec: kubevirtv1.VirtualMachineInstanceSpec{
Domain: kubevirtv1.DomainSpec{
CPU: &kubevirtv1.CPU{Cores: o.Cores},
Memory: &kubevirtv1.Memory{Guest: &guestQuantity},
Devices: kubevirtv1.Devices{
Disks: []kubevirtv1.Disk{
{
Name: "containervolume",
DiskDevice: kubevirtv1.DiskDevice{
Disk: &kubevirtv1.DiskTarget{
Bus: "virtio",
},
},
},
},
Interfaces: []kubevirtv1.Interface{
kubevirtv1.Interface{
Name: "default",
InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{
Bridge: &kubevirtv1.InterfaceBridge{},
},
},
},
},
},
Volumes: []kubevirtv1.Volume{
{
Name: "containervolume",
VolumeSource: kubevirtv1.VolumeSource{
ContainerDisk: &kubevirtv1.ContainerDiskSource{
Image: o.ContainerDiskImage,
},
},
},
},
Networks: []kubevirtv1.Network{
kubevirtv1.Network{
Name: "default",
NetworkSource: kubevirtv1.NetworkSource{
Pod: &kubevirtv1.PodNetwork{},
},
},
},
},
},
},
},
// TODO (nargaman): replace with official container image, after RFE-2501 is completed
// As long as there is no official container image
// The image must be provided by user
// Otherwise it must fail
if (o.ContainerDiskImage != "" && o.ContainerDiskRepo != "") && (o.RootVolumeSize > 0 || o.RootVolumeStorageClass != "") {
return errors.New("container disk options (\"--containerdisk*\" flag) can not be used together with customized root volume options (\"--root-volume-*\" flags)")
}
if o.ContainerDiskRepo != "" {
o.ContainerDiskImage = fmt.Sprintf("%v:%v", o.ContainerDiskRepo, util.RHCOSOpenStackChecksumParameter)
}
nodePool.Spec.Platform.Kubevirt = fixtures.ExampleKubeVirtTemplate(&fixtures.ExampleKubevirtOptions{
Memory: o.Memory,
Cores: o.Cores,
Image: o.ContainerDiskImage,
RootVolumeSize: o.RootVolumeSize,
RootVolumeStorageClass: o.RootVolumeStorageClass,
})
return nil
}

11 changes: 11 additions & 0 deletions cmd/util/kubevirt.go
@@ -0,0 +1,11 @@
package util

import "k8s.io/apimachinery/pkg/api/resource"

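// Template parameters and defaults for the KubeVirt rhcos root volume; the
// placeholders are embedded by the CLI and substituted in-cluster once the
// release payload is inspected.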
const (
RHCOSOpenStackChecksumParameter string = "{rhcos:openstack:checksum}"
RHCOSMagicVolumeName string = "rhcos"
RHCOSOpenStackURLParam string = "{rhcos:openstack:url}"
)

var KubeVirtVolumeDefaultSize = resource.MustParse("16Gi")
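
The placeholders above are only emitted by the CLI in this commit; a
minimal sketch of the kind of in-cluster substitution they are prepared
for (names and wiring here are assumptions, not part of this change):

```go
package main

import (
	"fmt"
	"strings"
)

// substituteRHCOSParams is illustrative only: once the release payload
// reveals the rhcos artifacts, placeholders like these would be replaced
// with concrete values.
func substituteRHCOSParams(s, checksum, url string) string {
	s = strings.ReplaceAll(s, "{rhcos:openstack:checksum}", checksum)
	return strings.ReplaceAll(s, "{rhcos:openstack:url}", url)
}

func main() {
	fmt.Println(substituteRHCOSParams(
		"quay.io/containerdisks/rhcos:{rhcos:openstack:checksum}",
		"4.10.3", // hypothetical detected version tag
		"",
	))
	// -> quay.io/containerdisks/rhcos:4.10.3
}
```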