From 86e2d5a05622d78213c5fcda737992d3ec48f3fb Mon Sep 17 00:00:00 2001
From: Marques Johansson 
Date: Thu, 17 Jun 2021 14:27:14 -0400
Subject: [PATCH 01/10] update Packet UPI for Equinix Metal rename

Signed-off-by: Marques Johansson 
---
 docs/user/metal/install_upi.md | 10 +++++++---
 upi/metal/README.md            | 25 +++++++++++++++++--------
 2 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/docs/user/metal/install_upi.md b/docs/user/metal/install_upi.md
index 4f2d0ea75bf..79b95841072 100644
--- a/docs/user/metal/install_upi.md
+++ b/docs/user/metal/install_upi.md
@@ -1,5 +1,7 @@
 # Install: BareMetal User Provided Infrastructure

+The bare metal hosts used throughout these examples are provided by [Equinix Metal][equinix-metal].
+
 The steps for performing a UPI-based install are outlined here. Several [Terraform][upi-metal-example] templates are provided as an example to help model your own.

 ## Table of contents
@@ -224,11 +226,11 @@ INFO Waiting up to 30m0s for the cluster to initialize...

 ## Example Bare-Metal UPI deployment

-Terraform [templates][upi-metal-example] provides an example of using OpenShift Installer to create an bare-metal UPI OpenShift cluster on Packet.net
+Terraform [templates][upi-metal-example] provide an example of using the OpenShift Installer to create a bare-metal UPI OpenShift cluster on [Equinix Metal][equinix-metal].

 ### Overview

-* Compute: Uses Packet.net to deploy bare-metal machines.
+* Compute: Uses [Equinix Metal][equinix-metal] to deploy bare-metal machines.
 Uses [matchbox] to serve PXE scripts and Ignition configs for bootstrap, control plane and worker machines.
 Uses `public` IPv4 addresses for each machine, so that all the machines are accessible on the internet.
@@ -274,7 +276,7 @@ Use the bootstrap [monitoring](#monitor-for-bootstrap-complete) to track when cl
 terraform apply -auto-approve -var=bootstrap_dns="false"
 ```

-NOTE: The bootstrap resources like the bootstrap machines currently cannot be removed using terraform. You can use the Packet.net console to remove the bootstrap machine. All the resources will be cleaned up by `terraform destroy`
+NOTE: Bootstrap resources such as the bootstrap machine currently cannot be removed using terraform. You can use the [Equinix Metal console][equinix-metal-console] to remove the bootstrap machine. All the remaining resources will be cleaned up by `terraform destroy`.

 ### Approving server certificates for nodes

@@ -312,6 +314,8 @@ terraform destroy -auto-approve
 [coreos-installer-args]: https://github.com/coreos/coreos-installer#kernel-command-line-options-for-coreos-installer-running-in-the-initramfs
 [coreos-installer]: https://github.com/coreos/coreos-installer#coreos-installer
 [csr-requests]: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/#requesting-a-certificate
+[equinix-metal]: https://metal.equinix.com
+[equinix-metal-console]: https://console.equinix.com
 [etcd-ports]: https://github.com/openshift/origin/pull/21520
 [machine-config-server]: https://github.com/openshift/machine-config-operator/blob/master/docs/MachineConfigServer.md
 [matchbox]: https://github.com/coreos/matchbox
diff --git a/upi/metal/README.md b/upi/metal/README.md
index da6e8a37efa..2e6ad93ee30 100644
--- a/upi/metal/README.md
+++ b/upi/metal/README.md
@@ -1,4 +1,4 @@
-# UPI Bare Metal
+# Equinix Metal UPI

 ## Pre-requisites

@@ -14,13 +14,19 @@
Create a public route53 zone using this [guide][aws-create-public-route53-zone].
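+For example, the zone can be created with the AWS CLI (a sketch; `devcluster.example.com` stands in for your own domain):
+
+```sh
+# The caller reference only needs to be unique per request; a timestamp works.
+aws route53 create-hosted-zone \
+  --name devcluster.example.com \
+  --caller-reference "$(date +%s)"
+```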
Setup `default` AWS cli profile on the host that will run the example terraform scripts using this [guide][aws-cli-configure-creds]

-### Packet
+### Equinix Metal

-Setup a Project in Packet.net that will be used to deploy servers, for example using this [guide][packet-deploy-server]
+Some portions of this guide refer to "Packet" variables and tools. [Packet was
+acquired by Equinix][acquisition] and the features became available as [Equinix
+Metal][introducing-equinix-metal] in 2020.

-Setup API keys for your Project in Packet.net using this [guide][packet-api-keys]
+Setup a Project in [Equinix Metal][equinix-metal] that will be used to deploy servers, for example using this [guide][metal-deploy-server].

-Store the API keys in `PACKET_AUTH_TOKEN` so that `terraform-provide-packet` can use it to deploy servers in the project. For more info see [this][terraform-provider-packet-auth]
+Setup API keys for your project in Equinix Metal using this [guide][metal-api-keys]
+
+Store the API keys in `PACKET_AUTH_TOKEN` so that `terraform-provider-packet`
+can use it to deploy servers in the project. For more info see
+[this][terraform-provider-packet-auth]

 #### Terraform

@@ -43,8 +49,11 @@
 terraform-examples config.tf > terraform.tfvars.example
 ```

 [aws-cli-configure-creds]: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
 [aws-create-public-route53-zone]: https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html
 [coreos-matchbox-getting-started]: https://matchbox.psdn.io/getting-started/
-[packet-api-keys]: https://www.packet.com/developers/changelog/project-only-api-keys/
-[packet-deploy-server]: https://support.packet.com/kb/articles/deploy-a-server
+[metal-api-keys]: https://metal.equinix.com/developers/docs/accounts/users/#api-keys
+[metal-deploy-server]: https://metal.equinix.com/developers/docs/deploy/on-demand/
 [terraform-examples]: https://github.com/s-urbaniak/terraform-examples#terraform-examples
 [terraform-getting-started]: https://learn.hashicorp.com/terraform/getting-started/install.html
-[terraform-provider-packet-auth]: https://www.terraform.io/docs/providers/packet/index.html#auth_token
+[terraform-provider-packet-auth]: https://registry.terraform.io/providers/packethost/packet/latest/docs#auth_token
+[acquisition]: https://www.equinix.com/newsroom/press-releases/2020/03/equinix-completes-acquisition-of-bare-metal-leader-packet
+[introducing-equinix-metal]: https://blog.equinix.com/blog/2020/10/06/equinix-metal-metal-and-more/
+[equinix-metal]: https://metal.equinix.com
\ No newline at end of file

From 79745e064173969fda9ec624ae3771812bec7768 Mon Sep 17 00:00:00 2001
From: Joseph Callen 
Date: Wed, 20 May 2020 22:07:26 -0400
Subject: [PATCH 02/10] vSphere IPI: Enable thin provisioning via the OVA import

This PR enables thin provisioning via the import process of the RHCOS OVA.
Terraform will then clone the virtual machine based on the existing
template's disk type. This adds an additional option, `Thin`, to the
vSphere platform struct.
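For illustration, the new field would be set from `install-config.yaml` roughly as follows (a sketch based on the `thin` JSON tag added below; the surrounding values are placeholders):

```yaml
platform:
  vsphere:
    vCenter: vcenter.example.com
    datacenter: dc1
    defaultDatastore: datastore1
    # Import the RHCOS OVA with a thin-provisioned VMDK.
    thin: true
```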
--- data/data/vsphere/pre-bootstrap/main.tf | 1 + data/data/vsphere/variables-vsphere.tf | 3 +++ pkg/asset/cluster/tfvars.go | 1 + .../resource_vsphereprivate_import_ova.go | 19 +++++++++++++++++-- pkg/tfvars/vsphere/vsphere.go | 3 +++ pkg/types/vsphere/platform.go | 3 +++ 6 files changed, 28 insertions(+), 2 deletions(-) diff --git a/data/data/vsphere/pre-bootstrap/main.tf b/data/data/vsphere/pre-bootstrap/main.tf index 253083ff35f..ab4574bcab9 100644 --- a/data/data/vsphere/pre-bootstrap/main.tf +++ b/data/data/vsphere/pre-bootstrap/main.tf @@ -50,6 +50,7 @@ resource "vsphereprivate_import_ova" "import" { network = var.vsphere_network folder = local.folder tag = vsphere_tag.tag.id + thin = var.template_thin_vmdk } resource "vsphere_tag_category" "category" { diff --git a/data/data/vsphere/variables-vsphere.tf b/data/data/vsphere/variables-vsphere.tf index 3870e5ddef6..c98641f180a 100644 --- a/data/data/vsphere/variables-vsphere.tf +++ b/data/data/vsphere/variables-vsphere.tf @@ -77,3 +77,6 @@ variable "vsphere_control_plane_cores_per_socket" { type = number } +variable "template_thin_vmdk" { + type = bool +} \ No newline at end of file diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index ad9835d8473..68893b383b5 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -628,6 +628,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { Cluster: installConfig.Config.VSphere.Cluster, ImageURL: string(*rhcosImage), PreexistingFolder: preexistingFolder, + Thin: installConfig.Config.Platform.VSphere.Thin, }, ) if err != nil { diff --git a/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go b/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go index 1c95add97c6..240511dbc3e 100644 --- a/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go +++ b/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go @@ -93,6 +93,12 @@ func resourceVSpherePrivateImportOva() *schema.Resource { ForceNew: true, ValidateFunc: validation.NoZeroValues, }, + "thin": { + Type: schema.TypeBool, + Description: "If set to True then the OVA will be imported as a Thin provisioned VMDK.", + Required: true, + ForceNew: true, + }, }, } } @@ -347,11 +353,20 @@ func resourceVSpherePrivateImportOvaCreate(d *schema.ResourceData, meta interfac Name: ovfEnvelope.Network.Networks[0].Name, Network: importOvaParams.Network.Reference(), }} + + var diskType types.OvfCreateImportSpecParamsDiskProvisioningType + + if d.Get("thin").(bool) { + diskType = types.OvfCreateImportSpecParamsDiskProvisioningTypeThin + } else { + diskType = types.OvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick + } // This is a very minimal spec for importing // an OVF. 
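+	// For reference: thin provisioning allocates VMDK blocks on demand,
+	// while eagerZeroedThick allocates and zeroes the entire disk up
+	// front at import time.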
 	cisp := types.OvfCreateImportSpecParams{
-		EntityName:     d.Get("name").(string),
-		NetworkMapping: networkMappings,
+		DiskProvisioning: string(diskType),
+		EntityName:       d.Get("name").(string),
+		NetworkMapping:   networkMappings,
 	}

 	m := ovf.NewManager(client)
diff --git a/pkg/tfvars/vsphere/vsphere.go b/pkg/tfvars/vsphere/vsphere.go
index 17d46de117f..dcd05a43f94 100644
--- a/pkg/tfvars/vsphere/vsphere.go
+++ b/pkg/tfvars/vsphere/vsphere.go
@@ -26,6 +26,7 @@ type config struct {
 	Template          string `json:"vsphere_template"`
 	OvaFilePath       string `json:"vsphere_ova_filepath"`
 	PreexistingFolder bool   `json:"vsphere_preexisting_folder"`
+	Thin              bool   `json:"template_thin_vmdk"`
 }

 // TFVarsSources contains the parameters to be converted into Terraform variables
@@ -36,6 +37,7 @@ type TFVarsSources struct {
 	Cluster           string
 	ImageURL          string
 	PreexistingFolder bool
+	Thin              bool
 }

 //TFVars generate vSphere-specific Terraform variables
@@ -68,6 +70,7 @@ func TFVars(sources TFVarsSources) ([]byte, error) {
 		Template:          controlPlaneConfig.Template,
 		OvaFilePath:       cachedImage,
 		PreexistingFolder: sources.PreexistingFolder,
+		Thin:              sources.Thin,
 	}

 	return json.MarshalIndent(cfg, "", "  ")
diff --git a/pkg/types/vsphere/platform.go b/pkg/types/vsphere/platform.go
index 6b5034e1797..fda64524306 100644
--- a/pkg/types/vsphere/platform.go
+++ b/pkg/types/vsphere/platform.go
@@ -47,4 +47,7 @@ type Platform struct {
 	// Network specifies the name of the network to be used by the cluster.
 	Network string `json:"network,omitempty"`
+
+	// Thin specifies if thin disks should be used instead of thick
+	Thin bool `json:"thin,omitempty"`
 }

From 02006136b8348c43ece5f8c994513ac980d73a50 Mon Sep 17 00:00:00 2001
From: ayesha54 
Date: Fri, 6 Aug 2021 20:08:27 +0200
Subject: [PATCH 03/10] vSphere IPI: Enable thin disk provisioning via the OVA import
---
 data/data/install.openshift.io_installconfigs.yaml |  3 +++
 data/data/vsphere/pre-bootstrap/main.tf            |  2 +-
 data/data/vsphere/variables-vsphere.tf             |  7 +++----
 pkg/asset/cluster/tfvars.go                        |  2 +-
 .../resource_vsphereprivate_import_ova.go          | 14 ++++++++------
 pkg/tfvars/vsphere/vsphere.go                      |  6 +++---
 pkg/types/vsphere/platform.go                      |  4 ++--
 7 files changed, 21 insertions(+), 17 deletions(-)

diff --git a/data/data/install.openshift.io_installconfigs.yaml b/data/data/install.openshift.io_installconfigs.yaml
index 6d58be6dba1..a3a7474f848 100644
--- a/data/data/install.openshift.io_installconfigs.yaml
+++ b/data/data/install.openshift.io_installconfigs.yaml
@@ -2133,6 +2133,9 @@ spec:
               vCenter:
                 description: VCenter is the domain name or IP address of the vCenter.
                 type: string
+              diskType:
+                description: DiskType is the disk provisioning type for vSphere, e.g. eagerZeroedThick or thin. If unset, it defaults to thick.
+                type: string
             required:
             - datacenter
             - defaultDatastore
diff --git a/data/data/vsphere/pre-bootstrap/main.tf b/data/data/vsphere/pre-bootstrap/main.tf
index ab4574bcab9..e04e1b40f4d 100644
--- a/data/data/vsphere/pre-bootstrap/main.tf
+++ b/data/data/vsphere/pre-bootstrap/main.tf
@@ -50,7 +50,7 @@ resource "vsphereprivate_import_ova" "import" {
   network   = var.vsphere_network
   folder    = local.folder
   tag       = vsphere_tag.tag.id
-  thin      = var.template_thin_vmdk
+  disk_type = var.disk_type
 }

 resource "vsphere_tag_category" "category" {
diff --git a/data/data/vsphere/variables-vsphere.tf b/data/data/vsphere/variables-vsphere.tf
index c98641f180a..95107eb09fc 100644
--- a/data/data/vsphere/variables-vsphere.tf
+++ b/data/data/vsphere/variables-vsphere.tf
@@ -76,7 +76,6 @@ variable "vsphere_control_plane_num_cpus" {
 variable "vsphere_control_plane_cores_per_socket" {
   type = number
 }
-
-variable "template_thin_vmdk" {
-  type = bool
-}
\ No newline at end of file
+variable "disk_type" {
+  type = string
+}
diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go
index 68893b383b5..4986ffec61b 100644
--- a/pkg/asset/cluster/tfvars.go
+++ b/pkg/asset/cluster/tfvars.go
@@ -628,7 +628,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
 				Cluster:           installConfig.Config.VSphere.Cluster,
 				ImageURL:          string(*rhcosImage),
 				PreexistingFolder: preexistingFolder,
-				Thin:              installConfig.Config.Platform.VSphere.Thin,
+				DiskType:          installConfig.Config.Platform.VSphere.DiskType,
 			},
 		)
 		if err != nil {
diff --git a/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go b/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go
index 240511dbc3e..a311c214812 100644
--- a/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go
+++ b/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go
@@ -93,10 +93,10 @@ func resourceVSpherePrivateImportOva() *schema.Resource {
 				ForceNew:     true,
 				ValidateFunc: validation.NoZeroValues,
 			},
-			"thin": {
-				Type:        schema.TypeBool,
-				Description: "If set to True then the OVA will be imported as a Thin provisioned VMDK.",
-				Required:    true,
+			"disk_type": {
+				Type:        schema.TypeString,
+				Description: "The disk provisioning type, e.g. eagerZeroedThick or thin. If unset, it defaults to thick.",
+				Optional:    true,
 				ForceNew:    true,
 			},
 		},
@@ -356,10 +356,12 @@ func resourceVSpherePrivateImportOvaCreate(d *schema.ResourceData, meta interfac

 	var diskType types.OvfCreateImportSpecParamsDiskProvisioningType

-	if d.Get("thin").(bool) {
+	if d.Get("disk_type").(string) == "thin" {
 		diskType = types.OvfCreateImportSpecParamsDiskProvisioningTypeThin
-	} else {
+	} else if d.Get("disk_type").(string) == "eagerZeroedThick" {
 		diskType = types.OvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick
+	} else {
+		diskType = types.OvfCreateImportSpecParamsDiskProvisioningTypeThick
 	}
 	// This is a very minimal spec for importing
 	// an OVF.
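With the field now a string, the provisioning mode is selected by name from `install-config.yaml`, e.g. (a sketch; unset or unrecognized values fall back to thick per the logic above, and the other values are placeholders):

```yaml
platform:
  vsphere:
    vCenter: vcenter.example.com
    datacenter: dc1
    defaultDatastore: datastore1
    diskType: eagerZeroedThick
```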
diff --git a/pkg/tfvars/vsphere/vsphere.go b/pkg/tfvars/vsphere/vsphere.go
index dcd05a43f94..97a7f22d6e4 100644
--- a/pkg/tfvars/vsphere/vsphere.go
+++ b/pkg/tfvars/vsphere/vsphere.go
@@ -26,7 +26,7 @@ type config struct {
 	Template          string `json:"vsphere_template"`
 	OvaFilePath       string `json:"vsphere_ova_filepath"`
 	PreexistingFolder bool   `json:"vsphere_preexisting_folder"`
-	Thin              bool   `json:"template_thin_vmdk"`
+	DiskType          string `json:"disk_type"`
 }

 // TFVarsSources contains the parameters to be converted into Terraform variables
@@ -37,7 +37,7 @@ type TFVarsSources struct {
 	Cluster           string
 	ImageURL          string
 	PreexistingFolder bool
-	Thin              bool
+	DiskType          string
 }

 //TFVars generate vSphere-specific Terraform variables
@@ -70,7 +70,7 @@ func TFVars(sources TFVarsSources) ([]byte, error) {
 		Template:          controlPlaneConfig.Template,
 		OvaFilePath:       cachedImage,
 		PreexistingFolder: sources.PreexistingFolder,
-		Thin:              sources.Thin,
+		DiskType:          sources.DiskType,
 	}

 	return json.MarshalIndent(cfg, "", "  ")
diff --git a/pkg/types/vsphere/platform.go b/pkg/types/vsphere/platform.go
index fda64524306..40c0fb16cd1 100644
--- a/pkg/types/vsphere/platform.go
+++ b/pkg/types/vsphere/platform.go
@@ -48,6 +48,6 @@ type Platform struct {
 	// Network specifies the name of the network to be used by the cluster.
 	Network string `json:"network,omitempty"`

-	// Thin specifies if thin disks should be used instead of thick
-	Thin bool `json:"thin,omitempty"`
+	// DiskType specifies the disk provisioning type to use, e.g. thin or eagerZeroedThick; if unset, thick is used
+	DiskType string `json:"diskType,omitempty"`
 }

From cbeb11448bbe7553e3164f400c776f03365c69f8 Mon Sep 17 00:00:00 2001
From: Caleb Boylan 
Date: Tue, 26 Oct 2021 16:07:51 -0700
Subject: [PATCH 04/10] aws: Output public zone id correctly when deleting
 hosted dns records

fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1965969
---
 pkg/destroy/aws/shared.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/destroy/aws/shared.go b/pkg/destroy/aws/shared.go
index 520a2178de5..b898c75ddfb 100644
--- a/pkg/destroy/aws/shared.go
+++ b/pkg/destroy/aws/shared.go
@@ -181,7 +181,6 @@ func (o *ClusterUninstaller) cleanSharedRoute53(ctx context.Context, session *se
 	if err != nil {
 		return err
 	}
-	logger = logger.WithField("id", id)

 	switch resourceType {
 	case "hostedzone":
@@ -203,6 +202,7 @@ func (o *ClusterUninstaller) cleanSharedHostedZone(ctx context.Context, client *
 	if err != nil {
 		return err
 	}
+	logger = logger.WithField("id", publicZoneID)

 	var lastError error
 	err = client.ListResourceRecordSetsPagesWithContext(

From 51a40345a7993cc8042d94816dbdb7187684e154 Mon Sep 17 00:00:00 2001
From: Vladimir Belousov 
Date: Sun, 22 Aug 2021 12:11:02 +0300
Subject: [PATCH 05/10] docs: correct spelling in docs

Correct spelling to improve the readability of the documentation
---
 docs/design/openstack/networking-infrastructure.md |  2 +-
 docs/dev/libvirt/README.md                         |  4 ++--
 docs/user/aws/limits.md                            |  2 +-
 docs/user/azure/install_upi.md                     |  4 ++--
 docs/user/azure/limits.md                          |  6 +++---
 docs/user/customization.md                         |  4 ++--
 docs/user/gcp/limits.md                            |  2 +-
 docs/user/metal/README.md                          |  2 +-
 docs/user/metal/customization_ipi.md               |  2 +-
 docs/user/openstack/README.md                      |  6 ++++++
 docs/user/openstack/install_upi.md                 |  6 +++---
 docs/user/openstack/privileges.md                  |  2 +-
 docs/user/ovirt/install_upi.md                     |  6 +++---
 docs/user/troubleshootingbootstrap.md              |  8 ++++----
 docs/user/vsphere/install_upi.md                   |  2 +-
 docs/user/vsphere/vips-dns.md                      |  2 +-
 16 files changed, 33 insertions(+), 27 deletions(-)

diff --git
a/docs/design/openstack/networking-infrastructure.md b/docs/design/openstack/networking-infrastructure.md
index 2bd6d7de3bd..308a2ba12c6 100644
--- a/docs/design/openstack/networking-infrastructure.md
+++ b/docs/design/openstack/networking-infrastructure.md
@@ -48,7 +48,7 @@ Keepalived. While the bootstrap node is up, it will have priority running the AP
 The Master nodes run dhcp, HAProxy, CoreDNS, and Keepalived. Haproxy loadbalances incoming requests to the API across all running masters. It also runs a stats and healthcheck server. Keepalived manages both VIPs on the master, where each
-master has an equal chance of being assigned one of the VIPs. Initially, the bootstrap node has the highest priority for hosting the API VIP, so they will point to addresses there at startup. Meanwhile, the master nodes will try to get the control plane, and the OpenShift API up. Keepalived implements periodic health checks for each VIP that are used to determine the weight assigned to each server. The server with the highest weight is assigned the VIP. Keepalived has two seperate healthchecks that attempt to reach the OpenShift API and CoreDNS on the localhost of each master node. When the API on a master node is reachable, Keepalived substantially increases it's weight for that VIP, making its priority higher than that of the bootstrap node and any node that does not yet have the that service running. This ensures that nodes that are incapable of serving DNS records or the OpenShift API do not get assigned the respective VIP. The Ingress VIP is also managed by a healthcheck that queries for an OCP Router HAProxy healthcheck, not the HAProxy we stand up in static pods for the API. This makes sure that the Ingress VIP is pointing to a server that is running the necessary OpenShift Ingress Operator resources to enable external access to the node.
+master has an equal chance of being assigned one of the VIPs. Initially, the bootstrap node has the highest priority for hosting the API VIP, so they will point to addresses there at startup. Meanwhile, the master nodes will try to get the control plane, and the OpenShift API up. Keepalived implements periodic health checks for each VIP that are used to determine the weight assigned to each server. The server with the highest weight is assigned the VIP. Keepalived has two separate healthchecks that attempt to reach the OpenShift API and CoreDNS on the localhost of each master node. When the API on a master node is reachable, Keepalived substantially increases its weight for that VIP, making its priority higher than that of the bootstrap node and any node that does not yet have that service running. This ensures that nodes that are incapable of serving DNS records or the OpenShift API do not get assigned the respective VIP. The Ingress VIP is also managed by a healthcheck that queries for an OCP Router HAProxy healthcheck, not the HAProxy we stand up in static pods for the API. This makes sure that the Ingress VIP is pointing to a server that is running the necessary OpenShift Ingress Operator resources to enable external access to the node.

 The Worker Nodes run dhcp, CoreDNS, and Keepalived. On workers, Keepalived is only responsible for managing the Ingress VIP. It's algorithm is the same as the one run on the masters.
diff --git a/docs/dev/libvirt/README.md b/docs/dev/libvirt/README.md index 6344048cae5..11d00ea0658 100644 --- a/docs/dev/libvirt/README.md +++ b/docs/dev/libvirt/README.md @@ -113,7 +113,7 @@ First, you need to start the libvirtd TCP socket, which is managed by systemd: sudo systemctl start libvirtd-tcp.socket ``` -To make this change persistent accross reboots you can optionally enable it: +To make this change persistent across reboots you can optionally enable it: ```sh sudo systemctl enable libvirtd-tcp.socket @@ -415,7 +415,7 @@ FATA[0019] failed to run Terraform: exit status 1 it is likely that your install configuration contains three backslashes after the protocol (e.g. `qemu+tcp:///...`), when it should only be two. -### Random domain creation errors due to libvirt race conditon +### Random domain creation errors due to libvirt race condition Depending on your libvirt version you might encounter [a race condition][bugzilla_libvirt_race] leading to an error similar to: diff --git a/docs/user/aws/limits.md b/docs/user/aws/limits.md index 542ce5d7af5..e46e638a3bb 100644 --- a/docs/user/aws/limits.md +++ b/docs/user/aws/limits.md @@ -42,7 +42,7 @@ For multiple clusters, a higher limit will likely be required (and will certainl ### Example: Using North Virginia (us-east-1) -North Virginia (us-east-1) has six availablity zones, so a higher limit is required unless you configure your cluster to use fewer zones. +North Virginia (us-east-1) has six availability zones, so a higher limit is required unless you configure your cluster to use fewer zones. To support the default, all-zone installation, please submit a limit increase for VPC Elastic IPs similar to the following in the support dashboard (to create more than one cluster, a higher limit will be necessary): ![Increase Elastic IP limit in AWS](images/support_increase_elastic_ip.png) diff --git a/docs/user/azure/install_upi.md b/docs/user/azure/install_upi.md index 9371b9721c4..f4082f57252 100644 --- a/docs/user/azure/install_upi.md +++ b/docs/user/azure/install_upi.md @@ -12,7 +12,7 @@ example. * the following binaries installed and in $PATH: * [openshift-install][openshiftinstall] * It is recommended that the OpenShift installer CLI version is the same of the cluster being deployed. The version used in this example is 4.3.0 GA. - * [az (Azure CLI)][azurecli] installed and aunthenticated + * [az (Azure CLI)][azurecli] installed and authenticated * Commands flags and structure may vary between `az` versions. The recommended version used in this example is 2.0.80. * python3 * [jq][jqjson] @@ -455,7 +455,7 @@ csr-wpvxq 19m system:serviceaccount:openshift-machine-config-operator:node- csr-xpp49 19m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Approved,Issued ``` -You should inspect each pending CSR with the `oc describe csr ` command and verify that it comes from a node you recognise. If it does, they can be approved: +You should inspect each pending CSR with the `oc describe csr ` command and verify that it comes from a node you recognize. If it does, they can be approved: ```console $ oc adm certificate approve csr-8bppf csr-dj2w4 csr-ph8s8 diff --git a/docs/user/azure/limits.md b/docs/user/azure/limits.md index eb9ec64ead8..2d8435223f5 100644 --- a/docs/user/azure/limits.md +++ b/docs/user/azure/limits.md @@ -32,7 +32,7 @@ A public IP address is also created for the bootstrap machine during installatio ## Network Security Groups Each cluster creates network security groups for every subnet within the VNet. 
The default install creates network -security groups for the control plane and for the compuete node subnets. The default limit of 5000 for new accounts +security groups for the control plane and for the compute node subnets. The default limit of 5000 for new accounts allows for many clusters to be created. The network security groups which exist after the default install are: 1. controlplane @@ -94,13 +94,13 @@ By default, each cluster will create 3 network load balancers. The default limit 3. external * Public IP address that load balances requests to port 6443 across control-plane nodes -Additional Kuberntes LoadBalancer Service objects will create additional [load balancers][load-balancing]. +Additional Kubernetes LoadBalancer Service objects will create additional [load balancers][load-balancing]. ## Increasing limits -To increase a limit beyond the maximum, a suppport request will need to be filed. +To increase a limit beyond the maximum, a support request will need to be filed. First, click on "help + support". It is located on the bottom left menu. diff --git a/docs/user/customization.md b/docs/user/customization.md index 08729d2629b..fec93d2c080 100644 --- a/docs/user/customization.md +++ b/docs/user/customization.md @@ -39,7 +39,7 @@ The following `install-config.yaml` properties are available: The default is 10.128.0.0/14 with a host prefix of /23. * `cidr` (required [IP network](#ip-networks)): The IP block address pool. * `hostPrefix` (required integer): The prefix size to allocate to each node from the CIDR. - For example, 24 would allocate 2^8=256 adresses to each node. If this field is not used by the plugin, it can be left unset. + For example, 24 would allocate 2^8=256 addresses to each node. If this field is not used by the plugin, it can be left unset. * `machineNetwork` (optional array of objects): The IP address pools for machines. * `cidr` (required [IP network](#ip-networks)): The IP block address pool. The default is 10.0.0.0/16 for all platforms other than libvirt. @@ -72,7 +72,7 @@ For example, 10.0.0.0/16 represents IP addresses 10.0.0.0 through 10.0.255.255. The following machine-pool properties are available: -* `architecture` (optional string): Determines the instruction set architecture of the machines in the pool. Currently, heteregeneous clusters are not supported, so all pools must specify the same architecture. +* `architecture` (optional string): Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `amd64` (the default). * `hyperthreading` (optional string): Determines the mode of hyperthreading that machines in the pool will utilize. Valid values are `Enabled` (the default) and `Disabled`. diff --git a/docs/user/gcp/limits.md b/docs/user/gcp/limits.md index 5066b585a80..5db51f9aceb 100644 --- a/docs/user/gcp/limits.md +++ b/docs/user/gcp/limits.md @@ -55,7 +55,7 @@ A standard OpenShift installation creates 2 forwarding rules. A standard OpenShift installation creates 3 in-use global IP addresses. ### Networks -A standard OpenShift instlalation creates 2 networks. +A standard OpenShift installation creates 2 networks. ### Routers A standard OpenShift installation creates 1 router. 
diff --git a/docs/user/metal/README.md b/docs/user/metal/README.md index 04984f0741d..5fe576ad6dd 100644 --- a/docs/user/metal/README.md +++ b/docs/user/metal/README.md @@ -2,7 +2,7 @@ OpenShift has support for bare metal deployments with either [User provided infrastructure (UPI)](install_upi.md), or [Installer-provided -instrastructure (IPI)](install_ipi.md). +infrastructure (IPI)](install_ipi.md). The following is a summary of key differences: diff --git a/docs/user/metal/customization_ipi.md b/docs/user/metal/customization_ipi.md index fe7abecf6b5..4a620573293 100644 --- a/docs/user/metal/customization_ipi.md +++ b/docs/user/metal/customization_ipi.md @@ -57,7 +57,7 @@ and TFTP server in the cluster to support provisioning. Much of this can be customized. -* `provisioningNetorkCIDR` (optional string): Override the default provisioning network. +* `provisioningNetworkCIDR` (optional string): Override the default provisioning network. * `bootstrapProvisioningIP` (optional string): Override the bootstrap provisioning IP. If unspecified, uses the 2nd address in the provisioning network's subnet. diff --git a/docs/user/openstack/README.md b/docs/user/openstack/README.md index 2f671fab9d3..88fc1ca899a 100644 --- a/docs/user/openstack/README.md +++ b/docs/user/openstack/README.md @@ -36,9 +36,15 @@ In addition, it covers the installation with the default CNI (OpenShiftSDN), as - [Destroying The Cluster](#destroying-the-cluster) - [Post Install Operations](#post-install-operations) - [Adding a MachineSet](#adding-a-machineset) + - [Defining a MachineSet That Uses Multiple Networks](#defining-a-machineset-that-uses-multiple-networks) - [Using a Server Group](#using-a-server-group) - [Setting Nova Availability Zones](#setting-nova-availability-zones) - [Using a Custom External Load Balancer](#using-a-custom-external-load-balancer) + - [External Facing OpenShift Services](#external-facing-openshift-services) + - [HAProxy Example Load Balancer Config](#haproxy-example-load-balancer-config) + - [DNS Lookups](#dns-lookups) + - [Verifying that the API is Reachable](#verifying-that-the-api-is-reachable) + - [Verifying that Apps Reachable](#verifying-that-apps-reachable) - [Reconfiguring cloud provider](#reconfiguring-cloud-provider) - [Modifying cloud provider options](#modifying-cloud-provider-options) - [Refreshing a CA Certificate](#refreshing-a-ca-certificate) diff --git a/docs/user/openstack/install_upi.md b/docs/user/openstack/install_upi.md index 62e0d501b86..570eb051a31 100644 --- a/docs/user/openstack/install_upi.md +++ b/docs/user/openstack/install_upi.md @@ -63,7 +63,7 @@ of this method of installation. ## Prerequisites -The file `inventory.yaml` contains the variables most likely to need customisation. +The file `inventory.yaml` contains the variables most likely to need customization. **NOTE**: some of the default pods (e.g. the `openshift-router`) require at least two nodes so that is the effective minimum. The requirements for UPI are broadly similar to the [ones for OpenStack IPI][ipi-reqs]: @@ -580,7 +580,7 @@ Possible choices include: * Swift (see Example 1 below); * Glance (see Example 2 below); * Amazon S3; -* Internal web server inside your organisation; +* Internal web server inside your organization; * A throwaway Nova server in `$INFRA_ID-nodes` hosting a static web server exposing the file. 
In this guide, we will assume the file is at the following URL: @@ -932,7 +932,7 @@ csr-lrtlk 15m system:serviceaccount:openshift-machine-config-operator:node- csr-wkm94 16m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Approved,Issued ``` -You should inspect each pending CSR and verify that it comes from a node you recognise: +You should inspect each pending CSR and verify that it comes from a node you recognize: ```sh $ oc describe csr csr-88jp8 diff --git a/docs/user/openstack/privileges.md b/docs/user/openstack/privileges.md index ed1a4a73abb..a634a837c5b 100644 --- a/docs/user/openstack/privileges.md +++ b/docs/user/openstack/privileges.md @@ -1,6 +1,6 @@ # Required Privileges -In order to succesfully deploy an OpenShift cluster on OpenStack, the user passed to the installer needs a particular set of permissions in a given project. Our recommendation is to create a user in the project that you intend to install your cluster onto with the role *member*. In the event that you want to customize the permissions for a more restricted install, the following use cases can accomodate them. +In order to successfully deploy an OpenShift cluster on OpenStack, the user passed to the installer needs a particular set of permissions in a given project. Our recommendation is to create a user in the project that you intend to install your cluster onto with the role *member*. In the event that you want to customize the permissions for a more restricted install, the following use cases can accommodate them. ## Bring Your Own Networks diff --git a/docs/user/ovirt/install_upi.md b/docs/user/ovirt/install_upi.md index 4b8ccf1804e..57f02d81445 100644 --- a/docs/user/ovirt/install_upi.md +++ b/docs/user/ovirt/install_upi.md @@ -160,7 +160,7 @@ the URL related to the `OpenStack` qcow2 image type, like in the example below https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/pre-release/4.6.0-0.nightly-2020-07-16-122837/rhcos-4.6.0-0.nightly-2020-07-16-122837-x86_64-openstack.x86_64.qcow2.gz ``` -The version of the image should be choosen according to the OpenShift version you're about to install (in general less than or equal to the OCP +The version of the image should be chosen according to the OpenShift version you're about to install (in general less than or equal to the OCP version). Once you have the URL set in the [inventory.yml](../../../upi/ovirt/inventory.yml) a dedicated Ansible playbook will be in charge to download the `qcow2.gz` file, uncompress it in a specified folder and use it to create oVirt/RHV templates. @@ -478,7 +478,7 @@ parameters needed to reach the oVirt/RHV engine and use its REST API. **NOTE:** Some of the parameters added during the `openshift-install` workflow, in particular the `Internal API virtual IP` and `Ingress virtual IP`, will not be used because already configured in your infrastructure DNS (see [DNS](#dns) section). -Other paramenters like `oVirt cluster`, `oVirt storage`, `oVirt network`, will be used as specified in the [inventory.yml](../../../upi/ovirt/inventory.yml) +Other parameters like `oVirt cluster`, `oVirt storage`, `oVirt network`, will be used as specified in the [inventory.yml](../../../upi/ovirt/inventory.yml) and removed from the `install-config.yaml` with the previously mentioned `virtual IPs`, using a script reported in a [section below](#set-platform-to-none). 
@@ -612,7 +612,7 @@ The `infraID` will be used by the UPI Ansible playbooks as prefix for the VMs cr process avoiding name clashes in case of multiple installations in the same oVirt/RHV cluster. **Note:** certificates contained into ignition config files expire after 24 hours. You must complete the cluster installation -and keep the cluster running for 24 hours in a non-degradated state to ensure that the first certificate rotation has finished. +and keep the cluster running for 24 hours in a non-degraded state to ensure that the first certificate rotation has finished. ## Create templates and VMs diff --git a/docs/user/troubleshootingbootstrap.md b/docs/user/troubleshootingbootstrap.md index 79fef855636..9a8917528d0 100644 --- a/docs/user/troubleshootingbootstrap.md +++ b/docs/user/troubleshootingbootstrap.md @@ -97,7 +97,7 @@ This directory contains all the operators or their operands running on the boots For each container the directory has two files, * `.log`, which contains the log of the container. -* `.inspect`, which containts the information about the container like the image, volume mounts, arguments etc. +* `.inspect`, which contains the information about the container like the image, volume mounts, arguments etc. #### directory: bootstrap/journals @@ -107,7 +107,7 @@ The journals directory contains the logs for *important* systemd units. These un * `crio-configure.log` and `crio.log`, these units are responsible for configuring the CRI-O on the bootstrap host and CRI-O daemon respectively. * `kubelet.log`, the kubelet service is responsible for running the kubelet on the bootstrap host. The kubelet on the bootstrap host is responsible for running the static pods for etcd, bootstrap-kube-controlplane and various other operators in bootstrap mode. * `approve-csr.log`, the approve-csr unit is responsible for allowing control-plane machines to join OpenShift cluster. This unit performs the job of in-cluster approver while the bootstrapping is in progress. -* `bootkube.log`, the bootkube service is the unit that performs the bootstrapping of OpenShift clusters using all the operators. This service is respnsible for running all the required steps to bootstrap the API and then wait for success. +* `bootkube.log`, the bootkube service is the unit that performs the bootstrapping of OpenShift clusters using all the operators. This service is responsible for running all the required steps to bootstrap the API and then wait for success. There might also be other services that are important for some platforms like OpenStack, that will have logs in this directory. @@ -118,7 +118,7 @@ The pods directory contains the information and logs from all the render command For each container the directory has two files, * `.log`, which contains the log of the container. -* `.inspect`, which containts the information about the container like the image, volume mounts, arguments etc. +* `.inspect`, which contains the information about the container like the image, volume mounts, arguments etc. ### directory: resources @@ -216,4 +216,4 @@ control-plane 3 directories, 0 files ``` -The troubleshooting would require the logs of the installer gathering the log bundle, which are easily availble in `.openshift_install.log`. +The troubleshooting would require the logs of the installer gathering the log bundle, which are easily available in `.openshift_install.log`. 
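As a usage note, the gathered bundle is a tarball that can be unpacked before walking the directory layout described above (the exact file name is printed by the installer when the gather finishes; this one is a placeholder):

```sh
tar -xzf log-bundle-<timestamp>.tar.gz
```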
diff --git a/docs/user/vsphere/install_upi.md b/docs/user/vsphere/install_upi.md
index dd621a37ac3..3567eb75eea 100644
--- a/docs/user/vsphere/install_upi.md
+++ b/docs/user/vsphere/install_upi.md
@@ -284,7 +284,7 @@ The Ignition config created by the OpenShift Installer cannot be used directly b

 The hostname of each control plane and worker machine must be resolvable from all nodes within the cluster.

-Preferrably, the hostname and IP address will be set via DHCP.
+Preferably, the hostname and IP address will be set via DHCP.

 If you need to manually set a hostname and/or configure a static IP address, you can pass a custom networking command-line `ip=` parameter to Afterburn for configuration. In order to do so, set the vApp property `guestinfo.afterburn.initrd.network-kargs` to the `ip` parameter using this format: `ip=<ip address>::<gateway>:<netmask>:<hostname>:<interface>:none:<dns server>`, e.g. `ip=10.0.0.2::10.0.0.2:255.255.255.0:compute-1:ens192:none:8.8.8.8`
diff --git a/docs/user/vsphere/vips-dns.md b/docs/user/vsphere/vips-dns.md
index 1b6c576beae..3900f3416ff 100644
--- a/docs/user/vsphere/vips-dns.md
+++ b/docs/user/vsphere/vips-dns.md
@@ -1,4 +1,4 @@
-# IP Adresses
+# IP Addresses

 An installer-provisioned vSphere installation requires two static IP addresses:

From 2c0145ef40a2d6614fffd5c69c1d74637bcec016 Mon Sep 17 00:00:00 2001
From: Andrea Fasano 
Date: Wed, 15 Sep 2021 04:09:39 -0400
Subject: [PATCH 06/10] baremetal: improve host role management during asset
 creation

The logic for generating baremetal manifests and Terraform variables now takes the host role tag into account properly, if defined. This makes it possible to define master/worker hosts in any order. To remain backward compatible, a host that has no role defined (or an unsupported value) is considered eligible for the ControlPlane or Compute role respectively, based on the configured requirements. The validation rule has been enhanced to verify that the correct number of hosts has been configured.
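For illustration, hosts could then be declared in any order in `install-config.yaml`, e.g. (a sketch; names, MAC addresses, and BMC details are placeholders):

```yaml
platform:
  baremetal:
    hosts:
    - name: worker-0
      role: worker   # explicitly tagged, so never consumed as a control plane host
      bootMACAddress: 00:11:22:33:44:00
      bmc:
        address: ipmi://192.168.111.1
        username: admin
        password: secret
    - name: master-0
      role: master
      bootMACAddress: 00:11:22:33:44:01
      bmc:
        address: ipmi://192.168.111.2
        username: admin
        password: secret
```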
--- data/data/baremetal/main.tf | 2 +- data/data/baremetal/masters/main.tf | 16 +- data/data/baremetal/masters/variables.tf | 12 +- data/data/baremetal/variables-baremetal.tf | 12 +- pkg/asset/cluster/tfvars.go | 1 + .../ignition/bootstrap/baremetal/template.go | 2 +- pkg/asset/machines/baremetal/hosts.go | 124 ++-- pkg/asset/machines/baremetal/hosts_test.go | 602 ++++++++++++------ pkg/tfvars/baremetal/baremetal.go | 32 +- pkg/tfvars/baremetal/baremetal_test.go | 174 +++++ pkg/types/baremetal/platform.go | 10 + pkg/types/baremetal/validation/platform.go | 50 +- .../baremetal/validation/platform_test.go | 164 ++++- pkg/types/validation/installconfig_test.go | 2 + 14 files changed, 899 insertions(+), 304 deletions(-) create mode 100644 pkg/tfvars/baremetal/baremetal_test.go diff --git a/data/data/baremetal/main.tf b/data/data/baremetal/main.tf index 1c5c634e331..bdfe4127911 100644 --- a/data/data/baremetal/main.tf +++ b/data/data/baremetal/main.tf @@ -28,7 +28,7 @@ module "masters" { master_count = var.master_count ignition = var.ignition_master - hosts = var.hosts + masters = var.masters properties = var.properties root_devices = var.root_devices driver_infos = var.driver_infos diff --git a/data/data/baremetal/masters/main.tf b/data/data/baremetal/masters/main.tf index f30d93a565e..26439f7ca7c 100644 --- a/data/data/baremetal/masters/main.tf +++ b/data/data/baremetal/masters/main.tf @@ -1,6 +1,6 @@ resource "ironic_node_v1" "openshift-master-host" { count = var.master_count - name = var.hosts[count.index]["name"] + name = var.masters[count.index]["name"] resource_class = "baremetal" inspect = true @@ -9,7 +9,7 @@ resource "ironic_node_v1" "openshift-master-host" { ports = [ { - address = var.hosts[count.index]["port_address"] + address = var.masters[count.index]["port_address"] pxe_enabled = "true" }, ] @@ -17,14 +17,14 @@ resource "ironic_node_v1" "openshift-master-host" { properties = var.properties[count.index] root_device = var.root_devices[count.index] - driver = var.hosts[count.index]["driver"] + driver = var.masters[count.index]["driver"] driver_info = var.driver_infos[count.index] - boot_interface = var.hosts[count.index]["boot_interface"] - management_interface = var.hosts[count.index]["management_interface"] - power_interface = var.hosts[count.index]["power_interface"] - raid_interface = var.hosts[count.index]["raid_interface"] - vendor_interface = var.hosts[count.index]["vendor_interface"] + boot_interface = var.masters[count.index]["boot_interface"] + management_interface = var.masters[count.index]["management_interface"] + power_interface = var.masters[count.index]["power_interface"] + raid_interface = var.masters[count.index]["raid_interface"] + vendor_interface = var.masters[count.index]["vendor_interface"] } resource "ironic_deployment" "openshift-master-deployment" { diff --git a/data/data/baremetal/masters/variables.tf b/data/data/baremetal/masters/variables.tf index b6f39fec3d5..7475593911e 100644 --- a/data/data/baremetal/masters/variables.tf +++ b/data/data/baremetal/masters/variables.tf @@ -9,27 +9,27 @@ variable "ignition" { description = "The content of the master ignition file" } -variable "hosts" { +variable "masters" { type = list(map(string)) - description = "Hardware details for hosts" + description = "Hardware details for masters" } variable "properties" { type = list(map(string)) - description = "Properties for hosts" + description = "Properties for masters" } variable "root_devices" { type = list(map(string)) - description = "Root devices for hosts" + 
description = "Root devices for masters" } variable "driver_infos" { type = list(map(string)) - description = "BMC information for hosts" + description = "BMC information for masters" } variable "instance_infos" { type = list(map(string)) - description = "Instance information for hosts" + description = "Instance information for masters" } diff --git a/data/data/baremetal/variables-baremetal.tf b/data/data/baremetal/variables-baremetal.tf index 8203223ba13..b62ae3bc655 100644 --- a/data/data/baremetal/variables-baremetal.tf +++ b/data/data/baremetal/variables-baremetal.tf @@ -28,9 +28,9 @@ variable "ironic_password" { description = "Password for authentication to Ironic" } -variable "hosts" { +variable "masters" { type = list(map(string)) - description = "Hardware details for hosts" + description = "Hardware details for masters" } variable "bridges" { @@ -40,20 +40,20 @@ variable "bridges" { variable "properties" { type = list(map(string)) - description = "Properties for hosts" + description = "Properties for masters" } variable "root_devices" { type = list(map(string)) - description = "Root devices for hosts" + description = "Root devices for masters" } variable "driver_infos" { type = list(map(string)) - description = "BMC information for hosts" + description = "BMC information for masters" } variable "instance_infos" { type = list(map(string)) - description = "Instance information for hosts" + description = "Instance information for masters" } diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index 6bc5f7edc62..b89d4a7a2a2 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -525,6 +525,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { } data, err = baremetaltfvars.TFVars( + *installConfig.Config.ControlPlane.Replicas, installConfig.Config.Platform.BareMetal.LibvirtURI, installConfig.Config.Platform.BareMetal.APIVIP, imageCacheIP, diff --git a/pkg/asset/ignition/bootstrap/baremetal/template.go b/pkg/asset/ignition/bootstrap/baremetal/template.go index 69cef1c333b..fe8a07fbb66 100644 --- a/pkg/asset/ignition/bootstrap/baremetal/template.go +++ b/pkg/asset/ignition/bootstrap/baremetal/template.go @@ -74,7 +74,7 @@ func GetTemplateData(config *baremetal.Platform, networks []types.MachineNetwork var dhcpAllowList []string for _, host := range config.Hosts { - if host.Role == "master" { + if host.IsMaster() { dhcpAllowList = append(dhcpAllowList, host.BootMACAddress) } } diff --git a/pkg/asset/machines/baremetal/hosts.go b/pkg/asset/machines/baremetal/hosts.go index 8a6be420cbe..f92a7d6cd4f 100644 --- a/pkg/asset/machines/baremetal/hosts.go +++ b/pkg/asset/machines/baremetal/hosts.go @@ -12,6 +12,7 @@ import ( baremetalhost "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/baremetal" ) // HostSettings hold the information needed to build the manifests to @@ -25,6 +26,64 @@ type HostSettings struct { Secrets []corev1.Secret } +func createSecret(host *baremetal.Host) (*corev1.Secret, baremetalhost.BMCDetails) { + bmc := baremetalhost.BMCDetails{} + if host.BMC.Username != "" && host.BMC.Password != "" { + // Each host needs a secret to hold the credentials for + // communicating with the management controller that drives + // that host. 
+ secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-bmc-secret", host.Name), + Namespace: "openshift-machine-api", + }, + Data: map[string][]byte{ + "username": []byte(host.BMC.Username), + "password": []byte(host.BMC.Password), + }, + } + bmc.Address = host.BMC.Address + bmc.CredentialsName = secret.Name + bmc.DisableCertificateVerification = host.BMC.DisableCertificateVerification + + return secret, bmc + } + return nil, bmc +} + +func createBaremetalHost(host *baremetal.Host, bmc baremetalhost.BMCDetails) baremetalhost.BareMetalHost { + + // Map string 'default' to hardware.DefaultProfileName + if host.HardwareProfile == "default" { + host.HardwareProfile = hardware.DefaultProfileName + } + + newHost := baremetalhost.BareMetalHost{ + TypeMeta: metav1.TypeMeta{ + APIVersion: baremetalhost.GroupVersion.String(), + Kind: "BareMetalHost", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: host.Name, + Namespace: "openshift-machine-api", + }, + Spec: baremetalhost.BareMetalHostSpec{ + Online: true, + BMC: bmc, + BootMACAddress: host.BootMACAddress, + HardwareProfile: host.HardwareProfile, + BootMode: baremetalhost.BootMode(host.BootMode), + RootDeviceHints: host.RootDeviceHints.MakeCRDHints(), + }, + } + + return newHost +} + // Hosts returns the HostSettings with details of the hardware being // used to construct the cluster. func Hosts(config *types.InstallConfig, machines []machineapi.Machine) (*HostSettings, error) { @@ -34,68 +93,27 @@ func Hosts(config *types.InstallConfig, machines []machineapi.Machine) (*HostSet return nil, fmt.Errorf("no baremetal platform in configuration") } - for i, host := range config.Platform.BareMetal.Hosts { - bmc := baremetalhost.BMCDetails{} - if host.BMC.Username != "" && host.BMC.Password != "" { - // Each host needs a secret to hold the credentials for - // communicating with the management controller that drives - // that host. 
- secret := corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Secret", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-bmc-secret", host.Name), - Namespace: "openshift-machine-api", - }, - Data: map[string][]byte{ - "username": []byte(host.BMC.Username), - "password": []byte(host.BMC.Password), - }, - } - bmc.Address = host.BMC.Address - bmc.CredentialsName = secret.Name - bmc.DisableCertificateVerification = host.BMC.DisableCertificateVerification - settings.Secrets = append(settings.Secrets, secret) - } + numRequiredMasters := len(machines) + numMasters := 0 + for _, host := range config.Platform.BareMetal.Hosts { - // Map string 'default' to hardware.DefaultProfileName - if host.HardwareProfile == "default" { - host.HardwareProfile = hardware.DefaultProfileName + secret, bmc := createSecret(host) + if secret != nil { + settings.Secrets = append(settings.Secrets, *secret) } + newHost := createBaremetalHost(host, bmc) - newHost := baremetalhost.BareMetalHost{ - TypeMeta: metav1.TypeMeta{ - APIVersion: baremetalhost.GroupVersion.String(), - Kind: "BareMetalHost", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: host.Name, - Namespace: "openshift-machine-api", - }, - Spec: baremetalhost.BareMetalHostSpec{ - Online: true, - BMC: bmc, - BootMACAddress: host.BootMACAddress, - HardwareProfile: host.HardwareProfile, - BootMode: baremetalhost.BootMode(host.BootMode), - RootDeviceHints: host.RootDeviceHints.MakeCRDHints(), - }, - } - if i < len(machines) { + if !host.IsWorker() && numMasters < numRequiredMasters { // Setting ExternallyProvisioned to true and adding a // ConsumerRef without setting Image associates the host // with a machine without triggering provisioning. We only - // want to do that for control plane hosts. We assume the - // first known hosts are the control plane and that the - // hosts are in the same order as the control plane - // machines. + // want to do that for control plane hosts. 
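+			// Hosts explicitly tagged with the worker role never reach this
+			// branch; they are left unbound so they can be provisioned as
+			// compute nodes later.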
newHost.Spec.ExternallyProvisioned = true // Pause reconciliation until we can annotate with the initial // status containing the HardwareDetails newHost.ObjectMeta.Annotations = map[string]string{"baremetalhost.metal3.io/paused": ""} - machine := machines[i] + // Link the new host to the currently available machine + machine := machines[numMasters] newHost.Spec.ConsumerRef = &corev1.ObjectReference{ APIVersion: machine.TypeMeta.APIVersion, Kind: machine.TypeMeta.Kind, @@ -103,7 +121,9 @@ func Hosts(config *types.InstallConfig, machines []machineapi.Machine) (*HostSet Name: machine.ObjectMeta.Name, } newHost.Spec.Online = true + numMasters++ } + settings.Hosts = append(settings.Hosts, newHost) } diff --git a/pkg/asset/machines/baremetal/hosts_test.go b/pkg/asset/machines/baremetal/hosts_test.go index 27b42efed76..73c8049114c 100644 --- a/pkg/asset/machines/baremetal/hosts_test.go +++ b/pkg/asset/machines/baremetal/hosts_test.go @@ -1,10 +1,10 @@ package baremetal import ( - "fmt" "testing" machineapi "github.com/openshift/api/machine/v1beta1" + "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -14,240 +14,422 @@ import ( baremetalhost "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" ) -func makeConfig(nHosts int) *types.InstallConfig { - config := &types.InstallConfig{ - Platform: types.Platform{ - BareMetal: &baremetaltypes.Platform{}, +func TestHosts(t *testing.T) { + testCases := []struct { + Scenario string + Machines []machineapi.Machine + Config *types.InstallConfig + ExpectedSecrets []corev1.Secret + ExpectedHosts []baremetalhost.BareMetalHost + ExpectedError string + ExpectedSetting *HostSettings + }{ + { + Scenario: "no-platform", + Config: &types.InstallConfig{ + Platform: types.Platform{ + BareMetal: nil, + }, + }, + + ExpectedError: "no baremetal platform in configuration", + }, + { + Scenario: "no-hosts", + Config: config().build(), + + ExpectedSetting: settings().build(), + }, + { + Scenario: "default", + Machines: machines(machine("machine-0")), + Config: configHosts(hostType("master-0").bmc("usr0", "pwd0").role("master")), + + ExpectedSetting: settings(). + secrets(secret("master-0-bmc-secret").data("usr0", "pwd0")). + hosts(host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned()).build(), + }, + { + Scenario: "default-norole", + Machines: machines(machine("machine-0")), + Config: configHosts(hostType("master-0").bmc("usr0", "pwd0")), + + ExpectedSetting: settings(). + secrets(secret("master-0-bmc-secret").data("usr0", "pwd0")). + hosts(host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned()).build(), + }, + { + Scenario: "3-hosts-3-machines", + Machines: machines( + machine("machine-0"), + machine("machine-1"), + machine("machine-2")), + Config: configHosts( + hostType("master-0").bmc("usr0", "pwd0"), + hostType("master-1").bmc("usr1", "pwd1"), + hostType("master-2").bmc("usr2", "pwd2")), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").data("usr0", "pwd0"), + secret("master-1-bmc-secret").data("usr1", "pwd1"), + secret("master-2-bmc-secret").data("usr2", "pwd2")). 
+ hosts( + host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned()).build(), + }, + { + Scenario: "4-hosts-3-machines", + Machines: machines( + machine("machine-0"), + machine("machine-1"), + machine("machine-2")), + Config: configHosts( + hostType("master-0").bmc("usr0", "pwd0").role("master"), + hostType("master-1").bmc("usr1", "pwd1").role("master"), + hostType("master-2").bmc("usr2", "pwd2").role("master"), + hostType("master-3").bmc("usr3", "pwd3").role("worker")), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").data("usr0", "pwd0"), + secret("master-1-bmc-secret").data("usr1", "pwd1"), + secret("master-2-bmc-secret").data("usr2", "pwd2"), + secret("master-3-bmc-secret").data("usr3", "pwd3")). + hosts( + host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-3")).build(), + }, + { + Scenario: "4-hosts-3-machines-norole", + Machines: machines( + machine("machine-0"), + machine("machine-1"), + machine("machine-2")), + Config: configHosts( + hostType("master-0").bmc("usr0", "pwd0"), + hostType("master-1").bmc("usr1", "pwd1"), + hostType("master-2").bmc("usr2", "pwd2"), + hostType("master-3").bmc("usr3", "pwd3")), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").data("usr0", "pwd0"), + secret("master-1-bmc-secret").data("usr1", "pwd1"), + secret("master-2-bmc-secret").data("usr2", "pwd2"), + secret("master-3-bmc-secret").data("usr3", "pwd3")). + hosts( + host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-3")).build(), + }, + { + Scenario: "5-hosts-3-machines-mixed", + Machines: machines( + machine("machine-0"), + machine("machine-1"), + machine("machine-2")), + Config: configHosts( + hostType("master-0").bmc("usr0", "pwd0").role("master"), + hostType("worker-0").bmc("wrk0", "pwd0").role("worker"), + hostType("master-1").bmc("usr1", "pwd1").role("master"), + hostType("worker-1").bmc("wrk1", "pwd1").role("worker"), + hostType("master-2").bmc("usr2", "pwd2").role("master")), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").data("usr0", "pwd0"), + secret("worker-0-bmc-secret").data("wrk0", "pwd0"), + secret("master-1-bmc-secret").data("usr1", "pwd1"), + secret("worker-1-bmc-secret").data("wrk1", "pwd1"), + secret("master-2-bmc-secret").data("usr2", "pwd2")). 
+ hosts( + host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("worker-0"), + host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("worker-1"), + host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned()).build(), + }, + { + Scenario: "5-hosts-3-machines-mixed-norole", + Machines: machines( + machine("machine-0"), + machine("machine-1"), + machine("machine-2")), + Config: configHosts( + hostType("master-0").bmc("usr0", "pwd0"), + hostType("worker-0").bmc("wrk0", "pwd0").role("worker"), + hostType("master-1").bmc("usr1", "pwd1").role("master"), + hostType("worker-1").bmc("wrk1", "pwd1").role("worker"), + hostType("master-2").bmc("usr2", "pwd2")), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").data("usr0", "pwd0"), + secret("worker-0-bmc-secret").data("wrk0", "pwd0"), + secret("master-1-bmc-secret").data("usr1", "pwd1"), + secret("worker-1-bmc-secret").data("wrk1", "pwd1"), + secret("master-2-bmc-secret").data("usr2", "pwd2")). + hosts( + host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("worker-0"), + host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("worker-1"), + host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned()).build(), + }, + { + Scenario: "3-hosts-mixed", + Machines: machines( + machine("machine-0")), + Config: configHosts( + hostType("server-0").bmc("usr0", "pwd0"), + hostType("server-1").bmc("usr1", "pwd1"), + hostType("server-2").bmc("usr2", "pwd2").role("master")), + + ExpectedSetting: settings(). + secrets( + secret("server-0-bmc-secret").data("usr0", "pwd0"), + secret("server-1-bmc-secret").data("usr1", "pwd1"), + secret("server-2-bmc-secret").data("usr2", "pwd2")). 
+ hosts( + host("server-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("server-1"), + host("server-2")).build(), }, } - config.Platform.BareMetal.Hosts = make([]*baremetaltypes.Host, nHosts) - for i := 0; i < nHosts; i++ { - config.Platform.BareMetal.Hosts[i] = &baremetaltypes.Host{ - Name: fmt.Sprintf("host%d", i), - BMC: baremetaltypes.BMC{ - Username: fmt.Sprintf("user%d", i), - Password: fmt.Sprintf("password%d", i), - }, - } + for _, tc := range testCases { + t.Run(tc.Scenario, func(t *testing.T) { + settings, err := Hosts(tc.Config, tc.Machines) + + if tc.ExpectedError != "" { + assert.EqualError(t, err, tc.ExpectedError) + } + + if tc.ExpectedSetting != nil { + assert.Equal(t, tc.ExpectedSetting, settings) + } + }) } +} + +func configHosts(builders ...*hostTypeBuilder) *types.InstallConfig { + return config().hosts(builders...).build() +} - return config +type installConfigBuilder struct { + types.InstallConfig } -func makeMachines(n int) []machineapi.Machine { - results := []machineapi.Machine{} +func (ib *installConfigBuilder) build() *types.InstallConfig { + return &ib.InstallConfig +} - for i := 0; i < n; i++ { - results = append(results, machineapi.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("machine%d", i), +func config() *installConfigBuilder { + return &installConfigBuilder{ + types.InstallConfig{ + Platform: types.Platform{ + BareMetal: &baremetaltypes.Platform{}, }, - }) + }, } +} - return results +func (ib *installConfigBuilder) hosts(builders ...*hostTypeBuilder) *installConfigBuilder { + ib.Platform.BareMetal.Hosts = []*baremetaltypes.Host{} + + for _, hb := range builders { + ib.Platform.BareMetal.Hosts = append(ib.Platform.BareMetal.Hosts, hb.build()) + } + return ib } -func TestHosts(t *testing.T) { - testCases := []struct { - Scenario string - Machines []machineapi.Machine - Config *types.InstallConfig - Expected HostSettings - ExpectError bool - }{ - { - Scenario: "no platform", - Config: &types.InstallConfig{}, - ExpectError: true, +type hostTypeBuilder struct { + baremetaltypes.Host +} + +func (htb *hostTypeBuilder) build() *baremetaltypes.Host { + return &htb.Host +} + +func hostType(name string) *hostTypeBuilder { + return &hostTypeBuilder{ + Host: baremetaltypes.Host{ + Name: name, + BootMACAddress: "c0:ff:ee:ca:fe:00", + BootMode: baremetaltypes.UEFI, + RootDeviceHints: &baremetaltypes.RootDeviceHints{ + DeviceName: "userd_devicename", + }, }, + } +} - { - Scenario: "no hosts", - Config: makeConfig(0), - Expected: HostSettings{}, +func (htb *hostTypeBuilder) role(role string) *hostTypeBuilder { + htb.Role = role + return htb +} + +func (htb *hostTypeBuilder) bmc(user, password string) *hostTypeBuilder { + htb.BMC = baremetaltypes.BMC{ + Username: user, + Password: password, + } + return htb +} + +type machineBuilder struct { + machineapi.Machine +} + +func (mb *machineBuilder) build() *machineapi.Machine { + return &mb.Machine +} + +func machine(name string) *machineBuilder { + return &machineBuilder{ + machineapi.Machine{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "namespace", + }, }, + } +} - { - Scenario: "3 hosts and machines", - Config: makeConfig(3), - Machines: makeMachines(3), - Expected: HostSettings{ - Hosts: []baremetalhost.BareMetalHost{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "host0", - }, - Spec: baremetalhost.BareMetalHostSpec{ - Online: true, - BMC: 
baremetalhost.BMCDetails{ - CredentialsName: "host0-bmc-secret", - }, - ConsumerRef: &corev1.ObjectReference{ - Name: "machine0", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "host1", - }, - Spec: baremetalhost.BareMetalHostSpec{ - Online: true, - BMC: baremetalhost.BMCDetails{ - CredentialsName: "host1-bmc-secret", - }, - ConsumerRef: &corev1.ObjectReference{ - Name: "machine1", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "host2", - }, - Spec: baremetalhost.BareMetalHostSpec{ - Online: true, - BMC: baremetalhost.BMCDetails{ - CredentialsName: "host2-bmc-secret", - }, - ConsumerRef: &corev1.ObjectReference{ - Name: "machine2", - }, - }, - }, +func machines(builders ...*machineBuilder) []machineapi.Machine { + m := []machineapi.Machine{} + + for _, mb := range builders { + m = append(m, *mb.build()) + } + return m +} + +type hostBuilder struct { + baremetalhost.BareMetalHost +} + +func host(name string) *hostBuilder { + return &hostBuilder{ + baremetalhost.BareMetalHost{ + + TypeMeta: metav1.TypeMeta{ + APIVersion: "metal3.io/v1alpha1", + Kind: "BareMetalHost", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "openshift-machine-api", + }, + Spec: baremetalhost.BareMetalHostSpec{ + BMC: baremetalhost.BMCDetails{ + CredentialsName: name + "-bmc-secret", + }, + RootDeviceHints: &baremetalhost.RootDeviceHints{ + DeviceName: "userd_devicename", }, + BootMode: "UEFI", + BootMACAddress: "c0:ff:ee:ca:fe:00", + Online: true, }, }, + } +} - { - Scenario: "4 hosts and 3 machines", - Config: makeConfig(4), - Machines: makeMachines(3), - Expected: HostSettings{ - Hosts: []baremetalhost.BareMetalHost{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "host0", - }, - Spec: baremetalhost.BareMetalHostSpec{ - Online: true, - BMC: baremetalhost.BMCDetails{ - CredentialsName: "host0-bmc-secret", - }, - ConsumerRef: &corev1.ObjectReference{ - Name: "machine0", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "host1", - }, - Spec: baremetalhost.BareMetalHostSpec{ - Online: true, - BMC: baremetalhost.BMCDetails{ - CredentialsName: "host1-bmc-secret", - }, - ConsumerRef: &corev1.ObjectReference{ - Name: "machine1", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "host2", - }, - Spec: baremetalhost.BareMetalHostSpec{ - Online: true, - BMC: baremetalhost.BMCDetails{ - CredentialsName: "host2-bmc-secret", - }, - ConsumerRef: &corev1.ObjectReference{ - Name: "machine2", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "host3", - }, - Spec: baremetalhost.BareMetalHostSpec{ - Online: true, - BMC: baremetalhost.BMCDetails{ - CredentialsName: "host3-bmc-secret", - }, - }, - }, - }, +func (hb *hostBuilder) build() *baremetalhost.BareMetalHost { + return &hb.BareMetalHost +} + +func (hb *hostBuilder) externallyProvisioned() *hostBuilder { + hb.Spec.ExternallyProvisioned = true + return hb +} + +func (hb *hostBuilder) annotation(key, value string) *hostBuilder { + if hb.Annotations == nil { + hb.Annotations = map[string]string{} + } + + hb.Annotations[key] = value + return hb +} + +func (hb *hostBuilder) consumerRef(name string) *hostBuilder { + hb.Spec.ConsumerRef = &corev1.ObjectReference{ + APIVersion: "v1", + Kind: "Machine", + Namespace: "namespace", + Name: name, + } + return hb +} + +type secretBuilder struct { + corev1.Secret +} + +func secret(name string) *secretBuilder { + return &secretBuilder{ + corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + 
Namespace: "openshift-machine-api", }, }, } +} - for _, tc := range testCases { - t.Run(tc.Scenario, func(t *testing.T) { - actual, err := Hosts(tc.Config, tc.Machines) - - if tc.ExpectError { - if err == nil { - t.Error("expected error but did not get one") - } - } else { - - if err != nil { - t.Error("did not expect error but got one") - return - } - - if len(tc.Expected.Hosts) != len(actual.Hosts) { - t.Errorf("Expected %d hosts but received %d (%v)", - len(tc.Expected.Hosts), - len(actual.Hosts), - actual.Hosts, - ) - return - } - - if len(tc.Expected.Hosts) != len(actual.Secrets) { - t.Errorf("Expected %d secrets to go with hosts, got %d", - len(tc.Expected.Hosts), len(actual.Secrets)) - return - } - - // Make sure the first few hosts that correspond to - // machines have their consumer ref set correctly - for i := 0; i < len(tc.Machines); i++ { - if actual.Hosts[i].Spec.ConsumerRef.Name != tc.Expected.Hosts[i].Spec.ConsumerRef.Name { - t.Errorf("Expected host %d to link to %q but got %q", - i, - tc.Expected.Hosts[i].Spec.ConsumerRef.Name, - actual.Hosts[i].Spec.ConsumerRef.Name, - ) - } - } - - // Make sure any extra hosts do not have a consumer set. - for i := len(actual.Hosts) - 1; i > len(tc.Machines); i-- { - if actual.Hosts[i].Spec.ConsumerRef != nil { - t.Errorf("Expected host %d to have no consumer but has %v", - i, actual.Hosts[i].Spec.ConsumerRef, - ) - } - } - - for i, sec := range actual.Secrets { - expectedName := fmt.Sprintf("host%d-bmc-secret", i) - if sec.Name != expectedName { - t.Errorf("Expected secret name %d to be %q but got %q", - i, - expectedName, - sec.Name, - ) - } - } +func (sb *secretBuilder) data(user, password string) *secretBuilder { + sb.Data = map[string][]byte{ + "username": []byte(user), + "password": []byte(password), + } + return sb +} - } - }) +func (sb *secretBuilder) build() *corev1.Secret { + return &sb.Secret +} + +type hostSettingsBuilder struct { + HostSettings +} + +func (hsb *hostSettingsBuilder) secrets(builders ...*secretBuilder) *hostSettingsBuilder { + hsb.Secrets = []corev1.Secret{} + for _, sb := range builders { + hsb.Secrets = append(hsb.Secrets, *sb.build()) + } + return hsb +} + +func (hsb *hostSettingsBuilder) hosts(builders ...*hostBuilder) *hostSettingsBuilder { + hsb.Hosts = []baremetalhost.BareMetalHost{} + for _, hb := range builders { + hsb.Hosts = append(hsb.Hosts, *hb.build()) + } + return hsb +} + +func (hsb *hostSettingsBuilder) build() *HostSettings { + return &hsb.HostSettings +} + +func settings() *hostSettingsBuilder { + return &hostSettingsBuilder{ + HostSettings: HostSettings{}, } } diff --git a/pkg/tfvars/baremetal/baremetal.go b/pkg/tfvars/baremetal/baremetal.go index 65ebb41824b..4ae7042f3fd 100644 --- a/pkg/tfvars/baremetal/baremetal.go +++ b/pkg/tfvars/baremetal/baremetal.go @@ -27,23 +27,43 @@ type config struct { IronicPassword string `json:"ironic_password"` // Data required for control plane deployment - several maps per host, because of terraform's limitations - Hosts []map[string]interface{} `json:"hosts"` + Masters []map[string]interface{} `json:"masters"` RootDevices []map[string]interface{} `json:"root_devices"` Properties []map[string]interface{} `json:"properties"` DriverInfos []map[string]interface{} `json:"driver_infos"` InstanceInfos []map[string]interface{} `json:"instance_infos"` } +type imageDownloadFunc func(baseURL string) (string, error) + +var ( + imageDownloader imageDownloadFunc +) + +func init() { + imageDownloader = cache.DownloadImageFile +} + // TFVars generates bare metal specific 
Terraform variables. -func TFVars(libvirtURI, apiVIP, imageCacheIP, bootstrapOSImage, externalBridge, externalMAC, provisioningBridge, provisioningMAC string, platformHosts []*baremetal.Host, image, ironicUsername, ironicPassword, ignition string) ([]byte, error) { - bootstrapOSImage, err := cache.DownloadImageFile(bootstrapOSImage) +func TFVars(numControlPlaneReplicas int64, libvirtURI, apiVIP, imageCacheIP, bootstrapOSImage, externalBridge, externalMAC, provisioningBridge, provisioningMAC string, platformHosts []*baremetal.Host, image, ironicUsername, ironicPassword, ignition string) ([]byte, error) { + bootstrapOSImage, err := imageDownloader(bootstrapOSImage) if err != nil { return nil, errors.Wrap(err, "failed to use cached bootstrap libvirt image") } - var hosts, rootDevices, properties, driverInfos, instanceInfos []map[string]interface{} + var masters, rootDevices, properties, driverInfos, instanceInfos []map[string]interface{} + // Select the first N hosts as masters, excluding the workers for _, host := range platformHosts { + if len(masters) >= int(numControlPlaneReplicas) { + break + } + + if host.IsWorker() { + //Skipping workers + continue + } + // Get hardware profile if host.HardwareProfile == "default" { host.HardwareProfile = hardware.DefaultProfileName @@ -145,7 +165,7 @@ func TFVars(libvirtURI, apiVIP, imageCacheIP, bootstrapOSImage, externalBridge, instanceInfo["capabilities"] = "secure_boot:true" } - hosts = append(hosts, hostMap) + masters = append(masters, hostMap) properties = append(properties, propertiesMap) driverInfos = append(driverInfos, driverInfo) rootDevices = append(rootDevices, rootDevice) @@ -176,7 +196,7 @@ func TFVars(libvirtURI, apiVIP, imageCacheIP, bootstrapOSImage, externalBridge, BootstrapOSImage: bootstrapOSImage, IronicUsername: ironicUsername, IronicPassword: ironicPassword, - Hosts: hosts, + Masters: masters, Bridges: bridges, Properties: properties, DriverInfos: driverInfos, diff --git a/pkg/tfvars/baremetal/baremetal_test.go b/pkg/tfvars/baremetal/baremetal_test.go new file mode 100644 index 00000000000..b6f2991b2d2 --- /dev/null +++ b/pkg/tfvars/baremetal/baremetal_test.go @@ -0,0 +1,174 @@ +// Package baremetal contains bare metal specific Terraform-variable logic. 
+package baremetal + +import ( + "encoding/json" + "testing" + + "github.com/openshift/installer/pkg/types/baremetal" + "github.com/stretchr/testify/assert" +) + +func TestMastersSelectionByRole(t *testing.T) { + + cases := []struct { + scenario string + + numControlPlaneReplicas int64 + libvirtURI string + apiVIP string + imageCacheIP string + bootstrapOSImage string + externalBridge string + externalMAC string + provisioningBridge string + provisioningMAC string + platformHosts []*baremetal.Host + image string + ironicUsername string + ironicPassword string + ignition string + + expectedError string + expectedHosts []string + }{ + { + scenario: "filter-workers", + numControlPlaneReplicas: 2, + platformHosts: platformHosts( + host("master-0", "master"), + host("master-1", "master"), + host("worker-0", "worker"), + ), + expectedHosts: []string{"master-0", "master-1"}, + }, + { + scenario: "mixed-order", + numControlPlaneReplicas: 1, + platformHosts: platformHosts( + host("worker-0", "worker"), + host("master-0", "master"), + host("worker-1", "worker"), + ), + expectedHosts: []string{"master-0"}, + }, + { + scenario: "hosts-norole", + numControlPlaneReplicas: 2, + platformHosts: platformHosts( + host("master-0", ""), + host("worker-0", "worker"), + host("master-1", ""), + ), + expectedHosts: []string{"master-0", "master-1"}, + }, + { + scenario: "no-role", + numControlPlaneReplicas: 3, + platformHosts: platformHosts( + host("master-0", ""), + host("master-1", ""), + host("master-2", ""), + ), + expectedHosts: []string{"master-0", "master-1", "master-2"}, + }, + { + scenario: "more-hosts-than-required", + numControlPlaneReplicas: 2, + platformHosts: platformHosts( + host("master-0", "master"), + host("master-1", "master"), + host("master-2", ""), + ), + expectedHosts: []string{"master-0", "master-1"}, + }, + { + scenario: "more-hosts-than-required-norole", + numControlPlaneReplicas: 2, + platformHosts: platformHosts( + host("master-0", ""), + host("master-1", ""), + host("master-2", ""), + ), + expectedHosts: []string{"master-0", "master-1"}, + }, + { + scenario: "more-hosts-than-required-mixed", + numControlPlaneReplicas: 2, + platformHosts: platformHosts( + host("worker-0", "worker"), + host("master-0", ""), + host("worker-1", "worker"), + host("master-1", ""), + host("worker-2", "worker"), + host("master-2", ""), + ), + expectedHosts: []string{"master-0", "master-1"}, + }, + { + scenario: "more-masters-than-required", + numControlPlaneReplicas: 1, + platformHosts: platformHosts( + host("server-0", ""), + host("server-1", ""), + host("server-2", "master"), + ), + expectedHosts: []string{"server-0"}, + }, + } + + for _, tc := range cases { + t.Run(tc.scenario, func(t *testing.T) { + + imageDownloader = func(baseURL string) (string, error) { + return "", nil + } + + data, err := TFVars( + tc.numControlPlaneReplicas, + tc.libvirtURI, + tc.apiVIP, + tc.imageCacheIP, + tc.bootstrapOSImage, + tc.externalBridge, + tc.externalMAC, + tc.provisioningBridge, + tc.provisioningMAC, + tc.platformHosts, + tc.image, + tc.ironicUsername, + tc.ironicPassword, + tc.ignition) + + if tc.expectedError == "" { + assert.NoError(t, err) + } else { + assert.Regexp(t, tc.expectedError, err) + } + + var cfg config + err = json.Unmarshal(data, &cfg) + assert.Nil(t, err) + + assert.Equal(t, len(tc.expectedHosts), len(cfg.Masters)) + for i, hostName := range tc.expectedHosts { + assert.Equal(t, hostName, cfg.Masters[i]["name"]) + } + }) + } +} + +func host(name, tag string) *baremetal.Host { + return &baremetal.Host{ + 
Name: name, + Role: tag, + HardwareProfile: "default", + BMC: baremetal.BMC{ + Address: "redfish+http://192.168.111.1:8000/redfish/v1/Systems/e4427260-6250-4df9-9e8a-120f78a46aa6", + }, + } +} + +func platformHosts(hosts ...*baremetal.Host) []*baremetal.Host { + return hosts +} diff --git a/pkg/types/baremetal/platform.go b/pkg/types/baremetal/platform.go index 56d1396043f..1bb7abd47ab 100644 --- a/pkg/types/baremetal/platform.go +++ b/pkg/types/baremetal/platform.go @@ -36,6 +36,16 @@ type Host struct { BootMode BootMode `json:"bootMode,omitempty"` } +// IsMaster checks if the current host is a master +func (h *Host) IsMaster() bool { + return h.Role == "master" +} + +// IsWorker checks if the current host is a worker +func (h *Host) IsWorker() bool { + return h.Role == "worker" +} + // ProvisioningNetwork determines how we will use the provisioning network. // +kubebuilder:validation:Enum="";Managed;Unmanaged;Disabled type ProvisioningNetwork string diff --git a/pkg/types/baremetal/validation/platform.go b/pkg/types/baremetal/validation/platform.go index 78c12967a1c..cc536113d5f 100644 --- a/pkg/types/baremetal/validation/platform.go +++ b/pkg/types/baremetal/validation/platform.go @@ -13,6 +13,7 @@ import ( "github.com/go-playground/validator/v10" "github.com/metal3-io/baremetal-operator/pkg/bmc" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" @@ -278,21 +279,56 @@ func validateOSImages(p *baremetal.Platform, fldPath *field.Path) field.ErrorLis return platformErrs } +// ensure that the number of hosts is enough to cover the ControlPlane +// and Compute replicas. Hosts without role will be considered eligible +// for the ControlPlane or Compute requirements. 
func validateHostsCount(hosts []*baremetal.Host, installConfig *types.InstallConfig) error { - hostsNum := int64(len(hosts)) - counter := int64(0) + numRequiredMasters := int64(0) + if installConfig.ControlPlane != nil && installConfig.ControlPlane.Replicas != nil { + numRequiredMasters += *installConfig.ControlPlane.Replicas + } + numRequiredWorkers := int64(0) for _, worker := range installConfig.Compute { if worker.Replicas != nil { - counter += *worker.Replicas + numRequiredWorkers += *worker.Replicas } } - if installConfig.ControlPlane != nil && installConfig.ControlPlane.Replicas != nil { - counter += *installConfig.ControlPlane.Replicas + + numMasters := int64(0) + numWorkers := int64(0) + + for _, h := range hosts { + // The first N hosts, not explicitly tagged with the worker role, + // are considered eligible as master + if numMasters < numRequiredMasters { + if !h.IsWorker() { + numMasters++ + } else { + numWorkers++ + } + } else { + // The remaining hosts, not explicitly tagged with the master role, + // are considered eligible as worker + if !h.IsMaster() { + numWorkers++ + } else { + numMasters++ + } + } + } + + if numMasters < numRequiredMasters { + return fmt.Errorf("not enough hosts found (%v) to support all the configured ControlPlane replicas (%v)", numMasters, numRequiredMasters) + } + + if numMasters > numRequiredMasters { + logrus.Warn("Found more hosts configured as master than required") } - if hostsNum < counter { - return fmt.Errorf("not enough hosts found (%v) to support all the configured ControlPlane and Compute replicas (%v)", hostsNum, counter) + + if numWorkers < numRequiredWorkers { + return fmt.Errorf("not enough hosts found (%v) to support all the configured Compute replicas (%v)", numWorkers, numRequiredWorkers) } return nil diff --git a/pkg/types/baremetal/validation/platform_test.go b/pkg/types/baremetal/validation/platform_test.go index a90f71f8326..4353e930e57 100644 --- a/pkg/types/baremetal/validation/platform_test.go +++ b/pkg/types/baremetal/validation/platform_test.go @@ -67,32 +67,135 @@ func TestValidatePlatform(t *testing.T) { expected: "bare metal hosts are missing", }, { - name: "toofew_hosts", + name: "toofew_masters_norole", config: installConfig(). BareMetalPlatform( platform().Hosts( - host1())). + host1(), + host2().Role("worker"))). + ControlPlane( + machinePool().Replicas(3)). + Compute( + machinePool().Replicas(1)).build(), + expected: "baremetal.Hosts: Required value: not enough hosts found \\(1\\) to support all the configured ControlPlane replicas \\(3\\)", + }, + { + name: "toofew_masters", + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1().Role("master"), + host2().Role("worker"))). ControlPlane( machinePool().Replicas(3)). Compute( - machinePool().Replicas(2), + machinePool().Replicas(1)).build(), + expected: "baremetal.Hosts: Required value: not enough hosts found \\(1\\) to support all the configured ControlPlane replicas \\(3\\)", + }, + { + name: "toofew_workers", + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1().Role("master"), + host2().Role("worker"))). + ControlPlane( + machinePool().Replicas(1)). 
+ Compute( machinePool().Replicas(3)).build(), - expected: "baremetal.Hosts: Required value: not enough hosts found \\(1\\) to support all the configured ControlPlane and Compute replicas \\(8\\)", + expected: "baremetal.Hosts: Required value: not enough hosts found \\(1\\) to support all the configured Compute replicas \\(3\\)", }, { name: "enough_hosts", + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1().Role("master"), + host2().Role("worker"))). + ControlPlane( + machinePool().Replicas(1)). + Compute( + machinePool().Replicas(1)).build(), + }, + { + name: "enough_hosts_norole", + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1(), + host2(), + host3())). + ControlPlane( + machinePool().Replicas(1)). + Compute( + machinePool().Replicas(2)).build(), + }, + { + name: "enough_hosts_mixed", + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1().Role("master"), + host2(), + host3(), + host4().Role("worker"))). + ControlPlane( + machinePool().Replicas(2)). + Compute( + machinePool().Replicas(2)).build(), + }, + { + name: "not_enough_hosts_norole", config: installConfig(). BareMetalPlatform( platform().Hosts( host1(), - host2())). + host2(), + host3())). ControlPlane( + machinePool().Replicas(2)). + Compute( machinePool().Replicas(2)).build(), + expected: "baremetal.Hosts: Required value: not enough hosts found \\(1\\) to support all the configured Compute replicas \\(2\\)", + }, + { + name: "more_than_enough_hosts", + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1().Role("master"), + host2().Role("master"), + host3(), + host4().Role("worker"), + host5().Role("worker"))). + ControlPlane( + machinePool().Replicas(1)). + Compute( + machinePool().Replicas(1)).build(), + }, + { + name: "not_enough_workers_norole", + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1(), + host2(), + host3().Role("master"), + host4().Role("master"), + host5().Role("master"))). + ControlPlane( + machinePool().Replicas(3)). + Compute( + machinePool().Replicas(2)).build(), + expected: "baremetal.Hosts: Required value: not enough hosts found \\(0\\) to support all the configured Compute replicas \\(2\\)", }, { name: "missing_name", - platform: platform(). - Hosts(host1().Name("")).build(), + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1().Name(""))). 
+ ControlPlane(machinePool().Replicas(1)).build(), expected: "baremetal.hosts\\[0\\].Name: Required value: missing Name", }, { @@ -548,6 +651,48 @@ func host2() *hostBuilder { } } +func host3() *hostBuilder { + return &hostBuilder{ + baremetal.Host{ + Name: "host3", + BootMACAddress: "CA:FE:CA:FE:00:02", + BMC: baremetal.BMC{ + Username: "root", + Password: "password", + Address: "ipmi://192.168.111.3", + }, + }, + } +} + +func host4() *hostBuilder { + return &hostBuilder{ + baremetal.Host{ + Name: "host4", + BootMACAddress: "CA:FE:CA:FE:00:03", + BMC: baremetal.BMC{ + Username: "root", + Password: "password", + Address: "ipmi://192.168.111.4", + }, + }, + } +} + +func host5() *hostBuilder { + return &hostBuilder{ + baremetal.Host{ + Name: "host5", + BootMACAddress: "CA:FE:CA:FE:00:04", + BMC: baremetal.BMC{ + Username: "root", + Password: "password", + Address: "ipmi://192.168.111.5", + }, + }, + } +} + func (hb *hostBuilder) build() *baremetal.Host { return &hb.Host } @@ -582,6 +727,11 @@ func (hb *hostBuilder) BMCPassword(value string) *hostBuilder { return hb } +func (hb *hostBuilder) Role(value string) *hostBuilder { + hb.Host.Role = value + return hb +} + type platformBuilder struct { baremetal.Platform } diff --git a/pkg/types/validation/installconfig_test.go b/pkg/types/validation/installconfig_test.go index b9c908d971f..98950046d07 100644 --- a/pkg/types/validation/installconfig_test.go +++ b/pkg/types/validation/installconfig_test.go @@ -109,6 +109,7 @@ func validBareMetalPlatform() *baremetal.Platform { Hosts: []*baremetal.Host{ { Name: "host1", + Role: "master", BootMACAddress: "CA:FE:CA:FE:00:00", BMC: baremetal.BMC{ Username: "root", @@ -118,6 +119,7 @@ func validBareMetalPlatform() *baremetal.Platform { }, { Name: "host2", + Role: "worker", BootMACAddress: "CA:FE:CA:FE:00:01", BMC: baremetal.BMC{ Username: "root", From 59908ee8ef9d4dc5ec7f472dbec4b0897310f053 Mon Sep 17 00:00:00 2001 From: Andrea Fasano Date: Mon, 25 Oct 2021 10:41:56 -0400 Subject: [PATCH 07/10] Reorder hosts to first select those ones with a configured role. Hosts without a role will be used either for ControlPlane or Compute respectively if required. 
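In short, this patch makes the installer stable-sort the configured hosts by role before any host-to-machine pairing happens. A minimal, self-contained Go sketch of that ordering (the `host` struct and `roleRank` map below are illustrative stand-ins for `baremetal.Host` and the patch's `sortIndex`; the real comparator is `Host.CompareByRole`, shown later in this patch):

```go
package main

import (
	"fmt"
	"sort"
)

// host stands in for baremetal.Host, reduced to the fields that
// matter for ordering.
type host struct {
	Name string
	Role string
}

// roleRank mirrors the precedence introduced by this patch: explicit
// masters first, then explicit workers, then hosts with no role.
var roleRank = map[string]int{"master": -1, "worker": 0, "": 1}

func main() {
	hosts := []host{
		{"worker-0", "worker"},
		{"master-0", "master"},
		{"worker-1", ""},
		{"master-1", "master"},
	}

	// SliceStable keeps the relative order of hosts with equal rank,
	// so ties are still broken by install-config order.
	sort.SliceStable(hosts, func(i, j int) bool {
		return roleRank[hosts[i].Role] < roleRank[hosts[j].Role]
	})

	for _, h := range hosts {
		fmt.Println(h.Name)
	}
	// Prints: master-0, master-1, worker-0, worker-1
}
```

The stable sort matters here: hosts sharing a rank keep their configured order, so the pairing of masters with machines stays deterministic across runs.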
--- pkg/asset/machines/baremetal/hosts_test.go | 96 +++++++++++++------ pkg/tfvars/baremetal/baremetal_test.go | 34 +++---- pkg/types/baremetal/defaults/platform.go | 10 +- pkg/types/baremetal/defaults/platform_test.go | 83 ++++++++++++++++ pkg/types/baremetal/platform.go | 16 +++- pkg/types/baremetal/validation/platform.go | 25 ++--- .../baremetal/validation/platform_test.go | 23 +++-- 7 files changed, 208 insertions(+), 79 deletions(-) diff --git a/pkg/asset/machines/baremetal/hosts_test.go b/pkg/asset/machines/baremetal/hosts_test.go index 73c8049114c..226e81332dc 100644 --- a/pkg/asset/machines/baremetal/hosts_test.go +++ b/pkg/asset/machines/baremetal/hosts_test.go @@ -1,6 +1,7 @@ package baremetal import ( + "fmt" "testing" machineapi "github.com/openshift/api/machine/v1beta1" @@ -59,7 +60,7 @@ func TestHosts(t *testing.T) { hosts(host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned()).build(), }, { - Scenario: "3-hosts-3-machines", + Scenario: "3-hosts-3-machines-norole-all", Machines: machines( machine("machine-0"), machine("machine-1"), @@ -113,92 +114,121 @@ func TestHosts(t *testing.T) { hostType("master-0").bmc("usr0", "pwd0"), hostType("master-1").bmc("usr1", "pwd1"), hostType("master-2").bmc("usr2", "pwd2"), - hostType("master-3").bmc("usr3", "pwd3")), + hostType("worker-0").bmc("wrk0", "pwd0")), ExpectedSetting: settings(). secrets( secret("master-0-bmc-secret").data("usr0", "pwd0"), secret("master-1-bmc-secret").data("usr1", "pwd1"), secret("master-2-bmc-secret").data("usr2", "pwd2"), - secret("master-3-bmc-secret").data("usr3", "pwd3")). + secret("worker-0-bmc-secret").data("wrk0", "pwd0")). hosts( host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), - host("master-3")).build(), + host("worker-0")).build(), }, { - Scenario: "5-hosts-3-machines-mixed", + Scenario: "5-hosts-3-machines", Machines: machines( machine("machine-0"), machine("machine-1"), machine("machine-2")), Config: configHosts( hostType("master-0").bmc("usr0", "pwd0").role("master"), - hostType("worker-0").bmc("wrk0", "pwd0").role("worker"), hostType("master-1").bmc("usr1", "pwd1").role("master"), - hostType("worker-1").bmc("wrk1", "pwd1").role("worker"), - hostType("master-2").bmc("usr2", "pwd2").role("master")), + hostType("master-2").bmc("usr2", "pwd2").role("master"), + hostType("worker-0").bmc("wrk0", "pwd0").role("worker"), + hostType("worker-1").bmc("wrk1", "pwd1").role("worker")), ExpectedSetting: settings(). secrets( secret("master-0-bmc-secret").data("usr0", "pwd0"), + secret("master-1-bmc-secret").data("usr1", "pwd1"), + secret("master-2-bmc-secret").data("usr2", "pwd2"), secret("worker-0-bmc-secret").data("wrk0", "pwd0"), + secret("worker-1-bmc-secret").data("wrk1", "pwd1")). 
+ hosts( + host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("worker-0"), + host("worker-1")).build(), + }, + { + Scenario: "5-hosts-3-machines-mixed", + Machines: machines( + machine("machine-0"), + machine("machine-1"), + machine("machine-2")), + Config: configHosts( + hostType("master-1").bmc("usr1", "pwd1").role("master"), + hostType("worker-0").bmc("wrk0", "pwd0").role("worker"), + hostType("worker-1").bmc("wrk1", "pwd1").role("worker"), + hostType("master-0").bmc("usr0", "pwd0"), + hostType("master-2").bmc("usr2", "pwd2")), + + ExpectedSetting: settings(). + secrets( secret("master-1-bmc-secret").data("usr1", "pwd1"), + secret("worker-0-bmc-secret").data("wrk0", "pwd0"), secret("worker-1-bmc-secret").data("wrk1", "pwd1"), + secret("master-0-bmc-secret").data("usr0", "pwd0"), secret("master-2-bmc-secret").data("usr2", "pwd2")). hosts( - host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-1").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), host("worker-0"), - host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), host("worker-1"), + host("master-0").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned()).build(), }, { - Scenario: "5-hosts-3-machines-mixed-norole", + Scenario: "4-hosts-3-machines-norole-master", Machines: machines( machine("machine-0"), machine("machine-1"), machine("machine-2")), Config: configHosts( - hostType("master-0").bmc("usr0", "pwd0"), hostType("worker-0").bmc("wrk0", "pwd0").role("worker"), - hostType("master-1").bmc("usr1", "pwd1").role("master"), - hostType("worker-1").bmc("wrk1", "pwd1").role("worker"), + hostType("master-0").bmc("usr0", "pwd0"), + hostType("master-1").bmc("usr1", "pwd1"), hostType("master-2").bmc("usr2", "pwd2")), ExpectedSetting: settings(). secrets( - secret("master-0-bmc-secret").data("usr0", "pwd0"), secret("worker-0-bmc-secret").data("wrk0", "pwd0"), + secret("master-0-bmc-secret").data("usr0", "pwd0"), secret("master-1-bmc-secret").data("usr1", "pwd1"), - secret("worker-1-bmc-secret").data("wrk1", "pwd1"), secret("master-2-bmc-secret").data("usr2", "pwd2")). 
hosts( - host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), host("worker-0"), + host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), - host("worker-1"), host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned()).build(), }, { - Scenario: "3-hosts-mixed", + Scenario: "4-hosts-3-machines-norole-worker", Machines: machines( - machine("machine-0")), + machine("machine-0"), + machine("machine-1"), + machine("machine-2")), Config: configHosts( - hostType("server-0").bmc("usr0", "pwd0"), - hostType("server-1").bmc("usr1", "pwd1"), - hostType("server-2").bmc("usr2", "pwd2").role("master")), + hostType("master-0").bmc("usr0", "pwd0").role("master"), + hostType("master-1").bmc("usr1", "pwd1").role("master"), + hostType("master-2").bmc("usr2", "pwd2").role("master"), + hostType("worker-0").bmc("wrk0", "pwd0")), ExpectedSetting: settings(). secrets( - secret("server-0-bmc-secret").data("usr0", "pwd0"), - secret("server-1-bmc-secret").data("usr1", "pwd1"), - secret("server-2-bmc-secret").data("usr2", "pwd2")). + secret("master-0-bmc-secret").data("usr0", "pwd0"), + secret("master-1-bmc-secret").data("usr1", "pwd1"), + secret("master-2-bmc-secret").data("usr2", "pwd2"), + secret("worker-0-bmc-secret").data("wrk0", "pwd0")). hosts( - host("server-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), - host("server-1"), - host("server-2")).build(), + host("master-0").consumerRef("machine-0").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-1").consumerRef("machine-1").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("master-2").consumerRef("machine-2").annotation("baremetalhost.metal3.io/paused", "").externallyProvisioned(), + host("worker-0")).build(), }, } @@ -211,7 +241,13 @@ func TestHosts(t *testing.T) { } if tc.ExpectedSetting != nil { - assert.Equal(t, tc.ExpectedSetting, settings) + for i, h := range tc.ExpectedSetting.Hosts { + assert.Equal(t, h, settings.Hosts[i], fmt.Sprintf("%s and %s are not equal", h.Name, settings.Hosts[i].Name)) + } + + for i, s := range tc.ExpectedSetting.Secrets { + assert.Equal(t, s, settings.Secrets[i], s.Name, fmt.Sprintf("%s and %s are not equal", s.Name, settings.Secrets[i].Name)) + } } }) } diff --git a/pkg/tfvars/baremetal/baremetal_test.go b/pkg/tfvars/baremetal/baremetal_test.go index b6f2991b2d2..b33c0ed7eeb 100644 --- a/pkg/tfvars/baremetal/baremetal_test.go +++ b/pkg/tfvars/baremetal/baremetal_test.go @@ -43,11 +43,11 @@ func TestMastersSelectionByRole(t *testing.T) { expectedHosts: []string{"master-0", "master-1"}, }, { - scenario: "mixed-order", + scenario: "filter-all-workers", numControlPlaneReplicas: 1, platformHosts: platformHosts( - host("worker-0", "worker"), host("master-0", "master"), + host("worker-0", "worker"), host("worker-1", "worker"), ), expectedHosts: []string{"master-0"}, @@ -56,8 +56,8 @@ func TestMastersSelectionByRole(t *testing.T) { scenario: "hosts-norole", numControlPlaneReplicas: 2, platformHosts: platformHosts( - host("master-0", ""), host("worker-0", "worker"), + host("master-0", ""), host("master-1", ""), ), expectedHosts: []string{"master-0", "master-1"}, @@ -73,7 +73,17 @@ func TestMastersSelectionByRole(t 
*testing.T) { expectedHosts: []string{"master-0", "master-1", "master-2"}, }, { - scenario: "more-hosts-than-required", + scenario: "more-masters-than-required", + numControlPlaneReplicas: 2, + platformHosts: platformHosts( + host("master-0", "master"), + host("master-1", "master"), + host("master-2", "master"), + ), + expectedHosts: []string{"master-0", "master-1"}, + }, + { + scenario: "more-hosts-than-required-mixed", numControlPlaneReplicas: 2, platformHosts: platformHosts( host("master-0", "master"), @@ -93,28 +103,18 @@ func TestMastersSelectionByRole(t *testing.T) { expectedHosts: []string{"master-0", "master-1"}, }, { - scenario: "more-hosts-than-required-mixed", + scenario: "more-hosts-than-required-mixed-again", numControlPlaneReplicas: 2, platformHosts: platformHosts( host("worker-0", "worker"), - host("master-0", ""), host("worker-1", "worker"), - host("master-1", ""), host("worker-2", "worker"), + host("master-0", ""), + host("master-1", ""), host("master-2", ""), ), expectedHosts: []string{"master-0", "master-1"}, }, - { - scenario: "more-masters-than-required", - numControlPlaneReplicas: 1, - platformHosts: platformHosts( - host("server-0", ""), - host("server-1", ""), - host("server-2", "master"), - ), - expectedHosts: []string{"server-0"}, - }, } for _, tc := range cases { diff --git a/pkg/types/baremetal/defaults/platform.go b/pkg/types/baremetal/defaults/platform.go index 7c096a5bb19..fb6c55345ed 100644 --- a/pkg/types/baremetal/defaults/platform.go +++ b/pkg/types/baremetal/defaults/platform.go @@ -2,8 +2,10 @@ package defaults import ( "fmt" - "github.com/openshift/installer/pkg/ipnet" "net" + "sort" + + "github.com/openshift/installer/pkg/ipnet" "github.com/apparentlymart/go-cidr/cidr" "github.com/openshift/installer/pkg/types" @@ -116,4 +118,10 @@ func SetPlatformDefaults(p *baremetal.Platform, c *types.InstallConfig) { p.IngressVIP = vip[0] } } + + if p.Hosts != nil { + sort.SliceStable(p.Hosts, func(i, j int) bool { + return p.Hosts[i].CompareByRole(p.Hosts[j]) + }) + } } diff --git a/pkg/types/baremetal/defaults/platform_test.go b/pkg/types/baremetal/defaults/platform_test.go index d19c8d1b806..79fc0a8368d 100644 --- a/pkg/types/baremetal/defaults/platform_test.go +++ b/pkg/types/baremetal/defaults/platform_test.go @@ -189,3 +189,86 @@ func TestSetPlatformDefaults(t *testing.T) { }) } } + +func TestBaremetalHostsSortByRole(t *testing.T) { + cases := []struct { + name string + hosts []*baremetal.Host + expectedHosts []string + }{ + { + name: "default", + hosts: []*baremetal.Host{ + {Name: "master-0", Role: "master"}, + {Name: "master-1", Role: "master"}, + {Name: "master-2", Role: "master"}, + {Name: "worker-0", Role: "worker"}, + {Name: "worker-1", Role: "worker"}, + }, + expectedHosts: []string{ + "master-0", "master-1", "master-2", "worker-0", "worker-1", + }, + }, + { + name: "norole", + hosts: []*baremetal.Host{ + {Name: "master-0"}, + {Name: "master-1"}, + {Name: "master-2"}, + {Name: "worker-0"}, + {Name: "worker-1"}, + }, + expectedHosts: []string{ + "master-0", "master-1", "master-2", "worker-0", "worker-1", + }, + }, + { + name: "mixed", + hosts: []*baremetal.Host{ + {Name: "worker-0", Role: "worker"}, + {Name: "master-0", Role: "master"}, + {Name: "worker-1", Role: "worker"}, + {Name: "master-1", Role: "master"}, + {Name: "master-2", Role: "master"}, + }, + expectedHosts: []string{ + "master-0", "master-1", "master-2", "worker-0", "worker-1", + }, + }, + { + name: "mixed-norole", + hosts: []*baremetal.Host{ + {Name: "worker-0", Role: "worker"}, + {Name: 
"master-0", Role: "master"}, + {Name: "worker-1", Role: ""}, + {Name: "master-1", Role: "master"}, + {Name: "master-2", Role: "master"}, + }, + expectedHosts: []string{ + "master-0", "master-1", "master-2", "worker-0", "worker-1", + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + + ic := &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: testClusterName, + }, + Platform: types.Platform{ + BareMetal: &baremetal.Platform{ + Hosts: tc.hosts, + }, + }, + BaseDomain: "test", + } + SetPlatformDefaults(ic.Platform.BareMetal, ic) + + for i, h := range ic.Platform.BareMetal.Hosts { + assert.Equal(t, h.Name, tc.expectedHosts[i]) + } + }) + } +} diff --git a/pkg/types/baremetal/platform.go b/pkg/types/baremetal/platform.go index 1bb7abd47ab..ab2a2696988 100644 --- a/pkg/types/baremetal/platform.go +++ b/pkg/types/baremetal/platform.go @@ -25,6 +25,11 @@ const ( Legacy BootMode = "legacy" ) +const ( + masterRole string = "master" + workerRole string = "worker" +) + // Host stores all the configuration data for a baremetal host. type Host struct { Name string `json:"name,omitempty" validate:"required,uniqueField"` @@ -38,12 +43,19 @@ type Host struct { // IsMaster checks if the current host is a master func (h *Host) IsMaster() bool { - return h.Role == "master" + return h.Role == masterRole } // IsWorker checks if the current host is a worker func (h *Host) IsWorker() bool { - return h.Role == "worker" + return h.Role == workerRole +} + +var sortIndex = map[string]int{masterRole: -1, workerRole: 0, "": 1} + +// CompareByRole allows to compare two hosts by the Role +func (h *Host) CompareByRole(k *Host) bool { + return sortIndex[h.Role] < sortIndex[k.Role] } // ProvisioningNetwork determines how we will use the provisioning network. 
diff --git a/pkg/types/baremetal/validation/platform.go b/pkg/types/baremetal/validation/platform.go
index cc536113d5f..200f6c5d9da 100644
--- a/pkg/types/baremetal/validation/platform.go
+++ b/pkg/types/baremetal/validation/platform.go
@@ -300,21 +300,16 @@ func validateHostsCount(hosts []*baremetal.Host, installConfig *types.InstallCon
 	numWorkers := int64(0)
 
 	for _, h := range hosts {
-		// The first N hosts, not explicitly tagged with the worker role,
-		// are considered eligible as master
-		if numMasters < numRequiredMasters {
-			if !h.IsWorker() {
-				numMasters++
-			} else {
-				numWorkers++
-			}
+		if h.IsMaster() {
+			numMasters++
+		} else if h.IsWorker() {
+			numWorkers++
 		} else {
-			// The remaining hosts, not explicitly tagged with the master role,
-			// are considered eligible as worker
-			if !h.IsMaster() {
-				numWorkers++
-			} else {
+			logrus.Warnf("Host %s has no role configured", h.Name)
+			if numMasters < numRequiredMasters {
 				numMasters++
+			} else if numWorkers < numRequiredWorkers {
+				numWorkers++
 			}
 		}
 	}
@@ -323,10 +318,6 @@ func validateHostsCount(hosts []*baremetal.Host, installConfig *types.InstallCon
 		return fmt.Errorf("not enough hosts found (%v) to support all the configured ControlPlane replicas (%v)", numMasters, numRequiredMasters)
 	}
 
-	if numMasters > numRequiredMasters {
-		logrus.Warn("Found more hosts configured as master than required")
-	}
-
 	if numWorkers < numRequiredWorkers {
 		return fmt.Errorf("not enough hosts found (%v) to support all the configured Compute replicas (%v)", numWorkers, numRequiredWorkers)
 	}
diff --git a/pkg/types/baremetal/validation/platform_test.go b/pkg/types/baremetal/validation/platform_test.go
index 4353e930e57..9e50fe68c07 100644
--- a/pkg/types/baremetal/validation/platform_test.go
+++ b/pkg/types/baremetal/validation/platform_test.go
@@ -71,8 +71,8 @@ func TestValidatePlatform(t *testing.T) {
 			config: installConfig().
 				BareMetalPlatform(
 					platform().Hosts(
-						host1(),
-						host2().Role("worker"))).
+						host1().Role("worker"),
+						host2())).
 				ControlPlane(
 					machinePool().Replicas(3)).
 				Compute(
@@ -136,9 +136,9 @@ func TestValidatePlatform(t *testing.T) {
 			BareMetalPlatform(
 				platform().Hosts(
 					host1().Role("master"),
-					host2(),
+					host2().Role("worker"),
 					host3(),
-					host4().Role("worker"))).
+					host4())).
 			ControlPlane(
 				machinePool().Replicas(2)).
 			Compute(
@@ -165,29 +165,28 @@ func TestValidatePlatform(t *testing.T) {
 				platform().Hosts(
 					host1().Role("master"),
 					host2().Role("master"),
-					host3(),
+					host3().Role("worker"),
 					host4().Role("worker"),
-					host5().Role("worker"))).
+					host5())).
 				ControlPlane(
 					machinePool().Replicas(1)).
 				Compute(
 					machinePool().Replicas(1)).build(),
 		},
 		{
-			name: "not_enough_workers_norole",
+			name: "norole_for_workers",
 			config: installConfig().
 				BareMetalPlatform(
 					platform().Hosts(
-						host1(),
-						host2(),
+						host1().Role("master"),
+						host2().Role("master"),
 						host3().Role("master"),
-						host4().Role("master"),
-						host5().Role("master"))).
+						host4(),
+						host5())).
 				ControlPlane(
 					machinePool().Replicas(3)).
Compute(
 					machinePool().Replicas(2)).build(),
-			expected: "baremetal.Hosts: Required value: not enough hosts found \\(0\\) to support all the configured Compute replicas \\(2\\)",
 		},
 		{
 			name: "missing_name",

From 261e5fdf76e02adb5599d6e63a505d58333cd6fe Mon Sep 17 00:00:00 2001
From: Stephen Finucane
Date: Thu, 4 Nov 2021 16:21:45 +0000
Subject: [PATCH 08/10] openstack: Add stephenfin to owners

Signed-off-by: Stephen Finucane
---
 OWNERS_ALIASES | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index e372db08593..17dd7e98b91 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -28,6 +28,7 @@ aliases:
   - mandre
   - mdbooth
   - pierreprinetti
+  - stephenfin
  openstack-reviewers:
   - EmilienM
   - adduarte
@@ -35,6 +36,7 @@ aliases:
   - mandre
   - mdbooth
   - pierreprinetti
+  - stephenfin
  vsphere-approvers:
   - dav1x
   - jcpowermac

From b06e40517c111f99715088f03d7f27ff8acd7db6 Mon Sep 17 00:00:00 2001
From: ayesha54
Date: Tue, 9 Nov 2021 14:05:24 +0100
Subject: [PATCH 09/10] Make DiskType an enum for vSphere

---
 .../install.openshift.io_installconfigs.yaml | 7 ++--
 data/data/vsphere/pre-bootstrap/main.tf | 2 +-
 data/data/vsphere/variables-vsphere.tf | 5 +--
 docs/user/vsphere/customization.md | 1 +
 .../resource_vsphereprivate_import_ova.go | 12 +++----
 pkg/tfvars/vsphere/vsphere.go | 35 ++++++++++---------
 pkg/types/vsphere/platform.go | 18 ++++++++--
 7 files changed, 49 insertions(+), 31 deletions(-)

diff --git a/data/data/install.openshift.io_installconfigs.yaml b/data/data/install.openshift.io_installconfigs.yaml
index a3a7474f848..a5120a651de 100644
--- a/data/data/install.openshift.io_installconfigs.yaml
+++ b/data/data/install.openshift.io_installconfigs.yaml
@@ -2110,6 +2110,10 @@ spec:
                     type: integer
                 type: object
             type: object
+          diskType:
+            description: DiskType is the disk provisioning type for vSphere,
+              one of thin, thick or eagerZeroedThick (the default).
+            type: string
           folder:
             description: Folder is the absolute path of the folder that will
               be used and/or created for virtual machines. The absolute path
@@ -2133,9 +2137,6 @@ spec:
           vCenter:
             description: VCenter is the domain name or IP address of the
               vCenter.
             type: string
-          diskType:
-            description: DiskType is the name of the disk provisioning type for vsphere, for e.g eagerZeroedThick or thin, by default it will be thick.
-            type: string
         required:
         - datacenter
         - defaultDatastore
diff --git a/data/data/vsphere/pre-bootstrap/main.tf b/data/data/vsphere/pre-bootstrap/main.tf
index e04e1b40f4d..984c15d3a36 100644
--- a/data/data/vsphere/pre-bootstrap/main.tf
+++ b/data/data/vsphere/pre-bootstrap/main.tf
@@ -50,7 +50,7 @@ resource "vsphereprivate_import_ova" "import" {
   network = var.vsphere_network
   folder = local.folder
   tag = vsphere_tag.tag.id
-  diskType = var.disk_type
+  disk_type = var.vsphere_disk_type
 }
 
 resource "vsphere_tag_category" "category" {
diff --git a/data/data/vsphere/variables-vsphere.tf b/data/data/vsphere/variables-vsphere.tf
index 95107eb09fc..09bef4a846d 100644
--- a/data/data/vsphere/variables-vsphere.tf
+++ b/data/data/vsphere/variables-vsphere.tf
@@ -76,6 +76,7 @@ variable "vsphere_control_plane_num_cpus" {
 variable "vsphere_control_plane_cores_per_socket" {
   type = number
 }
-variable "disk_type" {
-  type = string
+variable "vsphere_disk_type" {
+  type    = string
+  default = "eagerZeroedThick"
 }
diff --git a/docs/user/vsphere/customization.md b/docs/user/vsphere/customization.md
index e2b67e4aecd..18cc1e332a7 100644
--- a/docs/user/vsphere/customization.md
+++ b/docs/user/vsphere/customization.md
@@ -18,6 +18,7 @@ Beyond the [platform-agnostic `install-config.yaml` properties](../customization
 * `cpus` (optional integer): The total number of virtual processor cores to assign a vm.
 * `coresPerSocket` (optional integer): The number of cores per socket in a vm. The number of vCPUs on the vm will be cpus/coresPerSocket (default is 1).
 * `memoryMB` (optional integer): The size of a VM's memory in megabytes.
+* `diskType` (optional string): The disk provisioning type for vSphere, e.g. `thin` or `thick`; defaults to `eagerZeroedThick`.
 
 ## Examples
diff --git a/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go b/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go
index a311c214812..f98ac3fe401 100644
--- a/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go
+++ b/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go
@@ -93,10 +93,10 @@ func resourceVSpherePrivateImportOva() *schema.Resource {
 				ForceNew: true,
 				ValidateFunc: validation.NoZeroValues,
 			},
-			"diskType": {
+			"disk_type": {
 				Type: schema.TypeString,
-				Description: "The name of the disk provisioning, for e.g eagerZeroedThick or thin, by default it will be thick.",
-				Required: false,
+				Description: "The disk provisioning type, one of thin, thick or eagerZeroedThick (the default).",
+				Required: true,
 				ForceNew: true,
 			},
 		},
@@ -356,12 +356,12 @@ func resourceVSpherePrivateImportOvaCreate(d *schema.ResourceData, meta interfac
 
 	var diskType types.OvfCreateImportSpecParamsDiskProvisioningType
 
-	if d.Get("diskType").(string) == "thin" {
+	if d.Get("disk_type") == "thin" {
 		diskType = types.OvfCreateImportSpecParamsDiskProvisioningTypeThin
-	} else if d.Get("diskType").(string) == "eagerZeroedThick" {
-		diskType = types.OvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick
-	} else {
+	} else if d.Get("disk_type") == "thick" {
 		diskType = types.OvfCreateImportSpecParamsDiskProvisioningTypeThick
+	} else {
+		diskType = types.OvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick
 	}
 	// This is a very minimal spec for importing
 	// an OVF.
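The branch above reduces to a three-way mapping from the `disk_type` string onto govmomi's OVF disk-provisioning enum. A standalone sketch of that mapping (the `provisioningFor` helper is illustrative, not part of the plugin's API; the enum constants come from govmomi as used above):

```go
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// provisioningFor condenses the disk type selection: "thin" and "thick"
// map to their OVF provisioning types, and anything else, including an
// empty value, falls back to eagerZeroedThick, matching the new
// Terraform variable default.
func provisioningFor(diskType string) types.OvfCreateImportSpecParamsDiskProvisioningType {
	switch diskType {
	case "thin":
		return types.OvfCreateImportSpecParamsDiskProvisioningTypeThin
	case "thick":
		return types.OvfCreateImportSpecParamsDiskProvisioningTypeThick
	default:
		return types.OvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick
	}
}

func main() {
	fmt.Println(provisioningFor("thin")) // thin
	fmt.Println(provisioningFor(""))     // eagerZeroedThick
}
```

Defaulting to eager-zeroed thick is the conservative choice here; thin provisioning remains an explicit opt-in via the install-config `diskType` field.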
diff --git a/pkg/tfvars/vsphere/vsphere.go b/pkg/tfvars/vsphere/vsphere.go index 97a7f22d6e4..a48f5fbaa38 100644 --- a/pkg/tfvars/vsphere/vsphere.go +++ b/pkg/tfvars/vsphere/vsphere.go @@ -8,25 +8,26 @@ import ( "github.com/pkg/errors" "github.com/openshift/installer/pkg/tfvars/internal/cache" + "github.com/openshift/installer/pkg/types/vsphere" ) type config struct { - VSphereURL string `json:"vsphere_url"` - VSphereUsername string `json:"vsphere_username"` - VSpherePassword string `json:"vsphere_password"` - MemoryMiB int64 `json:"vsphere_control_plane_memory_mib"` - DiskGiB int32 `json:"vsphere_control_plane_disk_gib"` - NumCPUs int32 `json:"vsphere_control_plane_num_cpus"` - NumCoresPerSocket int32 `json:"vsphere_control_plane_cores_per_socket"` - Cluster string `json:"vsphere_cluster"` - Datacenter string `json:"vsphere_datacenter"` - Datastore string `json:"vsphere_datastore"` - Folder string `json:"vsphere_folder"` - Network string `json:"vsphere_network"` - Template string `json:"vsphere_template"` - OvaFilePath string `json:"vsphere_ova_filepath"` - PreexistingFolder bool `json:"vsphere_preexisting_folder"` - DiskType string `json:"disk_type"` + VSphereURL string `json:"vsphere_url"` + VSphereUsername string `json:"vsphere_username"` + VSpherePassword string `json:"vsphere_password"` + MemoryMiB int64 `json:"vsphere_control_plane_memory_mib"` + DiskGiB int32 `json:"vsphere_control_plane_disk_gib"` + NumCPUs int32 `json:"vsphere_control_plane_num_cpus"` + NumCoresPerSocket int32 `json:"vsphere_control_plane_cores_per_socket"` + Cluster string `json:"vsphere_cluster"` + Datacenter string `json:"vsphere_datacenter"` + Datastore string `json:"vsphere_datastore"` + Folder string `json:"vsphere_folder"` + Network string `json:"vsphere_network"` + Template string `json:"vsphere_template"` + OvaFilePath string `json:"vsphere_ova_filepath"` + PreexistingFolder bool `json:"vsphere_preexisting_folder"` + DiskType vsphere.DiskType `json:"vsphere_disk_type"` } // TFVarsSources contains the parameters to be converted into Terraform variables @@ -37,7 +38,7 @@ type TFVarsSources struct { Cluster string ImageURL string PreexistingFolder bool - DiskType string + DiskType vsphere.DiskType } //TFVars generate vSphere-specific Terraform variables diff --git a/pkg/types/vsphere/platform.go b/pkg/types/vsphere/platform.go index 40c0fb16cd1..4565a4b947d 100644 --- a/pkg/types/vsphere/platform.go +++ b/pkg/types/vsphere/platform.go @@ -1,6 +1,20 @@ package vsphere -// Platform stores any global configuration used for vsphere platforms. +// DiskType is a disk provisioning type for vsphere. +type DiskType string + +const ( + // DiskTypeThin uses Thin disk type for vsphere in the cluster. + DiskTypeThin DiskType = "thin" + + // DiskTypeThick uses Thick disk type for vsphere in the cluster. + DiskTypeThick DiskType = "thick" + + // DiskTypeEagerZeroedThick uses EagerZeroedThick disk type for vsphere in the cluster. + DiskTypeEagerZeroedThick DiskType = "eagerZeroedThick" +) + +// Platform stores any global configuration used for vsphere platforms type Platform struct { // VCenter is the domain name or IP address of the vCenter. 
VCenter string `json:"vCenter"`
@@ -49,5 +63,5 @@ type Platform struct {
 	Network string `json:"network,omitempty"`
 
-	// Disk Type Thin specifies if thin disks should be use instead of thick
-	DiskType string `json:"diskType,omitempty"`
+	// DiskType is the disk provisioning type for vSphere, one of
+	// thin, thick or eagerZeroedThick (the default).
+	DiskType DiskType `json:"diskType,omitempty"`
 }

From 2740ff13c6c1b82d85e1513b9a288905c153af84 Mon Sep 17 00:00:00 2001
From: rna-afk
Date: Wed, 10 Nov 2021 09:47:08 -0500
Subject: [PATCH 10/10] vsphere: Relax vcenter hostname check

The installer's vSphere vCenter validation currently checks that the
first character is a lowercase letter, but digits should be allowed as
well, so this removes the first-character check from the validation.
---
 pkg/validate/validate.go | 4 ----
 pkg/validate/validate_test.go | 3 +--
 2 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/pkg/validate/validate.go b/pkg/validate/validate.go
index 7e17fdbc308..e05f8cc656b 100644
--- a/pkg/validate/validate.go
+++ b/pkg/validate/validate.go
@@ -277,9 +277,5 @@ func Host(v string) error {
 	if proxyIP != nil {
 		return nil
 	}
-	re := regexp.MustCompile("^[a-z]")
-	if !re.MatchString(v) {
-		return errors.New("domain name must begin with a lower-case letter")
-	}
 	return validateSubdomain(v)
 }
diff --git a/pkg/validate/validate_test.go b/pkg/validate/validate_test.go
index c45b1a53688..fb0e8f08e43 100644
--- a/pkg/validate/validate_test.go
+++ b/pkg/validate/validate_test.go
@@ -101,7 +101,7 @@ func TestVCenter(t *testing.T) {
 		{"single lowercase", "a", true},
 		{"single uppercase", "A", false},
 		{"contains whitespace", "abc D", false},
-		{"single number", "1", false},
+		{"single number", "1", true},
 		{"single dot", ".", false},
 		{"ends with dot", "a.", false},
 		{"starts with dot", ".a", false},
@@ -115,7 +115,6 @@ func TestVCenter(t *testing.T) {
 		{"contains non-ascii", "a日本語a", false},
 		{"URLs", "https://hello.openshift.org", false},
 		{"IP", "192.168.1.1", true},
-		{"invalid IP", "192.168.1", false},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {