diff --git a/pkg/cli/config/initialize/basecamp.go b/pkg/cli/config/initialize/basecamp.go
index 3fa0ac0da..79086eb28 100644
--- a/pkg/cli/config/initialize/basecamp.go
+++ b/pkg/cli/config/initialize/basecamp.go
@@ -119,6 +119,8 @@ type BaseCampGlobals struct {
 }
 
 // Basecamp Defaults
+// See disks.go for disk layout, filesystems, and mounts
+
 // We should try to make these customizable by the user at some point
 // k8sRunCMD has the list of scripts to run on NCN boot for
 // all members of the kubernetes cluster
@@ -603,19 +605,28 @@ func MakeBaseCampfromNCNs(
 			ShastaRole: "ncn-" + strings.ToLower(ncn.Subrole),
 			IPAM:       ncnIPAM,
 		}
+
 		userDataMap := make(map[string]interface{})
-		if ncn.Subrole == "Storage" {
-			if strings.HasSuffix(
-				ncn.Hostname,
-				"001",
-			) {
+		switch ncn.Subrole {
+		case "Storage":
+			userDataMap["bootcmd"] = cephBootCMD
+			userDataMap["fs_setup"] = cephFileSystems
+			userDataMap["mounts"] = cephMounts
+			if strings.HasSuffix(ncn.Hostname, "001") {
 				userDataMap["runcmd"] = cephRunCMD
 			} else {
 				userDataMap["runcmd"] = cephWorkerRunCMD
 			}
-		} else {
-			userDataMap["runcmd"] = k8sRunCMD
+		case "Master":
+			userDataMap["bootcmd"] = masterBootCMD
+			userDataMap["fs_setup"] = masterFileSystems
+			userDataMap["mounts"] = masterMounts
+		case "Worker":
+			userDataMap["bootcmd"] = workerBootCMD
+			userDataMap["fs_setup"] = workerFileSystems
+			userDataMap["mounts"] = workerMounts
 		}
+
 		userDataMap["hostname"] = ncn.Hostname
 		userDataMap["local_hostname"] = ncn.Hostname
 		userDataMap["mac0"] = mac0Interface
diff --git a/pkg/cli/config/initialize/disks.go b/pkg/cli/config/initialize/disks.go
new file mode 100644
index 000000000..6019434de
--- /dev/null
+++ b/pkg/cli/config/initialize/disks.go
@@ -0,0 +1,333 @@
+/*
+ MIT License
+
+ (C) Copyright 2024 Hewlett Packard Enterprise Development LP
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+package initialize
+
+import "fmt"
+
+// Provides configuration for lvm, filesystems and mounts for NCNS.
+const (
+	crays3cache = "CRAYS3CACHE"
+	conrun      = "CONRUN"
+	conlib      = "CONLIB"
+	k8slet      = "K8SLET"
+	cephetc     = "CEPHETC"
+	cephvar     = "CEPHVAR"
+	contain     = "CONTAIN"
+	volumeGroup = "metalvg0"
+	raidArray   = "/dev/md/AUX"
+)
+
+// master bootcmd (cloud-init user-data)
+var masterBootCMD = [][]string{
+	{
+		"cloud-init-per",
+		"once",
+		"create_PV",
+		"pvcreate",
+		"-ff",
+		"-y",
+		"-M",
+		"lvm2",
+		raidArray,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		"create_VG",
+		"vgcreate",
+		volumeGroup,
+		raidArray,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		fmt.Sprintf("create_LV_%s", crays3cache),
+		"lvcreate",
+		"-l",
+		"25%PVS",
+		"-n",
+		crays3cache,
+		"-y",
+		volumeGroup,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		fmt.Sprintf("create_LV_%s", conrun),
+		"lvcreate",
+		"-l",
+		"4%PVS",
+		"-n",
+		conrun,
+		"-y",
+		volumeGroup,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		fmt.Sprintf("create_LV_%s", conlib),
+		"lvcreate",
+		"-l",
+		"36%PVS",
+		"-n",
+		conlib,
+		"-y",
+		volumeGroup,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		fmt.Sprintf("create_LV_%s", k8slet),
+		"lvcreate",
+		"-l",
+		"10%PVS",
+		"-n",
+		k8slet,
+		"-y",
+		volumeGroup,
+	},
+}
+
+// master fs_setup (cloud-init user-data)
+var masterFileSystems = []map[string]interface{}{
+	{
+		"label":      crays3cache,
+		"filesystem": "ext4",
+		"device":     fmt.Sprintf("/dev/disk/by-id/dm-name-%s-%s", volumeGroup, crays3cache),
+		"partition":  "auto",
+		"overwrite":  true,
+	},
+	{
+		"label":      conrun,
+		"filesystem": "xfs",
+		"device":     fmt.Sprintf("/dev/disk/by-id/dm-name-%s-%s", volumeGroup, conrun),
+		"partition":  "auto",
+		"overwrite":  true,
+	},
+	{
+		"label":      conlib,
+		"filesystem": "xfs",
+		"device":     fmt.Sprintf("/dev/disk/by-id/dm-name-%s-%s", volumeGroup, conlib),
+		"partition":  "auto",
+		"overwrite":  true,
+	},
+	{
+		"label":      k8slet,
+		"filesystem": "xfs",
+		"device":     fmt.Sprintf("/dev/disk/by-id/dm-name-%s-%s", volumeGroup, k8slet),
+		"partition":  "auto",
+		"overwrite":  true,
+	},
+}
+
+// master mounts (cloud-init user-data)
+var masterMounts = [][]string{
+	{
+		fmt.Sprintf("LABEL=%s", crays3cache),
+		"/var/lib/s3fs_cache",
+		"ext4",
+		"defaults,nofail",
+	},
+	{
+		fmt.Sprintf("LABEL=%s", conrun),
+		"/run/containerd",
+		"xfs",
+		"defaults,nofail",
+	},
+	{
+		fmt.Sprintf("LABEL=%s", conlib),
+		"/var/lib/containerd",
+		"xfs",
+		"defaults,nofail",
+	},
+	{
+		fmt.Sprintf("LABEL=%s", k8slet),
+		"/var/lib/kubelet",
+		"xfs",
+		"defaults,nofail",
+	},
+}
+
+// worker bootcmd (cloud-init user-data)
+var workerBootCMD = [][]string{
+	{
+		"cloud-init-per",
+		"once",
+		"create_PV",
+		"pvcreate",
+		"-ff",
+		"-y",
+		"-M",
+		"lvm2",
+		raidArray,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		"create_VG",
+		"vgcreate",
+		volumeGroup,
+		raidArray,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		fmt.Sprintf("create_LV_%s", crays3cache),
+		"lvcreate",
+		"-L",
+		"200GB",
+		"-n",
+		crays3cache,
+		"-y",
+		volumeGroup,
+	},
+}
+
+// worker fs_setup (cloud-init user-data)
+var workerFileSystems = []map[string]interface{}{
+	{
+		"label":      crays3cache,
+		"filesystem": "ext4",
+		"device":     fmt.Sprintf("/dev/disk/by-id/dm-name-%s-%s", volumeGroup, crays3cache),
+		"partition":  "auto",
+		"overwrite":  true,
+	},
+}
+
+// worker mounts (cloud-init user-data)
+var workerMounts = [][]string{
+	{
+		fmt.Sprintf("LABEL=%s", crays3cache),
+		"/var/lib/s3fs_cache",
+		"ext4",
+		"defaults,nofail",
+	},
+}
+
+// storage bootcmd (cloud-init user-data)
+var cephBootCMD = [][]string{
+	{
+		"cloud-init-per",
+		"once",
+		"create_PV",
+		"pvcreate",
+		"-ff",
+		"-y",
+		"-M",
+		"lvm2",
+		raidArray,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		"create_VG",
+		"vgcreate",
+		volumeGroup,
+		raidArray,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		fmt.Sprintf("create_LV_%s", cephetc),
+		"lvcreate",
+		"-L",
+		"10GB",
+		"-n",
+		cephetc,
+		"-y",
+		volumeGroup,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		fmt.Sprintf("create_LV_%s", cephvar),
+		"lvcreate",
+		"-L",
+		"60GB",
+		"-n",
+		cephvar,
+		"-y",
+		volumeGroup,
+	},
+	{
+		"cloud-init-per",
+		"once",
+		fmt.Sprintf("create_LV_%s", contain),
+		"lvcreate",
+		"-L",
+		"60GB",
+		"-n",
+		contain,
+		"-y",
+		volumeGroup,
+	},
+}
+
+// storage fs_setup (cloud-init user-data)
+var cephFileSystems = []map[string]interface{}{
+	{
+		"label":      cephetc,
+		"filesystem": "ext4",
+		"device":     fmt.Sprintf("/dev/disk/by-id/dm-name-%s-%s", volumeGroup, cephetc),
+		"partition":  "auto",
+		"overwrite":  true,
+	},
+	{
+		"label":      cephvar,
+		"filesystem": "ext4",
+		"device":     fmt.Sprintf("/dev/disk/by-id/dm-name-%s-%s", volumeGroup, cephvar),
+		"partition":  "auto",
+		"overwrite":  true,
+	},
+	{
+		"label":      contain,
+		"filesystem": "xfs",
+		"device":     fmt.Sprintf("/dev/disk/by-id/dm-name-%s-%s", volumeGroup, contain),
+		"partition":  "auto",
+		"overwrite":  true,
+	},
+}
+
+// storage mounts (cloud-init user-data)
+var cephMounts = [][]string{
+	{
+		fmt.Sprintf("LABEL=%s", cephetc),
+		"/etc/ceph",
+		"auto",
+		"defaults",
+	},
+	{
+		fmt.Sprintf("LABEL=%s", cephvar),
+		"/var/lib/ceph",
+		"auto",
+		"defaults",
+	},
+	{
+		fmt.Sprintf("LABEL=%s", contain),
+		"/var/lib/containers",
+		"auto",
+		"defaults",
+	},
+}