From 6b0942cbec61a89053cb679bb4e838d28a43b228 Mon Sep 17 00:00:00 2001
From: Jesse Nelson
Date: Tue, 19 Dec 2023 22:54:43 -0600
Subject: [PATCH] fix(plugin): Fix ability to have custom value for openebs.io/nodeid (#451)

* fix(plugin): fix node id to node mapping

Signed-off-by: Jesse Nelson

* add change log

Signed-off-by: Jesse Nelson

* fix case when nodeid not set

Signed-off-by: Jesse Nelson

* simplify logic

Signed-off-by: Jesse Nelson

* update doc

Signed-off-by: Jesse Nelson

* fix cases where nodeid not set

Signed-off-by: Jesse Nelson

* check for error first

Signed-off-by: Jesse Nelson

---------

Signed-off-by: Jesse Nelson
---
 README.md                               |  2 ++
 changelogs/unreleased/450-nodeid-fix.md |  1 +
 pkg/driver/controller.go                |  7 +++-
 pkg/mgmt/zfsnode/start.go               | 43 ++++++++++++++++++++++---
 4 files changed, 48 insertions(+), 5 deletions(-)
 create mode 100644 changelogs/unreleased/450-nodeid-fix.md

diff --git a/README.md b/README.md
index 4f11a4002..7c8a88d4c 100644
--- a/README.md
+++ b/README.md
@@ -82,6 +82,8 @@ Configure the custom topology keys (if needed). This can be used for many purpos
 https://github.com/openebs/zfs-localpv/blob/HEAD/docs/faq.md#6-how-to-add-custom-topology-key
 
 ### Installation
+In order to support moving data to a new node later on, you must label each node with a unique value for `openebs.io/nodeid`.
+For more information on migrating data, please [see here](docs/faq.md#8-how-to-migrate-pvs-to-the-new-node-in-case-old-node-is-not-accessible)
 We can install the latest release of OpenEBS ZFS driver by running the following command:
 
 ```bash

diff --git a/changelogs/unreleased/450-nodeid-fix.md b/changelogs/unreleased/450-nodeid-fix.md
new file mode 100644
index 000000000..eeef98d0d
--- /dev/null
+++ b/changelogs/unreleased/450-nodeid-fix.md
@@ -0,0 +1 @@
+fix regression introduced with v2.0.0 that caused the plugin code to not be able to start when a user sets openebs.io/nodeid
\ No newline at end of file

diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go
index 154655641..23acb2b1c 100644
--- a/pkg/driver/controller.go
+++ b/pkg/driver/controller.go
@@ -918,7 +918,12 @@ func (cs *controller) GetCapacity(
 
     var availableCapacity int64
     for _, nodeName := range nodeNames {
-        v, exists, err := zfsNodesCache.GetByKey(zfs.OpenEBSNamespace + "/" + nodeName)
+        mappedNodeId, mapErr := zfs.GetNodeID(nodeName)
+        if mapErr != nil {
+            klog.Warningf("Unable to find mapped node id for %s", nodeName)
+            mappedNodeId = nodeName
+        }
+        v, exists, err := zfsNodesCache.GetByKey(zfs.OpenEBSNamespace + "/" + mappedNodeId)
         if err != nil {
             klog.Warning("unexpected error after querying the zfsNode informer cache")
             continue
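A note on the `controller.go` hunk above: `GetCapacity` now keys the zfsNode informer-cache lookup by the mapped node ID rather than the raw Kubernetes node name, and falls back to the node name when no mapping is found. The body of `zfs.GetNodeID` is not part of this diff, so the sketch below is only an illustration of what such a name-to-ID mapping could look like, assuming the ID is read from the `openebs.io/nodeid` label on the Node object; `getNodeID` and `topologyKey` are hypothetical names, not code from this change.

```go
package zfssketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// topologyKey stands in for zfs.ZFSTopologyKey ("openebs.io/nodeid").
const topologyKey = "openebs.io/nodeid"

// getNodeID is a hypothetical sketch of a node-name-to-node-ID mapping:
// it reads the openebs.io/nodeid label from the Node object and falls
// back to the node name when the label is absent, mirroring the fallback
// behavior in the GetCapacity hunk above.
func getNodeID(client kubernetes.Interface, nodeName string) (string, error) {
	node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	if id, ok := node.Labels[topologyKey]; ok && id != "" {
		return id, nil
	}
	// No custom label set: the node ID defaults to the node name itself.
	return nodeName, nil
}
```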
diff --git a/pkg/mgmt/zfsnode/start.go b/pkg/mgmt/zfsnode/start.go
index 395ae4f92..20cf6bdad 100644
--- a/pkg/mgmt/zfsnode/start.go
+++ b/pkg/mgmt/zfsnode/start.go
@@ -18,6 +18,14 @@ package zfsnode
 
 import (
     "context"
+    "fmt"
+    v1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/fields"
+    "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/selection"
+    "k8s.io/klog/v2"
+    "os"
+    "strings"
     "sync"
     "time"
 
@@ -27,7 +35,6 @@ import (
     "github.com/openebs/zfs-localpv/pkg/zfs"
     "github.com/pkg/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/client-go/kubernetes"
 )
@@ -60,10 +67,38 @@ func Start(controllerMtx *sync.RWMutex, stopCh <-chan struct{}) error {
             options.FieldSelector = fields.OneTermEqualSelector("metadata.name", zfs.NodeID).String()
         }))
 
-    k8sNode, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), zfs.NodeID, metav1.GetOptions{})
-    if err != nil {
-        return errors.Wrapf(err, "fetch k8s node %s", zfs.NodeID)
+    nodeName := os.Getenv("OPENEBS_NODE_NAME")
+    var k8sNode v1.Node
+
+    if len(strings.TrimSpace(zfs.NodeID)) == 0 || nodeName == zfs.NodeID {
+        k8sNodeCandidate, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), zfs.NodeID, metav1.GetOptions{})
+
+        if err != nil {
+            return errors.Wrapf(err, "fetch k8s node %s", zfs.NodeID)
+        }
+
+        k8sNode = *k8sNodeCandidate
+
+    } else {
+        topologyRequirement, requirementError := labels.NewRequirement(zfs.ZFSTopologyKey, selection.Equals, []string{zfs.NodeID})
+        if requirementError != nil {
+            return errors.Wrapf(requirementError, "Unable to generate topology requirement by %s for node id %s", zfs.ZFSTopologyKey, zfs.NodeID)
+        }
+        topologySelector := labels.NewSelector().Add(*topologyRequirement).String()
+        klog.Infof("The topology selector is %s", topologySelector)
+
+        k8sNodeCandidate, err := kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
+            LabelSelector: topologySelector,
+        })
+        if err != nil {
+            return errors.Wrapf(err, "error trying to find node with label %s having value %s", zfs.ZFSTopologyKey, zfs.NodeID)
+        }
+        if k8sNodeCandidate == nil || len(k8sNodeCandidate.Items) != 1 {
+            return fmt.Errorf("unable to retrieve a single node by %s for %s", zfs.ZFSTopologyKey, zfs.NodeID)
+        }
+        k8sNode = k8sNodeCandidate.Items[0]
     }
+
     isTrue := true
     // as object returned by client go clears all TypeMeta from it.
     nodeGVK := &schema.GroupVersionKind{
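To summarize the new `start.go` flow: when `openebs.io/nodeid` is unset, or equals the Kubernetes node name from `OPENEBS_NODE_NAME`, the node is fetched directly by name as before; otherwise the agent locates its Node object through a label selector on the topology key and requires exactly one match. The self-contained sketch below condenses that branching for review; `findSelfNode` and `topologyKey` are illustrative names (standing in for the patch's inline logic and `zfs.ZFSTopologyKey`), not code from this change.

```go
package zfssketch

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	"k8s.io/client-go/kubernetes"
)

const topologyKey = "openebs.io/nodeid"

// findSelfNode condenses the two lookup paths added in start.go: a direct
// Get by name when the node ID is empty or matches the node name, and a
// label-selector List on the topology key otherwise, which must return
// exactly one node.
func findSelfNode(client kubernetes.Interface, nodeID, nodeName string) (*v1.Node, error) {
	if nodeID == "" || nodeID == nodeName {
		// Default case: the node ID is the Kubernetes node name.
		return client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	}
	// Custom node ID: find the node carrying openebs.io/nodeid=<nodeID>.
	req, err := labels.NewRequirement(topologyKey, selection.Equals, []string{nodeID})
	if err != nil {
		return nil, err
	}
	nodeList, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
		LabelSelector: labels.NewSelector().Add(*req).String(),
	})
	if err != nil {
		return nil, err
	}
	if len(nodeList.Items) != 1 {
		return nil, fmt.Errorf("expected exactly one node labeled %s=%s, got %d",
			topologyKey, nodeID, len(nodeList.Items))
	}
	return &nodeList.Items[0], nil
}
```

Under this scheme, a node labeled with a unique custom value for `openebs.io/nodeid` (the labeling step the README addition now requires) is resolved via the selector path even though its Kubernetes node name differs from the node ID, which is exactly the startup regression the changelog entry describes.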