Skip to content

Commit

Permalink
merge the cluster capacity (#184)
Browse files Browse the repository at this point in the history
Signed-off-by: Wei Liu <liuweixa@redhat.com>
  • Loading branch information
skeeey authored Jun 14, 2023
1 parent 023cdfd commit ecfb6c0
Show file tree
Hide file tree
Showing 5 changed files with 53 additions and 46 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ verify-crds: patch-crd

verify-gocilint:
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2
golangci-lint run --timeout=3m --modules-download-mode vendor ./...
golangci-lint run --timeout=5m --modules-download-mode vendor ./...

install-golang-gci:
go install github.com/daixiang0/gci@v0.10.1
Expand Down
29 changes: 12 additions & 17 deletions pkg/registration/helpers/testing/assertion.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,14 @@ package testing

import (
"bytes"
"io/ioutil"
"os"
"reflect"
"testing"

authorizationv1 "k8s.io/api/authorization/v1"
certv1 "k8s.io/api/certificates/v1"
certv1beta1 "k8s.io/api/certificates/v1beta1"
coordinationv1 "k8s.io/api/coordination/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
Expand All @@ -26,7 +25,7 @@ func AssertFinalizers(t *testing.T, obj runtime.Object, finalizers []string) {
if len(actual) == 0 && len(finalizers) == 0 {
return
}
if !reflect.DeepEqual(actual, finalizers) {
if !equality.Semantic.DeepEqual(actual, finalizers) {
t.Fatal(diff.ObjectDiff(actual, finalizers))
}
}
Expand All @@ -37,28 +36,24 @@ func AssertManagedClusterClientConfigs(t *testing.T, actual, expected []clusterv
if len(actual) == 0 && len(expected) == 0 {
return
}
if !reflect.DeepEqual(actual, expected) {
if !equality.Semantic.DeepEqual(actual, expected) {
t.Errorf("expected client configs %#v but got: %#v", expected, actual)
}
}

// AssertManagedClusterStatus sserts the actual managed cluster status is the same
// AssertManagedClusterStatus asserts the actual managed cluster status is the same
// with the expected
func AssertManagedClusterStatus(t *testing.T, actual, expected clusterv1.ManagedClusterStatus) {
if !reflect.DeepEqual(actual.Version, expected.Version) {
if !equality.Semantic.DeepEqual(actual.Version, expected.Version) {
t.Errorf("expected version %#v but got: %#v", expected.Version, actual.Version)
}
if !actual.Capacity["cpu"].Equal(expected.Capacity["cpu"]) {
t.Errorf("expected cpu capacity %#v but got: %#v", expected.Capacity["cpu"], actual.Capacity["cpu"])
}
if !actual.Capacity["memory"].Equal(expected.Capacity["memory"]) {
t.Errorf("expected memory capacity %#v but got: %#v", expected.Capacity["memory"], actual.Capacity["memory"])
}
if !actual.Allocatable["cpu"].Equal(expected.Allocatable["cpu"]) {
t.Errorf("expected cpu allocatable %#v but got: %#v", expected.Allocatable["cpu"], actual.Allocatable["cpu"])

if !equality.Semantic.DeepEqual(actual.Capacity, expected.Capacity) {
t.Errorf("expected cluster capacity %#v but got: %#v", expected.Capacity, actual.Capacity)
}
if !actual.Allocatable["memory"].Equal(expected.Allocatable["memory"]) {
t.Errorf("expected memory alocatabel %#v but got: %#v", expected.Allocatable["memory"], actual.Allocatable["memory"])

if !equality.Semantic.DeepEqual(actual.Allocatable, expected.Allocatable) {
t.Errorf("expected cluster allocatable %#v but got: %#v", expected.Allocatable, actual.Allocatable)
}
}

Expand Down Expand Up @@ -142,7 +137,7 @@ func AssertFileExist(t *testing.T, filePath string) {

// AssertFileContent asserts a given file content
func AssertFileContent(t *testing.T, filePath string, expectedContent []byte) {
content, _ := ioutil.ReadFile(filePath)
content, _ := os.ReadFile(filePath)
if !bytes.Equal(content, expectedContent) {
t.Errorf("expect %v, but got %v", expectedContent, content)
}
Expand Down
16 changes: 5 additions & 11 deletions pkg/registration/helpers/testing/testinghelpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,10 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"io/ioutil"
"math/big"
"math/rand"
"net"
"os"
"time"

certv1 "k8s.io/api/certificates/v1"
Expand Down Expand Up @@ -118,16 +118,10 @@ func NewJoinedManagedCluster() *clusterv1.ManagedCluster {
return managedCluster
}

func NewManagedClusterWithStatus(capacity, allocatable corev1.ResourceList) *clusterv1.ManagedCluster {
func NewManagedClusterWithStatus(capacity, allocatable clusterv1.ResourceList) *clusterv1.ManagedCluster {
managedCluster := NewJoinedManagedCluster()
managedCluster.Status.Capacity = clusterv1.ResourceList{
"cpu": capacity.Cpu().DeepCopy(),
"memory": capacity.Memory().DeepCopy(),
}
managedCluster.Status.Allocatable = clusterv1.ResourceList{
"cpu": allocatable.Cpu().DeepCopy(),
"memory": allocatable.Memory().DeepCopy(),
}
managedCluster.Status.Capacity = capacity
managedCluster.Status.Allocatable = allocatable
managedCluster.Status.Version = clusterv1.ManagedClusterVersion{
Kubernetes: kubeversion.Get().GitVersion,
}
Expand Down Expand Up @@ -499,7 +493,7 @@ func NewTestCert(commonName string, duration time.Duration) *TestCert {
}

func WriteFile(filename string, data []byte) {
if err := ioutil.WriteFile(filename, data, 0600); err != nil {
if err := os.WriteFile(filename, data, 0600); err != nil {
panic(err)
}
}
8 changes: 8 additions & 0 deletions pkg/registration/spoke/managedcluster/resource_reconcile.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,14 @@ func (r *resoureReconcile) reconcile(ctx context.Context, cluster *clusterv1.Man
return cluster, reconcileStop, fmt.Errorf("unable to get capacity and allocatable of managed cluster %q: %w", cluster.Name, err)
}

// we allow other components to update the cluster capacity, so we need to merge the current
// capacity into the updated one: if an entry in the current capacity does not exist in the
// updated capacity, we add it back.
for key, val := range cluster.Status.Capacity {
if _, ok := capacity[key]; !ok {
capacity[key] = val
}
}

cluster.Status.Capacity = capacity
cluster.Status.Allocatable = allocatable
cluster.Status.Version = *clusterVersion
Expand Down
44 changes: 27 additions & 17 deletions pkg/registration/spoke/managedcluster/resource_reconcile_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ import (
"time"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
Expand Down Expand Up @@ -82,27 +81,30 @@ func TestHealthCheck(t *testing.T) {
nodes []runtime.Object
httpStatus int
responseMsg string
validateActions func(t *testing.T, actions []clienttesting.Action)
validateActions func(t *testing.T, clusterClient *clusterfake.Clientset)
expectedErr string
}{
{
name: "there are no managed clusters",
clusters: []runtime.Object{},
validateActions: testingcommon.AssertNoActions,
expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
name: "there are no managed clusters",
clusters: []runtime.Object{},
validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) {
testingcommon.AssertNoActions(t, clusterClient.Actions())
},
expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
},
{
name: "kube-apiserver is not health",
clusters: []runtime.Object{testinghelpers.NewAcceptedManagedCluster()},
httpStatus: http.StatusInternalServerError,
responseMsg: "internal server error",
validateActions: func(t *testing.T, actions []clienttesting.Action) {
validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) {
expectedCondition := metav1.Condition{
Type: clusterv1.ManagedClusterConditionAvailable,
Status: metav1.ConditionFalse,
Reason: "ManagedClusterKubeAPIServerUnavailable",
Message: "The kube-apiserver is not ok, status code: 500, an error on the server (\"internal server error\") has prevented the request from succeeding",
}
actions := clusterClient.Actions()
testingcommon.AssertActions(t, actions, "patch")
patch := actions[0].(clienttesting.PatchAction).GetPatch()
managedCluster := &clusterv1.ManagedCluster{}
Expand All @@ -120,7 +122,7 @@ func TestHealthCheck(t *testing.T) {
testinghelpers.NewNode("testnode1", testinghelpers.NewResourceList(32, 64), testinghelpers.NewResourceList(16, 32)),
},
httpStatus: http.StatusOK,
validateActions: func(t *testing.T, actions []clienttesting.Action) {
validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) {
expectedCondition := metav1.Condition{
Type: clusterv1.ManagedClusterConditionAvailable,
Status: metav1.ConditionTrue,
Expand All @@ -140,6 +142,7 @@ func TestHealthCheck(t *testing.T) {
clusterv1.ResourceMemory: *resource.NewQuantity(int64(1024*1024*32), resource.BinarySI),
},
}
actions := clusterClient.Actions()
testingcommon.AssertActions(t, actions, "patch")
patch := actions[0].(clienttesting.PatchAction).GetPatch()
managedCluster := &clusterv1.ManagedCluster{}
Expand All @@ -156,13 +159,14 @@ func TestHealthCheck(t *testing.T) {
clusters: []runtime.Object{testinghelpers.NewAcceptedManagedCluster()},
nodes: []runtime.Object{},
httpStatus: http.StatusNotFound,
validateActions: func(t *testing.T, actions []clienttesting.Action) {
validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) {
expectedCondition := metav1.Condition{
Type: clusterv1.ManagedClusterConditionAvailable,
Status: metav1.ConditionTrue,
Reason: "ManagedClusterAvailable",
Message: "Managed cluster is available",
}
actions := clusterClient.Actions()
testingcommon.AssertActions(t, actions, "patch")
patch := actions[0].(clienttesting.PatchAction).GetPatch()
managedCluster := &clusterv1.ManagedCluster{}
Expand All @@ -178,13 +182,14 @@ func TestHealthCheck(t *testing.T) {
clusters: []runtime.Object{testinghelpers.NewAcceptedManagedCluster()},
nodes: []runtime.Object{},
httpStatus: http.StatusForbidden,
validateActions: func(t *testing.T, actions []clienttesting.Action) {
validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) {
expectedCondition := metav1.Condition{
Type: clusterv1.ManagedClusterConditionAvailable,
Status: metav1.ConditionTrue,
Reason: "ManagedClusterAvailable",
Message: "Managed cluster is available",
}
actions := clusterClient.Actions()
testingcommon.AssertActions(t, actions, "patch")
patch := actions[0].(clienttesting.PatchAction).GetPatch()
managedCluster := &clusterv1.ManagedCluster{}
Expand All @@ -199,18 +204,22 @@ func TestHealthCheck(t *testing.T) {
name: "merge managed cluster status",
clusters: []runtime.Object{
testinghelpers.NewManagedClusterWithStatus(
corev1.ResourceList{
clusterv1.ResourceList{
"sockets": *resource.NewQuantity(int64(1200), resource.DecimalExponent),
"cores": *resource.NewQuantity(int64(128), resource.DecimalExponent),
},
testinghelpers.NewResourceList(16, 32)),
clusterv1.ResourceList{
clusterv1.ResourceCPU: *resource.NewQuantity(int64(16), resource.DecimalExponent),
clusterv1.ResourceMemory: *resource.NewQuantity(int64(1024*1024*32), resource.BinarySI),
},
),
},
nodes: []runtime.Object{
testinghelpers.NewNode("testnode1", testinghelpers.NewResourceList(32, 64), testinghelpers.NewResourceList(16, 32)),
testinghelpers.NewNode("testnode2", testinghelpers.NewResourceList(32, 64), testinghelpers.NewResourceList(16, 32)),
},
httpStatus: http.StatusOK,
validateActions: func(t *testing.T, actions []clienttesting.Action) {
validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) {
expectedCondition := metav1.Condition{
Type: clusterv1.ManagedClusterConditionJoined,
Status: metav1.ConditionTrue,
Expand All @@ -232,10 +241,11 @@ func TestHealthCheck(t *testing.T) {
clusterv1.ResourceMemory: *resource.NewQuantity(int64(1024*1024*64), resource.BinarySI),
},
}
actions := clusterClient.Actions()
testingcommon.AssertActions(t, actions, "patch")
patch := actions[0].(clienttesting.PatchAction).GetPatch()
managedCluster := &clusterv1.ManagedCluster{}
err := json.Unmarshal(patch, managedCluster)

managedCluster, err := clusterClient.ClusterV1().ManagedClusters().Get(
context.TODO(), testinghelpers.TestManagedClusterName, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
Expand Down Expand Up @@ -279,7 +289,7 @@ func TestHealthCheck(t *testing.T) {
syncErr := ctrl.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, ""))
testingcommon.AssertError(t, syncErr, c.expectedErr)

c.validateActions(t, clusterClient.Actions())
c.validateActions(t, clusterClient)
})
}
}

0 comments on commit ecfb6c0

Please sign in to comment.