Add machine reconcile delete on termination #336
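This change adds an opt-in annotation, ociclusters.x-k8s.io/delete-machine-on-instance-termination. When the annotation is present on the CAPI Machine and the backing OCI instance reaches the TERMINATED lifecycle state, the controller deletes the Machine; annotated machines whose instances are running are requeued every 300 seconds as a safety net in case a termination event is missed. The sketch below is not part of this diff — the scheme setup, machine name, and namespace are illustrative placeholders — and shows one way a caller might opt a Machine in using a controller-runtime client:

package main

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

// deleteOnTermination mirrors infrastructurev1beta2.DeleteMachineOnInstanceTermination.
const deleteOnTermination = "ociclusters.x-k8s.io/delete-machine-on-instance-termination"

func main() {
	ctx := context.Background()

	// Build a client for the current kubeconfig and register the CAPI types.
	scheme := runtime.NewScheme()
	if err := clusterv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	c, err := client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// Fetch the Machine, add the annotation (only its presence is checked), and update it.
	machine := &clusterv1.Machine{}
	key := client.ObjectKey{Namespace: "default", Name: "my-machine"} // placeholder names
	if err := c.Get(ctx, key, machine); err != nil {
		panic(err)
	}
	if machine.Annotations == nil {
		machine.Annotations = map[string]string{}
	}
	machine.Annotations[deleteOnTermination] = ""
	if err := c.Update(ctx, machine); err != nil {
		panic(err)
	}
}

Annotating the Machine object directly, for example with kubectl annotate, has the same effect, since the reconciler only checks for the key's presence.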

3 changes: 2 additions & 1 deletion api/v1beta2/ocimachine_types.go
@@ -28,7 +28,8 @@ import (
const (
// MachineFinalizer allows ReconcileMachine to clean up OCI resources associated with OCIMachine before
// removing it from the apiserver.
MachineFinalizer = "ocimachine.infrastructure.cluster.x-k8s.io"
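// DeleteMachineOnInstanceTermination is an opt-in annotation on the CAPI Machine. When it is present,
// the Machine is deleted once the corresponding OCI instance reaches the Terminated lifecycle state.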
DeleteMachineOnInstanceTermination = "ociclusters.x-k8s.io/delete-machine-on-instance-termination"
)

// OCIMachineSpec defines the desired state of OCIMachine
23 changes: 22 additions & 1 deletion controllers/ocimachine_controller.go
@@ -293,6 +293,13 @@ func (r *OCIMachineReconciler) OCIManagedClusterToOCIMachines() handler.MapFunc
func (r *OCIMachineReconciler) reconcileNormal(ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope) (ctrl.Result, error) {
controllerutil.AddFinalizer(machineScope.OCIMachine, infrastructurev1beta2.MachineFinalizer)
machine := machineScope.OCIMachine
infraMachine := machineScope.Machine

annotations := infraMachine.GetAnnotations()
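// Opt-in is determined by the presence of the annotation on the CAPI Machine; its value is ignored.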
deleteMachineOnTermination := false
if annotations != nil {
_, deleteMachineOnTermination = annotations[infrastructurev1beta2.DeleteMachineOnInstanceTermination]
}
// Make sure bootstrap data is available and populated.
if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
r.Recorder.Event(machine, corev1.EventTypeNormal, infrastructurev1beta2.WaitingForBootstrapDataReason, "Bootstrap data secret reference is not yet available")
@@ -364,7 +371,21 @@ func (r *OCIMachineReconciler) reconcileNormal(ctx context.Context, logger logr.
"Instance is in ready state")
conditions.MarkTrue(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition)
machineScope.SetReady()
if deleteMachineOnTermination {
// Typically a terminated VM produces machine events, so the 300 second requeue should not be
// needed; requeue anyway as a safety net in case an event is missed.
return reconcile.Result{RequeueAfter: 300 * time.Second}, nil
} else {
return reconcile.Result{}, nil
}
case core.InstanceLifecycleStateTerminated:
if deleteMachineOnTermination && infraMachine.DeletionTimestamp == nil {
logger.Info("Deleting underlying machine as instance is terminated")
if err := machineScope.Client.Delete(ctx, infraMachine); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to delete machine %s/%s", infraMachine.Namespace, infraMachine.Name)
}
}
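// Fall through so a terminated instance is still reported as a provisioning failure below.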
fallthrough
default:
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, "")
machineScope.SetFailureReason(capierrors.CreateMachineError)
125 changes: 112 additions & 13 deletions controllers/ocimachine_controller_test.go
@@ -40,8 +40,10 @@ import (
"k8s.io/client-go/tools/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/client/interceptor"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -196,19 +198,22 @@ func TestNormalReconciliationFunction(t *testing.T) {
teardown := func(t *testing.T, g *WithT) {
mockCtrl.Finish()
}
type test struct {
name string
errorExpected bool
expectedEvent string
eventNotExpected string
conditionAssertion []conditionAssertion
deleteMachines []clusterv1.Machine
testSpecificSetup func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbclient *mock_nlb.MockNetworkLoadBalancerClient)
validate func(g *WithT, t *test, result ctrl.Result)
}
tests := []test{
{
name: "instance in provisioning state",
errorExpected: false,
conditionAssertion: []conditionAssertion{{infrastructurev1beta2.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrastructurev1beta2.InstanceNotReadyReason}},
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbclient *mock_nlb.MockNetworkLoadBalancerClient) {
computeClient.EXPECT().GetInstance(gomock.Any(), gomock.Eq(core.GetInstanceRequest{
InstanceId: common.String("test"),
})).
@@ -224,7 +229,59 @@
name: "instance in running state",
errorExpected: false,
conditionAssertion: []conditionAssertion{{infrastructurev1beta2.InstanceReadyCondition, corev1.ConditionTrue, "", ""}},
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbclient *mock_nlb.MockNetworkLoadBalancerClient) {
machineScope.OCIMachine.Status.Addresses = []clusterv1.MachineAddress{
{
Type: clusterv1.MachineInternalIP,
Address: "1.1.1.1",
},
}
computeClient.EXPECT().GetInstance(gomock.Any(), gomock.Eq(core.GetInstanceRequest{
InstanceId: common.String("test"),
})).
Return(core.GetInstanceResponse{
Instance: core.Instance{
Id: common.String("test"),
LifecycleState: core.InstanceLifecycleStateRunning,
},
}, nil)
},
},
{
name: "instance in running state, reconcile every 5 minutes",
errorExpected: false,
conditionAssertion: []conditionAssertion{{infrastructurev1beta2.InstanceReadyCondition, corev1.ConditionTrue, "", ""}},
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbclient *mock_nlb.MockNetworkLoadBalancerClient) {
if machineScope.Machine.ObjectMeta.Annotations == nil {
machineScope.Machine.ObjectMeta.Annotations = make(map[string]string)
}
machineScope.Machine.ObjectMeta.Annotations[infrastructurev1beta2.DeleteMachineOnInstanceTermination] = ""
machineScope.OCIMachine.Status.Addresses = []clusterv1.MachineAddress{
{
Type: clusterv1.MachineInternalIP,
Address: "1.1.1.1",
},
}
computeClient.EXPECT().GetInstance(gomock.Any(), gomock.Eq(core.GetInstanceRequest{
InstanceId: common.String("test"),
})).
Return(core.GetInstanceResponse{
Instance: core.Instance{
Id: common.String("test"),
LifecycleState: core.InstanceLifecycleStateRunning,
},
}, nil)
},
},
{
name: "instance in running state, reconcile every 5 minutes",
errorExpected: false,
conditionAssertion: []conditionAssertion{{infrastructurev1beta2.InstanceReadyCondition, corev1.ConditionTrue, "", ""}},
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbclient *mock_nlb.MockNetworkLoadBalancerClient) {
if machineScope.Machine.ObjectMeta.Annotations == nil {
machineScope.Machine.ObjectMeta.Annotations = make(map[string]string)
}
machineScope.Machine.ObjectMeta.Annotations[infrastructurev1beta2.DeleteMachineOnInstanceTermination] = ""
machineScope.OCIMachine.Status.Addresses = []clusterv1.MachineAddress{
{
Type: clusterv1.MachineInternalIP,
@@ -241,13 +298,49 @@
},
}, nil)
},
validate: func(g *WithT, t *test, result ctrl.Result) {
g.Expect(result.RequeueAfter).To(Equal(300 * time.Second))
},
},
{
name: "instance in terminated state",
errorExpected: true,
expectedEvent: "invalid lifecycle state TERMINATED",
conditionAssertion: []conditionAssertion{{infrastructurev1beta2.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrastructurev1beta2.InstanceProvisionFailedReason}},
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbclient *mock_nlb.MockNetworkLoadBalancerClient) {
computeClient.EXPECT().GetInstance(gomock.Any(), gomock.Eq(core.GetInstanceRequest{
InstanceId: common.String("test"),
})).
Return(core.GetInstanceResponse{
Instance: core.Instance{
Id: common.String("test"),
LifecycleState: core.InstanceLifecycleStateTerminated,
},
}, nil)
},
validate: func(g *WithT, t *test, result ctrl.Result) {
g.Expect(result.RequeueAfter).To(Equal(0 * time.Second))
},
},
{
name: "instance in terminated state, machine has to be deleted",
errorExpected: true,
expectedEvent: "invalid lifecycle state TERMINATED",
conditionAssertion: []conditionAssertion{{infrastructurev1beta2.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrastructurev1beta2.InstanceProvisionFailedReason}},
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbclient *mock_nlb.MockNetworkLoadBalancerClient) {
fakeClient := fake.NewClientBuilder().WithObjects(getSecret()).Build()
if machineScope.Machine.ObjectMeta.Annotations == nil {
machineScope.Machine.ObjectMeta.Annotations = make(map[string]string)
}
machineScope.Machine.ObjectMeta.Annotations[infrastructurev1beta2.DeleteMachineOnInstanceTermination] = ""
t.deleteMachines = make([]clusterv1.Machine, 0)
machineScope.Client = interceptor.NewClient(fakeClient, interceptor.Funcs{
Delete: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteOption) error {
m := obj.(*clusterv1.Machine)
t.deleteMachines = append(t.deleteMachines, *m)
return nil
},
})
computeClient.EXPECT().GetInstance(gomock.Any(), gomock.Eq(core.GetInstanceRequest{
InstanceId: common.String("test"),
})).
@@ -258,13 +351,16 @@
},
}, nil)
},
validate: func(g *WithT, t *test, result ctrl.Result) {
g.Expect(len(t.deleteMachines)).To(Equal(1))
},
},
{
name: "control plane backend vnic attachments call error",
errorExpected: true,
expectedEvent: "server error",
conditionAssertion: []conditionAssertion{{infrastructurev1beta2.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrastructurev1beta2.InstanceIPAddressNotFound}},
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbclient *mock_nlb.MockNetworkLoadBalancerClient) {
machineScope.Machine.ObjectMeta.Labels = make(map[string]string)
machineScope.Machine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabel] = "true"
computeClient.EXPECT().GetInstance(gomock.Any(), gomock.Eq(core.GetInstanceRequest{
@@ -287,7 +383,7 @@
{
name: "control plane backend backend exists",
errorExpected: false,
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbClient *mock_nlb.MockNetworkLoadBalancerClient) {
machineScope.Machine.ObjectMeta.Labels = make(map[string]string)
machineScope.Machine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabel] = "true"
computeClient.EXPECT().GetInstance(gomock.Any(), gomock.Eq(core.GetInstanceRequest{
@@ -344,7 +440,7 @@
{
name: "backend creation",
errorExpected: false,
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbClient *mock_nlb.MockNetworkLoadBalancerClient) {
machineScope.Machine.ObjectMeta.Labels = make(map[string]string)
machineScope.Machine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabel] = "true"
computeClient.EXPECT().GetInstance(gomock.Any(), gomock.Eq(core.GetInstanceRequest{
@@ -419,7 +515,7 @@
{
name: "ip address exists",
errorExpected: false,
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbClient *mock_nlb.MockNetworkLoadBalancerClient) {
machineScope.Machine.ObjectMeta.Labels = make(map[string]string)
machineScope.OCIMachine.Status.Addresses = []clusterv1.MachineAddress{
{
@@ -478,7 +574,7 @@
{
name: "backend creation fails",
errorExpected: true,
testSpecificSetup: func(t *test, machineScope *scope.MachineScope, computeClient *mock_compute.MockComputeClient, vcnClient *mock_vcn.MockClient, nlbClient *mock_nlb.MockNetworkLoadBalancerClient) {
machineScope.Machine.ObjectMeta.Labels = make(map[string]string)
machineScope.Machine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabel] = "true"
computeClient.EXPECT().GetInstance(gomock.Any(), gomock.Eq(core.GetInstanceRequest{
@@ -557,9 +653,9 @@
g := NewWithT(t)
defer teardown(t, g)
setup(t, g)
tc.testSpecificSetup(&tc, ms, computeClient, vcnClient, nlbClient)
ctx := context.Background()
result, err := r.reconcileNormal(ctx, log.FromContext(ctx), ms)
if len(tc.conditionAssertion) > 0 {
expectConditions(g, ociMachine, tc.conditionAssertion)
}
Expand All @@ -571,6 +667,9 @@ func TestNormalReconciliationFunction(t *testing.T) {
if tc.expectedEvent != "" {
g.Eventually(recorder.Events).Should(Receive(ContainSubstring(tc.expectedEvent)))
}
if tc.validate != nil {
tc.validate(g, &tc, result)
}
})
}
}