Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add integration tests #29

Merged
merged 11 commits into from
Jun 22, 2022
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .ci/controllers-test/machine-class-patch.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
{
"providerSpec": {
"tags": {
"kubernetes.io/role/integration-test": "1",
"kubernetes.io/role/node": null
}
}
}
30 changes: 30 additions & 0 deletions .ci/local_integration_test
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#!/usr/bin/env bash
# Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Runs the provider integration tests locally, installing Ginkgo on demand.
set -e

# For the test step concourse will set the following environment variables:
# SOURCE_PATH - path to component repository root directory.

cd test/integration/controller
if ! hash ginkgo; then
    # Install Ginkgo (test framework) to be able to execute the tests.
    echo "Fetching Ginkgo framework"
    GO111MODULE=off go get -u github.com/onsi/ginkgo/ginkgo
    echo "Successfully fetched Ginkgo framework"
fi

echo "Starting integration tests..."

ginkgo -v
8 changes: 8 additions & 0 deletions .ci/test
Original file line number Diff line number Diff line change
Expand Up @@ -53,4 +53,12 @@ else
echo ">>>>> Finished executing unit tests"
fi

# Run the integration tests unless explicitly skipped via SKIP_INTEGRATION_TESTS.
if [[ "${SKIP_INTEGRATION_TESTS}" != "" ]]; then
  echo ">>>>> Skipping integration tests"
else
  echo ">>>>> Invoking integration tests"
  .ci/pipeline_integration_test
  echo ">>>>> Finished executing integration tests"
fi

echo "CI tests have passed successfully"
4 changes: 4 additions & 0 deletions pkg/spi/spi.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,10 @@ type ECSClient interface {
RunInstances(request *ecs.RunInstancesRequest) (*ecs.RunInstancesResponse, error)
DescribeInstances(request *ecs.DescribeInstancesRequest) (*ecs.DescribeInstancesResponse, error)
DeleteInstance(request *ecs.DeleteInstanceRequest) (*ecs.DeleteInstanceResponse, error)
DescribeDisks(request *ecs.DescribeDisksRequest) (*ecs.DescribeDisksResponse, error)
DeleteDisk(request *ecs.DeleteDiskRequest) (*ecs.DeleteDiskResponse, error)
DescribeNetworkInterfaces(request *ecs.DescribeNetworkInterfacesRequest) (*ecs.DescribeNetworkInterfacesResponse, error)
DeleteNetworkInterface(request *ecs.DeleteNetworkInterfaceRequest) (*ecs.DeleteNetworkInterfaceResponse, error)
}

// PluginSPI provides an interface to deal with cloud provider session
Expand Down
16 changes: 16 additions & 0 deletions test/integration/controller/controller_suite_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
package controller

import (
"testing"
"time"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
. "github.com/onsi/gomega"
)

// TestController is the Go test entry point that hands control to the
// Ginkgo "Controller Suite".
func TestController(t *testing.T) {
	RegisterFailHandler(Fail)
	// SlowSpecThreshold is expressed in seconds. The previous
	// float64(300 * time.Second) converted the Duration's nanosecond count
	// (3e11) to float64, effectively disabling slow-spec reporting.
	config.DefaultReporterConfig.SlowSpecThreshold = (300 * time.Second).Seconds()
	RunSpecs(t, "Controller Suite")
}
48 changes: 48 additions & 0 deletions test/integration/controller/controller_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
/**
Overview
- Tests the provider specific Machine Controller
Prerequisites
- secret yaml file for the hyperscaler/provider passed as input
- control cluster and target clusters kube-config passed as input (optional)
BeforeSuite
- Check and create control cluster and target clusters if required
- Check and create crds ( machineclass, machines, machinesets and machinedeployment ) if required
using file available in kubernetes/crds directory of machine-controller-manager repo
- Start the Machine Controller manager ( as goroutine )
- apply secret resource for accessing the cloud provider service in the control cluster
- Create machineclass resource from file available in kubernetes directory of provider specific repo in control cluster
AfterSuite
- Delete the control and target clusters // As of now we are reusing the cluster so this is not required

Test: differentRegion Scheduling Strategy Test
1) Create machine in region other than where the target cluster exists. (e.g machine in eu-west-1 and target cluster exists in us-east-1)
Expected Output
- should fail because no cluster in same region exists

Test: sameRegion Scheduling Strategy Test
1) Create machine in same region/zone as target cluster and attach it to the cluster
Expected Output
- should successfully attach the machine to the target cluster (new node added)
2) Delete machine
Expected Output
- should successfully delete the machine from the target cluster (less one node)
**/

package controller_test

import (
"github.com/gardener/machine-controller-manager-provider-alicloud/test/integration/provider"
"github.com/gardener/machine-controller-manager/pkg/test/integration/common"
. "github.com/onsi/ginkgo"
)

// commons is the shared MCM integration-test framework wired to the Alicloud
// resources tracker. NOTE(review): 400 is presumably a timeout for the
// framework's eventually-style checks — confirm the unit against
// common.NewIntegrationTestFramework.
var commons = common.NewIntegrationTestFramework(&provider.ResourcesTrackerImpl{}, 400)

// Prepare clusters, CRDs, MCM and the machine class before any spec runs.
var _ = BeforeSuite(commons.SetupBeforeSuite)

// Tear down / clean up the resources created by the suite.
var _ = AfterSuite(commons.Cleanup)

// The actual machine-controller specs come from the shared framework.
var _ = Describe("Machine controllers test", func() {
	commons.BeforeEachCheck()
	commons.ControllerTests()
})
174 changes: 174 additions & 0 deletions test/integration/provider/alicloud.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,174 @@
package provider

import (
"context"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
api "github.com/gardener/machine-controller-manager-provider-alicloud/pkg/alicloud/apis"
"github.com/gardener/machine-controller-manager-provider-alicloud/pkg/spi"
"github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
"github.com/gardener/machine-controller-manager/pkg/util/provider/driver"

providerDriver "github.com/gardener/machine-controller-manager-provider-alicloud/pkg/alicloud"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/json"
"log"
)

/**
Orphaned Resources
- VMs:
Describe instances with specified tag name:<cluster-name>
Report/Print out instances found
Describe volumes attached to the instance (using instance id)
Report/Print out volumes found
Delete attached volumes found
Terminate instances found
- Disks:
Describe volumes with tag status:available
Report/Print out volumes found
Delete identified volumes
**/

// newSession builds an ECS client for the region found in the machine class'
// provider spec, authenticated with the supplied secret.
// On failure it logs the error and returns nil; callers must handle a nil client.
func newSession(machineClass *v1alpha1.MachineClass, secret *v1.Secret) spi.ECSClient {
	var (
		providerSpec *api.ProviderSpec
		sPI          spi.PluginSPIImpl
	)

	// ProviderSpec.Raw is already a []byte; no conversion needed.
	err := json.Unmarshal(machineClass.ProviderSpec.Raw, &providerSpec)
	if err != nil {
		// Without a decoded spec there is no region to connect to. Returning
		// early avoids the nil-pointer dereference the previous code hit when
		// it read providerSpec.Region after setting providerSpec = nil.
		log.Printf("Error occurred while performing unmarshal %s", err.Error())
		return nil
	}
	sess, err := sPI.NewECSClient(secret, providerSpec.Region)
	if err != nil {
		log.Printf("Error occurred while creating new session %s", err)
	}
	return sess
}

// getMachines returns the machines belonging to the given machine class, as
// reported by the driver's ListMachines call.
func getMachines(machineClass *v1alpha1.MachineClass, secretData map[string][]byte) ([]string, error) {
	var sPI spi.PluginSPIImpl
	driverProvider := providerDriver.NewAlicloudPlugin(&sPI)
	machineList, err := driverProvider.ListMachines(context.TODO(), &driver.ListMachinesRequest{
		MachineClass: machineClass,
		Secret:       &v1.Secret{Data: secretData},
	})
	if err != nil {
		return nil, err
	}
	// range over an empty list is a no-op, so the previous len() guard and
	// else-branch were redundant.
	var machines []string
	for _, machine := range machineList.MachineList {
		machines = append(machines, machine)
	}
	return machines, nil
}

// getOrphanesInstances returns list of Orphan resources that couldn't be deleted
himanshu-kun marked this conversation as resolved.
Show resolved Hide resolved
func getOrphanedInstances(tagName string, tagValue string, machineClass *v1alpha1.MachineClass, secretData map[string][]byte) ([]string, error) {
sess := newSession(machineClass, &v1.Secret{Data: secretData})
var instancesID []string
var tags = &[]ecs.DescribeInstancesTag{{Key: tagName, Value: tagValue}}
input := ecs.CreateDescribeInstancesRequest()
input.InstanceName = "instance-state-name"
input.Status = "running"
input.Tag = tags

himanshu-kun marked this conversation as resolved.
Show resolved Hide resolved
result, err := sess.DescribeInstances(input)
if err != nil {
return instancesID, err
}
for _, instance := range result.Instances.Instance {
instancesID = append(instancesID, instance.InstanceId)
}
return instancesID, nil
}

// getOrphanesDisks returns list of Orphan disks
func getOrphanedDisks(tagName string, tagValue string, machineClass *v1alpha1.MachineClass, secretData map[string][]byte) ([]string, error) {
sess := newSession(machineClass, &v1.Secret{Data: secretData})
var volumeID []string
var tags = &[]ecs.DescribeDisksTag{{Key: tagName, Value: tagValue}}
input := ecs.CreateDescribeDisksRequest()
input.Tag = tags
himanshu-kun marked this conversation as resolved.
Show resolved Hide resolved
result, err := sess.DescribeDisks(input)
if err != nil {
return volumeID, err
}
for _, disk := range result.Disks.Disk {
volumeID = append(volumeID, disk.DiskId)
}
return volumeID, nil
}

// getOrphanedNICs returns list of Orphan NICs
func getOrphanedNICs(tagName string, tagValue string, machineClass *v1alpha1.MachineClass, secretData map[string][]byte) ([]string, error) {
sess := newSession(machineClass, &v1.Secret{Data: secretData})
var NICIDs []string
himanshu-kun marked this conversation as resolved.
Show resolved Hide resolved
var tags = &[]ecs.DescribeNetworkInterfacesTag{{Key: tagName, Value: tagValue}}
input := ecs.CreateDescribeNetworkInterfacesRequest()
input.Tag = tags

result, err := sess.DescribeNetworkInterfaces(input)
if err != nil {
return NICIDs, err
}
for _, nic := range result.NetworkInterfaceSets.NetworkInterfaceSet {
NICIDs = append(NICIDs, nic.NetworkInterfaceId)
}
return NICIDs, nil
}

// cleanOrphanResources tries to delete the given orphaned instances, volumes
// and NICs, and returns the IDs of the resources whose deletion failed.
func cleanOrphanResources(instanceIds []string, volumeIds []string, NICIds []string, machineClass *v1alpha1.MachineClass, secretData map[string][]byte) (delErrInstanceId []string, delErrVolumeIds []string, delErrNICs []string) {
	for _, id := range instanceIds {
		if terminateInstance(id, machineClass, secretData) != nil {
			delErrInstanceId = append(delErrInstanceId, id)
		}
	}

	for _, id := range volumeIds {
		if deleteVolume(id, machineClass, secretData) != nil {
			delErrVolumeIds = append(delErrVolumeIds, id)
		}
	}

	for _, id := range NICIds {
		if deleteNIC(id, machineClass, secretData) != nil {
			delErrNICs = append(delErrNICs, id)
		}
	}

	return
}

// deleteNIC deletes the network interface with the given ID.
func deleteNIC(nicId string, machineClass *v1alpha1.MachineClass, secretData map[string][]byte) error {
	sess := newSession(machineClass, &v1.Secret{Data: secretData})
	// Use the SDK constructor so the embedded RpcRequest is initialized with
	// the ECS product/version info; a bare struct literal is not routable.
	input := ecs.CreateDeleteNetworkInterfaceRequest()
	input.NetworkInterfaceId = nicId
	_, err := sess.DeleteNetworkInterface(input)
	return err
}

// deleteVolume deletes the disk with the given ID.
func deleteVolume(diskId string, machineClass *v1alpha1.MachineClass, secretData map[string][]byte) error {
	sess := newSession(machineClass, &v1.Secret{Data: secretData})
	// Use the SDK constructor so the embedded RpcRequest is initialized with
	// the ECS product/version info; a bare struct literal is not routable.
	input := ecs.CreateDeleteDiskRequest()
	input.DiskId = diskId
	_, err := sess.DeleteDisk(input)
	return err
}

// terminateInstance deletes the VM instance with the given ID.
func terminateInstance(instanceId string, machineClass *v1alpha1.MachineClass, secretData map[string][]byte) error {
	sess := newSession(machineClass, &v1.Secret{Data: secretData})
	// Use the SDK constructor so the embedded RpcRequest is initialized with
	// the ECS product/version info; a bare struct literal is not routable.
	input := ecs.CreateDeleteInstanceRequest()
	input.InstanceId = instanceId
	_, err := sess.DeleteInstance(input)
	return err
}
90 changes: 90 additions & 0 deletions test/integration/provider/rti.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
package provider

import (
"fmt"

v1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
)

// ResourcesTrackerImpl type keeps a note of resources which are initialized in MCM IT suite and are used in provider IT
type ResourcesTrackerImpl struct {
	// MachineClass is the machine class the suite creates machines from; its
	// provider spec carries the region used when probing for resources.
	MachineClass *v1alpha1.MachineClass
	// SecretData holds the cloud-provider credential payload passed to every probe.
	SecretData map[string][]byte
	// ClusterName identifies the test cluster the resources belong to.
	ClusterName string
}

// InitializeResourcesTracker records the machine class, secret data and
// cluster name on the tracker, probes the provider for leftovers from earlier
// runs, and attempts to delete them. It returns an error if the probe fails
// or if any orphaned resource (or pre-existing MCM machine) remains, since
// the suite must start from a clean slate.
func (r *ResourcesTrackerImpl) InitializeResourcesTracker(machineClass *v1alpha1.MachineClass, secretData map[string][]byte, clusterName string) error {
	r.MachineClass = machineClass
	r.SecretData = secretData
	r.ClusterName = clusterName

	initialVMs, initialVolumes, initialMachines, initialNICs, err := r.probeResources()
	if err != nil {
		fmt.Printf("Error in initial probe of orphaned resources: %s", err.Error())
		return err
	}

	delErrVMs, delErrVolumes, delErrNICs := cleanOrphanResources(initialVMs, initialVolumes, initialNICs, r.MachineClass, r.SecretData)

	// Pre-existing MCM machines are reported alongside undeletable orphans:
	// either situation means the environment is not clean.
	if delErrVMs != nil || delErrVolumes != nil || initialMachines != nil || delErrNICs != nil {
		return fmt.Errorf("error in cleaning the following orphan resources. Clean them up before proceeding with the test.\nvirtual machines: %v\ndisks: %v\nmcm machines: %v\nnics: %v", delErrVMs, delErrVolumes, initialMachines, delErrNICs)
	}

	return nil
}

// probeResources will look for resources currently available and returns them
func (r *ResourcesTrackerImpl) probeResources() ([]string, []string, []string, []string, error) {
// Check for VM instances with matching tags/labels
// Describe volumes attached to VM instance & delete the volumes
// Finally delete the VM instance
himanshu-kun marked this conversation as resolved.
Show resolved Hide resolved

integrationTestTag := "tag:kubernetes.io/role/integration-test"
integrationTestTagValue := "1"

orphanVMs, err := getOrphanedInstances(integrationTestTag, integrationTestTagValue, r.MachineClass, r.SecretData)
if err != nil {
return orphanVMs, nil, nil, nil, err
}

// Check for available volumes in cloud provider with tag/label [Status:available]
orphanVols, err := getOrphanedDisks(integrationTestTag, integrationTestTagValue, r.MachineClass, r.SecretData)
if err != nil {
return orphanVMs, orphanVols, nil, nil, err
}

availMachines, err := getMachines(r.MachineClass, r.SecretData)
if err != nil {
return orphanVMs, orphanVols, availMachines, nil, err
}

orphanNICs, err := getOrphanedNICs(integrationTestTag, integrationTestTagValue, r.MachineClass, r.SecretData)

return orphanVMs, orphanVols, availMachines, orphanNICs, err

}

// IsOrphanedResourcesAvailable checks whether there are any orphaned resources left.
//If yes, then prints them and returns true. If not, then returns false
func (r *ResourcesTrackerImpl) IsOrphanedResourcesAvailable() bool {
afterTestExecutionVMs, afterTestExecutionAvailDisks, afterTestExecutionAvailmachines, afterTestExecutionNICs, err := r.probeResources()
if err != nil {
fmt.Printf("Error probing orphaned resources: %s", err.Error())
return true
}

if afterTestExecutionVMs != nil || afterTestExecutionAvailDisks != nil || afterTestExecutionAvailmachines != nil || afterTestExecutionNICs != nil {
fmt.Printf("The following resources are orphans ... trying to delete them \n")
himanshu-kun marked this conversation as resolved.
Show resolved Hide resolved
fmt.Printf("Virtual Machines: %v\nVolumes: %v\nNICs: %v\nMCM Machines %v\n ", afterTestExecutionVMs, afterTestExecutionAvailDisks, afterTestExecutionNICs, afterTestExecutionAvailmachines)
return true
}

return false
}