Aetos integration #1581

Open · wants to merge 3 commits into master

53 changes: 53 additions & 0 deletions test/integration_test/common_test.go
@@ -4,8 +4,10 @@
package integrationtest

import (
"crypto/tls"
"flag"
"fmt"
"net/http"
"os"
"path"
"sort"
@@ -40,6 +42,7 @@ import (
_ "github.com/portworx/torpedo/drivers/volume/generic_csi"
_ "github.com/portworx/torpedo/drivers/volume/linstor"
_ "github.com/portworx/torpedo/drivers/volume/portworx"
"github.com/portworx/torpedo/pkg/aetosutil"
"github.com/portworx/torpedo/pkg/log"
testrailutils "github.com/portworx/torpedo/pkg/testrailuttils"
"github.com/sirupsen/logrus"
@@ -135,6 +138,12 @@ const (
testrailUserNameVar = "TESTRAIL_USERNAME"
testrailPasswordVar = "TESTRAIL_PASSWORD"
testrailMilestoneVar = "TESTRAIL_MILESTONE"

testUser = "nouser"
testProduct = "Stork"
testDescription = ""
testBranch = ""
testType = "stork integration test"
)

var nodeDriver node.Driver
@@ -166,6 +175,8 @@ var testrailSetupSuccessful bool
var bidirectionalClusterpair bool
var unidirectionalClusterpair bool

var dash *aetosutil.Dashboard

func TestSnapshot(t *testing.T) {
t.Run("testSnapshot", testSnapshot)
t.Run("testSnapshotRestore", testSnapshotRestore)
@@ -321,6 +332,22 @@ func setup() error {
return fmt.Errorf("TEST_MODE environment variable not set for stork: %v", err)
}
SetupTestRail()
dash = aetosutil.Get()
dash.IsEnabled = true
if !isDashboardReachable() {
log.Infof("Aetos Dashboard is not reachable. Disabling dashboard reporting.")
dash.IsEnabled = false
}

testSet := aetosutil.TestSet{
User: testUser,
Product: testProduct,
Description: testDescription,
Branch: testBranch,
TestType: testType,
Tags: make(map[string]string),
Status: aetosutil.NOTSTARTED,
}
dash.TestSet = &testSet

return nil
}
@@ -387,6 +414,7 @@ func verifyScheduledNode(t *testing.T, appNode node.Node, volumes []string) {
}
}
require.Equal(t, true, found, "Scheduled node not found in driver node list. DriverNodes: %v ScheduledNode: %v", driverNodes, appNode)
log.InfoD("Scheduled node for app found: %s", appNode.Name)

scores := getScoringBasedOnHyperconvergence(t, driverNodes, volumes)

@@ -399,6 +427,7 @@ func verifyScheduledNode(t *testing.T, appNode node.Node, volumes []string) {

logrus.Infof("Scores: %v", scores)
require.Equal(t, highScore, scores[appNode.Name], "Scheduled node does not have the highest score")
log.InfoD("Verified scheduled node for app has highest score: %s", appNode.Name)
}

// Helper function to get scoring of driverNodes based on hyper-convergence
@@ -1891,3 +1920,27 @@ func getSupportedOperatorCRMapping() map[string][]meta_v1.APIResource {

return operatorAppToCRMap
}

func isDashboardReachable() bool {
timeout := 15 * time.Second
client := &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
},
}
aboutURL := strings.Replace(aetosutil.DashBoardBaseURL, "dashboard", "datamodel/about", -1)
log.Infof("Checking URL: %s", aboutURL)
response, err := client.Get(aboutURL)

if err != nil {
log.Warn(err.Error())
return false
}
defer response.Body.Close()
return response.StatusCode == http.StatusOK
}
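
Taken together, the additions above wire Aetos reporting into setup(): the singleton dashboard is fetched with aetosutil.Get(), probed with isDashboardReachable(), and given a TestSet, while the extender tests below bracket their work with TestSetBegin, TestCaseBegin, and TestCaseEnd. The following is a minimal sketch of that usage pattern, using only the aetosutil calls visible in this diff; the test name, description, and body are hypothetical placeholders and not part of the change.

package integrationtest

import (
	"testing"

	"github.com/portworx/torpedo/pkg/aetosutil"
	"github.com/portworx/torpedo/pkg/log"
)

// Sketch only: shows how a test could report to the Aetos dashboard with the
// helpers introduced in this PR. Names and descriptions are placeholders.
func TestDashboardReportingExample(t *testing.T) {
	dash := aetosutil.Get()
	dash.IsEnabled = true
	dash.TestSet = &aetosutil.TestSet{
		User:    "nouser",
		Product: "Stork",
		Tags:    make(map[string]string),
		Status:  aetosutil.NOTSTARTED,
	}

	// Bracket the suite and the individual case, mirroring TestExtender and
	// the per-test TestCaseBegin calls in extender_test.go.
	dash.TestSetBegin(dash.TestSet)
	dash.TestCaseBegin("Example test", "Illustrative description", "", nil)
	defer dash.TestCaseEnd()

	log.InfoD("Example test body runs here")
}
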
33 changes: 33 additions & 0 deletions test/integration_test/extender_test.go
@@ -13,6 +13,7 @@ import (
"github.com/portworx/sched-ops/k8s/storage"
"github.com/portworx/torpedo/drivers/node"
"github.com/portworx/torpedo/drivers/scheduler"
"github.com/portworx/torpedo/pkg/log"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
apps_api "k8s.io/api/apps/v1"
@@ -27,6 +28,7 @@ const (
)

func TestExtender(t *testing.T) {
dash.TestSetBegin(dash.TestSet)
err := setSourceKubeConfig()
require.NoError(t, err, "failed to set kubeconfig to source cluster: %v", err)

@@ -47,10 +49,12 @@ func TestExtender(t *testing.T) {
}

func noPVCTest(t *testing.T) {
dash.TestCaseBegin("Stork scheduler No PVC test", "Stork scheduler test for app with no PVC", "", nil)
var testrailID, testResult = 50785, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

log.InfoD("Deploy app with no PVC")
ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "nopvctest"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-nopvc"}})
require.NoError(t, err, "Error scheduling task")
@@ -60,13 +64,16 @@ func noPVCTest(t *testing.T) {
require.NoError(t, err, "Error waiting for pod to get to running state")

destroyAndWait(t, ctxs)
log.InfoD("Deleted app with no PVC")

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func singlePVCTest(t *testing.T) {
dash.TestCaseBegin("Stork scheduler single PVC test", "Stork scheduler test for app with single PVC", "", nil)
log.InfoD("")
var testrailID, testResult = 50786, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)
@@ -82,6 +89,7 @@ func singlePVCTest(t *testing.T) {
scheduledNodes, err := schedulerDriver.GetNodesForApp(ctxs[0])
require.NoError(t, err, "Error getting node for app")
require.Equal(t, 1, len(scheduledNodes), "App should be scheduled on one node")
log.InfoD("App with single PVC scheduled on one node")

volumeNames := getVolumeNames(t, ctxs[0])
require.Equal(t, 1, len(volumeNames), "Should only have one volume")
@@ -93,9 +101,11 @@ func singlePVCTest(t *testing.T) {
// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
log.InfoD("Deleted app with single PVC")
}

func statefulsetTest(t *testing.T) {
dash.TestCaseBegin("Stateful set extender test", "Stork scheduler test with stateful set application", "", nil)
var testrailID, testResult = 50787, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)
@@ -133,6 +143,7 @@ func statefulsetTest(t *testing.T) {
}

func multiplePVCTest(t *testing.T) {
dash.TestCaseBegin("Multiple PVC test", "Stork scheduler test with app using multiple PVCS", "", nil)
var testrailID, testResult = 50788, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)
@@ -161,6 +172,7 @@ func multiplePVCTest(t *testing.T) {
}

func driverNodeErrorTest(t *testing.T) {
dash.TestCaseBegin("Driver node error", "Induce error on driver node by stopping PX on the node", "", nil)
var testrailID, testResult = 50789, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)
@@ -184,6 +196,7 @@ func driverNodeErrorTest(t *testing.T) {

time.Sleep(1 * time.Minute)

log.InfoD("Stopping volume driver on node: %s", scheduledNodes[0])
err = volumeDriver.StopDriver(scheduledNodes, false, nil)
require.NoError(t, err, "Error stopping driver on scheduled Node %+v", scheduledNodes[0])
stoppedNode := scheduledNodes[0]
@@ -204,11 +217,13 @@ func driverNodeErrorTest(t *testing.T) {

verifyScheduledNode(t, scheduledNodes[0], volumeNames)

log.InfoD("Starting volume driver on node: %s", stoppedNode)
err = volumeDriver.StartDriver(stoppedNode)
require.NoError(t, err, "Error starting driver on Node %+v", scheduledNodes[0])

err = volumeDriver.WaitDriverUpOnNode(stoppedNode, defaultWaitTimeout)
require.NoError(t, err, "Error waiting for Node to start %+v", scheduledNodes[0])
log.InfoD("Verified volume driver is up on node: %s", stoppedNode)

destroyAndWait(t, ctxs)

@@ -218,10 +233,12 @@ func driverNodeErrorTest(t *testing.T) {
}

func poolMaintenanceTest(t *testing.T) {
dash.TestCaseBegin("Pool Maintenance", "Stork scheduling test with pool in maintenance mode", "", nil)
var testrailID, testResult = 86080, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

log.InfoD("Deploy App")
ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "pool-test"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-1-pvc"}})
require.NoError(t, err, "Error scheduling task")
@@ -239,6 +256,7 @@ func poolMaintenanceTest(t *testing.T) {

verifyScheduledNode(t, scheduledNodes[0], volumeNames)

log.InfoD("Enter pool in maintenance mode on node: %s", scheduledNodes[0])
err = volumeDriver.EnterPoolMaintenance(scheduledNodes[0])
require.NoError(t, err, "Error entering pool maintenance mode on scheduled node %+v", scheduledNodes[0])
poolMaintenanceNode := scheduledNodes[0]
@@ -274,10 +292,13 @@ func poolMaintenanceTest(t *testing.T) {
}

func pvcOwnershipTest(t *testing.T) {
dash.TestCaseBegin("PVC ownership", "Validating PVC ownership", "", nil)
defer dash.TestCaseEnd()
var testrailID, testResult = 50781, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

log.InfoD("Schedule mysql app")
ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "ownershiptest"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-repl-1"}})
require.NoError(t, err, "Error scheduling task")
@@ -296,19 +317,22 @@ func pvcOwnershipTest(t *testing.T) {
verifyScheduledNode(t, scheduledNodes[0], volumeNames)

for _, spec := range ctxs[0].App.SpecList {
log.InfoD("Delete storage class.")
if obj, ok := spec.(*storage_api.StorageClass); ok {
err := storage.Instance().DeleteStorageClass(obj.Name)
require.NoError(t, err, "Error deleting storage class for mysql.")
}
if obj, ok := spec.(*v1.PersistentVolumeClaim); ok {
updatePVC, err := core.Instance().GetPersistentVolumeClaim(obj.Name, obj.Namespace)
require.NoError(t, err, "Error getting persistent volume claim.")
log.InfoD("Delete storage class annotation on PVC: %s", updatePVC.Name)
delete(updatePVC.Annotations, annotationStorageProvisioner)
_, err = core.Instance().UpdatePersistentVolumeClaim(updatePVC)
require.NoError(t, err, "Error updating annotations in PVC.")
}
}

log.InfoD("Stop volume driver on scheduled node: %s", scheduledNodes[0].Name)
err = volumeDriver.StopDriver(scheduledNodes, false, nil)
require.NoError(t, err, "Error stopping driver on scheduled Node %+v", scheduledNodes[0])
// make sure to start driver if test failed
@@ -332,6 +356,7 @@ func pvcOwnershipTest(t *testing.T) {
for _, pod := range depPods {
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse {
log.InfoD("Unscheduled pod found: %s", pod.Name)
errUnscheduledPod = true
}
}
@@ -355,10 +380,12 @@ func pvcOwnershipTest(t *testing.T) {
}

func antihyperconvergenceTest(t *testing.T) {
dash.TestCaseBegin("Stork scheduler antihyperconvergence test", "validate antihyperconvergence for app with shared V4 SVC volume", "", nil)
var testrailID, testResult = 85859, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

log.InfoD("Schedule app")
ctxs, err := schedulerDriver.Schedule("antihyperconvergencetest",
scheduler.ScheduleOptions{
AppKeys: []string{"test-sv4-svc-repl1"},
@@ -388,6 +415,7 @@ func antihyperconvergenceTest(t *testing.T) {
}

func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) {
log.InfoD("Verify anti-hyperconvergence with prefer remote node only option")
var testrailID, testResult = 85860, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)
@@ -398,6 +426,7 @@ func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) {
})
require.NoError(t, err, "Error scheduling task")
require.Equal(t, 1, len(ctxs), "Only one task should have started")
log.InfoD("App deployed")

logrus.Infof("Waiting for all Pods to come online")
err = schedulerDriver.WaitForRunning(ctxs[0], defaultWaitTimeout, defaultWaitInterval)
@@ -451,6 +480,7 @@ func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) {
}

func preferRemoteNodeFalseHyperconvergenceTest(t *testing.T) {
dash.TestCaseBegin("Stork scheduler prefer remote node antihyperconvergence test", "validate antihyperconvergence with preferRemoteNodeOnly flag", "", nil)
Contributor review comment:
Shouldn't we be passing the exact name of the test as the first param instead of a brief description?
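
For illustration only, the reviewer's suggestion could look like the following hypothetical variant of the call added below, reusing the Go test name that these tests already log via t.Name(); this snippet is not part of the diff.

// Hypothetical: pass the exact test name instead of a free-form title.
dash.TestCaseBegin(t.Name(), "validate antihyperconvergence with preferRemoteNodeOnly flag", "", nil)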

var testrailID, testResult = 92964, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)
@@ -509,9 +539,11 @@ func verifyAntihyperconvergence(t *testing.T, appNodes []node.Node, volumes []string) {
for _, appNode := range appNodes {
require.Equal(t, highScore, scores[appNode.Name], "Scheduled node does not have the highest score")
}
log.InfoD("Verified scheduled node has the highest score")
}

func equalPodSpreadTest(t *testing.T) {
dash.TestCaseBegin("Stork scheduler equal pod spread test", "Verify equal pod spread is achieved using stork for an app", "", nil)
Contributor review comment:
Same concern about the first param as above; similar concern everywhere else.

var testrailID, testResult = 84664, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)
@@ -544,6 +576,7 @@ func equalPodSpreadTest(t *testing.T) {
require.Equal(t, 3, len(scheduledNodesMap), "App should be scheduled on 3 nodes, pod spread not achieved.")

logrus.Infof("Verifying that volume replicase are spread equally across worker nodes")
log.InfoD("Pod spread verified")

logrus.Info("Deleting apps created by the test")
destroyAndWait(t, ctxs)