Aetos integration #1581
Open
Rohit-PX wants to merge 3 commits into libopenstorage:master from Rohit-PX:aetos_integration
+97 −0
Changes from all commits (3 commits)
@@ -13,6 +13,7 @@ import (
 	"github.com/portworx/sched-ops/k8s/storage"
 	"github.com/portworx/torpedo/drivers/node"
 	"github.com/portworx/torpedo/drivers/scheduler"
+	"github.com/portworx/torpedo/pkg/log"
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/require"
 	apps_api "k8s.io/api/apps/v1"
@@ -27,6 +28,7 @@ const (
 )

 func TestExtender(t *testing.T) {
+	dash.TestSetBegin(dash.TestSet)
 	err := setSourceKubeConfig()
 	require.NoError(t, err, "failed to set kubeconfig to source cluster: %v", err)

@@ -47,10 +49,12 @@ func TestExtender(t *testing.T) {
 }

 func noPVCTest(t *testing.T) {
+	dash.TestCaseBegin("Stork scheduler No PVC test", "Stork scheduler test for app with no PVC", "", nil)
 	var testrailID, testResult = 50785, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)

+	log.InfoD("Deploy app with no PVC")
 	ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "nopvctest"),
 		scheduler.ScheduleOptions{AppKeys: []string{"mysql-nopvc"}})
 	require.NoError(t, err, "Error scheduling task")
@@ -60,13 +64,16 @@ func noPVCTest(t *testing.T) {
 	require.NoError(t, err, "Error waiting for pod to get to running state")

 	destroyAndWait(t, ctxs)
+	log.InfoD("Deleted app with no PVC")

 	// If we are here then the test has passed
 	testResult = testResultPass
 	logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
 }

 func singlePVCTest(t *testing.T) {
+	dash.TestCaseBegin("Stork scheduler single PVC test", "Stork scheduler test for app with single PVC", "", nil)
+	log.InfoD("")
 	var testrailID, testResult = 50786, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -82,6 +89,7 @@ func singlePVCTest(t *testing.T) {
 	scheduledNodes, err := schedulerDriver.GetNodesForApp(ctxs[0])
 	require.NoError(t, err, "Error getting node for app")
 	require.Equal(t, 1, len(scheduledNodes), "App should be scheduled on one node")
+	log.InfoD("App with single PVC scheduled on one node")

 	volumeNames := getVolumeNames(t, ctxs[0])
 	require.Equal(t, 1, len(volumeNames), "Should only have one volume")
@@ -93,9 +101,11 @@ func singlePVCTest(t *testing.T) {
 	// If we are here then the test has passed
 	testResult = testResultPass
 	logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
+	log.InfoD("Deleted app with single PVC")
 }

 func statefulsetTest(t *testing.T) {
+	dash.TestCaseBegin("Stateful set extender test", "Stork scheduler test with stateful set application", "", nil)
 	var testrailID, testResult = 50787, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -133,6 +143,7 @@ func statefulsetTest(t *testing.T) {
 }

 func multiplePVCTest(t *testing.T) {
+	dash.TestCaseBegin("Multiple PVC test", "Stork scheduler test with app using multiple PVCS", "", nil)
 	var testrailID, testResult = 50788, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -161,6 +172,7 @@ func multiplePVCTest(t *testing.T) {
 }

 func driverNodeErrorTest(t *testing.T) {
+	dash.TestCaseBegin("Driver node error", "Induce error on driver node by stopping PX on the node", "", nil)
 	var testrailID, testResult = 50789, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -184,6 +196,7 @@ func driverNodeErrorTest(t *testing.T) {

 	time.Sleep(1 * time.Minute)

+	log.InfoD("Stopping volume driver on node: %s", scheduledNodes[0])
 	err = volumeDriver.StopDriver(scheduledNodes, false, nil)
 	require.NoError(t, err, "Error stopping driver on scheduled Node %+v", scheduledNodes[0])
 	stoppedNode := scheduledNodes[0]
@@ -204,11 +217,13 @@ func driverNodeErrorTest(t *testing.T) {

 	verifyScheduledNode(t, scheduledNodes[0], volumeNames)

+	log.InfoD("Starting volume driver on node: %s", stoppedNode)
 	err = volumeDriver.StartDriver(stoppedNode)
 	require.NoError(t, err, "Error starting driver on Node %+v", scheduledNodes[0])

 	err = volumeDriver.WaitDriverUpOnNode(stoppedNode, defaultWaitTimeout)
 	require.NoError(t, err, "Error waiting for Node to start %+v", scheduledNodes[0])
+	log.InfoD("Verified volume driver is up on node: %s", stoppedNode)

 	destroyAndWait(t, ctxs)

@@ -218,10 +233,12 @@ func driverNodeErrorTest(t *testing.T) {
 }

 func poolMaintenanceTest(t *testing.T) {
+	dash.TestCaseBegin("Pool Maintenance", "Stork scheduling test with pool in maintenance mode", "", nil)
 	var testrailID, testResult = 86080, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)

+	log.InfoD("Deploy App")
 	ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "pool-test"),
 		scheduler.ScheduleOptions{AppKeys: []string{"mysql-1-pvc"}})
 	require.NoError(t, err, "Error scheduling task")
@@ -239,6 +256,7 @@ func poolMaintenanceTest(t *testing.T) {

 	verifyScheduledNode(t, scheduledNodes[0], volumeNames)

+	log.InfoD("Enter pool in maintenance mode on node: %s", scheduledNodes[0])
 	err = volumeDriver.EnterPoolMaintenance(scheduledNodes[0])
 	require.NoError(t, err, "Error entering pool maintenance mode on scheduled node %+v", scheduledNodes[0])
 	poolMaintenanceNode := scheduledNodes[0]
@@ -274,10 +292,13 @@ func poolMaintenanceTest(t *testing.T) {
 }

 func pvcOwnershipTest(t *testing.T) {
+	dash.TestCaseBegin("PVC ownership", "Validating PVC ownership", "", nil)
+	defer dash.TestCaseEnd()
 	var testrailID, testResult = 50781, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)

+	log.InfoD("Schedule mysql app")
 	ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "ownershiptest"),
 		scheduler.ScheduleOptions{AppKeys: []string{"mysql-repl-1"}})
 	require.NoError(t, err, "Error scheduling task")
@@ -296,19 +317,22 @@ func pvcOwnershipTest(t *testing.T) {
 	verifyScheduledNode(t, scheduledNodes[0], volumeNames)

 	for _, spec := range ctxs[0].App.SpecList {
+		log.InfoD("Delete storage class.")
 		if obj, ok := spec.(*storage_api.StorageClass); ok {
 			err := storage.Instance().DeleteStorageClass(obj.Name)
 			require.NoError(t, err, "Error deleting storage class for mysql.")
 		}
 		if obj, ok := spec.(*v1.PersistentVolumeClaim); ok {
 			updatePVC, err := core.Instance().GetPersistentVolumeClaim(obj.Name, obj.Namespace)
 			require.NoError(t, err, "Error getting persistent volume claim.")
+			log.InfoD("Delete storage class annotation on PVC: %s", updatePVC.Name)
 			delete(updatePVC.Annotations, annotationStorageProvisioner)
 			_, err = core.Instance().UpdatePersistentVolumeClaim(updatePVC)
 			require.NoError(t, err, "Error updating annotations in PVC.")
 		}
 	}

+	log.InfoD("Stop volume driver on scheduled node: %s", scheduledNodes[0].Name)
 	err = volumeDriver.StopDriver(scheduledNodes, false, nil)
 	require.NoError(t, err, "Error stopping driver on scheduled Node %+v", scheduledNodes[0])
 	// make sure to start driver if test failed
@@ -332,6 +356,7 @@ func pvcOwnershipTest(t *testing.T) {
 	for _, pod := range depPods {
 		for _, cond := range pod.Status.Conditions {
 			if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse {
+				log.InfoD("Unscheduled pod found: %s", pod.Name)
 				errUnscheduledPod = true
 			}
 		}
@@ -355,10 +380,12 @@ func pvcOwnershipTest(t *testing.T) {
 }

 func antihyperconvergenceTest(t *testing.T) {
+	dash.TestCaseBegin("Stork scheduler antihyperconvergence test", "validate antihyperconvergence for app with shared V4 SVC volume", "", nil)
 	var testrailID, testResult = 85859, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)

+	log.InfoD("Schedule app")
 	ctxs, err := schedulerDriver.Schedule("antihyperconvergencetest",
 		scheduler.ScheduleOptions{
 			AppKeys: []string{"test-sv4-svc-repl1"},
@@ -388,6 +415,7 @@ func antihyperconvergenceTest(t *testing.T) {
 }

 func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) {
+	log.InfoD("Verify anti-hyperconvergence with prefer remote node only option")
 	var testrailID, testResult = 85860, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -398,6 +426,7 @@ func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) {
 		})
 	require.NoError(t, err, "Error scheduling task")
 	require.Equal(t, 1, len(ctxs), "Only one task should have started")
+	log.InfoD("App deployed")

 	logrus.Infof("Waiting for all Pods to come online")
 	err = schedulerDriver.WaitForRunning(ctxs[0], defaultWaitTimeout, defaultWaitInterval)
@@ -451,6 +480,7 @@ func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) {
 }

 func preferRemoteNodeFalseHyperconvergenceTest(t *testing.T) {
+	dash.TestCaseBegin("Stork scheduler prefer remote node antihyperconvergence test", "validate antihyperconvergence with preferRemoteNodeOnly flag", "", nil)
 	var testrailID, testResult = 92964, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -509,9 +539,11 @@ func verifyAntihyperconvergence(t *testing.T, appNodes []node.Node, volumes []st
 	for _, appNode := range appNodes {
 		require.Equal(t, highScore, scores[appNode.Name], "Scheduled node does not have the highest score")
 	}
+	log.InfoD("Verified scheduled node has the highest score")
 }

 func equalPodSpreadTest(t *testing.T) {
+	dash.TestCaseBegin("Stork scheduler equal pod spread test", "Verify equal pod spread is achieved using stork for an app", "", nil)

Review comment on the line above: same concern about first param as above. Similar concern everywhere else.

 	var testrailID, testResult = 84664, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -544,6 +576,7 @@ func equalPodSpreadTest(t *testing.T) {
 	require.Equal(t, 3, len(scheduledNodesMap), "App should be scheduled on 3 nodes, pod spread not achieved.")

 	logrus.Infof("Verifying that volume replicase are spread equally across worker nodes")
+	log.InfoD("Pod spread verified")

 	logrus.Info("Deleting apps created by the test")
 	destroyAndWait(t, ctxs)
Review comment: Shouldn't we be passing the exact name of the test as the first param instead of a brief description?
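Both review comments raise the same point: dash.TestCaseBegin currently receives a hand-written title rather than the exact Go test name. A minimal sketch of what the suggested change could look like follows, under a few assumptions: startTestCase is a hypothetical helper name, dash is the package-level Aetos dashboard handle already used in this file, and dash.TestCaseBegin / dash.TestCaseEnd keep the signatures shown in the diff above.

// Minimal sketch (not part of this PR): pass t.Name() as the test-case name
// and register dash.TestCaseEnd via t.Cleanup so the closing call cannot be
// forgotten. Assumes the same package-level `dash` handle and the
// TestCaseBegin/TestCaseEnd signatures that appear in the diff above.
func startTestCase(t *testing.T, description string) {
	dash.TestCaseBegin(t.Name(), description, "", nil)
	t.Cleanup(func() {
		dash.TestCaseEnd()
	})
}

// Hypothetical usage in one of the tests touched by this PR:
func noPVCTest(t *testing.T) {
	startTestCase(t, "Stork scheduler test for app with no PVC")
	// ... existing test body unchanged ...
}

With t.Name() as the first argument, the Aetos dashboard entry and the Go test output would share the same identifier, which appears to be what the reviewer is asking for.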