-
Notifications
You must be signed in to change notification settings - Fork 367
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
upcoming: [M3-8960] - Update Kubernetes version upgrade components for LKE-E #11415
Changes from 9 commits
29f4052
efb61c7
492a7ed
26ee5e3
0ec7415
b2ab4fb
09d286a
d9a3600
eb8e9b1
a1d8be9
8ea979e
7340553
1151b7b
0f35afa
9077ebc
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -29,6 +29,7 @@ import { | |
mockGetControlPlaneACL, | ||
mockUpdateControlPlaneACLError, | ||
mockGetControlPlaneACLError, | ||
mockGetTieredKubernetesVersions, | ||
} from 'support/intercepts/lke'; | ||
import { | ||
mockGetLinodeType, | ||
|
@@ -133,7 +134,7 @@ describe('LKE cluster updates', () => { | |
* - Confirms that Kubernetes upgrade prompt is shown when not up-to-date. | ||
* - Confirms that Kubernetes upgrade prompt is hidden when up-to-date. | ||
*/ | ||
it('can upgrade kubernetes version from the details page', () => { | ||
it('can upgrade standard kubernetes version from the details page', () => { | ||
const oldVersion = '1.25'; | ||
const newVersion = '1.26'; | ||
|
||
|
@@ -235,7 +236,7 @@ describe('LKE cluster updates', () => { | |
ui.toast.findByMessage('Recycle started successfully.'); | ||
}); | ||
|
||
it('can upgrade the kubernetes version from the landing page', () => { | ||
it('can upgrade the standard kubernetes version from the landing page', () => { | ||
const oldVersion = '1.25'; | ||
const newVersion = '1.26'; | ||
|
||
|
@@ -294,6 +295,191 @@ describe('LKE cluster updates', () => { | |
cy.findByText(newVersion).should('be.visible'); | ||
}); | ||
|
||
/* | ||
mjac0bs marked this conversation as resolved.
Show resolved
Hide resolved
|
||
* - Confirms UI flow of upgrading Kubernetes enterprise version using mocked API requests. | ||
* - Confirms that Kubernetes upgrade prompt is shown when not up-to-date. | ||
* - Confirms that Kubernetes upgrade prompt is hidden when up-to-date. | ||
*/ | ||
it('can upgrade enterprise kubernetes version from the details page', () => { | ||
const oldVersion = '1.31.1+lke1'; | ||
const newVersion = '1.31.1+lke2'; | ||
Comment on lines
+304
to
+305
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Are new LKE-E versions denoted by a higher number after There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Both? Versioning is what confuses me the most right now, tbh. The LKE folks said earlier:
So there are still minor version increases, and patch increases, and then there's this new(ly exposed?) "lke" version and that can change too. And from some LKE-E docs:
I think that last part is consistent with the test spec here. We have a patch version that gets a new lke version, so that is considered the latest patch to update to. |
||
|
||
// TODO LKE-E: Remove once feature is in GA | ||
mockAppendFeatureFlags({ | ||
lkeEnterprise: { enabled: true, la: true }, | ||
}); | ||
|
||
const mockCluster = kubernetesClusterFactory.build({ | ||
k8s_version: oldVersion, | ||
tier: 'enterprise', | ||
}); | ||
|
||
const mockClusterUpdated = { | ||
...mockCluster, | ||
k8s_version: newVersion, | ||
}; | ||
|
||
const upgradePrompt = | ||
'A new version of Kubernetes is available (1.31.1+lke2).'; | ||
|
||
const upgradeNotes = [ | ||
'Once the upgrade is complete you will need to recycle all nodes in your cluster', | ||
// Confirm that the old version and new version are both shown. | ||
oldVersion, | ||
newVersion, | ||
]; | ||
|
||
mockGetCluster(mockCluster).as('getCluster'); | ||
mockGetTieredKubernetesVersions('enterprise', [ | ||
{ id: newVersion, tier: 'enterprise' }, | ||
{ id: oldVersion, tier: 'enterprise' }, | ||
]).as('getTieredVersions'); | ||
mockGetClusterPools(mockCluster.id, mockNodePools).as('getNodePools'); | ||
mockUpdateCluster(mockCluster.id, mockClusterUpdated).as('updateCluster'); | ||
mockGetDashboardUrl(mockCluster.id); | ||
mockGetApiEndpoints(mockCluster.id); | ||
|
||
cy.visitWithLogin(`/kubernetes/clusters/${mockCluster.id}`); | ||
cy.wait(['@getCluster', '@getNodePools', '@getTieredVersions']); | ||
|
||
// Confirm that upgrade prompt is shown. | ||
cy.findByText(upgradePrompt).should('be.visible'); | ||
ui.button | ||
.findByTitle('Upgrade Version') | ||
.should('be.visible') | ||
.should('be.enabled') | ||
.click(); | ||
|
||
ui.dialog | ||
.findByTitle( | ||
`Step 1: Upgrade ${mockCluster.label} to Kubernetes ${newVersion}` | ||
) | ||
.should('be.visible') | ||
.within(() => { | ||
upgradeNotes.forEach((note: string) => { | ||
cy.findAllByText(note, { exact: false }).should('be.visible'); | ||
}); | ||
|
||
ui.button | ||
.findByTitle('Upgrade Version') | ||
.should('be.visible') | ||
.should('be.enabled') | ||
.click(); | ||
}); | ||
|
||
// Wait for API response and assert toast message is shown. | ||
cy.wait('@updateCluster'); | ||
|
||
// Verify the banner goes away because the version update has happened | ||
cy.findByText(upgradePrompt).should('not.exist'); | ||
|
||
mockRecycleAllNodes(mockCluster.id).as('recycleAllNodes'); | ||
|
||
const stepTwoDialogTitle = 'Step 2: Recycle All Cluster Nodes'; | ||
|
||
ui.dialog | ||
.findByTitle(stepTwoDialogTitle) | ||
.should('be.visible') | ||
.within(() => { | ||
cy.findByText('Kubernetes version has been updated successfully.', { | ||
exact: false, | ||
}).should('be.visible'); | ||
|
||
cy.findByText( | ||
'For the changes to take full effect you must recycle the nodes in your cluster.', | ||
{ exact: false } | ||
).should('be.visible'); | ||
|
||
ui.button | ||
.findByTitle('Recycle All Nodes') | ||
.should('be.visible') | ||
.should('be.enabled') | ||
.click(); | ||
}); | ||
|
||
// Verify clicking the "Recycle All Nodes" makes an API call | ||
cy.wait('@recycleAllNodes'); | ||
|
||
// Verify the upgrade dialog closed | ||
cy.findByText(stepTwoDialogTitle).should('not.exist'); | ||
|
||
// Verify the banner is still gone after the flow | ||
cy.findByText(upgradePrompt).should('not.exist'); | ||
|
||
// Verify the version is correct after the update | ||
cy.findByText(`Version ${newVersion}`); | ||
|
||
ui.toast.findByMessage('Recycle started successfully.'); | ||
}); | ||
|
||
it('can upgrade the enterprise kubernetes version from the landing page', () => { | ||
const oldVersion = '1.31.1+lke1'; | ||
const newVersion = '1.32.1+lke2'; | ||
|
||
// TODO LKE-E: Remove once feature is in GA | ||
mockAppendFeatureFlags({ | ||
lkeEnterprise: { enabled: true, la: true }, | ||
}); | ||
|
||
const cluster = kubernetesClusterFactory.build({ | ||
k8s_version: oldVersion, | ||
tier: 'enterprise', | ||
}); | ||
|
||
const updatedCluster = { ...cluster, k8s_version: newVersion }; | ||
|
||
mockGetClusters([cluster]).as('getClusters'); | ||
mockGetTieredKubernetesVersions('enterprise', [ | ||
{ id: newVersion, tier: 'enterprise' }, | ||
{ id: oldVersion, tier: 'enterprise' }, | ||
]).as('getTieredVersions'); | ||
mockUpdateCluster(cluster.id, updatedCluster).as('updateCluster'); | ||
mockRecycleAllNodes(cluster.id).as('recycleAllNodes'); | ||
|
||
cy.visitWithLogin(`/kubernetes/clusters`); | ||
|
||
cy.wait(['@getClusters', '@getTieredVersions']); | ||
|
||
cy.findByText(oldVersion).should('be.visible'); | ||
|
||
cy.findByText('UPGRADE') | ||
.should('be.visible') | ||
.should('be.enabled') | ||
.click(); | ||
|
||
ui.dialog | ||
.findByTitle( | ||
`Step 1: Upgrade ${cluster.label} to Kubernetes ${newVersion}` | ||
) | ||
.should('be.visible'); | ||
|
||
mockGetClusters([updatedCluster]).as('getClusters'); | ||
|
||
ui.button | ||
.findByTitle('Upgrade Version') | ||
.should('be.visible') | ||
.should('be.enabled') | ||
.click(); | ||
|
||
cy.wait(['@updateCluster', '@getClusters']); | ||
|
||
ui.dialog | ||
.findByTitle('Step 2: Recycle All Cluster Nodes') | ||
.should('be.visible'); | ||
|
||
ui.button | ||
.findByTitle('Recycle All Nodes') | ||
.should('be.visible') | ||
.should('be.enabled') | ||
.click(); | ||
|
||
cy.wait('@recycleAllNodes'); | ||
|
||
ui.toast.assertMessage('Recycle started successfully.'); | ||
|
||
cy.findByText(newVersion).should('be.visible'); | ||
}); | ||
|
||
/* | ||
* - Confirms node, node pool, and cluster recycling UI flow using mocked API data. | ||
* - Confirms that user is warned that recycling recreates nodes and may take a while. | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -9,6 +9,7 @@ import type { | |
KubernetesControlPlaneACLPayload, | ||
KubernetesDashboardResponse, | ||
KubernetesEndpointResponse, | ||
KubernetesTieredVersion, | ||
KubernetesVersion, | ||
PoolNodeResponse, | ||
} from '@linode/api-v4/lib/kubernetes/types'; | ||
|
@@ -78,6 +79,20 @@ export const kubernetesVersionFactory = Factory.Sync.makeFactory<KubernetesVersi | |
} | ||
); | ||
|
||
export const kubernetesStandardTierVersionFactory = Factory.Sync.makeFactory<KubernetesTieredVersion>( | ||
{ | ||
id: '1.31', | ||
tier: 'standard', | ||
} | ||
); | ||
|
||
export const kubernetesEnterpriseTierVersionFactory = Factory.Sync.makeFactory<KubernetesTieredVersion>( | ||
{ | ||
id: 'v1.31.1+lke1', | ||
tier: 'enterprise', | ||
} | ||
); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Would it make any sense to use There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Done in 0f35afa. 👍🏼 I incremented the minor version in the standard tier factory, and the patch in the enterprise tier factory. Tests continue to pass and I think this will be consistent enough with possible data/not overly complicated. |
||
|
||
export const kubernetesControlPlaneACLOptionsFactory = Factory.Sync.makeFactory<ControlPlaneACLOptions>( | ||
{ | ||
addresses: { | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Any thoughts about unit tests to confirm that the Upgrade chip is present when a new LKE version is available and is absent when there is no new version? Don't think it needs to block this PR but might be good to have as a small standalone test ticket. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Good call out - yeah, let's do this in a standalone test ticket. It looks like the LKE landing page spec isn't testing version upgrades chip at all currently. I made M3-9023 and will get that done as part of this LKE-E epic! |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,4 @@ | ||
import { Chip } from '@linode/ui'; | ||
import { KubeNodePoolResponse, KubernetesCluster } from '@linode/api-v4'; | ||
import Grid from '@mui/material/Unstable_Grid2'; | ||
import * as React from 'react'; | ||
import { Link } from 'react-router-dom'; | ||
|
@@ -9,20 +8,20 @@ import { DateTimeDisplay } from 'src/components/DateTimeDisplay'; | |
import { Hidden } from 'src/components/Hidden'; | ||
import { TableCell } from 'src/components/TableCell'; | ||
import { TableRow } from 'src/components/TableRow'; | ||
import { | ||
useAllKubernetesNodePoolQuery, | ||
useKubernetesVersionQuery, | ||
} from 'src/queries/kubernetes'; | ||
import { useAllKubernetesNodePoolQuery } from 'src/queries/kubernetes'; | ||
import { useRegionsQuery } from 'src/queries/regions/regions'; | ||
import { useSpecificTypes } from 'src/queries/types'; | ||
import { extendTypesQueryResult } from 'src/utilities/extendType'; | ||
|
||
import { | ||
getNextVersion, | ||
getTotalClusterMemoryCPUAndStorage, | ||
useLkeStandardOrEnterpriseVersions, | ||
} from '../kubeUtils'; | ||
import { ClusterActionMenu } from './ClusterActionMenu'; | ||
|
||
import type { KubeNodePoolResponse, KubernetesCluster } from '@linode/api-v4'; | ||
|
||
const useStyles = makeStyles()(() => ({ | ||
clusterRow: { | ||
'&:before': { | ||
|
@@ -64,14 +63,17 @@ export const KubernetesClusterRow = (props: Props) => { | |
const { cluster, openDeleteDialog, openUpgradeDialog } = props; | ||
const { classes } = useStyles(); | ||
|
||
const { data: versions } = useKubernetesVersionQuery(); | ||
const { data: pools } = useAllKubernetesNodePoolQuery(cluster.id); | ||
const typesQuery = useSpecificTypes(pools?.map((pool) => pool.type) ?? []); | ||
const types = extendTypesQueryResult(typesQuery); | ||
const { data: regions } = useRegionsQuery(); | ||
|
||
const region = regions?.find((r) => r.id === cluster.region); | ||
|
||
const { versions } = useLkeStandardOrEnterpriseVersions( | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We default to standard if the tier is undefined (LKE-E feature not enabled) because all current clusters are 'standard'. |
||
cluster.tier ?? 'standard' | ||
); | ||
|
||
const nextVersion = getNextVersion(cluster.k8s_version, versions ?? []); | ||
|
||
const hasUpgrade = nextVersion !== null; | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,6 +19,7 @@ import { | |
getLatestVersion, | ||
useAPLAvailability, | ||
useIsLkeEnterpriseEnabled, | ||
useLkeStandardOrEnterpriseVersions, | ||
} from 'src/features/Kubernetes/kubeUtils'; | ||
import { useAccount } from 'src/queries/account/account'; | ||
import { | ||
|
@@ -28,9 +29,7 @@ import { | |
import { | ||
useCreateKubernetesClusterBetaMutation, | ||
useCreateKubernetesClusterMutation, | ||
useKubernetesTieredVersionsQuery, | ||
useKubernetesTypesQuery, | ||
useKubernetesVersionQuery, | ||
} from 'src/queries/kubernetes'; | ||
import { useRegionsQuery } from 'src/queries/regions/regions'; | ||
import { useAllTypes } from 'src/queries/types'; | ||
|
@@ -130,31 +129,16 @@ export const CreateCluster = () => { | |
mutateAsync: createKubernetesClusterBeta, | ||
} = useCreateKubernetesClusterBetaMutation(); | ||
|
||
const { | ||
data: _versionData, | ||
isError: versionLoadError, | ||
isLoading: versionLoading, | ||
} = useKubernetesVersionQuery(); | ||
|
||
const { | ||
data: enterpriseTierVersionData, | ||
isLoading: enterpriseTierVersionDataIsLoading, | ||
} = useKubernetesTieredVersionsQuery('enterprise'); | ||
|
||
const { | ||
isLkeEnterpriseLAFeatureEnabled, | ||
isLkeEnterpriseLAFlagEnabled, | ||
} = useIsLkeEnterpriseEnabled(); | ||
|
||
/** | ||
* If LKE-E is enabled, use the new /versions/<tier> endpoint data, which supports enterprise tiers. | ||
* If LKE-E is disabled, use the data from the existing /versions endpoint. | ||
* @todo LKE-E: Clean up use of versionData once LKE-E is in GA. | ||
*/ | ||
const versionData = | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This logic was moved inside |
||
isLkeEnterpriseLAFeatureEnabled && selectedTier === 'enterprise' | ||
? enterpriseTierVersionData | ||
: _versionData; | ||
const { | ||
isLoadingVersions, | ||
versions: versionData, | ||
versionsError, | ||
} = useLkeStandardOrEnterpriseVersions(selectedTier); | ||
|
||
const versions = (versionData ?? []).map((thisVersion) => ({ | ||
label: thisVersion.id, | ||
|
@@ -303,7 +287,7 @@ export const CreateCluster = () => { | |
selectedRegionID: selectedRegionId, | ||
}); | ||
|
||
if (typesError || regionsError || versionLoadError) { | ||
if (typesError || regionsError || versionsError) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Updates with error and loading variables were just to update to the values returned from the hook. Now the tiered version query is disabled when the LKE-E feature is disabled (and isFetching will be false), so we shouldn't need to check |
||
// This information is necessary to create a Cluster. Otherwise, show an error state. | ||
return <ErrorState errorText="An unexpected error occurred." />; | ||
} | ||
|
@@ -389,14 +373,10 @@ export const CreateCluster = () => { | |
disableClearable={!!version} | ||
errorText={errorMap.k8s_version} | ||
label="Kubernetes Version" | ||
loading={isLoadingVersions} | ||
options={versions} | ||
placeholder={' '} | ||
value={versions.find((v) => v.value === version) ?? null} | ||
loading={ | ||
versionLoading || | ||
(isLkeEnterpriseLAFeatureEnabled && | ||
enterpriseTierVersionDataIsLoading) | ||
} | ||
/> | ||
{showAPL && ( | ||
<> | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks for adding this distinction! (+ the other test)