diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go
index 13df195611fb..e9a2a537544e 100644
--- a/cmd/minikube/cmd/start.go
+++ b/cmd/minikube/cmd/start.go
@@ -178,6 +178,7 @@ func runStart(cmd *cobra.Command, args []string) {
 		upgradeExistingConfig(existing)
 	}
 
+	validateProfileName(existing)
 	validateSpecifiedDriver(existing)
 	validateKubernetesVersion(existing)
 
@@ -638,6 +639,29 @@ func hostDriver(existing *config.ClusterConfig) string {
 	return h.Driver.DriverName()
 }
 
+// validateProfileName makes sure that the new profile name is not a duplicate of any machine name in existing multi-node clusters.
+func validateProfileName(existing *config.ClusterConfig) {
+	if existing != nil {
+		return
+	}
+
+	profiles, err := config.ListProfilesLight()
+	if err != nil {
+		exit.Message(reason.InternalListConfig, "Unable to list profiles: {{.error}}", out.V{"error": err})
+	}
+	for _, p := range profiles {
+		for _, n := range p.Config.Nodes {
+			machineName := config.MachineName(*p.Config, n)
+			if ClusterFlagValue() == machineName {
+				out.WarningT("Profile name '{{.name}}' is duplicated with machine name '{{.machine}}' in profile '{{.profile}}'", out.V{"name": ClusterFlagValue(),
+					"machine": machineName,
+					"profile": p.Name})
+				exit.Message(reason.Usage, "Profile name should be unique")
+			}
+		}
+	}
+}
+
 // validateSpecifiedDriver makes sure that if a user has passed in a driver
 // it matches the existing cluster if there is one
 func validateSpecifiedDriver(existing *config.ClusterConfig) {
diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go
index a632f0485958..1eb7a54f921b 100644
--- a/pkg/minikube/config/profile.go
+++ b/pkg/minikube/config/profile.go
@@ -223,7 +223,25 @@ func ListProfiles(miniHome ...string) (validPs []*Profile, inValidPs []*Profile,
 	return validPs, inValidPs, nil
 }
 
-// removeDupes removes duplicates
+// ListProfilesLight returns profiles in the minikube home dir
+// Unlike `ListProfiles` this function doesn't try to get profiles from containers
+func ListProfilesLight(miniHome ...string) (ps []*Profile, err error) {
+	// try to get the list of profiles based on leftover evidence such as profile directories
+	pDirs, err := profileDirs(miniHome...)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, n := range pDirs {
+		p, err := LoadProfile(n, miniHome...)
+		if err == nil && p.IsValid() {
+			ps = append(ps, p)
+		}
+	}
+	return ps, nil
+}
+
+// removeDupes removes duplicates
 func removeDupes(profiles []string) []string {
 	// Use map to record duplicates as we find them.
 	seen := map[string]bool{}
@@ -291,7 +309,7 @@ func ProfileFolderPath(profile string, miniHome ...string) string {
 // MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names
 func MachineName(cc ClusterConfig, n Node) string {
 	// For single node cluster, default to back to old naming
-	if len(cc.Nodes) == 1 || n.ControlPlane {
+	if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.ControlPlane {
 		return cc.Name
 	}
 	return fmt.Sprintf("%s-%s", cc.Name, n.Name)
diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go
index ea32cc36a743..b33f8e65ff3f 100644
--- a/pkg/minikube/machine/cluster_test.go
+++ b/pkg/minikube/machine/cluster_test.go
@@ -146,10 +146,8 @@ func TestStartHostExists(t *testing.T) {
 	mc := defaultClusterConfig
 	mc.Name = ih.Name
 
-	n := config.Node{Name: ih.Name}
-
 	// This should pass without calling Create because the host exists already.
-	h, _, err := StartHost(api, &mc, &n)
+	h, _, err := StartHost(api, &mc, &(mc.Nodes[0]))
 	if err != nil {
 		t.Fatalf("Error starting host: %v", err)
 	}
diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go
index 672e52276d54..2af987f38f9e 100644
--- a/pkg/minikube/node/node.go
+++ b/pkg/minikube/node/node.go
@@ -38,6 +38,20 @@ const (
 
 // Add adds a new node config to an existing cluster.
 func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error {
+	profiles, err := config.ListProfilesLight()
+	if err != nil {
+		return err
+	}
+
+	machineName := config.MachineName(*cc, n)
+	for _, p := range profiles {
+		for _, existNode := range p.Config.Nodes {
+			if machineName == config.MachineName(*p.Config, existNode) {
+				return errors.Errorf("Node %s already exists in %s profile", machineName, p.Name)
+			}
+		}
+	}
+
 	if err := config.SaveNode(cc, &n); err != nil {
 		return errors.Wrap(err, "save node")
 	}
diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go
index bad0bf781789..59ef5f999bb5 100644
--- a/test/integration/multinode_test.go
+++ b/test/integration/multinode_test.go
@@ -48,6 +48,7 @@ func TestMultiNode(t *testing.T) {
 			{"DeleteNode", validateDeleteNodeFromMultiNode},
 			{"StopMultiNode", validateStopMultiNodeCluster},
 			{"RestartMultiNode", validateRestartMultiNodeCluster},
+			{"ValidateNameConflict", validatNameConflict},
 		}
 		for _, tc := range tests {
 			tc := tc
@@ -308,3 +309,39 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile
 		t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output())
 	}
 }
+
+func validatNameConflict(ctx context.Context, t *testing.T, profile string) {
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "node", "list", "-p", profile))
+	if err != nil {
+		t.Errorf("failed to run node list. args %q : %v", rr.Command(), err)
+	}
+	curNodeNum := strings.Count(rr.Stdout.String(), profile)
+
+	// Start a new profile whose name collides with an existing machine name. It's expected to fail.
+	profileName := fmt.Sprintf("%s-m0%d", profile, curNodeNum)
+	startArgs := append([]string{"start", "-p", profileName}, StartArgs()...)
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+	if err == nil {
+		t.Errorf("expected start profile command to fail. args %q", rr.Command())
+	}
+
+	// Start a temporary profile whose name will conflict with the machine name of the next node added below.
+	profileName = fmt.Sprintf("%s-m0%d", profile, curNodeNum+1)
+	startArgs = append([]string{"start", "-p", profileName}, StartArgs()...)
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+	if err != nil {
+		t.Errorf("failed to start profile. args %q : %v", rr.Command(), err)
+	}
+
+	// Add a node to the current cluster. It's expected to fail because its machine name collides with the temporary profile.
+	addArgs := []string{"node", "add", "-p", profile}
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), addArgs...))
+	if err == nil {
+		t.Errorf("expected add node command to fail. args %q", rr.Command())
+	}
+
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profileName))
+	if err != nil {
+		t.Logf("failed to clean up temporary profile. args %q : %v", rr.Command(), err)
+	}
+}
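Not part of the patch: the sketch below is a minimal, standalone approximation of the machine-naming rule that the `MachineName` change and the new conflict checks rely on. The `Node` and `ClusterConfig` structs are trimmed-down stand-ins for the real types in `pkg/minikube/config`, and the cluster/node names are invented for illustration.

```go
package main

import "fmt"

// Simplified stand-ins for config.Node and config.ClusterConfig
// (the real structs carry many more fields).
type Node struct {
	Name         string
	ControlPlane bool
}

type ClusterConfig struct {
	Name  string
	Nodes []Node
}

// machineName mirrors the patched config.MachineName: the bare cluster
// name is reused only for the control-plane node, or when the cluster has
// a single node and that node is the one being asked about; every other
// node gets the "<cluster>-<node>" form.
func machineName(cc ClusterConfig, n Node) string {
	if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.ControlPlane {
		return cc.Name
	}
	return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

func main() {
	cc := ClusterConfig{
		Name: "multinode-demo",
		Nodes: []Node{
			{Name: "m01", ControlPlane: true},
			{Name: "m02"},
		},
	}
	for _, n := range cc.Nodes {
		fmt.Println(machineName(cc, n))
	}
	// Output:
	// multinode-demo
	// multinode-demo-m02
	//
	// A separate profile named "multinode-demo-m02" would therefore collide
	// with the second node's machine, which is what validateProfileName and
	// the new check in node.Add reject.
}
```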