diff --git a/api/types/constants.go b/api/types/constants.go index 3b82ae0eabf9b..283401615171f 100644 --- a/api/types/constants.go +++ b/api/types/constants.go @@ -1301,6 +1301,9 @@ const ( // provisioning system get added to when provisioned in KEEP mode. This prevents // already existing users from being tampered with or deleted. TeleportKeepGroup = "teleport-keep" + // TeleportStaticGroup is a default group that static host users get added to. This + // prevents already existing users from being tampered with or deleted. + TeleportStaticGroup = "teleport-static" ) const ( diff --git a/integration/hostuser_test.go b/integration/hostuser_test.go index f6f5403496efd..4497189c62d96 100644 --- a/integration/hostuser_test.go +++ b/integration/hostuser_test.go @@ -36,13 +36,18 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + labelv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/label/v1" + userprovisioningpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/userprovisioning/v2" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/api/types/userprovisioning" "github.com/gravitational/teleport/integration/helpers" "github.com/gravitational/teleport/lib/auth/testauthority" "github.com/gravitational/teleport/lib/backend" "github.com/gravitational/teleport/lib/backend/lite" + "github.com/gravitational/teleport/lib/service/servicecfg" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/services/local" "github.com/gravitational/teleport/lib/srv" @@ -63,7 +68,7 @@ func TestRootHostUsersBackend(t *testing.T) { } t.Run("Test CreateGroup", func(t *testing.T) { - t.Cleanup(cleanupUsersAndGroups(nil, []string{testgroup})) + t.Cleanup(func() { cleanupUsersAndGroups(nil, []string{testgroup}) }) err := usersbk.CreateGroup(testgroup, "") require.NoError(t, err) @@ 
-75,7 +80,7 @@ func TestRootHostUsersBackend(t *testing.T) { }) t.Run("Test CreateUser and group", func(t *testing.T) { - t.Cleanup(cleanupUsersAndGroups([]string{testuser}, []string{testgroup})) + t.Cleanup(func() { cleanupUsersAndGroups([]string{testuser}, []string{testgroup}) }) err := usersbk.CreateGroup(testgroup, "") require.NoError(t, err) @@ -106,7 +111,7 @@ func TestRootHostUsersBackend(t *testing.T) { }) t.Run("Test DeleteUser", func(t *testing.T) { - t.Cleanup(cleanupUsersAndGroups([]string{testuser}, nil)) + t.Cleanup(func() { cleanupUsersAndGroups([]string{testuser}, nil) }) err := usersbk.CreateUser(testuser, nil, "", "", "") require.NoError(t, err) _, err = usersbk.Lookup(testuser) @@ -121,7 +126,7 @@ func TestRootHostUsersBackend(t *testing.T) { t.Run("Test GetAllUsers", func(t *testing.T) { checkUsers := []string{"teleport-usera", "teleport-userb", "teleport-userc"} - t.Cleanup(cleanupUsersAndGroups(checkUsers, nil)) + t.Cleanup(func() { cleanupUsersAndGroups(checkUsers, nil) }) for _, u := range checkUsers { err := usersbk.CreateUser(u, []string{}, "", "", "") @@ -153,7 +158,7 @@ func TestRootHostUsersBackend(t *testing.T) { }) t.Run("Test CreateHomeDirectory does not follow symlinks", func(t *testing.T) { - t.Cleanup(cleanupUsersAndGroups([]string{testuser}, nil)) + t.Cleanup(func() { cleanupUsersAndGroups([]string{testuser}, nil) }) err := usersbk.CreateUser(testuser, nil, "", "", "") require.NoError(t, err) @@ -196,19 +201,21 @@ func requireUserInGroups(t *testing.T, u *user.User, requiredGroups []string) { require.Subset(t, getUserGroups(t, u), requiredGroups) } -func cleanupUsersAndGroups(users []string, groups []string) func() { - return func() { - for _, group := range groups { - cmd := exec.Command("groupdel", group) - err := cmd.Run() - if err != nil { - log.Debugf("Error deleting group %s: %s", group, err) - } - } - for _, user := range users { - host.UserDel(user) +func cleanupUsersAndGroups(users []string, groups []string) { + for _, 
group := range groups { + cmd := exec.Command("groupdel", group) + err := cmd.Run() + if err != nil { + log.Debugf("Error deleting group %s: %s", group, err) } } + for _, user := range users { + host.UserDel(user) + } +} + +func sudoersPath(username, uuid string) string { + return fmt.Sprintf("/etc/sudoers.d/teleport-%s-%s", uuid, username) } func TestRootHostUsers(t *testing.T) { @@ -223,11 +230,11 @@ func TestRootHostUsers(t *testing.T) { users := srv.NewHostUsers(context.Background(), presence, "host_uuid") testGroups := []string{"group1", "group2"} - closer, err := users.UpsertUser(testuser, services.HostUsersInfo{Groups: testGroups, Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP}) + closer, err := users.UpsertUser(testuser, services.HostUsersInfo{Groups: testGroups, Mode: services.HostUserModeDrop}) require.NoError(t, err) testGroups = append(testGroups, types.TeleportDropGroup) - t.Cleanup(cleanupUsersAndGroups([]string{testuser}, testGroups)) + t.Cleanup(func() { cleanupUsersAndGroups([]string{testuser}, testGroups) }) u, err := user.Lookup(testuser) require.NoError(t, err) @@ -249,13 +256,13 @@ func TestRootHostUsers(t *testing.T) { require.ErrorIs(t, err, user.UnknownGroupIdError(testGID)) closer, err := users.UpsertUser(testuser, services.HostUsersInfo{ - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + Mode: services.HostUserModeDrop, UID: testUID, GID: testGID, }) require.NoError(t, err) - t.Cleanup(cleanupUsersAndGroups([]string{testuser}, []string{types.TeleportDropGroup})) + t.Cleanup(func() { cleanupUsersAndGroups([]string{testuser}, []string{types.TeleportDropGroup}) }) group, err := user.LookupGroupId(testGID) require.NoError(t, err) @@ -277,10 +284,10 @@ func TestRootHostUsers(t *testing.T) { expectedHome := filepath.Join("/home", testuser) require.NoDirExists(t, expectedHome) - closer, err := users.UpsertUser(testuser, services.HostUsersInfo{Mode: types.CreateHostUserMode_HOST_USER_MODE_KEEP}) + closer, err := 
users.UpsertUser(testuser, services.HostUsersInfo{Mode: services.HostUserModeKeep}) require.NoError(t, err) require.Nil(t, closer) - t.Cleanup(cleanupUsersAndGroups([]string{testuser}, []string{types.TeleportKeepGroup})) + t.Cleanup(func() { cleanupUsersAndGroups([]string{testuser}, []string{types.TeleportKeepGroup}) }) u, err := user.Lookup(testuser) require.NoError(t, err) @@ -299,17 +306,13 @@ func TestRootHostUsers(t *testing.T) { users := srv.NewHostUsers(context.Background(), presence, uuid) sudoers := srv.NewHostSudoers(uuid) - sudoersPath := func(username, uuid string) string { - return fmt.Sprintf("/etc/sudoers.d/teleport-%s-%s", uuid, username) - } - t.Cleanup(func() { os.Remove(sudoersPath(testuser, uuid)) host.UserDel(testuser) }) closer, err := users.UpsertUser(testuser, services.HostUsersInfo{ - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + Mode: services.HostUserModeDrop, }) require.NoError(t, err) err = sudoers.WriteSudoers(testuser, []string{"ALL=(ALL) ALL"}) @@ -338,20 +341,22 @@ func TestRootHostUsers(t *testing.T) { deleteableUsers := []string{"teleport-user1", "teleport-user2", "teleport-user3"} for _, user := range deleteableUsers { - _, err := users.UpsertUser(user, services.HostUsersInfo{Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP}) + _, err := users.UpsertUser(user, services.HostUsersInfo{Mode: services.HostUserModeDrop}) require.NoError(t, err) } // this user should not be in the service group as it was created with mode keep. 
closer, err := users.UpsertUser("teleport-user4", services.HostUsersInfo{ - Mode: types.CreateHostUserMode_HOST_USER_MODE_KEEP, + Mode: services.HostUserModeKeep, }) require.NoError(t, err) require.Nil(t, closer) - t.Cleanup(cleanupUsersAndGroups( - []string{"teleport-user1", "teleport-user2", "teleport-user3", "teleport-user4"}, - []string{types.TeleportDropGroup, types.TeleportKeepGroup})) + t.Cleanup(func() { + cleanupUsersAndGroups( + []string{"teleport-user1", "teleport-user2", "teleport-user3", "teleport-user4"}, + []string{types.TeleportDropGroup, types.TeleportKeepGroup}) + }) err = users.DeleteAllUsers() require.NoError(t, err) @@ -393,13 +398,13 @@ func TestRootHostUsers(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - t.Cleanup(cleanupUsersAndGroups([]string{testuser}, slices.Concat(tc.firstGroups, tc.secondGroups))) + t.Cleanup(func() { cleanupUsersAndGroups([]string{testuser}, slices.Concat(tc.firstGroups, tc.secondGroups)) }) // Verify that the user is created with the first set of groups. users := srv.NewHostUsers(context.Background(), presence, "host_uuid") _, err := users.UpsertUser(testuser, services.HostUsersInfo{ Groups: tc.firstGroups, - Mode: types.CreateHostUserMode_HOST_USER_MODE_KEEP, + Mode: services.HostUserModeKeep, }) require.NoError(t, err) u, err := user.Lookup(testuser) @@ -409,7 +414,7 @@ func TestRootHostUsers(t *testing.T) { // Verify that the user is updated with the second set of groups. _, err = users.UpsertUser(testuser, services.HostUsersInfo{ Groups: tc.secondGroups, - Mode: types.CreateHostUserMode_HOST_USER_MODE_KEEP, + Mode: services.HostUserModeKeep, }) require.NoError(t, err) u, err = user.Lookup(testuser) @@ -505,7 +510,7 @@ func TestRootLoginAsHostUser(t *testing.T) { require.NoError(t, err) // Run an SSH session to completion. 
- t.Cleanup(cleanupUsersAndGroups([]string{login}, groups)) + t.Cleanup(func() { cleanupUsersAndGroups([]string{login}, groups) }) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) t.Cleanup(cancel) err = client.SSH(ctx, tc.command) @@ -525,3 +530,151 @@ func TestRootLoginAsHostUser(t *testing.T) { }) } } + +func TestRootStaticHostUsers(t *testing.T) { + utils.RequireRoot(t) + // Create test instance. + privateKey, publicKey, err := testauthority.New().GenerateKeyPair() + require.NoError(t, err) + + instance := helpers.NewInstance(t, helpers.InstanceConfig{ + ClusterName: helpers.Site, + HostID: uuid.New().String(), + NodeName: Host, + Priv: privateKey, + Pub: publicKey, + Log: utils.NewLoggerForTests(), + }) + + require.NoError(t, instance.Create(t, nil, false, nil)) + require.NoError(t, instance.Start()) + t.Cleanup(func() { + require.NoError(t, instance.StopAll()) + }) + nodeCfg := servicecfg.MakeDefaultConfig() + nodeCfg.SSH.Labels = map[string]string{ + "foo": "bar", + } + _, err = instance.StartNode(nodeCfg) + require.NoError(t, err) + + // Create host user resources. 
+ groups := []string{"foo", "bar"} + goodLogin := utils.GenerateLocalUsername(t) + goodUser := userprovisioning.NewStaticHostUser(goodLogin, &userprovisioningpb.StaticHostUserSpec{ + Matchers: []*userprovisioningpb.Matcher{ + { + NodeLabels: []*labelv1.Label{ + { + Name: "foo", + Values: []string{"bar"}, + }, + }, + Groups: groups, + Sudoers: []string{"All = (root) NOPASSWD: /usr/bin/systemctl restart nginx.service"}, + }, + }, + }) + nonMatchingLogin := utils.GenerateLocalUsername(t) + nonMatchingUser := userprovisioning.NewStaticHostUser(nonMatchingLogin, &userprovisioningpb.StaticHostUserSpec{ + Matchers: []*userprovisioningpb.Matcher{ + { + NodeLabels: []*labelv1.Label{ + { + Name: "foo", + Values: []string{"baz"}, + }, + }, + Groups: groups, + }, + }, + }) + conflictingLogin := utils.GenerateLocalUsername(t) + conflictingUser := userprovisioning.NewStaticHostUser(conflictingLogin, &userprovisioningpb.StaticHostUserSpec{ + Matchers: []*userprovisioningpb.Matcher{ + { + NodeLabels: []*labelv1.Label{ + { + Name: "foo", + Values: []string{"bar"}, + }, + }, + Groups: groups, + }, + { + NodeLabelsExpression: `labels["foo"] == "bar"`, + Groups: groups, + }, + }, + }) + + clt := instance.Process.GetAuthServer() + for _, hostUser := range []*userprovisioningpb.StaticHostUser{goodUser, nonMatchingUser, conflictingUser} { + _, err := clt.UpsertStaticHostUser(context.Background(), hostUser) + require.NoError(t, err) + } + t.Cleanup(func() { cleanupUsersAndGroups([]string{goodLogin, nonMatchingLogin, conflictingLogin}, groups) }) + + // Test that a node picks up new host users from the cache. + testStaticHostUsers(t, nodeCfg.HostUUID, goodLogin, nonMatchingLogin, conflictingLogin, groups) + cleanupUsersAndGroups([]string{goodLogin, nonMatchingLogin, conflictingLogin}, groups) + + require.NoError(t, instance.StopNodes()) + _, err = instance.StartNode(nodeCfg) + require.NoError(t, err) + // Test that a new node picks up existing host users on startup. 
+ testStaticHostUsers(t, nodeCfg.HostUUID, goodLogin, nonMatchingLogin, conflictingLogin, groups) + + // Check that a deleted resource doesn't affect the host user. + require.NoError(t, clt.DeleteStaticHostUser(context.Background(), goodLogin)) + var lookupErr error + var homeDirErr error + var sudoerErr error + require.Never(t, func() bool { + _, lookupErr = user.Lookup(goodLogin) + _, homeDirErr = os.Stat("/home/" + goodLogin) + _, sudoerErr = os.Stat(sudoersPath(goodLogin, nodeCfg.HostUUID)) + return lookupErr != nil || homeDirErr != nil || sudoerErr != nil + }, 5*time.Second, time.Second, + "lookup err: %v\nhome dir err: %v\nsudoer err: %v\n", + lookupErr, homeDirErr, sudoerErr) +} + +func testStaticHostUsers(t *testing.T, nodeUUID, goodLogin, nonMatchingLogin, conflictingLogin string, groups []string) { + t.Cleanup(func() { + os.Remove(sudoersPath(goodLogin, nodeUUID)) + }) + + // Check that the good user was correctly applied. + require.EventuallyWithT(t, func(collect *assert.CollectT) { + // Check that the user was created. + existingUser, err := user.Lookup(goodLogin) + assert.NoError(collect, err) + assert.DirExists(collect, existingUser.HomeDir) + // Check that the user has the right groups, including teleport-static. + groupIDs, err := existingUser.GroupIds() + assert.NoError(collect, err) + userGroups := make([]string, 0, len(groupIDs)) + for _, gid := range groupIDs { + group, err := user.LookupGroupId(gid) + assert.NoError(collect, err) + userGroups = append(userGroups, group.Name) + } + assert.Subset(collect, userGroups, groups) + assert.Contains(collect, userGroups, types.TeleportStaticGroup) + // Check that the sudoers file was created. + assert.FileExists(collect, sudoersPath(goodLogin, nodeUUID)) + }, 10*time.Second, time.Second) + + // Check that the nonmatching and conflicting users were not created. 
+ var nonmatchingUserErr error + var conflictingUserErr error + require.Never(t, func() bool { + _, nonmatchingUserErr = user.Lookup(nonMatchingLogin) + _, conflictingUserErr = user.Lookup(conflictingLogin) + return nonmatchingUserErr == nil && conflictingUserErr == nil + }, 5*time.Second, time.Second, + "nonmatching user error: %v\nconflicting user error: %v\n", + nonmatchingUserErr, conflictingUserErr, + ) +} diff --git a/lib/authz/permissions.go b/lib/authz/permissions.go index fc16841e602c5..a235e11b8edb1 100644 --- a/lib/authz/permissions.go +++ b/lib/authz/permissions.go @@ -1007,6 +1007,7 @@ func definitionForBuiltinRole(clusterName string, recConfig readonly.SessionReco types.NewRule(types.KindLock, services.RO()), types.NewRule(types.KindNetworkRestrictions, services.RO()), types.NewRule(types.KindConnectionDiagnostic, services.RW()), + types.NewRule(types.KindStaticHostUser, services.RO()), }, }, }) diff --git a/lib/cache/cache_test.go b/lib/cache/cache_test.go index e9211aed2cdb0..fd5c22d00b50b 100644 --- a/lib/cache/cache_test.go +++ b/lib/cache/cache_test.go @@ -152,6 +152,7 @@ type testFuncs153[T types.Resource153] struct { cacheGet func(context.Context, string) (T, error) cacheList func(context.Context) ([]T, error) update func(context.Context, T) error + delete func(context.Context, string) error deleteAll func(context.Context) error } @@ -2808,18 +2809,21 @@ func testResources153[T types.Resource153](t *testing.T, p *testPack, funcs test protocmp.Transform(), } + assertCacheContents := func(expected []T) { + require.EventuallyWithT(t, func(collect *assert.CollectT) { + out, err := funcs.cacheList(ctx) + assert.NoError(collect, err) + assert.Len(collect, cmp.Diff(expected, out, cmpOpts...), 0) + }, 2*time.Second, 250*time.Millisecond) + } + // Check that the resource is now in the backend. 
out, err := funcs.list(ctx) require.NoError(t, err) require.Empty(t, cmp.Diff([]T{r}, out, cmpOpts...)) // Wait until the information has been replicated to the cache. - require.Eventually(t, func() bool { - // Make sure the cache has a single resource in it. - out, err = funcs.cacheList(ctx) - assert.NoError(t, err) - return len(cmp.Diff([]T{r}, out, cmpOpts...)) == 0 - }, time.Second*2, time.Millisecond*250) + assertCacheContents([]T{r}) // cacheGet is optional as not every resource implements it if funcs.cacheGet != nil { @@ -2844,24 +2848,25 @@ func testResources153[T types.Resource153](t *testing.T, p *testPack, funcs test require.Empty(t, cmp.Diff([]T{r}, out, cmpOpts...)) // Check that information has been replicated to the cache. - require.Eventually(t, func() bool { - // Make sure the cache has a single resource in it. - out, err = funcs.cacheList(ctx) - assert.NoError(t, err) - return len(cmp.Diff([]T{r}, out, cmpOpts...)) == 0 - }, time.Second*2, time.Millisecond*250) + assertCacheContents([]T{r}) + + if funcs.delete != nil { + // Add a second resource. + r2, err := funcs.newResource("test-resource-2") + require.NoError(t, err) + require.NoError(t, funcs.create(ctx, r2)) + assertCacheContents([]T{r, r2}) + // Check that only one resource is deleted. + require.NoError(t, funcs.delete(ctx, r2.GetMetadata().Name)) + assertCacheContents([]T{r}) + } // Remove all resources from the backend. err = funcs.deleteAll(ctx) require.NoError(t, err) // Check that information has been replicated to the cache. - require.EventuallyWithT(t, func(t *assert.CollectT) { - // Check that the cache is now empty. 
- out, err = funcs.cacheList(ctx) - assert.NoError(t, err) - assert.Empty(t, out) - }, time.Second*2, time.Millisecond*250) + assertCacheContents([]T{}) } func TestRelativeExpiry(t *testing.T) { diff --git a/lib/cache/collections.go b/lib/cache/collections.go index d12b8d9961306..bb6f43178c3c8 100644 --- a/lib/cache/collections.go +++ b/lib/cache/collections.go @@ -2369,16 +2369,7 @@ func (staticHostUserExecutor) deleteAll(ctx context.Context, cache *Cache) error } func (staticHostUserExecutor) delete(ctx context.Context, cache *Cache, resource types.Resource) error { - var hostUser *userprovisioningpb.StaticHostUser - r, ok := resource.(types.Resource153Unwrapper) - if ok { - hostUser, ok = r.Unwrap().(*userprovisioningpb.StaticHostUser) - if ok { - err := cache.staticHostUsersCache.DeleteStaticHostUser(ctx, hostUser.Metadata.Name) - return trace.Wrap(err) - } - } - return trace.BadParameter("unknown StaticHostUser type, expected %T, got %T", hostUser, resource) + return trace.Wrap(cache.staticHostUsersCache.DeleteStaticHostUser(ctx, resource.GetName())) } func (staticHostUserExecutor) isSingleton() bool { return false } diff --git a/lib/service/service.go b/lib/service/service.go index d6f69213ea059..586f6115cd7be 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -3078,6 +3078,22 @@ func (process *TeleportProcess) initSSH() error { } defer func() { warnOnErr(process.ExitContext(), s.Close(), logger) }() + if s.GetCreateHostUser() { + staticHostUserReconciler, err := srv.NewStaticHostUserHandler(srv.StaticHostUserHandlerConfig{ + Events: conn.Client, + StaticHostUser: conn.Client.StaticHostUserClient(), + Server: s, + Users: s.GetHostUsers(), + Sudoers: s.GetHostSudoers(), + }) + if err != nil { + return trace.Wrap(err) + } + process.RegisterFunc("static.host.user.reconciler", func() error { + return staticHostUserReconciler.Run(process.ExitContext()) + }) + } + var resumableServer *resumption.SSHServerWrapper if 
os.Getenv("TELEPORT_UNSTABLE_DISABLE_SSH_RESUMPTION") == "" { resumableServer = resumption.NewSSHServerWrapper(resumption.SSHServerWrapperConfig{ diff --git a/lib/services/access_checker.go b/lib/services/access_checker.go index b4ccd0cfc0c48..8a630cd3a7a0c 100644 --- a/lib/services/access_checker.go +++ b/lib/services/access_checker.go @@ -966,6 +966,34 @@ func (a *accessChecker) DesktopGroups(s types.WindowsDesktop) ([]string, error) return utils.StringsSliceFromSet(groups), nil } +// HostUserMode determines how host users should be created. +type HostUserMode int + +const ( + // HostUserModeUndefined is the default mode, for when the mode couldn't be + // determined from a types.CreateHostUserMode. + HostUserModeUndefined HostUserMode = iota + // HostUserModeKeep creates a home directory and persists after a session ends. + HostUserModeKeep + // HostUserModeDrop does not create a home directory, and it is removed after + // a session ends. + HostUserModeDrop + // HostUserModeStatic creates a home directory and exists independently of a + // session. + HostUserModeStatic +) + +func convertHostUserMode(mode types.CreateHostUserMode) HostUserMode { + switch mode { + case types.CreateHostUserMode_HOST_USER_MODE_KEEP: + return HostUserModeKeep + case types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP: + return HostUserModeDrop + default: + return HostUserModeUndefined + } +} + // HostUsersInfo keeps information about groups and sudoers entries // for a particular host user type HostUsersInfo struct { @@ -973,7 +1001,7 @@ type HostUsersInfo struct { Groups []string // Mode determines if a host user should be deleted after a session // ends or not. 
- Mode types.CreateHostUserMode + Mode HostUserMode // UID is the UID that the host user will be created with UID string // GID is the GID that the host user will be created with @@ -1052,7 +1080,7 @@ func (a *accessChecker) HostUsers(s types.Server) (*HostUsersInfo, error) { return &HostUsersInfo{ Groups: utils.StringsSliceFromSet(groups), - Mode: mode, + Mode: convertHostUserMode(mode), UID: uid, GID: gid, }, nil diff --git a/lib/services/local/events.go b/lib/services/local/events.go index a36f431208091..7a358c3f1f41c 100644 --- a/lib/services/local/events.go +++ b/lib/services/local/events.go @@ -2194,7 +2194,7 @@ type staticHostUserParser struct { func (p *staticHostUserParser) parse(event backend.Event) (types.Resource, error) { switch event.Type { case types.OpDelete: - return resourceHeader(event, types.KindStaticHostUser, types.V1, 0) + return resourceHeader(event, types.KindStaticHostUser, types.V2, 0) case types.OpPut: resource, err := services.UnmarshalProtoResource[*userprovisioningpb.StaticHostUser]( event.Item.Value, diff --git a/lib/services/role.go b/lib/services/role.go index f9470b950494b..82278b05eb841 100644 --- a/lib/services/role.go +++ b/lib/services/role.go @@ -2511,7 +2511,7 @@ func (l *kubernetesClusterLabelMatcher) Match(role types.Role, typ types.RoleCon if err != nil { return false, trace.Wrap(err) } - ok, _, err := checkLabelsMatch(typ, labelMatchers, l.userTraits, mapLabelGetter(l.clusterLabels), false) + ok, _, err := CheckLabelsMatch(typ, labelMatchers, l.userTraits, mapLabelGetter(l.clusterLabels), false) return ok, trace.Wrap(err) } @@ -2734,10 +2734,10 @@ func checkRoleLabelsMatch( if err != nil { return false, "", trace.Wrap(err) } - return checkLabelsMatch(condition, labelMatchers, userTraits, resource, debug) + return CheckLabelsMatch(condition, labelMatchers, userTraits, resource, debug) } -// checkLabelsMatch checks if the [labelMatchers] match the labels of [resource] +// CheckLabelsMatch checks if the [labelMatchers] 
match the labels of [resource]
 // for [condition].
 // It considers both [labelMatchers.Labels] and [labelMatchers.Expression].
 //
@@ -2750,7 +2750,7 @@ func checkRoleLabelsMatch(
 // match it's not considered a match.
 //
 // If neither is set, it's not a match in either case.
-func checkLabelsMatch(
+func CheckLabelsMatch(
 	condition types.RoleConditionType,
 	labelMatchers types.LabelMatchers,
 	userTraits wrappers.Traits,
diff --git a/lib/services/role_test.go b/lib/services/role_test.go
index 8ce5c32f48473..9beb05cfe0d2b 100644
--- a/lib/services/role_test.go
+++ b/lib/services/role_test.go
@@ -8200,7 +8200,7 @@ func TestHostUsers_CanCreateHostUser(t *testing.T) {
 			info, err := accessChecker.HostUsers(tc.server)
 			require.Equal(t, tc.canCreate, err == nil && info != nil)
 			if tc.canCreate {
-				require.Equal(t, tc.expectedMode, info.Mode)
+				require.Equal(t, convertHostUserMode(tc.expectedMode), info.Mode)
 			}
 		})
 	}
diff --git a/lib/srv/statichostusers.go b/lib/srv/statichostusers.go
new file mode 100644
index 0000000000000..e9834a49a8921
--- /dev/null
+++ b/lib/srv/statichostusers.go
@@ -0,0 +1,266 @@
+// Teleport
+// Copyright (C) 2024 Gravitational, Inc.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+ +package srv + +import ( + "context" + "errors" + "log/slog" + "strconv" + "time" + + "github.com/gravitational/trace" + + userprovisioningpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/userprovisioning/v2" + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/api/types/label" + apiutils "github.com/gravitational/teleport/api/utils" + "github.com/gravitational/teleport/api/utils/retryutils" + "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/utils" +) + +const staticHostUserWatcherTimeout = 30 * time.Second + +// InfoGetter is an interface for getting up-to-date server info. +type InfoGetter interface { + // GetInfo gets a server, including dynamic labels. + GetInfo() types.Server +} + +// StaticHostUserHandler handles watching for static host user resources and +// applying them to the host. +type StaticHostUserHandler struct { + events types.Events + staticHostUser services.StaticHostUser + server InfoGetter + users HostUsers + sudoers HostSudoers + retry *retryutils.Linear +} + +// StaticHostUserHandlerConfig configures a StaticHostUserHandler. +type StaticHostUserHandlerConfig struct { + // Events is an events interface for creating a watcher. + Events types.Events + // StaticHostUser is a static host user client. + StaticHostUser services.StaticHostUser + // Server is a resource to fetch a types.Server for access checks. This is + // here instead of a types.Server directly so we can get updated dynamic + // labels. + Server InfoGetter + // Users is a host user backend. + Users HostUsers + // Sudoers is a host sudoers backend. + Sudoers HostSudoers +} + +// NewStaticHostUserHandler creates a new StaticHostUserHandler. 
+func NewStaticHostUserHandler(cfg StaticHostUserHandlerConfig) (*StaticHostUserHandler, error) { + if cfg.Events == nil { + return nil, trace.BadParameter("missing Events") + } + if cfg.StaticHostUser == nil { + return nil, trace.BadParameter("missing StaticHostUser") + } + if cfg.Server == nil { + return nil, trace.BadParameter("missing Server") + } + retry, err := retryutils.NewLinear(retryutils.LinearConfig{ + First: utils.FullJitter(defaults.MaxWatcherBackoff / 10), + Step: defaults.MaxWatcherBackoff / 5, + Max: defaults.MaxWatcherBackoff, + Jitter: retryutils.NewHalfJitter(), + }) + if err != nil { + return nil, trace.Wrap(err) + } + return &StaticHostUserHandler{ + events: cfg.Events, + staticHostUser: cfg.StaticHostUser, + server: cfg.Server, + users: cfg.Users, + sudoers: cfg.Sudoers, + retry: retry, + }, nil +} + +// Run runs the static host user handler to completion. +func (s *StaticHostUserHandler) Run(ctx context.Context) error { + if s.users == nil { + return nil + } + + for { + err := s.run(ctx) + if err == nil { + return nil + } + slog.DebugContext(ctx, "Static host user handler encountered a network error, will restart.", "error", err) + s.retry.Inc() + + select { + case <-ctx.Done(): + return trace.Wrap(ctx.Err()) + case <-s.retry.After(): + } + } +} + +func (s *StaticHostUserHandler) run(ctx context.Context) error { + // Start the watcher. 
+ watcher, err := s.events.NewWatcher(ctx, types.Watch{ + Kinds: []types.WatchKind{ + { + Kind: types.KindStaticHostUser, + }, + }, + }) + if err != nil { + return trace.Wrap(err) + } + defer watcher.Close() + + select { + case event := <-watcher.Events(): + if event.Type != types.OpInit { + return trace.Errorf("missing init event from watcher") + } + s.retry.Reset() + case <-time.After(staticHostUserWatcherTimeout): + return trace.LimitExceeded("timed out waiting for static host user watcher to initialize") + case <-ctx.Done(): + return nil + } + + // Fetch any host users that existed prior to creating the watcher. + var startKey string + for { + users, nextKey, err := s.staticHostUser.ListStaticHostUsers(ctx, 0, startKey) + if err != nil { + return trace.Wrap(err) + } + for _, hostUser := range users { + if err := s.handleNewHostUser(ctx, hostUser); err != nil { + // Log the error so we don't stop the handler. + slog.WarnContext(ctx, "Error handling static host user.", "error", err, "login", hostUser.GetMetadata().Name) + } + } + if nextKey == "" { + break + } + startKey = nextKey + } + + // Listen for new host users on the watcher. + for { + select { + case event := <-watcher.Events(): + if event.Type != types.OpPut { + continue + } + r, ok := event.Resource.(types.Resource153Unwrapper) + if !ok { + slog.WarnContext(ctx, "Unexpected resource type.", "resource", event.Resource) + continue + } + hostUser, ok := r.Unwrap().(*userprovisioningpb.StaticHostUser) + if !ok { + slog.WarnContext(ctx, "Unexpected resource type.", "resource", event.Resource) + continue + } + if err := s.handleNewHostUser(ctx, hostUser); err != nil { + // Log the error so we don't stop the handler. 
+ slog.WarnContext(ctx, "Error handling static host user.", "error", err, "login", hostUser.GetMetadata().Name) + continue + } + case <-watcher.Done(): + return trace.Wrap(watcher.Error()) + case <-ctx.Done(): + if !errors.Is(ctx.Err(), context.Canceled) { + return trace.Wrap(ctx.Err()) + } + return nil + } + } +} + +func (s *StaticHostUserHandler) handleNewHostUser(ctx context.Context, hostUser *userprovisioningpb.StaticHostUser) error { + var createUser *userprovisioningpb.Matcher + login := hostUser.GetMetadata().Name + server := s.server.GetInfo() + for _, matcher := range hostUser.Spec.Matchers { + // Check if this host user applies to this node. + nodeLabels := make(types.Labels) + for k, v := range label.ToMap(matcher.NodeLabels) { + nodeLabels[k] = apiutils.Strings(v) + } + matched, _, err := services.CheckLabelsMatch( + types.Allow, + types.LabelMatchers{ + Labels: nodeLabels, + Expression: matcher.NodeLabelsExpression, + }, + nil, // userTraits + server, + false, // debug + ) + if err != nil { + return trace.Wrap(err) + } + if !matched { + continue + } + + // Matching multiple times is an error. + if createUser != nil { + const msg = "Multiple matchers matched this node. Please update resource to ensure that each node is matched only once." 
+ slog.WarnContext(ctx, msg, "login", login, + slog.Group("first_match", "labels", createUser.NodeLabels, "expression", createUser.NodeLabelsExpression), + slog.Group("second_match", "labels", matcher.NodeLabels, "expression", matcher.NodeLabelsExpression), + ) + return trace.BadParameter(msg) + } + createUser = matcher + } + + if createUser == nil { + return nil + } + + slog.DebugContext(ctx, "Attempt to update matched static host user.", "login", login) + ui := services.HostUsersInfo{ + Groups: createUser.Groups, + Mode: services.HostUserModeStatic, + } + if createUser.Uid != 0 { + ui.UID = strconv.Itoa(int(createUser.Uid)) + } + if createUser.Gid != 0 { + ui.GID = strconv.Itoa(int(createUser.Gid)) + } + if _, err := s.users.UpsertUser(login, ui); err != nil { + return trace.Wrap(err) + } + if s.sudoers != nil && len(createUser.Sudoers) != 0 { + if err := s.sudoers.WriteSudoers(login, createUser.Sudoers); err != nil { + return trace.Wrap(err) + } + } + return nil +} diff --git a/lib/srv/usermgmt.go b/lib/srv/usermgmt.go index a64f8b54e8f2a..8ed7d291e6d8b 100644 --- a/lib/srv/usermgmt.go +++ b/lib/srv/usermgmt.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "io" + "log/slog" "maps" "os/user" "regexp" @@ -229,23 +230,22 @@ func (u *HostSudoersManagement) RemoveSudoers(name string) error { // unmanagedUserErr is returned when attempting to modify or interact with a user that is not managed by Teleport. 
var unmanagedUserErr = errors.New("user not managed by teleport") -func (u *HostUserManagement) updateUser(name string, ui services.HostUsersInfo) error { - +func (u *HostUserManagement) updateUser(name string, ui services.HostUsersInfo) (io.Closer, error) { existingUser, err := u.backend.Lookup(name) if err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } currentGroups := make(map[string]struct{}, len(ui.Groups)) groupIDs, err := u.backend.UserGIDs(existingUser) if err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } for _, groupID := range groupIDs { group, err := u.backend.LookupGroupByID(groupID) if err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } currentGroups[group.Name] = struct{}{} @@ -257,22 +257,40 @@ func (u *HostUserManagement) updateUser(name string, ui services.HostUsersInfo) _, hasDropGroup := currentGroups[types.TeleportDropGroup] _, hasKeepGroup := currentGroups[types.TeleportKeepGroup] - if !hasDropGroup && !hasKeepGroup && !migrateKeepUser { - return trace.Errorf("%q %w", name, unmanagedUserErr) + _, hasStaticGroup := currentGroups[types.TeleportStaticGroup] + if !(hasDropGroup || hasKeepGroup || hasStaticGroup || migrateKeepUser) { + return nil, trace.Errorf("%q %w", name, unmanagedUserErr) + } + + // Do not convert/update groups from static to non-static user, and vice versa. 
+ isStaticUser := ui.Mode == services.HostUserModeStatic + if hasStaticGroup != isStaticUser { + slog.DebugContext(context.Background(), + "Aborting host user creation, can't convert between auto-provisioned and static host users.", + "login", name) + return nil, nil } + var closer io.Closer switch ui.Mode { - case types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP: + case services.HostUserModeDrop: ui.Groups = append(ui.Groups, types.TeleportDropGroup) - case types.CreateHostUserMode_HOST_USER_MODE_KEEP: + closer = &userCloser{ + username: name, + users: u, + backend: u.backend, + } + case services.HostUserModeStatic: + ui.Groups = append(ui.Groups, types.TeleportStaticGroup) + case services.HostUserModeKeep: if !hasKeepGroup { home, err := u.backend.GetDefaultHomeDirectory(name) if err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } if err := u.backend.CreateHomeDirectory(home, existingUser.Uid, existingUser.Gid); err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } } @@ -289,36 +307,48 @@ func (u *HostUserManagement) updateUser(name string, ui services.HostUsersInfo) primaryGroup, err := u.backend.LookupGroupByID(existingUser.Gid) if err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } finalGroups[primaryGroup.Name] = struct{}{} if !maps.Equal(currentGroups, finalGroups) { - return trace.Wrap(u.doWithUserLock(func(_ types.SemaphoreLease) error { + if err := u.doWithUserLock(func(_ types.SemaphoreLease) error { return trace.Wrap(u.backend.SetUserGroups(name, ui.Groups)) - })) + }); err != nil { + return nil, trace.Wrap(err) + } } - return nil + return closer, nil } -func (u *HostUserManagement) createUser(name string, ui services.HostUsersInfo) error { +func (u *HostUserManagement) createUser(name string, ui services.HostUsersInfo) (io.Closer, error) { var home string var err error + var closer io.Closer switch ui.Mode { - case types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP: + case 
services.HostUserModeDrop: ui.Groups = append(ui.Groups, types.TeleportDropGroup) - case types.CreateHostUserMode_HOST_USER_MODE_KEEP: - ui.Groups = append(ui.Groups, types.TeleportKeepGroup) + closer = &userCloser{ + username: name, + users: u, + backend: u.backend, + } + case services.HostUserModeKeep, services.HostUserModeStatic: + if ui.Mode == services.HostUserModeStatic { + ui.Groups = append(ui.Groups, types.TeleportStaticGroup) + } else { + ui.Groups = append(ui.Groups, types.TeleportKeepGroup) + } home, err = u.backend.GetDefaultHomeDirectory(name) if err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } } - return trace.Wrap(u.doWithUserLock(func(_ types.SemaphoreLease) error { - if ui.Mode != types.CreateHostUserMode_HOST_USER_MODE_KEEP { + err = u.doWithUserLock(func(_ types.SemaphoreLease) error { + if ui.Mode == services.HostUserModeDrop { if err := u.storage.UpsertHostUserInteractionTime(u.ctx, name, time.Now()); err != nil { return trace.Wrap(err) } @@ -349,7 +379,11 @@ func (u *HostUserManagement) createUser(name string, ui services.HostUsersInfo) } return nil - })) + }) + if err != nil { + return nil, trace.Wrap(err) + } + return closer, nil } func (u *HostUserManagement) ensureGroupsExist(groups ...string) error { @@ -369,7 +403,7 @@ func (u *HostUserManagement) UpsertUser(name string, ui services.HostUsersInfo) // allow for explicit assignment of teleport-keep group in order to facilitate migrating KEEP users that existed before we added // the teleport-keep group migrateKeepUser := slices.Contains(ui.Groups, types.TeleportKeepGroup) - skipKeepGroup := migrateKeepUser && ui.Mode == types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP + skipKeepGroup := migrateKeepUser && ui.Mode != services.HostUserModeKeep if skipKeepGroup { log.Warnf("explicit assignment of %q group is not possible in 'insecure-drop' mode", types.TeleportKeepGroup) @@ -378,13 +412,14 @@ func (u *HostUserManagement) UpsertUser(name string, ui 
services.HostUsersInfo) groupSet := make(map[string]struct{}, len(ui.Groups)) groups := make([]string, 0, len(ui.Groups)) for _, group := range ui.Groups { - // the TeleportDropGroup is managed automatically and should not be allowed direct assignment - if group == types.TeleportDropGroup { - continue - } - - if skipKeepGroup && group == types.TeleportKeepGroup { + switch group { + // TeleportDropGroup and TeleportStaticGroup are managed automatically and should not be allowed direct assignment + case types.TeleportDropGroup, types.TeleportStaticGroup: continue + case types.TeleportKeepGroup: + if skipKeepGroup { + continue + } } if _, ok := groupSet[group]; !ok { @@ -394,7 +429,7 @@ func (u *HostUserManagement) UpsertUser(name string, ui services.HostUsersInfo) } ui.Groups = groups - if err := u.ensureGroupsExist(types.TeleportDropGroup, types.TeleportKeepGroup); err != nil { + if err := u.ensureGroupsExist(types.TeleportDropGroup, types.TeleportKeepGroup, types.TeleportStaticGroup); err != nil { return nil, trace.WrapWithMessage(err, "creating teleport system groups") } @@ -402,21 +437,13 @@ func (u *HostUserManagement) UpsertUser(name string, ui services.HostUsersInfo) return nil, trace.WrapWithMessage(err, "creating configured groups") } - var closer io.Closer - if ui.Mode == types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP { - closer = &userCloser{ - username: name, - users: u, - backend: u.backend, - } - } - - if err := u.updateUser(name, ui); err != nil { + closer, err := u.updateUser(name, ui) + if err != nil { if !errors.Is(err, user.UnknownUserError(name)) { return nil, trace.Wrap(err) } - if err := u.createUser(name, ui); err != nil { + if closer, err = u.createUser(name, ui); err != nil { return nil, trace.Wrap(err) } } diff --git a/lib/srv/usermgmt_test.go b/lib/srv/usermgmt_test.go index 753e409454d28..4b6b8f663972d 100644 --- a/lib/srv/usermgmt_test.go +++ b/lib/srv/usermgmt_test.go @@ -203,18 +203,11 @@ var _ HostSudoersBackend = 
&testHostUserBackend{} func TestUserMgmt_CreateTemporaryUser(t *testing.T) { t.Parallel() - backend := newTestUserMgmt() - bk, err := memory.New(memory.Config{}) - require.NoError(t, err) - pres := local.NewPresenceService(bk) - users := HostUserManagement{ - backend: backend, - storage: pres, - } + users, backend := initBackend(t, nil) userinfo := services.HostUsersInfo{ Groups: []string{"hello", "sudo"}, - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + Mode: services.HostUserModeDrop, } // create a user with some groups closer, err := users.UpsertUser("bob", userinfo) @@ -266,7 +259,7 @@ func TestUserMgmtSudoers_CreateTemporaryUser(t *testing.T) { closer, err := users.UpsertUser("bob", services.HostUsersInfo{ Groups: []string{"hello", "sudo"}, - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + Mode: services.HostUserModeDrop, }) require.NoError(t, err) require.NotEqual(t, nil, closer) @@ -289,12 +282,12 @@ func TestUserMgmtSudoers_CreateTemporaryUser(t *testing.T) { // been created backend.CreateUser("testuser", nil, "", "", "") _, err := users.UpsertUser("testuser", services.HostUsersInfo{ - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + Mode: services.HostUserModeDrop, }) require.ErrorIs(t, err, unmanagedUserErr) backend.CreateGroup(types.TeleportDropGroup, "") _, err = users.UpsertUser("testuser", services.HostUsersInfo{ - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + Mode: services.HostUserModeDrop, }) require.ErrorIs(t, err, unmanagedUserErr) }) @@ -334,7 +327,7 @@ func TestUserMgmt_DeleteAllTeleportSystemUsers(t *testing.T) { if slices.Contains(user.groups, types.TeleportDropGroup) { users.UpsertUser(user.user, services.HostUsersInfo{ Groups: user.groups, - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + Mode: services.HostUserModeDrop, }) } else { mgmt.CreateUser(user.user, user.groups, "", "", "") @@ -403,23 +396,13 @@ func TestIsUnknownGroupError(t *testing.T) { func 
Test_UpdateUserGroups_Keep(t *testing.T) { t.Parallel() - backend := newTestUserMgmt() - bk, err := memory.New(memory.Config{}) - require.NoError(t, err) - pres := local.NewPresenceService(bk) - users := HostUserManagement{ - backend: backend, - storage: pres, - } allGroups := []string{"foo", "bar", "baz", "quux"} - for _, group := range allGroups { - require.NoError(t, backend.CreateGroup(group, "")) - } + users, backend := initBackend(t, allGroups) userinfo := services.HostUsersInfo{ Groups: slices.Clone(allGroups[:2]), - Mode: types.CreateHostUserMode_HOST_USER_MODE_KEEP, + Mode: services.HostUserModeKeep, } // Create user @@ -448,8 +431,16 @@ func Test_UpdateUserGroups_Keep(t *testing.T) { assert.ElementsMatch(t, append(userinfo.Groups, types.TeleportKeepGroup), backend.users["alice"]) assert.NotContains(t, backend.users["alice"], types.TeleportDropGroup) + // Do not convert the managed user to static. + userinfo.Mode = services.HostUserModeStatic + closer, err = users.UpsertUser("alice", userinfo) + assert.NoError(t, err) + assert.Equal(t, nil, closer) + assert.Equal(t, 1, backend.setUserGroupsCalls) + assert.ElementsMatch(t, append(userinfo.Groups, types.TeleportKeepGroup), backend.users["alice"]) + // Updates with INSECURE_DROP mode should convert the managed user - userinfo.Mode = types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP + userinfo.Mode = services.HostUserModeDrop userinfo.Groups = slices.Clone(allGroups[:2]) closer, err = users.UpsertUser("alice", userinfo) assert.NoError(t, err) @@ -461,23 +452,13 @@ func Test_UpdateUserGroups_Keep(t *testing.T) { func Test_UpdateUserGroups_Drop(t *testing.T) { t.Parallel() - backend := newTestUserMgmt() - bk, err := memory.New(memory.Config{}) - require.NoError(t, err) - pres := local.NewPresenceService(bk) - users := HostUserManagement{ - backend: backend, - storage: pres, - } allGroups := []string{"foo", "bar", "baz", "quux"} - for _, group := range allGroups { - require.NoError(t, 
backend.CreateGroup(group, "")) - } + users, backend := initBackend(t, allGroups) userinfo := services.HostUsersInfo{ Groups: slices.Clone(allGroups[:2]), - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + Mode: services.HostUserModeDrop, } // Create user @@ -506,8 +487,16 @@ func Test_UpdateUserGroups_Drop(t *testing.T) { assert.ElementsMatch(t, append(userinfo.Groups, types.TeleportDropGroup), backend.users["alice"]) assert.NotContains(t, backend.users["alice"], types.TeleportKeepGroup) + // Do not convert the managed user to static. + userinfo.Mode = services.HostUserModeStatic + closer, err = users.UpsertUser("alice", userinfo) + assert.NoError(t, err) + assert.Equal(t, nil, closer) + assert.Equal(t, 1, backend.setUserGroupsCalls) + assert.ElementsMatch(t, append(userinfo.Groups, types.TeleportDropGroup), backend.users["alice"]) + // Updates with KEEP mode should convert the ephemeral user - userinfo.Mode = types.CreateHostUserMode_HOST_USER_MODE_KEEP + userinfo.Mode = services.HostUserModeKeep userinfo.Groups = slices.Clone(allGroups[:2]) closer, err = users.UpsertUser("alice", userinfo) assert.NoError(t, err) @@ -518,28 +507,66 @@ func Test_UpdateUserGroups_Drop(t *testing.T) { assert.NotContains(t, backend.users["alice"], types.TeleportDropGroup) } -func Test_DontManageExistingUser(t *testing.T) { +func Test_UpdateUserGroups_Static(t *testing.T) { t.Parallel() - backend := newTestUserMgmt() - bk, err := memory.New(memory.Config{}) - require.NoError(t, err) - pres := local.NewPresenceService(bk) - users := HostUserManagement{ - backend: backend, - storage: pres, + allGroups := []string{"foo", "bar", "baz", "quux"} + users, backend := initBackend(t, allGroups) + userinfo := services.HostUsersInfo{ + Groups: slices.Clone(allGroups[:2]), + Mode: services.HostUserModeStatic, } + // Create user. 
+ closer, err := users.UpsertUser("alice", userinfo) + assert.NoError(t, err) + assert.Equal(t, nil, closer) + assert.Zero(t, backend.setUserGroupsCalls) + assert.ElementsMatch(t, append(userinfo.Groups, types.TeleportStaticGroup), backend.users["alice"]) + + // Update user with new groups. + userinfo.Groups = slices.Clone(allGroups[2:]) + closer, err = users.UpsertUser("alice", userinfo) + assert.NoError(t, err) + assert.Equal(t, nil, closer) + assert.Equal(t, 1, backend.setUserGroupsCalls) + assert.ElementsMatch(t, append(userinfo.Groups, types.TeleportStaticGroup), backend.users["alice"]) + + // Upsert again with same groups should not call SetUserGroups. + closer, err = users.UpsertUser("alice", userinfo) + assert.NoError(t, err) + assert.Equal(t, nil, closer) + assert.Equal(t, 1, backend.setUserGroupsCalls) + assert.ElementsMatch(t, append(userinfo.Groups, types.TeleportStaticGroup), backend.users["alice"]) + + // Do not convert to KEEP. + userinfo.Mode = services.HostUserModeKeep + closer, err = users.UpsertUser("alice", userinfo) + assert.NoError(t, err) + assert.Equal(t, nil, closer) + assert.Equal(t, 1, backend.setUserGroupsCalls) + assert.ElementsMatch(t, append(slices.Clone(allGroups[2:]), types.TeleportStaticGroup), backend.users["alice"]) + + // Do not convert to INSECURE_DROP. 
+ userinfo.Mode = services.HostUserModeDrop + closer, err = users.UpsertUser("alice", userinfo) + assert.NoError(t, err) + assert.Equal(t, nil, closer) + assert.Equal(t, 1, backend.setUserGroupsCalls) + assert.ElementsMatch(t, append(slices.Clone(allGroups[2:]), types.TeleportStaticGroup), backend.users["alice"]) +} + +func Test_DontManageExistingUser(t *testing.T) { + t.Parallel() + allGroups := []string{"foo", "bar", "baz", "quux"} - for _, group := range allGroups { - require.NoError(t, backend.CreateGroup(group, "")) - } + users, backend := initBackend(t, allGroups) assert.NoError(t, backend.CreateUser("alice", allGroups, "", "", "")) userinfo := services.HostUsersInfo{ Groups: allGroups[:2], - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + Mode: services.HostUserModeDrop, } // Update user in DROP mode @@ -550,7 +577,15 @@ func Test_DontManageExistingUser(t *testing.T) { assert.ElementsMatch(t, allGroups, backend.users["alice"]) // Update user in KEEP mode - userinfo.Mode = types.CreateHostUserMode_HOST_USER_MODE_KEEP + userinfo.Mode = services.HostUserModeKeep + closer, err = users.UpsertUser("alice", userinfo) + assert.ErrorIs(t, err, unmanagedUserErr) + assert.Equal(t, nil, closer) + assert.Zero(t, backend.setUserGroupsCalls) + assert.ElementsMatch(t, allGroups, backend.users["alice"]) + + // Update static user + userinfo.Mode = services.HostUserModeStatic closer, err = users.UpsertUser("alice", userinfo) assert.ErrorIs(t, err, unmanagedUserErr) assert.Equal(t, nil, closer) @@ -561,44 +596,45 @@ func Test_DontManageExistingUser(t *testing.T) { func Test_DontUpdateUnmanagedUsers(t *testing.T) { t.Parallel() - backend := newTestUserMgmt() - bk, err := memory.New(memory.Config{}) - require.NoError(t, err) - pres := local.NewPresenceService(bk) - users := HostUserManagement{ - backend: backend, - storage: pres, - } - allGroups := []string{"foo", "bar", "baz", "quux"} - for _, group := range allGroups { - require.NoError(t, 
backend.CreateGroup(group, "")) - } + users, backend := initBackend(t, allGroups) assert.NoError(t, backend.CreateUser("alice", allGroups[2:], "", "", "")) - userinfo := services.HostUsersInfo{ - Groups: allGroups[:2], - Mode: types.CreateHostUserMode_HOST_USER_MODE_KEEP, + tests := []struct { + name string + userinfo services.HostUsersInfo + }{ + { + name: "keep", + userinfo: services.HostUsersInfo{ + Groups: allGroups[:2], + Mode: services.HostUserModeKeep, + }, + }, + { + name: "drop", + userinfo: services.HostUsersInfo{ + Groups: allGroups[:2], + Mode: services.HostUserModeDrop, + }, + }, + { + name: "static", + userinfo: services.HostUsersInfo{ + Groups: allGroups[:2], + Mode: services.HostUserModeStatic, + }, + }, } - - // Try to update existing, unmanaged user in KEEP mode - closer, err := users.UpsertUser("alice", userinfo) - assert.ErrorIs(t, err, unmanagedUserErr) - assert.Equal(t, nil, closer) - assert.Zero(t, backend.setUserGroupsCalls) - assert.ElementsMatch(t, allGroups[2:], backend.users["alice"]) - - userinfo = services.HostUsersInfo{ - Groups: allGroups[:2], - Mode: types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP, + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + closer, err := users.UpsertUser("alice", tc.userinfo) + assert.ErrorIs(t, err, unmanagedUserErr) + assert.Equal(t, nil, closer) + assert.Zero(t, backend.setUserGroupsCalls) + assert.ElementsMatch(t, allGroups[2:], backend.users["alice"]) + }) } - - // Try to update existing, unmanaged user in DROP mode - closer, err = users.UpsertUser("alice", userinfo) - assert.ErrorIs(t, err, unmanagedUserErr) - assert.Equal(t, nil, closer) - assert.Zero(t, backend.setUserGroupsCalls) - assert.ElementsMatch(t, allGroups[2:], backend.users["alice"]) } // teleport-keep can be included explicitly in the Groups slice in order to flag an @@ -606,25 +642,14 @@ func Test_DontUpdateUnmanagedUsers(t *testing.T) { func Test_AllowExplicitlyManageExistingUsers(t *testing.T) { t.Parallel() - 
backend := newTestUserMgmt() - bk, err := memory.New(memory.Config{}) - require.NoError(t, err) - pres := local.NewPresenceService(bk) - users := HostUserManagement{ - backend: backend, - storage: pres, - } - allGroups := []string{"foo", types.TeleportKeepGroup, types.TeleportDropGroup} - for _, group := range allGroups { - require.NoError(t, backend.CreateGroup(group, "")) - } + users, backend := initBackend(t, allGroups) assert.NoError(t, backend.CreateUser("alice-keep", []string{}, "", "", "")) assert.NoError(t, backend.CreateUser("alice-drop", []string{}, "", "", "")) userinfo := services.HostUsersInfo{ Groups: slices.Clone(allGroups), - Mode: types.CreateHostUserMode_HOST_USER_MODE_KEEP, + Mode: services.HostUserModeKeep, } // Take ownership of existing user when in KEEP mode @@ -637,7 +662,7 @@ func Test_AllowExplicitlyManageExistingUsers(t *testing.T) { assert.NotContains(t, backend.users["alice-keep"], types.TeleportDropGroup) // Don't take ownership of existing user when in DROP mode - userinfo.Mode = types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP + userinfo.Mode = services.HostUserModeDrop closer, err = users.UpsertUser("alice-drop", userinfo) assert.ErrorIs(t, err, unmanagedUserErr) assert.Equal(t, nil, closer) @@ -645,12 +670,27 @@ func Test_AllowExplicitlyManageExistingUsers(t *testing.T) { assert.Empty(t, backend.users["alice-drop"]) // Don't assign teleport-keep to users created in DROP mode - userinfo.Mode = types.CreateHostUserMode_HOST_USER_MODE_INSECURE_DROP + userinfo.Mode = services.HostUserModeDrop closer, err = users.UpsertUser("bob", userinfo) assert.NoError(t, err) assert.NotEqual(t, nil, closer) assert.Equal(t, 1, backend.setUserGroupsCalls) assert.ElementsMatch(t, []string{"foo", types.TeleportDropGroup}, backend.users["bob"]) assert.NotContains(t, backend.users["bob"], types.TeleportKeepGroup) +} + +func initBackend(t *testing.T, groups []string) (HostUserManagement, *testHostUserBackend) { + backend := newTestUserMgmt() + bk, err 
:= memory.New(memory.Config{}) + require.NoError(t, err) + pres := local.NewPresenceService(bk) + users := HostUserManagement{ + backend: backend, + storage: pres, + } + for _, group := range groups { + require.NoError(t, backend.CreateGroup(group, "")) + } + return users, backend }