Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Node admission/authorization #14227

Merged
merged 3 commits into from
Sep 18, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 26 additions & 0 deletions pkg/cmd/server/bootstrappolicy/dead.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ import (

var (
	// deadClusterRoles holds cluster roles that should no longer grant any
	// permissions; they are registered via addDeadClusterRole in init.
	deadClusterRoles = []rbac.ClusterRole{}

	// deadClusterRoleBindings holds cluster role bindings that should no
	// longer have any subjects; registered via addDeadClusterRoleBinding.
	deadClusterRoleBindings = []rbac.ClusterRoleBinding{}
)

func addDeadClusterRole(name string) {
Expand All @@ -25,12 +27,33 @@ func addDeadClusterRole(name string) {
)
}

// addDeadClusterRoleBinding registers a cluster role binding (by name) as
// dead, bound to the given cluster role. Registering the same name twice is
// a programmer error and aborts the process via glog.Fatalf.
func addDeadClusterRoleBinding(name, roleName string) {
	for i := range deadClusterRoleBindings {
		if deadClusterRoleBindings[i].Name == name {
			glog.Fatalf("%q was already registered", name)
		}
	}

	binding := rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		RoleRef:    rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: roleName},
	}
	deadClusterRoleBindings = append(deadClusterRoleBindings, binding)
}

// GetDeadClusterRoles returns cluster roles which should no longer have any permissions.
// These are enumerated so that a reconcile that tightens permissions will properly remove them.
func GetDeadClusterRoles() []rbac.ClusterRole {
	return deadClusterRoles
}

// GetDeadClusterRoleBindings returns cluster role bindings which should no longer have any subjects.
// These are enumerated so that a reconcile that tightens permissions will properly remove them.
// Note: the returned slice aliases the package-level registry; callers must not mutate it.
func GetDeadClusterRoleBindings() []rbac.ClusterRoleBinding {
	return deadClusterRoleBindings
}

func init() {
// these were replaced by kube controller roles
addDeadClusterRole("system:replication-controller")
Expand All @@ -50,4 +73,7 @@ func init() {
addDeadClusterRole("system:build-controller")
addDeadClusterRole("system:deploymentconfig-controller")
addDeadClusterRole("system:deployment-controller")

// this was replaced by the node authorizer
addDeadClusterRoleBinding("system:nodes", "system:node")
}
13 changes: 7 additions & 6 deletions pkg/cmd/server/bootstrappolicy/policy.go
Original file line number Diff line number Diff line change
Expand Up @@ -696,13 +696,13 @@ func GetOpenshiftBootstrapClusterRoles() []rbac.ClusterRole {
Rules: []rbac.PolicyRule{
// Needed to check API access. These creates are non-mutating
rbac.NewRule("create").Groups(kAuthnGroup).Resources("tokenreviews").RuleOrDie(),
rbac.NewRule("create").Groups(authzGroup, legacyAuthzGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
rbac.NewRule("create").Groups(kAuthzGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
// Needed to build serviceLister, to populate env vars for services
rbac.NewRule(read...).Groups(kapiGroup).Resources("services").RuleOrDie(),
// Nodes can register themselves
// TODO: restrict to creating a node with the same name they announce
// Use the NodeRestriction admission plugin to limit a node to creating/updating its own API object.
rbac.NewRule("create", "get", "list", "watch").Groups(kapiGroup).Resources("nodes").RuleOrDie(),
rbac.NewRule("update", "patch", "delete").Groups(kapiGroup).Resources("nodes").RuleOrDie(),
// TODO: restrict to the bound node once supported
rbac.NewRule("update", "patch").Groups(kapiGroup).Resources("nodes/status").RuleOrDie(),

Expand Down Expand Up @@ -731,7 +731,7 @@ func GetOpenshiftBootstrapClusterRoles() []rbac.ClusterRole {
// Needed for glusterfs volumes
rbac.NewRule("get").Groups(kapiGroup).Resources("endpoints").RuleOrDie(),
// Nodes are allowed to request CSRs (specifically, request serving certs)
rbac.NewRule("get", "create").Groups(certificates.GroupName).Resources("certificatesigningrequests").RuleOrDie(),
rbac.NewRule("get", "create", "list", "watch").Groups(certificates.GroupName).Resources("certificatesigningrequests").RuleOrDie(),
},
},

Expand Down Expand Up @@ -966,9 +966,6 @@ func GetOpenshiftBootstrapClusterRoleBindings() []rbac.ClusterRoleBinding {
newOriginClusterBinding(StatusCheckerRoleBindingName, StatusCheckerRoleName).
Groups(AuthenticatedGroup, UnauthenticatedGroup).
BindingOrDie(),
newOriginClusterBinding(NodeRoleBindingName, NodeRoleName).
Groups(NodesGroup).
BindingOrDie(),
newOriginClusterBinding(NodeProxierRoleBindingName, NodeProxierRoleName).
// Allow node identities to run node proxies
Groups(NodesGroup).
Expand Down Expand Up @@ -1009,6 +1006,10 @@ func GetOpenshiftBootstrapClusterRoleBindings() []rbac.ClusterRoleBinding {

func GetBootstrapClusterRoleBindings() []rbac.ClusterRoleBinding {
openshiftClusterRoleBindings := GetOpenshiftBootstrapClusterRoleBindings()
// dead cluster roles need to be checked for conflicts (in case something new comes up)
// so add them to this list.
openshiftClusterRoleBindings = append(openshiftClusterRoleBindings, GetDeadClusterRoleBindings()...)

kubeClusterRoleBindings := bootstrappolicy.ClusterRoleBindings()
kubeControllerClusterRoleBindings := bootstrappolicy.ControllerRoleBindings()
openshiftControllerClusterRoleBindings := ControllerRoleBindings()
Expand Down
19 changes: 19 additions & 0 deletions pkg/cmd/server/bootstrappolicy/policy_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import (
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
rulevalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
kbootstrappolicy "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"

"github.com/openshift/origin/pkg/api/v1"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
Expand Down Expand Up @@ -109,6 +110,7 @@ func TestCovers(t *testing.T) {
var clusterAdmin *rbac.ClusterRole
var storageAdmin *rbac.ClusterRole
var imageBuilder *rbac.ClusterRole
var nodeRole *rbac.ClusterRole

for i := range allRoles {
role := allRoles[i]
Expand All @@ -135,6 +137,8 @@ func TestCovers(t *testing.T) {
storageAdmin = &role
case bootstrappolicy.ImageBuilderRoleName:
imageBuilder = &role
case bootstrappolicy.NodeRoleName:
nodeRole = &role
}
}

Expand Down Expand Up @@ -176,4 +180,19 @@ func TestCovers(t *testing.T) {
if covers, miss := rulevalidation.Covers(systemMaster.Rules, clusterAdmin.Rules); !covers {
t.Errorf("failed to cover: %#v", miss)
}

// Make sure our node role covers upstream node rules
if covers, miss := rulevalidation.Covers(nodeRole.Rules, kbootstrappolicy.NodeRules()); !covers {
t.Errorf("upstream node role has extra permissions:")
for _, r := range miss {
t.Logf("\t%s", r.CompactString())
}
}
// Make sure our node role doesn't have any extra permissions
if covers, miss := rulevalidation.Covers(kbootstrappolicy.NodeRules(), nodeRole.Rules); !covers {
t.Errorf("openshift node role has extra permissions:")
for _, r := range miss {
t.Logf("\t%s", r.CompactString())
}
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why is this needed ?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

to ensure there were no differences between our node role and the upstream node role as we converge

}
3 changes: 2 additions & 1 deletion pkg/cmd/server/origin/admission/register.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (
securityadmission "github.com/openshift/origin/pkg/security/admission"
serviceadmit "github.com/openshift/origin/pkg/service/admission"

"k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
storageclassdefaultadmission "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault"

imagepolicyapi "github.com/openshift/origin/pkg/image/admission/imagepolicy/api"
Expand Down Expand Up @@ -83,6 +84,7 @@ var (
serviceadmit.RestrictedEndpointsPluginName,
"LimitRanger",
"ServiceAccount",
noderestriction.PluginName,
"SecurityContextConstraint",
"SCCExecRestrictions",
"PersistentVolumeLabel",
Expand Down Expand Up @@ -111,7 +113,6 @@ var (
// these are new, reassess post-rebase
"Initializers",
"GenericAdmissionWebhook",
"NodeRestriction",
"PodTolerationRestriction",
)
)
Expand Down
26 changes: 23 additions & 3 deletions pkg/cmd/server/origin/master_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,20 +43,25 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
kclientsetexternal "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
kclientsetinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
kinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
kinternalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion"
rbacinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion"
rbaclisters "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion"
sacontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
kadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
"k8s.io/kubernetes/pkg/serviceaccount"
noderestriction "k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
saadmit "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
storageclassdefaultadmission "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/node"
rbacauthorizer "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
kbootstrappolicy "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"

"github.com/openshift/origin/pkg/auth/authenticator/request/paramtoken"
authnregistry "github.com/openshift/origin/pkg/auth/oauth/registry"
Expand Down Expand Up @@ -227,6 +232,8 @@ func BuildMasterConfig(options configapi.MasterConfig, informers InformerAccess)
kubeAuthorizer,
kubeSubjectLocator,
informers.GetInternalKubeInformers().Rbac().InternalVersion().ClusterRoles().Lister(),
informers.GetInternalKubeInformers().Core().InternalVersion().Pods(),
informers.GetInternalKubeInformers().Core().InternalVersion().PersistentVolumes(),
options.ProjectConfig.ProjectRequestMessage,
)

Expand Down Expand Up @@ -397,6 +404,7 @@ var (
"PodPreset",
"LimitRanger",
"ServiceAccount",
noderestriction.PluginName,
"SecurityContextConstraint",
storageclassdefaultadmission.PluginName,
"AlwaysPullImages",
Expand All @@ -408,7 +416,6 @@ var (
"DefaultTolerationSeconds",
"Initializers",
"GenericAdmissionWebhook",
"NodeRestriction",
"PodTolerationRestriction",
// NOTE: ResourceQuota and ClusterResourceQuota must be the last 2 plugins.
// DO NOT ADD ANY PLUGINS AFTER THIS LINE!
Expand Down Expand Up @@ -441,6 +448,7 @@ var (
"PodPreset",
"LimitRanger",
"ServiceAccount",
noderestriction.PluginName,
"SecurityContextConstraint",
storageclassdefaultadmission.PluginName,
"AlwaysPullImages",
Expand All @@ -452,7 +460,6 @@ var (
"DefaultTolerationSeconds",
"Initializers",
"GenericAdmissionWebhook",
"NodeRestriction",
"PodTolerationRestriction",
// NOTE: ResourceQuota and ClusterResourceQuota must be the last 2 plugins.
// DO NOT ADD ANY PLUGINS AFTER THIS LINE!
Expand Down Expand Up @@ -779,14 +786,27 @@ func buildKubeAuth(r rbacinformers.Interface) (kauthorizer.Authorizer, rbacregis
return kubeAuthorizer, ruleResolver, kubeSubjectLocator
}

func newAuthorizer(kubeAuthorizer kauthorizer.Authorizer, kubeSubjectLocator rbacauthorizer.SubjectLocator, clusterRoleGetter rbaclisters.ClusterRoleLister, projectRequestDenyMessage string) (kauthorizer.Authorizer, authorizer.SubjectLocator) {
func newAuthorizer(
kubeAuthorizer kauthorizer.Authorizer,
kubeSubjectLocator rbacauthorizer.SubjectLocator,
clusterRoleGetter rbaclisters.ClusterRoleLister,
podInformer coreinformers.PodInformer,
pvInformer coreinformers.PersistentVolumeInformer,
projectRequestDenyMessage string,
) (kauthorizer.Authorizer, authorizer.SubjectLocator) {
messageMaker := authorizer.NewForbiddenMessageResolver(projectRequestDenyMessage)
roleBasedAuthorizer := authorizer.NewAuthorizer(kubeAuthorizer, messageMaker)
subjectLocator := authorizer.NewSubjectLocator(kubeSubjectLocator)

scopeLimitedAuthorizer := scope.NewAuthorizer(roleBasedAuthorizer, clusterRoleGetter, messageMaker)

graph := node.NewGraph()
node.AddGraphEventHandlers(graph, podInformer, pvInformer)
nodeAuthorizer := node.NewAuthorizer(graph, nodeidentifier.NewDefaultNodeIdentifier(), kbootstrappolicy.NodeRules())

authorizer := authorizerunion.New(
authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup), // authorizes system:masters to do anything, just like upstream
nodeAuthorizer,
scopeLimitedAuthorizer)

return authorizer, subjectLocator
Expand Down
Loading