Merge pull request openshift#1425 from dgrisonnet/revert-1330
Bug 1995924: Revert "Merge pull request openshift#1330 from dgrisonnet/ha-upgradeable"
openshift-merge-robot committed Oct 12, 2021
2 parents a62b8fe + a702102 commit f766545
Showing 629 changed files with 92 additions and 201,515 deletions.
1 change: 0 additions & 1 deletion go.mod
```diff
@@ -26,7 +26,6 @@ require (
 	k8s.io/client-go v12.0.0+incompatible
 	k8s.io/klog/v2 v2.10.0
 	k8s.io/kube-aggregator v0.21.1
-	k8s.io/kubectl v0.22.1
 	k8s.io/metrics v0.22.1
 )
```
51 changes: 0 additions & 51 deletions go.sum

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions hack/build-jsonnet.sh
```diff
@@ -47,6 +47,7 @@ mv "${prefix}/cluster-monitoring-operator/cluster-role.yaml" \
 mv "${prefix}/cluster-monitoring-operator/namespaced-cluster-role.yaml" \
 	"${prefix}/manifests/0000_50_cluster-monitoring-operator_02-namespaced-cluster-role.yaml"
 
+
 # Move resulting manifests to the manifests directory
 mv assets/manifests/* manifests/
 rmdir assets/manifests || true
```
34 changes: 0 additions & 34 deletions jsonnet/components/cluster-monitoring-operator.libsonnet
```diff
@@ -197,21 +197,6 @@ function(params) {
         resources: ['certificatesigningrequests/approval', 'certificatesigningrequests/status'],
         verbs: ['get', 'list', 'watch'],
       },
-      // The operator needs these permissions to cordon nodes when rebalancing
-      // pods.
-      {
-        apiGroups: [''],
-        resources: ['nodes'],
-        verbs: ['get', 'list', 'update', 'patch'],
-      },
-      // The operator needs to get PersistentVolumes to know their storage
-      // topology. Based on that information, it will only delete PVCs attached
-      // to volumes with a zonal topology when rebalancing pods.
-      {
-        apiGroups: [''],
-        resources: ['persistentvolumes'],
-        verbs: ['get'],
-      },
     ],
   },
 
@@ -252,25 +237,6 @@ function(params) {
         resources: ['events'],
         verbs: ['create', 'patch', 'update'],
       },
-      // The operator needs to be able to list pods related to a particular
-      // workload and delete them so that they can be rescheduled on a
-      // different node.
-      {
-        apiGroups: [''],
-        resources: ['pods'],
-        verbs: ['list', 'delete'],
-      },
-      // The operator needs to be able to delete PVCs to reschedule pods on
-      // different nodes because zonal persistent volumes can cause scheduling
-      // issues if not deleted beforehand.
-      // It also needs to watch and update PVCs since users are able to mark
-      // their PVCs for deletion and the operator needs to react upon that.
-      {
-        apiGroups: [''],
-        resources: ['persistentvolumeclaims'],
-        verbs: ['get', 'list', 'watch', 'update', 'delete'],
-      },
-
     ],
   },
 
```
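The rules removed above existed only for the pod-rebalancing feature being reverted: the node verbs let the operator cordon nodes, and `get` on persistentvolumes let it inspect storage topology before deleting zonal PVCs. A minimal, standalone sketch of what the node permissions enabled — cordoning via `spec.unschedulable` — assuming an in-cluster config and a hypothetical node name `worker-0`; this is illustrative, not the operator's actual rebalancing code:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// cordon marks a node unschedulable, which needs the "get" and "update"
// verbs on nodes that the removed rule granted.
func cordon(ctx context.Context, kclient kubernetes.Interface, nodeName string) error {
	// "get": read the current node object.
	node, err := kclient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// "update": persist the unschedulable flag so no new pods land here.
	node.Spec.Unschedulable = true
	_, err = kclient.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
	return err
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	kclient := kubernetes.NewForConfigOrDie(cfg)
	if err := cordon(context.Background(), kclient, "worker-0"); err != nil {
		fmt.Println("cordon failed:", err)
	}
}
```

The `update` verb covers this read-modify-write; the removed rule also granted `patch`, which allows the same change without a prior `get`.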
17 changes: 0 additions & 17 deletions manifests/0000_50_cluster-monitoring-operator_02-namespaced-cluster-role.yaml

```diff
@@ -60,20 +60,3 @@ rules:
   - create
   - patch
   - update
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  verbs:
-  - list
-  - delete
-- apiGroups:
-  - ""
-  resources:
-  - persistentvolumeclaims
-  verbs:
-  - get
-  - list
-  - watch
-  - update
-  - delete
```
15 changes: 0 additions & 15 deletions manifests/0000_50_cluster-monitoring-operator_02-role.yaml
```diff
@@ -114,21 +114,6 @@ rules:
   - get
   - list
   - watch
-- apiGroups:
-  - ""
-  resources:
-  - nodes
-  verbs:
-  - get
-  - list
-  - update
-  - patch
-- apiGroups:
-  - ""
-  resources:
-  - persistentvolumes
-  verbs:
-  - get
 - apiGroups:
   - authentication.k8s.io
   resources:
```
82 changes: 10 additions & 72 deletions pkg/client/client.go
```diff
@@ -76,7 +76,7 @@ type Client struct {
 	aggclient aggregatorclient.Interface
 }
 
-func NewForConfig(cfg *rest.Config, version string, namespace, userWorkloadNamespace string) (*Client, error) {
+func New(ctx context.Context, cfg *rest.Config, version string, namespace, userWorkloadNamespace string) (*Client, error) {
 	mclient, err := monitoring.NewForConfig(cfg)
 	if err != nil {
 		return nil, err
@@ -117,76 +117,18 @@ func NewForConfig(cfg *rest.Config, version string, namespace, userWorkloadNames
 		return nil, errors.Wrap(err, "creating kubernetes aggregator")
 	}
 
-	return New(
-		version,
-		namespace,
-		userWorkloadNamespace,
-		KubernetesClient(kclient),
-		OpenshiftConfigClient(oscclient),
-		OpenshiftSecurityClient(ossclient),
-		OpenshiftRouteClient(osrclient),
-		MonitoringClient(mclient),
-		ApiExtensionsClient(eclient),
-		AggregatorClient(aggclient),
-	), nil
-}
-
-type Option = func(*Client)
-
-func KubernetesClient(kclient kubernetes.Interface) Option {
-	return func(c *Client) {
-		c.kclient = kclient
-	}
-}
-
-func OpenshiftConfigClient(oscclient openshiftconfigclientset.Interface) Option {
-	return func(c *Client) {
-		c.oscclient = oscclient
-	}
-}
-
-func OpenshiftSecurityClient(ossclient openshiftsecurityclientset.Interface) Option {
-	return func(c *Client) {
-		c.ossclient = ossclient
-	}
-}
-
-func OpenshiftRouteClient(osrclient openshiftrouteclientset.Interface) Option {
-	return func(c *Client) {
-		c.osrclient = osrclient
-	}
-}
-
-func MonitoringClient(mclient monitoring.Interface) Option {
-	return func(c *Client) {
-		c.mclient = mclient
-	}
-}
-
-func ApiExtensionsClient(eclient apiextensionsclient.Interface) Option {
-	return func(c *Client) {
-		c.eclient = eclient
-	}
-}
-
-func AggregatorClient(aggclient aggregatorclient.Interface) Option {
-	return func(c *Client) {
-		c.aggclient = aggclient
-	}
-}
-
-func New(version string, namespace, userWorkloadNamespace string, options ...Option) *Client {
-	c := &Client{
+	return &Client{
 		version:               version,
 		namespace:             namespace,
 		userWorkloadNamespace: userWorkloadNamespace,
-	}
-
-	for _, opt := range options {
-		opt(c)
-	}
-
-	return c
+		kclient:               kclient,
+		oscclient:             oscclient,
+		ossclient:             ossclient,
+		osrclient:             osrclient,
+		mclient:               mclient,
+		eclient:               eclient,
+		aggclient:             aggclient,
+	}, nil
 }
 
 func (c *Client) KubernetesInterface() kubernetes.Interface {
@@ -209,10 +151,6 @@ func (c *Client) SecretListWatchForNamespace(ns string) *cache.ListWatch {
 	return cache.NewListWatchFromClient(c.kclient.CoreV1().RESTClient(), "secrets", ns, fields.Everything())
 }
 
-func (c *Client) PersistentVolumeClaimListWatchForNamespace(ns string) *cache.ListWatch {
-	return cache.NewListWatchFromClient(c.kclient.CoreV1().RESTClient(), "persistentvolumeclaims", ns, fields.Everything())
-}
-
 func (c *Client) InfrastructureListWatchForResource(ctx context.Context, resource string) *cache.ListWatch {
 	infrastructure := c.oscclient.ConfigV1().Infrastructures()
 
```
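The revert drops the functional-options constructor (`New(version, namespace, userWorkloadNamespace string, options ...Option)`) in favor of the original direct constructor that builds every clientset inline from a `*rest.Config`. A minimal, standalone sketch of the pattern being removed, with simplified stand-in types (`Client`, `WithKubernetesClient`, and the version string here are illustrative, not this package's identifiers):

```go
package main

import "fmt"

// Client holds injected dependencies; zero values mean "not configured".
type Client struct {
	version string
	kclient string // stand-in for kubernetes.Interface
}

// Option mutates a Client under construction, mirroring the removed
// `type Option = func(*Client)` alias.
type Option func(*Client)

// WithKubernetesClient injects a Kubernetes client, as the removed
// KubernetesClient(...) option did.
func WithKubernetesClient(k string) Option {
	return func(c *Client) { c.kclient = k }
}

// New applies options in order, as the removed variadic constructor did.
func New(version string, options ...Option) *Client {
	c := &Client{version: version}
	for _, opt := range options {
		opt(c)
	}
	return c
}

func main() {
	// Tests could swap in a fake for exactly one dependency.
	c := New("4.9.0", WithKubernetesClient("fake-clientset"))
	fmt.Println(c.version, c.kclient)
}
```

The main benefit of the variadic form — injecting a single fake dependency in tests — goes away with the revert; callers return to a constructor that always dials real clients.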
121 changes: 54 additions & 67 deletions pkg/client/status_reporter.go
```diff
@@ -67,64 +67,24 @@ func (r *StatusReporter) relatedObjects() []v1.ObjectReference {
 	}
 }
 
-func (r *StatusReporter) Get(ctx context.Context) (*v1.ClusterOperator, error) {
-	return r.client.Get(ctx, r.clusterOperatorName, metav1.GetOptions{})
-}
-
-func (r *StatusReporter) Create(ctx context.Context, co *v1.ClusterOperator) (*v1.ClusterOperator, error) {
-	return r.client.Create(ctx, co, metav1.CreateOptions{})
-}
-
-func (r *StatusReporter) getOrCreateClusterOperator(ctx context.Context) (*v1.ClusterOperator, error) {
-	co, err := r.Get(ctx)
+func (r *StatusReporter) SetDone(ctx context.Context, degradedConditionMessage string, degradedConditionReason string) error {
+	co, err := r.client.Get(ctx, r.clusterOperatorName, metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
 		co = r.newClusterOperator()
-		co, err = r.Create(ctx, co)
-	}
-	if err != nil {
-		return nil, err
-	}
-	return co, nil
-}
-
-func (r *StatusReporter) newClusterOperator() *v1.ClusterOperator {
-	time := metav1.Now()
-	co := &v1.ClusterOperator{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: "config.openshift.io/v1",
-			Kind:       "ClusterOperator",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: r.clusterOperatorName,
-		},
-		Spec:   v1.ClusterOperatorSpec{},
-		Status: v1.ClusterOperatorStatus{},
+		co, err = r.client.Create(ctx, co, metav1.CreateOptions{})
 	}
-	co.Status.RelatedObjects = r.relatedObjects()
-	co.Status.Conditions = newConditions(co.Status, r.version, time).entries()
-
-	return co
-}
-
-func (r *StatusReporter) setConditions(ctx context.Context, co *v1.ClusterOperator, conditions *conditions) error {
-	co.Status.Conditions = conditions.entries()
-	co.Status.RelatedObjects = r.relatedObjects()
-
-	_, err := r.client.UpdateStatus(ctx, co, metav1.UpdateOptions{})
-	return err
-}
-
-func (r *StatusReporter) SetRollOutDone(ctx context.Context, degradedConditionMessage string, degradedConditionReason string) error {
-	co, err := r.getOrCreateClusterOperator(ctx)
-	if err != nil {
+	if err != nil && !apierrors.IsNotFound(err) {
 		return err
 	}
 
 	time := metav1.Now()
 
 	conditions := newConditions(co.Status, r.version, time)
 	conditions.setCondition(v1.OperatorAvailable, v1.ConditionTrue, "Successfully rolled out the stack.", "RollOutDone", time)
 	conditions.setCondition(v1.OperatorProgressing, v1.ConditionFalse, "", "", time)
 	conditions.setCondition(v1.OperatorDegraded, v1.ConditionFalse, degradedConditionMessage, degradedConditionReason, time)
+	conditions.setCondition(v1.OperatorUpgradeable, v1.ConditionTrue, "", asExpectedReason, time)
+	co.Status.Conditions = conditions.entries()
 
 	// If we have reached "level" for the operator, report that we are at the version
 	// injected into us during update. We require that all components be rolled out
@@ -140,32 +100,50 @@ func (r *StatusReporter) SetRollOutDone(ctx context.Context, degradedConditionMe
 		co.Status.Versions = nil
 	}
 
-	return r.setConditions(ctx, co, conditions)
+	_, err = r.client.UpdateStatus(ctx, co, metav1.UpdateOptions{})
+	return err
 }
 
-// SetRollOutInProgress sets the OperatorProgressing condition to true, either:
+// SetInProgress sets the OperatorProgressing condition to true, either:
// 1. If there has been no previous status yet
 // 2. If the previous ClusterOperator OperatorAvailable condition was false
 //
 // This will ensure that the progressing state will be only set initially or in case of failure.
 // Once controller operator versions are available, an additional check will be introduced that toggles
 // the OperatorProgressing state in case of version upgrades.
-func (r *StatusReporter) SetRollOutInProgress(ctx context.Context) error {
-	co, err := r.getOrCreateClusterOperator(ctx)
-	if err != nil {
+func (r *StatusReporter) SetInProgress(ctx context.Context) error {
+	co, err := r.client.Get(ctx, r.clusterOperatorName, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		co = r.newClusterOperator()
+		co, err = r.client.Create(ctx, co, metav1.CreateOptions{})
+	}
+	if err != nil && !apierrors.IsNotFound(err) {
 		return err
 	}
 
 	time := metav1.Now()
-	conditions := newConditions(co.Status, r.version, metav1.Now())
-	conditions.setCondition(v1.OperatorProgressing, v1.ConditionTrue, "Rolling out the stack.", "RollOutInProgress", time)
+	reasonInProgress := "RollOutInProgress"
+	conditions := newConditions(co.Status, r.version, time)
+	conditions.setCondition(v1.OperatorProgressing, v1.ConditionTrue, "Rolling out the stack.", reasonInProgress, time)
+	conditions.setCondition(v1.OperatorUpgradeable, v1.ConditionTrue, "", asExpectedReason, time)
+	co.Status.Conditions = conditions.entries()
+	co.Status.RelatedObjects = r.relatedObjects()
+
+	_, err = r.client.UpdateStatus(ctx, co, metav1.UpdateOptions{})
+	return err
 }
 
-	return r.setConditions(ctx, co, conditions)
+func (r *StatusReporter) Get(ctx context.Context) (*v1.ClusterOperator, error) {
+	return r.client.Get(ctx, r.clusterOperatorName, metav1.GetOptions{})
+}
 
 func (r *StatusReporter) SetFailed(ctx context.Context, statusErr error, reason string) error {
-	co, err := r.getOrCreateClusterOperator(ctx)
-	if err != nil {
+	co, err := r.client.Get(ctx, r.clusterOperatorName, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		co = r.newClusterOperator()
+		co, err = r.client.Create(ctx, co, metav1.CreateOptions{})
+	}
+	if err != nil && !apierrors.IsNotFound(err) {
 		return err
 	}
 
@@ -177,19 +155,28 @@ func (r *StatusReporter) SetFailed(ctx context.Context, statusErr error, reason
 	conditions.setCondition(v1.OperatorAvailable, v1.ConditionFalse, unavailableMessage, reason, time)
 	conditions.setCondition(v1.OperatorProgressing, v1.ConditionFalse, unavailableMessage, reason, time)
 	conditions.setCondition(v1.OperatorDegraded, v1.ConditionTrue, fmt.Sprintf("Failed to rollout the stack. Error: %v", statusErr), reason, time)
+	conditions.setCondition(v1.OperatorUpgradeable, v1.ConditionTrue, "", asExpectedReason, time)
+	co.Status.Conditions = conditions.entries()
 
-	return r.setConditions(ctx, co, conditions)
+	_, err = r.client.UpdateStatus(ctx, co, metav1.UpdateOptions{})
+	return err
 }
 
-func (r *StatusReporter) SetUpgradeable(ctx context.Context, cond v1.ConditionStatus, message, reason string) error {
-	co, err := r.getOrCreateClusterOperator(ctx)
-	if err != nil {
-		return err
-	}
-
+func (r *StatusReporter) newClusterOperator() *v1.ClusterOperator {
 	time := metav1.Now()
-	conditions := newConditions(co.Status, r.version, metav1.Now())
-	conditions.setCondition(v1.OperatorUpgradeable, cond, message, reason, time)
+	co := &v1.ClusterOperator{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "config.openshift.io/v1",
+			Kind:       "ClusterOperator",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: r.clusterOperatorName,
+		},
+		Spec:   v1.ClusterOperatorSpec{},
+		Status: v1.ClusterOperatorStatus{},
+	}
+	co.Status.RelatedObjects = r.relatedObjects()
+	co.Status.Conditions = newConditions(co.Status, r.version, time).entries()
 
-	return r.setConditions(ctx, co, conditions)
+	return co
 }
```
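The restored surface is three transition methods plus `Get`. A standalone sketch of the call order a sync loop might use — `printReporter`, `sync`, and the reason string `"UpdatingConfigurationFailed"` are hypothetical stand-ins, not code from this repository:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// statusReporter mirrors the three methods restored by this revert; the real
// implementation is *StatusReporter in pkg/client/status_reporter.go.
type statusReporter interface {
	SetInProgress(ctx context.Context) error
	SetDone(ctx context.Context, degradedMessage, degradedReason string) error
	SetFailed(ctx context.Context, statusErr error, reason string) error
}

// printReporter is a stand-in that just logs the condition transitions.
type printReporter struct{}

func (printReporter) SetInProgress(context.Context) error {
	fmt.Println("Progressing=true")
	return nil
}

func (printReporter) SetDone(_ context.Context, msg, reason string) error {
	fmt.Printf("Available=true Progressing=false Degraded=false (%s/%s)\n", reason, msg)
	return nil
}

func (printReporter) SetFailed(_ context.Context, err error, reason string) error {
	fmt.Printf("Degraded=true (%s): %v\n", reason, err)
	return nil
}

// sync sketches the order a caller would use: InProgress before reconciling,
// then Done on success or Failed with a reason on error.
func sync(ctx context.Context, r statusReporter, reconcile func() error) error {
	if err := r.SetInProgress(ctx); err != nil {
		return err
	}
	if err := reconcile(); err != nil {
		_ = r.SetFailed(ctx, err, "UpdatingConfigurationFailed")
		return err
	}
	return r.SetDone(ctx, "", "AsExpected")
}

func main() {
	_ = sync(context.Background(), printReporter{}, func() error { return nil })
	_ = sync(context.Background(), printReporter{}, func() error { return errors.New("boom") })
}
```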