
Commit

fix: testing fixes
ramantehlan committed Jun 27, 2024
1 parent d8ec9de commit 34e6d96
Showing 27 changed files with 389 additions and 254 deletions.
5 changes: 5 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,5 @@
{
  "githubPullRequests.ignoredPullRequestBranches": [
    "main"
  ]
}
11 changes: 9 additions & 2 deletions README.md
@@ -74,7 +74,7 @@ We will build and publish our resolver changes.
1. Go into resolver directory.
2. Run build and publish command.
```bash
-make docker-build-resolver docker-publish-resolver IMG=ramantehlan/elasti-resolver:latest
+make docker-buildx-resolver IMG=ramantehlan/elasti-resolver:v1alpha1
```

### Build Operator
@@ -84,14 +84,21 @@ We will build and publish our Operator changes.
1. Go into operator directory.
2. Run build and publish command.
```bash
-make docker-build-resolver docker-publish-resolver IMG=ramantehlan/elasti-resolver:latest
+make docker-buildx IMG=ramantehlan/elasti-operator:v1alpha1
```

> Once your changes are published, you can re-deploy in your cluster.
# Configuration
TBA

# Playground

```
make docker-build docker-publish IMG=localhost:5001/elasti-operator:v1alpha1
make docker-build docker-publish IMG=localhost:5001/elasti-resolver:v1alpha1
```

# Icon

The icon is <a href="https://www.flaticon.com/free-icons/full-screen" title="full-screen icons">Full-screen icon created by Uniconlabs - Flaticon</a>.
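Following the re-deploy note above, one possible flow after publishing new images (a sketch: it assumes the top-level `install.yaml` from this repo and the conventional kubebuilder Deployment name that also appears in `install.yaml` below):

```bash
# Re-apply the top-level manifest so the cluster references the updated images
kubectl apply -f install.yaml

# Restart the operator so it pulls the freshly published image
kubectl rollout restart deployment/elasti-operator-controller-manager \
  -n elasti-operator-system
```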
23 changes: 23 additions & 0 deletions demo-elastiService.yaml
@@ -0,0 +1,23 @@
apiVersion: elasti.truefoundry.com/v1alpha1
kind: ElastiService
metadata:
  labels:
    app.kubernetes.io/name: elasti-operator
    app.kubernetes.io/managed-by: kustomize
  name: emotion-class
  namespace: raman-ws
spec:
  queueTimeout: 4
  idlePeriod: 20
  service: emotion-class-svc
  deploymentName: emotion-class-svc
  minTargetReplicas: 1
  # scaleTargetRef:
  #   apiVersion: apps/v1
  #   kind: Rollouts
  #   name: emotion-class-svc
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployments
    name: emotion-class-svc
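A manifest like the one above would typically be exercised as follows (a sketch: the names come from the demo file, and the `elastiservices`/`elastiservice` resource names are assumptions about how the CRD is registered):

```bash
# Create the demo ElastiService custom resource
kubectl apply -f demo-elastiService.yaml

# Confirm the object exists and inspect its spec, including minTargetReplicas
kubectl get elastiservices -n raman-ws
kubectl describe elastiservice emotion-class -n raman-ws
```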

39 changes: 37 additions & 2 deletions install.yaml
@@ -100,6 +100,15 @@ metadata:
name: elasti-operator-controller-manager
namespace: elasti-operator-system
---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/name: elasti-resolver
+  name: elasti-resolver
+  namespace: elasti
+---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@@ -156,6 +165,16 @@ rules:
   - watch
   - update
   - patch
+- apiGroups:
+  - apps
+  resources:
+  - rollouts
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+  - patch
 - apiGroups:
   - discovery.k8s.io
   resources:
@@ -297,7 +316,7 @@ roleRef:
   name: elasti-operator-leader-election-role
 subjects:
 - kind: ServiceAccount
-  name: elasti-operator-controller-manager
+  name: elasti-resolver
   namespace: elasti-operator-system
---
apiVersion: rbac.authorization.k8s.io/v1
@@ -318,6 +337,22 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/name: elasti-operator
+  name: elasti-resolver-additional-access-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: elasti-operator-additional-access
+subjects:
+- kind: ServiceAccount
+  name: elasti-operator-controller-manager
+  namespace: elasti
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
   labels:
     app.kubernetes.io/managed-by: kustomize
@@ -484,7 +519,7 @@ spec:
     spec:
       containers:
       - name: playground
-        image: ramantehlan/elasti-resolver:latest
+        image: ramantehlan/elasti-resolver:v1alpha1
         env:
         - name: SYSTEM_NAMESPACE
           value: elasti
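After applying the updated manifest, the new `rollouts` rule can be sanity-checked with an impersonated access review (a sketch: the subject matches the ClusterRoleBinding added above, assuming the rule lands in a role bound to it):

```bash
# Ask the API server whether the bound service account may list rollouts
kubectl auth can-i list rollouts.apps \
  --as=system:serviceaccount:elasti:elasti-operator-controller-manager
```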
5 changes: 3 additions & 2 deletions operator/Makefile
@@ -103,14 +103,14 @@ docker-push: ## Push docker image with the manager.
# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:<tag>> then the export will fail)
# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
-PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
+PLATFORMS ?= linux/arm64,linux/amd64 #,linux/s390x,linux/ppc64le
.PHONY: docker-buildx
docker-buildx: ## Build and push docker image for the manager for cross-platform support
# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
- $(CONTAINER_TOOL) buildx create --name project-v3-builder
$(CONTAINER_TOOL) buildx use project-v3-builder
-	- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
+	- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross ../
- $(CONTAINER_TOOL) buildx rm project-v3-builder
rm Dockerfile.cross

@@ -141,6 +141,7 @@ deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in

.PHONY: undeploy
undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	# $(KUBECTL) delete --ignore-not-found=true -f ../playground/config/watch-crd.yaml
$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -

##@ Dependencies
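Since `docker-buildx` relies on a working Buildx/BuildKit setup (see the comments in the target above), a quick preflight check might look like this (a sketch):

```bash
# Confirm Buildx is installed and list the available builders
docker buildx version
docker buildx ls
```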
2 changes: 1 addition & 1 deletion operator/README.md
@@ -21,7 +21,7 @@ make docker-build docker-push IMG=<some-registry>/elasti-operator:tag

**NOTE:** This image ought to be published in the personal registry you specified.
And it is required to have access to pull the image from the working environment.
-Make sure you have the proper permission to the registry if the above commands dont work.
+Make sure you have the proper permission to the registry if the above commands don't work.

**Install the CRDs into the cluster:**

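If the push is rejected with a permission error, authenticating against the registry first usually resolves it (a sketch: `<some-registry>` is the placeholder used in this README):

```bash
# Authenticate so docker-push can write to your personal registry
docker login <some-registry>
```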
12 changes: 6 additions & 6 deletions operator/api/v1alpha1/elastiservice_types.go
@@ -31,12 +31,12 @@ const (
type ElastiServiceSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
-	ScaleTargetRef     ScaleTargetRef `json:"scaleTargetRef,omitempty"`
-	Service            string         `json:"service,omitempty"`
-	DeploymentName     string         `json:"deploymentName,omitempty"`
-	QTimout            int32          `json:"queueTimeout,omitempty"`
-	IdlePeriod         int32          `json:"idlePeriod,omitempty"`
-	ServeReplicasCount int32          `json:"minReplicas,omitempty"`
+	ScaleTargetRef    ScaleTargetRef `json:"scaleTargetRef,omitempty"`
+	Service           string         `json:"service,omitempty"`
+	DeploymentName    string         `json:"deploymentName,omitempty"`
+	QTimout           int32          `json:"queueTimeout,omitempty"`
+	IdlePeriod        int32          `json:"idlePeriod,omitempty"`
+	MinTargetReplicas int32          `json:"minTargetReplicas,omitempty"`
}

type ScaleTargetRef struct {
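Because the spec field is renamed (`ServeReplicasCount`/`minReplicas` becomes `MinTargetReplicas`/`minTargetReplicas`), existing objects need to be updated to the new key; reading it back could look like this (a sketch reusing the names from the demo manifest above):

```bash
# Read the renamed field from the demo object
kubectl get elastiservice emotion-class -n raman-ws \
  -o jsonpath='{.spec.minTargetReplicas}'
```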
16 changes: 16 additions & 0 deletions operator/api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

@@ -44,7 +44,7 @@ spec:
       idlePeriod:
         format: int32
         type: integer
-      minReplicas:
+      minTargetReplicas:
         format: int32
         type: integer
       queueTimeout:
4 changes: 2 additions & 2 deletions operator/config/manager/kustomization.yaml
@@ -5,5 +5,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
 images:
 - name: controller
-  newName: localhost:5001/elasti-operator
-  newTag: v1alpha1
+  newName: controller
+  newTag: latest
2 changes: 1 addition & 1 deletion operator/config/manager/manager.yaml
@@ -64,7 +64,7 @@ spec:
- --leader-elect
- --health-probe-bind-address=:8081
- --metrics-bind-address=0
-        image: controller:latest
+        image: localhost:5001/elasti-operator:v1alpha1
name: manager
ports:
- containerPort: 8013
57 changes: 36 additions & 21 deletions operator/internal/controller/elastiservice_controller.go
@@ -24,21 +24,19 @@ import (
)

type (
-	RunReconcileFunc func(ctx context.Context, req ctrl.Request, mode string) (res ctrl.Result, err error)
+	SwitchModeFunc func(ctx context.Context, req ctrl.Request, mode string) (res ctrl.Result, err error)
 	ElastiServiceReconciler struct {
 		client.Client
-		Scheme *kRuntime.Scheme
-		Logger *zap.Logger
-		Informer *informer.Manager
-		RunReconcileLocks sync.Map
-		WatcherStartLock sync.Map
+		Scheme *kRuntime.Scheme
+		Logger *zap.Logger
+		Informer *informer.Manager
+		SwitchModeLocks sync.Map
+		InformerStartLocks sync.Map
+		ReconcileLocks sync.Map
 	}
}
)

const (
-	ServeMode = "serve"
-	ProxyMode = "proxy"
-	NullMode = ""

// These are resolver details, ideally in future we can move this to a configmap, or find a better way to serve this
resolverNamespace = "elasti"
Expand Down Expand Up @@ -69,6 +67,13 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
r.Logger.Error("Panic stack trace", zap.ByteString("stacktrace", buf[:n]))
}
}()

+	r.Logger.Debug("- In Reconcile", zap.String("es", req.NamespacedName.String()))
+	mutex := r.getMutexForReconcile(req.NamespacedName.String())
+	mutex.Lock()
+	defer r.Logger.Debug("- Out of Reconcile", zap.String("es", req.NamespacedName.String()))
+	defer mutex.Unlock()

// First we get the ElastiService object
// No mutex is taken for this, as we are not modifying the object, but if we face issues in future, we can add a mutex
es, esErr := r.getCRD(ctx, req.NamespacedName)
@@ -80,12 +85,6 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
r.Logger.Error("Failed to get ElastiService in Reconcile", zap.String("es", req.String()), zap.Error(esErr))
return res, esErr
}
-	// We add the CRD details to service directory, so when elasti server received a request,
-	// we can find the right resource to scale up
-	crdDirectory.CRDDirectory.AddCRD(es.Spec.Service, &crdDirectory.CRDDetails{
-		CRDName:        es.Name,
-		DeploymentName: es.Spec.DeploymentName,
-	})

// If the ElastiService is being deleted, we need to clean up the resources
if !es.ObjectMeta.DeletionTimestamp.IsZero() {
@@ -103,25 +102,30 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return res, nil
}

// We check if the CRD is being deleted, and if it is, we clean up the resources
// We also check if the CRD has finalizer, and if not, we add the finalizer
if err := r.checkFinalizerCRD(ctx, es, req); err != nil {
r.Logger.Error("Failed to finalize CRD", zap.String("es", req.String()), zap.Error(err))
return res, err
}

+	// We add the CRD details to service directory, so when elasti server received a request,
+	// we can find the right resource to scale up
+	crdDirectory.CRDDirectory.AddCRD(es.Spec.Service, &crdDirectory.CRDDetails{
+		CRDName: es.Name,
+		Spec:    es.Spec,
+	})

// We need to start the informer only once per CRD. This is to avoid multiple informers for the same CRD
// We reset mutex if crd is deleted, so it can be used again if the same CRD is reapplied
r.getMutexForInformerStart(req.NamespacedName.String()).Do(func() {
-		// Watch for changes in target deployment
-		//go r.Informer.AddDeploymentWatch(req, es.Spec.DeploymentName, req.Namespace, r.getTargetDeploymentChangeHandler(ctx, es, req))
-		// Watch for changes in ScaleTargetRef
targetGroup, targetVersion, err := utils.ParseAPIVersion(es.Spec.ScaleTargetRef.APIVersion)
if err != nil {
r.Logger.Error("Failed to parse API version", zap.String("APIVersion", es.Spec.ScaleTargetRef.APIVersion), zap.Error(err))
return
}
-		go r.Informer.Add(&informer.RequestWatch{
+
+		// Watch for changes in ScaleTargetRef
+		r.Informer.Add(&informer.RequestWatch{
Req: req,
ResourceName: es.Spec.ScaleTargetRef.Name,
ResourceNamespace: req.Namespace,
Expand All @@ -132,8 +136,9 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
},
Handlers: r.getScaleTargetRefChangeHandler(ctx, es, req),
})

 		// Watch for changes in public service
-		go r.Informer.Add(&informer.RequestWatch{
+		r.Informer.Add(&informer.RequestWatch{
Req: req,
ResourceName: es.Spec.Service,
ResourceNamespace: es.Namespace,
Expand All @@ -144,6 +149,11 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
},
Handlers: r.getPublicServiceChangeHandler(ctx, es, req),
})

+		r.Logger.Info("ScaleTargetRef and Public Service added to informer", zap.String("es", req.String()),
+			zap.String("scaleTargetRef", es.Spec.ScaleTargetRef.Name),
+			zap.String("public service", es.Spec.Service),
+		)
})

return res, nil
@@ -154,3 +164,8 @@ func (r *ElastiServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {
For(&v1alpha1.ElastiService{}).
Complete(r)
}

+func (r *ElastiServiceReconciler) getMutexForReconcile(key string) *sync.Mutex {
+	l, _ := r.ReconcileLocks.LoadOrStore(key, &sync.Mutex{})
+	return l.(*sync.Mutex)
+}
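With the new per-CRD mutex and the "- In Reconcile"/"- Out of Reconcile" debug logs, the serialized reconciles can be observed directly in the operator logs (a sketch: the Deployment name and namespace are taken from `install.yaml` above, and it assumes debug-level logging is enabled):

```bash
# Follow the operator logs and watch the reconcile enter/exit pairs
kubectl logs -n elasti-operator-system \
  deploy/elasti-operator-controller-manager -f | grep "Reconcile"
```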
