diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..b242572e
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,5 @@
+{
+ "githubPullRequests.ignoredPullRequestBranches": [
+ "main"
+ ]
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index 7e26346c..a6844a57 100644
--- a/README.md
+++ b/README.md
@@ -74,7 +74,7 @@ We will build and publish our resolver changes.
1. Go into resolver directory.
2. Run build and publish command.
```bash
-make docker-build-resolver docker-publish-resolver IMG=ramantehlan/elasti-resolver:latest
+make docker-buildx IMG=ramantehlan/elasti-resolver:v1alpha1
```
### Build Operator
@@ -84,7 +84,7 @@ We will build and publish our Operator changes.
1. Go into operator directory.
2. Run build and publish command.
```bash
-make docker-build-resolver docker-publish-resolver IMG=ramantehlan/elasti-resolver:latest
+make docker-buildx IMG=ramantehlan/elasti-operator:v1alpha1
```
> Once your changes are published, you can re-deploy in your cluster.
@@ -92,6 +92,13 @@ make docker-build-resolver docker-publish-resolver IMG=ramantehlan/elasti-resolv
# Configuration
TBA
+# Playground
+
+Build and push the operator and resolver images to the local playground registry (run each command from its respective directory):
+
+```bash
+make docker-build docker-push IMG=localhost:5001/elasti-operator:v1alpha1
+make docker-build docker-push IMG=localhost:5001/elasti-resolver:v1alpha1
+```
+
# Icon
The icon is Full-screen icon created by Uniconlabs - Flaticon.
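The Playground section above assumes an image registry is reachable at localhost:5001. A minimal sketch for running one locally; the container name `kind-registry` is an assumption (e.g. for a kind-based playground cluster), not something this repo provides:

```bash
# Hypothetical local registry for the playground; adjust the name/port to your setup.
docker run -d --restart=always -p 5001:5000 --name kind-registry registry:2
```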
diff --git a/demo-elastiService.yaml b/demo-elastiService.yaml
new file mode 100644
index 00000000..9e888d82
--- /dev/null
+++ b/demo-elastiService.yaml
@@ -0,0 +1,23 @@
+apiVersion: elasti.truefoundry.com/v1alpha1
+kind: ElastiService
+metadata:
+ labels:
+ app.kubernetes.io/name: elasti-operator
+ app.kubernetes.io/managed-by: kustomize
+ name: emotion-class
+ namespace: raman-ws
+spec:
+ queueTimeout: 4
+ idlePeriod: 20
+ service: emotion-class-svc
+ deploymentName: emotion-class-svc
+ minTargetReplicas: 1
+ # scaleTargetRef:
+ # apiVersion: argoproj.io/v1alpha1
+ # kind: Rollout
+ # name: emotion-class-svc
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployments
+ name: emotion-class-svc
+
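To exercise this demo CR, assuming the raman-ws namespace and the emotion-class-svc service and deployment already exist in the cluster:

```bash
kubectl apply -f demo-elastiService.yaml
kubectl get elastiservices -n raman-ws
```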
diff --git a/install.yaml b/install.yaml
index f9d3cb4b..c2937169 100644
--- a/install.yaml
+++ b/install.yaml
@@ -100,6 +100,15 @@ metadata:
name: elasti-operator-controller-manager
namespace: elasti-operator-system
---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: elasti-resolver
+ name: elasti-resolver
+ namespace: elasti
+---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@@ -156,6 +165,16 @@ rules:
- watch
- update
- patch
+- apiGroups:
+ - argoproj.io
+ resources:
+ - rollouts
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
- apiGroups:
- discovery.k8s.io
resources:
@@ -297,7 +316,7 @@ roleRef:
name: elasti-operator-leader-election-role
subjects:
- kind: ServiceAccount
name: elasti-operator-controller-manager
namespace: elasti-operator-system
---
apiVersion: rbac.authorization.k8s.io/v1
@@ -318,6 +337,22 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: elasti-operator
+ name: elasti-resolver-additional-access-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: elasti-operator-additional-access
+subjects:
+- kind: ServiceAccount
+ name: elasti-resolver
+ namespace: elasti
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/managed-by: kustomize
@@ -484,7 +519,7 @@ spec:
spec:
containers:
- name: playground
- image: ramantehlan/elasti-resolver:latest
+ image: ramantehlan/elasti-resolver:v1alpha1
env:
- name: SYSTEM_NAMESPACE
value: elasti
diff --git a/operator/Makefile b/operator/Makefile
index ffdabc5a..dcfd9222 100644
--- a/operator/Makefile
+++ b/operator/Makefile
@@ -103,14 +103,14 @@ docker-push: ## Push docker image with the manager.
# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail)
# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
-PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
+PLATFORMS ?= linux/arm64,linux/amd64 #,linux/s390x,linux/ppc64le
.PHONY: docker-buildx
docker-buildx: ## Build and push docker image for the manager for cross-platform support
# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
- $(CONTAINER_TOOL) buildx create --name project-v3-builder
$(CONTAINER_TOOL) buildx use project-v3-builder
- - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
+ - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross ../
- $(CONTAINER_TOOL) buildx rm project-v3-builder
rm Dockerfile.cross
@@ -141,6 +141,7 @@ deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in
.PHONY: undeploy
undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+ # $(KUBECTL) delete --ignore-not-found=true -f ../playground/config/watch-crd.yaml
$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
##@ Dependencies
diff --git a/operator/README.md b/operator/README.md
index 849dc8dc..a20c39f5 100644
--- a/operator/README.md
+++ b/operator/README.md
@@ -21,7 +21,7 @@ make docker-build docker-push IMG=/elasti-operator:tag
**NOTE:** This image ought to be published in the personal registry you specified.
And it is required to have access to pull the image from the working environment.
-Make sure you have the proper permission to the registry if the above commands don’t work.
+Make sure you have the proper permission to the registry if the above commands don't work.
**Install the CRDs into the cluster:**
diff --git a/operator/api/v1alpha1/elastiservice_types.go b/operator/api/v1alpha1/elastiservice_types.go
index 96be0f52..437a1b08 100644
--- a/operator/api/v1alpha1/elastiservice_types.go
+++ b/operator/api/v1alpha1/elastiservice_types.go
@@ -31,12 +31,12 @@ const (
type ElastiServiceSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
- ScaleTargetRef ScaleTargetRef `json:"scaleTargetRef,omitempty"`
- Service string `json:"service,omitempty"`
- DeploymentName string `json:"deploymentName,omitempty"`
- QTimout int32 `json:"queueTimeout,omitempty"`
- IdlePeriod int32 `json:"idlePeriod,omitempty"`
- ServeReplicasCount int32 `json:"minReplicas,omitempty"`
+ ScaleTargetRef ScaleTargetRef `json:"scaleTargetRef,omitempty"`
+ Service string `json:"service,omitempty"`
+ DeploymentName string `json:"deploymentName,omitempty"`
+ QTimout int32 `json:"queueTimeout,omitempty"`
+ IdlePeriod int32 `json:"idlePeriod,omitempty"`
+ MinTargetReplicas int32 `json:"minTargetReplicas,omitempty"`
}
type ScaleTargetRef struct {
diff --git a/operator/api/v1alpha1/zz_generated.deepcopy.go b/operator/api/v1alpha1/zz_generated.deepcopy.go
index 0a466bb4..fa50636b 100644
--- a/operator/api/v1alpha1/zz_generated.deepcopy.go
+++ b/operator/api/v1alpha1/zz_generated.deepcopy.go
@@ -86,6 +86,7 @@ func (in *ElastiServiceList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ElastiServiceSpec) DeepCopyInto(out *ElastiServiceSpec) {
*out = *in
+ out.ScaleTargetRef = in.ScaleTargetRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElastiServiceSpec.
@@ -113,3 +114,18 @@ func (in *ElastiServiceStatus) DeepCopy() *ElastiServiceStatus {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleTargetRef) DeepCopyInto(out *ScaleTargetRef) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleTargetRef.
+func (in *ScaleTargetRef) DeepCopy() *ScaleTargetRef {
+ if in == nil {
+ return nil
+ }
+ out := new(ScaleTargetRef)
+ in.DeepCopyInto(out)
+ return out
+}
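The deepcopy helpers above are autogenerated. After changing elastiservice_types.go they are typically refreshed with the standard kubebuilder targets (assuming this project keeps them, which the Makefile's `deploy: manifests kustomize` target suggests):

```bash
cd operator
make generate manifests
```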
diff --git a/operator/config/crd/bases/elasti.truefoundry.com_elastiservices.yaml b/operator/config/crd/bases/elasti.truefoundry.com_elastiservices.yaml
index 5921a037..c48a8968 100644
--- a/operator/config/crd/bases/elasti.truefoundry.com_elastiservices.yaml
+++ b/operator/config/crd/bases/elasti.truefoundry.com_elastiservices.yaml
@@ -44,7 +44,7 @@ spec:
idlePeriod:
format: int32
type: integer
- minReplicas:
+ minTargetReplicas:
format: int32
type: integer
queueTimeout:
diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml
index 471c7031..b4b702ee 100644
--- a/operator/config/manager/kustomization.yaml
+++ b/operator/config/manager/kustomization.yaml
@@ -5,5 +5,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
- newName: localhost:5001/elasti-operator
- newTag: v1alpha1
+ newName: controller
+ newTag: latest
diff --git a/operator/config/manager/manager.yaml b/operator/config/manager/manager.yaml
index 4d6764da..97f52b32 100644
--- a/operator/config/manager/manager.yaml
+++ b/operator/config/manager/manager.yaml
@@ -64,7 +64,7 @@ spec:
- --leader-elect
- --health-probe-bind-address=:8081
- --metrics-bind-address=0
- image: controller:latest
+ image: localhost:5001/elasti-operator:v1alpha1
name: manager
ports:
- containerPort: 8013
diff --git a/operator/internal/controller/elastiservice_controller.go b/operator/internal/controller/elastiservice_controller.go
index da8614d6..5c2331c2 100644
--- a/operator/internal/controller/elastiservice_controller.go
+++ b/operator/internal/controller/elastiservice_controller.go
@@ -24,21 +24,19 @@ import (
)
type (
- RunReconcileFunc func(ctx context.Context, req ctrl.Request, mode string) (res ctrl.Result, err error)
+ SwitchModeFunc func(ctx context.Context, req ctrl.Request, mode string) (res ctrl.Result, err error)
ElastiServiceReconciler struct {
client.Client
- Scheme *kRuntime.Scheme
- Logger *zap.Logger
- Informer *informer.Manager
- RunReconcileLocks sync.Map
- WatcherStartLock sync.Map
+ Scheme *kRuntime.Scheme
+ Logger *zap.Logger
+ Informer *informer.Manager
+ SwitchModeLocks sync.Map
+ InformerStartLocks sync.Map
+ ReconcileLocks sync.Map
}
)
const (
- ServeMode = "serve"
- ProxyMode = "proxy"
- NullMode = ""
// These are resolver details; ideally, in the future, we can move this to a configmap or find a better way to serve this
resolverNamespace = "elasti"
@@ -69,6 +67,13 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
r.Logger.Error("Panic stack trace", zap.ByteString("stacktrace", buf[:n]))
}
}()
+
+ r.Logger.Debug("- In Reconcile", zap.String("es", req.NamespacedName.String()))
+ mutex := r.getMutexForReconcile(req.NamespacedName.String())
+ mutex.Lock()
+ defer r.Logger.Debug("- Out of Reconcile", zap.String("es", req.NamespacedName.String()))
+ defer mutex.Unlock()
+
// First we get the ElastiService object
// No mutex is taken for this, as we are not modifying the object, but if we face issues in future, we can add a mutex
es, esErr := r.getCRD(ctx, req.NamespacedName)
@@ -80,12 +85,6 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
r.Logger.Error("Failed to get ElastiService in Reconcile", zap.String("es", req.String()), zap.Error(esErr))
return res, esErr
}
- // We add the CRD details to service directory, so when elasti server received a request,
- // we can find the right resource to scale up
- crdDirectory.CRDDirectory.AddCRD(es.Spec.Service, &crdDirectory.CRDDetails{
- CRDName: es.Name,
- DeploymentName: es.Spec.DeploymentName,
- })
// If the ElastiService is being deleted, we need to clean up the resources
if !es.ObjectMeta.DeletionTimestamp.IsZero() {
@@ -103,25 +102,30 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return res, nil
}
- // We check if the CRD is being deleted, and if it is, we clean up the resources
// We also check if the CRD has finalizer, and if not, we add the finalizer
if err := r.checkFinalizerCRD(ctx, es, req); err != nil {
r.Logger.Error("Failed to finalize CRD", zap.String("es", req.String()), zap.Error(err))
return res, err
}
+ // We add the CRD details to the service directory, so when the elasti server receives a request,
+ // we can find the right resource to scale up
+ crdDirectory.CRDDirectory.AddCRD(es.Spec.Service, &crdDirectory.CRDDetails{
+ CRDName: es.Name,
+ Spec: es.Spec,
+ })
+
// We need to start the informer only once per CRD. This is to avoid multiple informers for the same CRD
// We reset mutex if crd is deleted, so it can be used again if the same CRD is reapplied
r.getMutexForInformerStart(req.NamespacedName.String()).Do(func() {
- // Watch for changes in target deployment
- //go r.Informer.AddDeploymentWatch(req, es.Spec.DeploymentName, req.Namespace, r.getTargetDeploymentChangeHandler(ctx, es, req))
- // Watch for changes in ScaleTargetRef
targetGroup, targetVersion, err := utils.ParseAPIVersion(es.Spec.ScaleTargetRef.APIVersion)
if err != nil {
r.Logger.Error("Failed to parse API version", zap.String("APIVersion", es.Spec.ScaleTargetRef.APIVersion), zap.Error(err))
return
}
- go r.Informer.Add(&informer.RequestWatch{
+
+ // Watch for changes in ScaleTargetRef
+ r.Informer.Add(&informer.RequestWatch{
Req: req,
ResourceName: es.Spec.ScaleTargetRef.Name,
ResourceNamespace: req.Namespace,
@@ -132,8 +136,9 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
},
Handlers: r.getScaleTargetRefChangeHandler(ctx, es, req),
})
+
// Watch for changes in public service
- go r.Informer.Add(&informer.RequestWatch{
+ r.Informer.Add(&informer.RequestWatch{
Req: req,
ResourceName: es.Spec.Service,
ResourceNamespace: es.Namespace,
@@ -144,6 +149,11 @@ func (r *ElastiServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
},
Handlers: r.getPublicServiceChangeHandler(ctx, es, req),
})
+
+ r.Logger.Info("ScaleTargetRef and Public Service added to informer", zap.String("es", req.String()),
+ zap.String("scaleTargetRef", es.Spec.ScaleTargetRef.Name),
+ zap.String("public service", es.Spec.Service),
+ )
})
return res, nil
@@ -154,3 +164,8 @@ func (r *ElastiServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {
For(&v1alpha1.ElastiService{}).
Complete(r)
}
+
+func (r *ElastiServiceReconciler) getMutexForReconcile(key string) *sync.Mutex {
+ l, _ := r.ReconcileLocks.LoadOrStore(key, &sync.Mutex{})
+ return l.(*sync.Mutex)
+}
diff --git a/operator/internal/controller/opsDeployment.go b/operator/internal/controller/opsDeployment.go
index f9d34e26..3d61beb4 100644
--- a/operator/internal/controller/opsDeployment.go
+++ b/operator/internal/controller/opsDeployment.go
@@ -3,30 +3,63 @@ package controller
import (
"context"
+ "github.com/truefoundry/elasti/pkg/k8sHelper"
+ "github.com/truefoundry/elasti/pkg/values"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
- "k8s.io/apimachinery/pkg/api/errors"
+ v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "truefoundry.io/elasti/api/v1alpha1"
)
-func (r *ElastiServiceReconciler) getModeFromDeployment(ctx context.Context, deploymentNamespacedName types.NamespacedName) (string, error) {
- depl := &appsv1.Deployment{}
- if err := r.Get(ctx, deploymentNamespacedName, depl); err != nil {
- if errors.IsNotFound(err) {
- r.Logger.Info("Deployment not found", zap.Any("deployment", deploymentNamespacedName))
- return "", nil
- }
- r.Logger.Error("Failed to get deployment", zap.Any("deployment", deploymentNamespacedName), zap.Error(err))
- return "", err
+func (r *ElastiServiceReconciler) handleTargetDeploymentChanges(ctx context.Context, obj interface{}, es *v1alpha1.ElastiService, req ctrl.Request) {
+ targetDeployment := &appsv1.Deployment{}
+ err := k8sHelper.UnstructuredToResource(obj, targetDeployment)
+ if err != nil {
+ r.Logger.Error("Failed to convert unstructured to deployment", zap.Error(err))
+ return
}
- mode := ServeMode
- condition := depl.Status.Conditions
- if depl.Status.Replicas == 0 {
- mode = ProxyMode
- } else if depl.Status.Replicas > 0 && condition[1].Status == "True" {
- mode = ServeMode
+ condition := targetDeployment.Status.Conditions
+ if targetDeployment.Status.Replicas == 0 {
+ r.Logger.Debug("Deployment has 0 replicas", zap.String("deployment_name", es.Spec.DeploymentName), zap.String("es", req.String()))
+ _, err := r.switchMode(ctx, req, values.ProxyMode)
+ if err != nil {
+ r.Logger.Error("Reconciliation failed", zap.String("es", req.String()), zap.Error(err))
+ return
+ }
+ } else if targetDeployment.Status.Replicas > 0 && condition[1].Status == values.DeploymentConditionStatusTrue {
+ r.Logger.Debug("Deployment has replicas", zap.String("deployment_name", es.Spec.DeploymentName), zap.String("es", req.String()))
+ _, err := r.switchMode(ctx, req, values.ServeMode)
+ if err != nil {
+ r.Logger.Error("Reconciliation failed", zap.String("es", req.String()), zap.Error(err))
+ return
+ }
}
+ r.Logger.Info("Deployment changes handled", zap.String("deployment_name", es.Spec.DeploymentName), zap.String("es", req.String()))
+}
- r.Logger.Debug("Got mode from deployment", zap.Any("deployment", deploymentNamespacedName), zap.String("mode", mode))
- return mode, nil
+func (r *ElastiServiceReconciler) handleResolverChanges(ctx context.Context, obj interface{}, serviceName, namespace string) {
+ resolverDeployment := &appsv1.Deployment{}
+ err := k8sHelper.UnstructuredToResource(obj, resolverDeployment)
+ if err != nil {
+ r.Logger.Error("Failed to convert unstructured to deployment", zap.Error(err))
+ return
+ }
+ if resolverDeployment.Name == resolverDeploymentName {
+ targetNamespacedName := types.NamespacedName{
+ Name: serviceName,
+ Namespace: namespace,
+ }
+ targetSVC := &v1.Service{}
+ if err := r.Get(ctx, targetNamespacedName, targetSVC); err != nil {
+ r.Logger.Error("Failed to get service to update endpointslice", zap.String("service", targetNamespacedName.String()), zap.Error(err))
+ return
+ }
+ if err := r.createOrUpdateEndpointsliceToResolver(ctx, targetSVC); err != nil {
+ r.Logger.Error("Failed to create or update endpointslice to resolver", zap.String("service", targetNamespacedName.String()), zap.Error(err))
+ return
+ }
+ }
+ r.Logger.Info("Resolver changes handled", zap.String("deployment_name", resolverDeploymentName))
}
diff --git a/operator/internal/controller/opsInformer.go b/operator/internal/controller/opsInformer.go
index 0698df9c..86a2d957 100644
--- a/operator/internal/controller/opsInformer.go
+++ b/operator/internal/controller/opsInformer.go
@@ -4,34 +4,21 @@ import (
"context"
"sync"
- argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ "github.com/truefoundry/elasti/pkg/values"
"go.uber.org/zap"
- appsv1 "k8s.io/api/apps/v1"
- v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
ctrl "sigs.k8s.io/controller-runtime"
"truefoundry.io/elasti/api/v1alpha1"
)
-const (
- ArgoPhaseHealthy = "Healthy"
- DeploymentConditionStatusTrue = "True"
-
- KindDeployments = "Deployments"
- KindRollout = "Rollout"
-)
-
func (r *ElastiServiceReconciler) getMutexForInformerStart(key string) *sync.Once {
- l, _ := r.WatcherStartLock.LoadOrStore(key, &sync.Once{})
+ l, _ := r.InformerStartLocks.LoadOrStore(key, &sync.Once{})
return l.(*sync.Once)
}
func (r *ElastiServiceReconciler) resetMutexForInformer(key string) {
- r.WatcherStartLock.Delete(key)
+ r.InformerStartLocks.Delete(key)
}
func (r *ElastiServiceReconciler) getResolverChangeHandler(ctx context.Context, es *v1alpha1.ElastiService, req ctrl.Request) cache.ResourceEventHandlerFuncs {
@@ -83,127 +70,13 @@ func (r *ElastiServiceReconciler) getScaleTargetRefChangeHandler(ctx context.Con
func (r *ElastiServiceReconciler) handleScaleTargetRefChanges(ctx context.Context, obj interface{}, es *v1alpha1.ElastiService, req ctrl.Request) {
switch es.Spec.ScaleTargetRef.Kind {
- case KindDeployments:
+ case values.KindDeployments:
r.Logger.Info("ScaleTargetRef kind is deployment", zap.String("kind", es.Spec.ScaleTargetRef.Kind))
r.handleTargetDeploymentChanges(ctx, obj, es, req)
- case KindRollout:
+ case values.KindRollout:
r.Logger.Info("ScaleTargetRef kind is rollout", zap.String("kind", es.Spec.ScaleTargetRef.Kind))
r.handleTargetRolloutChanges(ctx, obj, es, req)
default:
r.Logger.Error("Unsupported target kind", zap.String("kind", es.Spec.ScaleTargetRef.Kind))
}
}
-
-func (r *ElastiServiceReconciler) handleTargetRolloutChanges(ctx context.Context, obj interface{}, es *v1alpha1.ElastiService, req ctrl.Request) {
- newRollout := &argo.Rollout{}
- err := r.unstructuredToResource(obj, newRollout)
- if err != nil {
- r.Logger.Error("Failed to convert unstructured to rollout", zap.Error(err))
- return
- }
- replicas := newRollout.Status.ReadyReplicas
- condition := newRollout.Status.Phase
- if replicas == 0 {
- r.Logger.Debug("Rollout has 0 replicas", zap.String("rollout_name", es.Spec.ScaleTargetRef.Name), zap.String("es", req.String()))
- _, err := r.runReconcile(ctx, req, ProxyMode)
- if err != nil {
- r.Logger.Error("Reconciliation failed", zap.String("es", req.String()), zap.Error(err))
- return
- }
- } else if replicas > 0 && condition == ArgoPhaseHealthy {
- r.Logger.Debug("Rollout has replicas", zap.String("rollout_name", es.Spec.ScaleTargetRef.Name), zap.String("es", req.String()))
- _, err := r.runReconcile(ctx, req, ServeMode)
- if err != nil {
- r.Logger.Error("Reconciliation failed", zap.String("es", req.String()), zap.Error(err))
- return
- }
- }
- r.Logger.Info("Rollout changes handled", zap.String("rollout_name", es.Spec.ScaleTargetRef.Name), zap.String("es", req.String()))
-}
-
-func (r *ElastiServiceReconciler) handleTargetDeploymentChanges(ctx context.Context, obj interface{}, es *v1alpha1.ElastiService, req ctrl.Request) {
- newDeployment := &appsv1.Deployment{}
- err := r.unstructuredToResource(obj, newDeployment)
- if err != nil {
- r.Logger.Error("Failed to convert unstructured to deployment", zap.Error(err))
- return
- }
- condition := newDeployment.Status.Conditions
- if newDeployment.Status.Replicas == 0 {
- r.Logger.Debug("Deployment has 0 replicas", zap.String("deployment_name", es.Spec.DeploymentName), zap.String("es", req.String()))
- _, err := r.runReconcile(ctx, req, ProxyMode)
- if err != nil {
- r.Logger.Error("Reconciliation failed", zap.String("es", req.String()), zap.Error(err))
- return
- }
- } else if newDeployment.Status.Replicas > 0 && condition[1].Status == DeploymentConditionStatusTrue {
- r.Logger.Debug("Deployment has replicas", zap.String("deployment_name", es.Spec.DeploymentName), zap.String("es", req.String()))
- _, err := r.runReconcile(ctx, req, ServeMode)
- if err != nil {
- r.Logger.Error("Reconciliation failed", zap.String("es", req.String()), zap.Error(err))
- return
- }
- }
- r.Logger.Info("Deployment changes handled", zap.String("deployment_name", es.Spec.DeploymentName), zap.String("es", req.String()))
-}
-
-func (r *ElastiServiceReconciler) handlePublicServiceChanges(_ context.Context, obj interface{}, _, _ string) {
- publicService := &v1.Service{}
- err := r.unstructuredToResource(obj, publicService)
- if err != nil {
- r.Logger.Error("Failed to convert unstructured to service", zap.Error(err))
- return
- }
-
- // if publicService.Name == serviceName {
- // targetNamespacedName := types.NamespacedName{
- // Name: serviceName,
- // Namespace: namespace,
- // }
- // targetSVC := &v1.Service{}
- // if err := r.Get(ctx, targetNamespacedName, targetSVC); err != nil {
- // r.Logger.Error("Failed to get service to update endpointslice", zap.String("service", targetNamespacedName.String()), zap.Error(err))
- // return
- // }
- // if err := r.createOrUpdateEndpointsliceToResolver(ctx, targetSVC); err != nil {
- // r.Logger.Error("Failed to create or update endpointslice to resolver", zap.String("service", targetNamespacedName.String()), zap.Error(err))
- // return
- // }
- // }
- r.Logger.Info("Public service changed", zap.String("service", publicService.Name))
-}
-
-func (r *ElastiServiceReconciler) handleResolverChanges(ctx context.Context, obj interface{}, serviceName, namespace string) {
- resolverDeployment := &appsv1.Deployment{}
- err := r.unstructuredToResource(obj, resolverDeployment)
- if err != nil {
- r.Logger.Error("Failed to convert unstructured to deployment", zap.Error(err))
- return
- }
- if resolverDeployment.Name == resolverDeploymentName {
- targetNamespacedName := types.NamespacedName{
- Name: serviceName,
- Namespace: namespace,
- }
- targetSVC := &v1.Service{}
- if err := r.Get(ctx, targetNamespacedName, targetSVC); err != nil {
- r.Logger.Error("Failed to get service to update endpointslice", zap.String("service", targetNamespacedName.String()), zap.Error(err))
- return
- }
- if err := r.createOrUpdateEndpointsliceToResolver(ctx, targetSVC); err != nil {
- r.Logger.Error("Failed to create or update endpointslice to resolver", zap.String("service", targetNamespacedName.String()), zap.Error(err))
- return
- }
- }
- r.Logger.Info("Resolver changes handled", zap.String("deployment_name", resolverDeploymentName))
-}
-
-func (r *ElastiServiceReconciler) unstructuredToResource(obj interface{}, resource interface{}) error {
- unstructuredObj := obj.(*unstructured.Unstructured)
- err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.UnstructuredContent(), resource)
- if err != nil {
- r.Logger.Error("Failed to convert unstructured to interface", zap.Error(err))
- return err
- }
- return nil
-}
diff --git a/operator/internal/controller/opsReconcile.go b/operator/internal/controller/opsModes.go
similarity index 74%
rename from operator/internal/controller/opsReconcile.go
rename to operator/internal/controller/opsModes.go
index b3981255..d49d180e 100644
--- a/operator/internal/controller/opsReconcile.go
+++ b/operator/internal/controller/opsModes.go
@@ -5,6 +5,7 @@ import (
"fmt"
"sync"
+ "github.com/truefoundry/elasti/pkg/values"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@@ -12,28 +13,28 @@ import (
"truefoundry.io/elasti/api/v1alpha1"
)
-func (r *ElastiServiceReconciler) getMutexForRunReconcile(key string) *sync.Mutex {
- l, _ := r.RunReconcileLocks.LoadOrStore(key, &sync.Mutex{})
+func (r *ElastiServiceReconciler) getMutexForSwitchMode(key string) *sync.Mutex {
+ l, _ := r.SwitchModeLocks.LoadOrStore(key, &sync.Mutex{})
return l.(*sync.Mutex)
}
-func (r *ElastiServiceReconciler) runReconcile(ctx context.Context, req ctrl.Request, mode string) (res ctrl.Result, err error) {
- r.Logger.Debug("- In RunReconcile", zap.String("es", req.NamespacedName.String()))
- // Only 1 reconcile should run at a time for a given ElastiService. This prevents conflicts when updating different objects.
- mutex := r.getMutexForRunReconcile(req.NamespacedName.String())
+func (r *ElastiServiceReconciler) switchMode(ctx context.Context, req ctrl.Request, mode string) (res ctrl.Result, err error) {
+ r.Logger.Debug("- In SwitchMode", zap.String("es", req.NamespacedName.String()))
+ // Only 1 switchMode should run at a time for a given ElastiService. This prevents conflicts when updating different objects.
+ mutex := r.getMutexForSwitchMode(req.NamespacedName.String())
mutex.Lock()
- defer r.Logger.Debug("- Out of RunReconcile", zap.String("es", req.NamespacedName.String()))
+ defer r.Logger.Debug("- Out of SwitchMode", zap.String("es", req.NamespacedName.String()))
defer mutex.Unlock()
es, err := r.getCRD(ctx, req.NamespacedName)
defer r.updateCRDStatus(ctx, req.NamespacedName, mode)
switch mode {
- case ServeMode:
+ case values.ServeMode:
if err = r.enableServeMode(ctx, req, es); err != nil {
r.Logger.Error("Failed to enable serve mode", zap.String("es", req.NamespacedName.String()), zap.Error(err))
return res, err
}
r.Logger.Info("Serve mode enabled", zap.String("es", req.NamespacedName.String()))
- case ProxyMode:
+ case values.ProxyMode:
if err = r.enableProxyMode(ctx, req, es); err != nil {
r.Logger.Error("Failed to enable proxy mode", zap.String("es", req.NamespacedName.String()), zap.Error(err))
return res, err
@@ -46,9 +47,6 @@ func (r *ElastiServiceReconciler) runReconcile(ctx context.Context, req ctrl.Req
}
func (r *ElastiServiceReconciler) enableProxyMode(ctx context.Context, req ctrl.Request, es *v1alpha1.ElastiService) error {
- // Watch for changes in activator deployment, and update the endpointslice since we are in proxy mode
- go r.Informer.AddDeploymentWatch(req, resolverDeploymentName, resolverNamespace, r.getResolverChangeHandler(ctx, es, req))
-
targetNamespacedName := types.NamespacedName{
Name: es.Spec.Service,
Namespace: es.Namespace,
@@ -65,6 +63,10 @@ func (r *ElastiServiceReconciler) enableProxyMode(ctx context.Context, req ctrl.
if err = r.createOrUpdateEndpointsliceToResolver(ctx, targetSVC); err != nil {
return err
}
+
+ // Watch for changes in the resolver deployment, and update the endpointslice since we are in proxy mode
+ r.Informer.AddDeploymentWatch(req, resolverDeploymentName, resolverNamespace, r.getResolverChangeHandler(ctx, es, req))
+
return nil
}
diff --git a/operator/internal/controller/opsRollout.go b/operator/internal/controller/opsRollout.go
new file mode 100644
index 00000000..3b1ac812
--- /dev/null
+++ b/operator/internal/controller/opsRollout.go
@@ -0,0 +1,39 @@
+package controller
+
+import (
+ "context"
+
+ argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ "github.com/truefoundry/elasti/pkg/k8sHelper"
+ "github.com/truefoundry/elasti/pkg/values"
+ "go.uber.org/zap"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "truefoundry.io/elasti/api/v1alpha1"
+)
+
+func (r *ElastiServiceReconciler) handleTargetRolloutChanges(ctx context.Context, obj interface{}, es *v1alpha1.ElastiService, req ctrl.Request) {
+ newRollout := &argo.Rollout{}
+ err := k8sHelper.UnstructuredToResource(obj, newRollout)
+ if err != nil {
+ r.Logger.Error("Failed to convert unstructured to rollout", zap.Error(err))
+ return
+ }
+ replicas := newRollout.Status.ReadyReplicas
+ condition := newRollout.Status.Phase
+ if replicas == 0 {
+ r.Logger.Debug("Rollout has 0 replicas", zap.String("rollout_name", es.Spec.ScaleTargetRef.Name), zap.String("es", req.String()))
+ _, err := r.switchMode(ctx, req, values.ProxyMode)
+ if err != nil {
+ r.Logger.Error("Reconciliation failed", zap.String("es", req.String()), zap.Error(err))
+ return
+ }
+ } else if replicas > 0 && condition == values.ArgoPhaseHealthy {
+ r.Logger.Debug("Rollout has replicas", zap.String("rollout_name", es.Spec.ScaleTargetRef.Name), zap.String("es", req.String()))
+ _, err := r.switchMode(ctx, req, values.ServeMode)
+ if err != nil {
+ r.Logger.Error("Reconciliation failed", zap.String("es", req.String()), zap.Error(err))
+ return
+ }
+ }
+ r.Logger.Info("Rollout changes handled", zap.String("rollout_name", es.Spec.ScaleTargetRef.Name), zap.String("es", req.String()))
+}
diff --git a/operator/internal/controller/opsServices.go b/operator/internal/controller/opsServices.go
index ad13da36..be331893 100644
--- a/operator/internal/controller/opsServices.go
+++ b/operator/internal/controller/opsServices.go
@@ -3,6 +3,7 @@ package controller
import (
"context"
+ "github.com/truefoundry/elasti/pkg/k8sHelper"
"github.com/truefoundry/elasti/pkg/utils"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
@@ -65,3 +66,29 @@ func (r *ElastiServiceReconciler) checkAndCreatePrivateService(ctx context.Conte
}
return PVTName, nil
}
+
+func (r *ElastiServiceReconciler) handlePublicServiceChanges(ctx context.Context, obj interface{}, serviceName, namespace string) {
+ publicService := &v1.Service{}
+ err := k8sHelper.UnstructuredToResource(obj, publicService)
+ if err != nil {
+ r.Logger.Error("Failed to convert unstructured to service", zap.Error(err))
+ return
+ }
+
+ if publicService.Name == serviceName {
+ targetNamespacedName := types.NamespacedName{
+ Name: serviceName,
+ Namespace: namespace,
+ }
+ targetSVC := &v1.Service{}
+ if err := r.Get(ctx, targetNamespacedName, targetSVC); err != nil {
+ r.Logger.Error("Failed to get service to update endpointslice", zap.String("service", targetNamespacedName.String()), zap.Error(err))
+ return
+ }
+ if err := r.createOrUpdateEndpointsliceToResolver(ctx, targetSVC); err != nil {
+ r.Logger.Error("Failed to create or update endpointslice to resolver", zap.String("service", targetNamespacedName.String()), zap.Error(err))
+ return
+ }
+ }
+ r.Logger.Info("Public service changed", zap.String("service", publicService.Name))
+}
diff --git a/operator/internal/crdDirectory/directory.go b/operator/internal/crdDirectory/directory.go
index 5289cf6d..6c6c9edb 100644
--- a/operator/internal/crdDirectory/directory.go
+++ b/operator/internal/crdDirectory/directory.go
@@ -4,6 +4,7 @@ import (
"sync"
"go.uber.org/zap"
+ "truefoundry.io/elasti/api/v1alpha1"
)
type Directory struct {
@@ -12,8 +13,8 @@ type Directory struct {
}
type CRDDetails struct {
- CRDName string
- DeploymentName string
+ CRDName string
+ Spec v1alpha1.ElastiServiceSpec
}
var CRDDirectory *Directory
diff --git a/operator/internal/elastiServer/elastiServer.go b/operator/internal/elastiServer/elastiServer.go
index ee6bbd9a..d819eeff 100644
--- a/operator/internal/elastiServer/elastiServer.go
+++ b/operator/internal/elastiServer/elastiServer.go
@@ -24,9 +24,8 @@ type (
// It is used to notify components about certain events, like when the resolver receives a request
// for a service: that service is scaled up if it's at 0 replicas
Server struct {
- logger *zap.Logger
- k8sHelper *k8sHelper.Ops
- minReplicas int32
+ logger *zap.Logger
+ k8sHelper *k8sHelper.Ops
}
)
@@ -39,9 +38,8 @@ func NewServer(logger *zap.Logger, config *rest.Config) *Server {
// Get Ops client
k8sUtil := k8sHelper.NewOps(logger, kClient)
return &Server{
- logger: logger.Named("elastiServer"),
- k8sHelper: k8sUtil,
- minReplicas: 1,
+ logger: logger.Named("elastiServer"),
+ k8sHelper: k8sUtil,
}
}
@@ -98,7 +96,7 @@ func (s *Server) resolverReqHandler(w http.ResponseWriter, req *http.Request) {
s.logger.Error("Failed to write response", zap.Error(err))
return
}
- err = s.scaleDeploymentForService(ctx, body.Svc, body.Namespace)
+ err = s.scaleTargetForService(ctx, body.Svc, body.Namespace)
if err != nil {
s.logger.Error("Failed to compare and scale deployment", zap.Error(err))
return
@@ -106,15 +104,15 @@ func (s *Server) resolverReqHandler(w http.ResponseWriter, req *http.Request) {
s.logger.Info("Received fulfilled from Resolver", zap.Any("body", body))
}
-func (s *Server) scaleDeploymentForService(_ context.Context, serviceName, namespace string) error {
+func (s *Server) scaleTargetForService(_ context.Context, serviceName, namespace string) error {
crd, found := crdDirectory.CRDDirectory.GetCRD(serviceName)
if !found {
s.logger.Error("Failed to get CRD details from directory")
}
- if err := s.k8sHelper.ScaleDeploymentWhenAtZero(namespace, crd.DeploymentName, s.minReplicas); err != nil {
- s.logger.Error("Failed to scale deployment", zap.Error(err))
+ if err := s.k8sHelper.ScaleTargetWhenAtZero(namespace, crd.Spec.ScaleTargetRef.Name, crd.Spec.ScaleTargetRef.Kind, crd.Spec.MinTargetReplicas); err != nil {
+ s.logger.Error("Failed to scale TargetRef", zap.Any("TargetRef", crd.Spec.ScaleTargetRef), zap.Error(err))
return err
}
- s.logger.Info("Deployment is scaled up", zap.Any("Deployment", crd.DeploymentName))
+ s.logger.Info("TargetRef is scaled up", zap.Any("TargetRef", crd.Spec.ScaleTargetRef))
return nil
}
diff --git a/pkg/k8sHelper/ops.go b/pkg/k8sHelper/ops.go
index bdd922c3..8f22a598 100644
--- a/pkg/k8sHelper/ops.go
+++ b/pkg/k8sHelper/ops.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "github.com/truefoundry/elasti/pkg/values"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -70,20 +71,45 @@ func (k *Ops) getServiceSelectorStr(ns, svc string) (string, error) {
return selectorString, nil
}
-// ScaleDeploymentWhenAtZero scales the deployment to the provided replicas when it's at 0
-func (k *Ops) ScaleDeploymentWhenAtZero(ns, deployment string, replicas int32) error {
- k.logger.Debug("Scaling deployment", zap.String("deployment", deployment), zap.Int32("replicas", replicas))
+// ScaleTargetWhenAtZero scales the TargetRef to the provided replicas when it's at 0
+func (k *Ops) ScaleTargetWhenAtZero(ns, targetName, targetKind string, replicas int32) error {
+ switch targetKind {
+ case values.KindDeployments:
+ k.logger.Info("ScaleTargetRef kind is deployment", zap.String("kind", targetKind))
+ err := k.ScaleDeployment(ns, targetName, replicas)
+ if err != nil {
+ return err
+ }
+ case values.KindRollout:
+ k.logger.Info("ScaleTargetRef kind is rollout", zap.String("kind", targetKind))
+ err := k.ScaleArgoRollout(ns, targetName, replicas)
+ if err != nil {
+ return err
+ }
+ default:
+ k.logger.Error("Unsupported target kind", zap.String("kind", targetKind))
+ }
+ return nil
+}
+
+// ScaleDeployment scales the deployment to the provided replicas when it is currently at 0
+func (k *Ops) ScaleDeployment(ns, targetName string, replicas int32) error {
+ k.logger.Debug("Scaling deployment", zap.String("deployment", targetName), zap.Int32("replicas", replicas))
deploymentClient := k.kClient.AppsV1().Deployments(ns)
- deploy, err := deploymentClient.Get(context.TODO(), deployment, metav1.GetOptions{})
+ deploy, err := deploymentClient.Get(context.TODO(), targetName, metav1.GetOptions{})
if err != nil {
return err
}
- k.logger.Debug("Deployment found", zap.String("deployment", deployment), zap.Int32("current replicas", *deploy.Spec.Replicas), zap.Int32("desired replicas", replicas))
+ k.logger.Debug("Deployment found", zap.String("deployment", targetName), zap.Int32("current replicas", *deploy.Spec.Replicas), zap.Int32("desired replicas", replicas))
if *deploy.Spec.Replicas == 0 {
deploy.Spec.Replicas = &replicas
_, err = deploymentClient.Update(context.TODO(), deploy, metav1.UpdateOptions{})
return err
}
- return err
+ return nil
+}
+
+// ScaleArgoRollout scales the Argo Rollout to the provided replicas; not yet implemented
+func (k *Ops) ScaleArgoRollout(ns, targetName string, replicas int32) error {
+ k.logger.Debug("Scaling Rollout is not yet implemented", zap.String("rollout", targetName), zap.Int32("replicas", replicas))
+ return nil
}
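ScaleArgoRollout is a stub above. A minimal sketch of one possible implementation, not the project's actual one: it patches spec.replicas through the dynamic client (the dynClient parameter is an assumption; Ops does not carry one today), mirroring ScaleDeployment's behavior of only scaling up from zero:

```go
package k8sHelper

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
)

// rolloutGVR identifies Argo Rollouts (group argoproj.io, not apps).
var rolloutGVR = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "rollouts"}

// scaleRolloutWhenAtZero patches spec.replicas on an Argo Rollout only if it is currently 0.
func scaleRolloutWhenAtZero(dynClient dynamic.Interface, ns, name string, replicas int32) error {
	rollout, err := dynClient.Resource(rolloutGVR).Namespace(ns).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	current, found, err := unstructured.NestedInt64(rollout.Object, "spec", "replicas")
	if err != nil {
		return err
	}
	if !found || current == 0 {
		patch := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas))
		_, err = dynClient.Resource(rolloutGVR).Namespace(ns).Patch(context.TODO(), name, types.MergePatchType, patch, metav1.PatchOptions{})
	}
	return err
}
```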
diff --git a/pkg/k8sHelper/utils.go b/pkg/k8sHelper/utils.go
new file mode 100644
index 00000000..d5b72715
--- /dev/null
+++ b/pkg/k8sHelper/utils.go
@@ -0,0 +1,15 @@
+package k8sHelper
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// UnstructuredToResource converts an unstructured object, as delivered by an
+// informer, into the provided typed resource.
+func UnstructuredToResource(obj interface{}, resource interface{}) error {
+ unstructuredObj, ok := obj.(*unstructured.Unstructured)
+ if !ok {
+  return fmt.Errorf("expected *unstructured.Unstructured, got %T", obj)
+ }
+ return runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.UnstructuredContent(), resource)
+}
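For context, a sketch of how an informer event handler can use this helper, mirroring handleResolverChanges and handlePublicServiceChanges above (the handler name here is illustrative):

```go
package main

import (
	"log"

	"github.com/truefoundry/elasti/pkg/k8sHelper"
	appsv1 "k8s.io/api/apps/v1"
)

// onDeploymentEvent converts the raw informer payload into a typed Deployment
// before inspecting its status, as the controller's handlers do.
func onDeploymentEvent(obj interface{}) {
	deployment := &appsv1.Deployment{}
	if err := k8sHelper.UnstructuredToResource(obj, deployment); err != nil {
		log.Printf("failed to convert unstructured object: %v", err)
		return
	}
	log.Printf("deployment %s has %d replicas", deployment.Name, deployment.Status.Replicas)
}
```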
diff --git a/pkg/values/values.go b/pkg/values/values.go
new file mode 100644
index 00000000..abf7f1fe
--- /dev/null
+++ b/pkg/values/values.go
@@ -0,0 +1,13 @@
+package values
+
+const (
+ ArgoPhaseHealthy = "Healthy"
+ DeploymentConditionStatusTrue = "True"
+
+ KindDeployments = "Deployments"
+ KindRollout = "Rollout"
+
+ ServeMode = "serve"
+ ProxyMode = "proxy"
+ NullMode = ""
+)
diff --git a/playground/config/gateway.yaml b/playground/config/gateway.yaml
index a3296c16..351613ca 100644
--- a/playground/config/gateway.yaml
+++ b/playground/config/gateway.yaml
@@ -33,23 +33,3 @@ spec:
host: target-service
port:
number: 8014
----
-apiVersion: networking.istio.io/v1alpha3
-kind: VirtualService
-metadata:
- name: target-virtual-service
- namespace: default
-spec:
- hosts:
- - "*"
- gateways:
- - target-gateway
- http:
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- host: target-service
- port:
- number: 8014
diff --git a/playground/config/watch-crd.yaml b/playground/config/watch-crd.yaml
index 0a2fd8a6..e3e27071 100644
--- a/playground/config/watch-crd.yaml
+++ b/playground/config/watch-crd.yaml
@@ -10,8 +10,9 @@ spec:
idlePeriod: 20
service: target-service
deploymentName: target
+ minTargetReplicas: 2
scaleTargetRef:
apiVersion: apps/v1
kind: Deployments
name: target
-
+
diff --git a/resolver/Dockerfile b/resolver/Dockerfile
index f600dcf2..57e1f0a3 100644
--- a/resolver/Dockerfile
+++ b/resolver/Dockerfile
@@ -1,3 +1,4 @@
+# Build the resolver binary
FROM golang:1.22 AS builder
ARG TARGETOS
ARG TARGETARCH
@@ -5,15 +6,27 @@ ARG TARGETARCH
COPY ../pkg ../pkg
WORKDIR /workspace
+# Copy the Go Modules manifests
COPY ./resolver/go.mod go.mod
COPY ./resolver/go.sum go.sum
+
+# cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
RUN go mod download
+
+# Copy the go source
COPY ./resolver/ .
+
+# Build
+# GOARCH is left without a default value so that the binary is built for the host where the
+# command is run. For example, if we call make docker-build in a local env on Apple Silicon (M1),
+# the docker BUILDPLATFORM arg will be linux/arm64, while on Apple x86 it will be linux/amd64.
+# By leaving it empty, we ensure that the container and the binary shipped in it share the same platform.
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o resolver cmd/main.go
+
FROM alpine:latest
COPY --from=builder /workspace/resolver .
-
EXPOSE 8080
EXPOSE 8012
EXPOSE 8013
diff --git a/resolver/Makefile b/resolver/Makefile
index 3b2833ae..8e2eae7b 100644
--- a/resolver/Makefile
+++ b/resolver/Makefile
@@ -2,6 +2,8 @@ ENVFILE := .env
include $(ENVFILE)
export $(shell sed 's/=.*//' $(ENVFILE))
+CONTAINER_TOOL ?= docker
+
.PHONY: help
help:
@echo "Available targets:"
@@ -12,27 +14,37 @@ help:
printf " %-15s %s\n", target, helpMsg; \
}' $(MAKEFILE_LIST) | column -s ':' -t
-.PHONY: run-resolver
-run-resolver: ## Run resolver locally
+.PHONY: run
+run: ## Run resolver locally
go run ./cmd/
-.PHONY: docker-build-resolver
-docker-build-resolver: ## Build docker image for the resolver
+.PHONY: docker-build
+docker-build: ## Build docker image for the resolver
docker build -t ${IMG} -f ./Dockerfile ../
-.PHONY: docker-publish-resolver
-docker-publish-resolver: ## Publish docker image for the resolver
+.PHONY: docker-push
+docker-push: ## Publish docker image for the resolver
docker push ${IMG}
-.PHONY: deploy-resolver
-deploy-resolver: ## Deploy resolver on k8s
- # kubectl apply -f ./config/namespace.yaml -n elasti
+PLATFORMS ?= linux/arm64,linux/amd64 #,linux/s390x,linux/ppc64le
+.PHONY: docker-buildx
+docker-buildx: ## Build and push docker image for cross-platform support
+ # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
+ sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
+ - $(CONTAINER_TOOL) buildx create --name project-resolver-v3-builder
+ $(CONTAINER_TOOL) buildx use project-resolver-v3-builder
+ - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross ../
+ - $(CONTAINER_TOOL) buildx rm project-resolver-v3-builder
+ rm Dockerfile.cross
+
+
+.PHONY: deploy
+deploy: ## Deploy resolver on k8s
kubectl apply -f ./config/deployment.yaml -n elasti
kubectl apply -f ./config/service.yaml -n elasti
-.PHONY: undeploy-resolver
-undeploy-resolver: ## undeploy docker image for resolver, publish it, and deploy it on k8s
- # kubectl delete -f ./config/namespace.yaml -n elasti
+.PHONY: undeploy
+undeploy: ## Undeploy resolver from k8s
kubectl delete -f ./config/deployment.yaml -n elasti
kubectl delete -f ./config/service.yaml -n elasti
diff --git a/resolver/config/deployment.yaml b/resolver/config/deployment.yaml
index 52266e67..6635d41a 100644
--- a/resolver/config/deployment.yaml
+++ b/resolver/config/deployment.yaml
@@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: playground
- image: localhost:5001/elasti-resolver:latest
+ image: localhost:5001/elasti-resolver:v1alpha1
env:
- name: SYSTEM_NAMESPACE
value: elasti