diff --git a/deployment/invalidator/deployment.yaml b/deployment/invalidator/deployment.yaml index 1792d07..69ac179 100644 --- a/deployment/invalidator/deployment.yaml +++ b/deployment/invalidator/deployment.yaml @@ -24,7 +24,7 @@ spec: - command: [ "./main" ] args: [ "--varnishNamespace", "default", - "--varnishLabel", "'app=varnish'", + "--varnishLabel", "app=varnish", "--inCluster=true" ] image: 'docker.io/bilalcaliskan/varnish-cache-invalidator:latest' diff --git a/deployment/varnish/default.vcl b/deployment/varnish/default.vcl index 999f795..6661ba4 100644 --- a/deployment/varnish/default.vcl +++ b/deployment/varnish/default.vcl @@ -149,15 +149,6 @@ sub vcl_deliver { return (deliver); } -sub vcl_purge { - # Only handle actual PURGE HTTP methods, everything else is discarded - if (req.method == "PURGE") { - # restart request - set req.http.X-Purge = "Yes"; - return(restart); - } -} - sub vcl_synth { if (resp.status == 720) { # We use this special error status 720 to force redirects with 301 (permanent) redirects @@ -201,7 +192,11 @@ sub vcl_recv { # Normalize the query arguments set req.url = std.querysort(req.url); + # Purge logic if (req.method == "PURGE") { + if (req.http.purge-domain) { + set req.http.host = req.http.purge-domain; + } return (purge); } @@ -237,8 +232,8 @@ sub vcl_backend_response { # Here you clean the response headers, removing silly Set-Cookie headers # and other mistakes your backend does. 
- set beresp.ttl = 5m; - set beresp.grace = 30m; + #set beresp.ttl = 5m; + #set beresp.grace = 30m; # Don't cache 50x responses if (beresp.status == 500 || beresp.status == 502 || beresp.status == 503 || beresp.status == 504) { diff --git a/internal/k8s/informers.go b/internal/k8s/informers.go index 3fd93f4..9310d0b 100644 --- a/internal/k8s/informers.go +++ b/internal/k8s/informers.go @@ -70,10 +70,10 @@ func RunPodInformer() { if key == varnishLabelKey && value == varnishLabelValue && pod.Namespace == opts.VarnishNamespace { if pod.Status.PodIP != "" { podUrl := fmt.Sprintf(PodUrl, pod.Status.PodIP, pod.Spec.Containers[0].Ports[0].ContainerPort) - logger.Info("Adding pod url to the varnishPods slice", zap.String("podUrl", podUrl)) + logger.Info("adding pod url to the varnishPods slice", zap.String("podUrl", podUrl)) addVarnishPod(&options.VarnishInstances, &podUrl) } else { - logger.Warn("Varnish pod does not have an ip address yet, skipping add operation", + logger.Warn("varnish pod does not have an ip address yet, skipping add operation", zap.String("pod", pod.Name), zap.String("namespace", pod.Namespace)) } } @@ -88,10 +88,10 @@ func RunPodInformer() { if key == varnishLabelKey && value == varnishLabelValue && oldPod.ResourceVersion != newPod.ResourceVersion && oldPod.Namespace == opts.VarnishNamespace { if oldPod.Status.PodIP == "" && newPod.Status.PodIP != "" { - logger.Info("Assigned an ip address to the pod, adding to varnishPods slice", zap.String("pod", newPod.Name), + logger.Info("assigned an ip address to the pod, adding to varnishPods slice", zap.String("pod", newPod.Name), zap.String("namespace", newPod.Namespace), zap.String("ipAddress", newPod.Status.PodIP)) podUrl := fmt.Sprintf(PodUrl, newPod.Status.PodIP, newPod.Spec.Containers[0].Ports[0].ContainerPort) - logger.Info("Adding pod url to the varnishPods slice", zap.String("podUrl", podUrl)) + logger.Info("adding pod url to the varnishPods slice", zap.String("podUrl", podUrl)) 
 					addVarnishPod(&options.VarnishInstances, &podUrl)
 				}
 			}
@@ -102,7 +102,7 @@ func RunPodInformer() {
 			labels := pod.GetLabels()
 			for key, value := range labels {
 				if key == varnishLabelKey && value == varnishLabelValue && pod.Namespace == opts.VarnishNamespace {
-					logger.Info("Varnish pod is deleted, removing from varnishPods slice", zap.String("pod", pod.Name),
+					logger.Info("varnish pod is deleted, removing from varnishPods slice", zap.String("pod", pod.Name),
 						zap.String("namespace", pod.Namespace))
 					podUrl := fmt.Sprintf(PodUrl, pod.Status.PodIP, pod.Spec.Containers[0].Ports[0].ContainerPort)
 					index, found := findVarnishPod(options.VarnishInstances, podUrl)
diff --git a/internal/web/handlers.go b/internal/web/handlers.go
index 46ef38a..04eee9f 100644
--- a/internal/web/handlers.go
+++ b/internal/web/handlers.go
@@ -9,12 +9,15 @@ import (
 )
 
 func purgeHandler(w http.ResponseWriter, r *http.Request) {
-	var successCount int
-	var response string
+	var (
+		successCount, failureCount int
+		httpResponse               string
+	)
+	logger = logger.With(zap.String("requestMethod", "PURGE"))
 
 	purgePath := r.Header.Get("purge-path")
 	if purgePath == "" {
-		logger.Error("Unable to make a PURGE request to Varnish targets, header purge-path must be set!",
+		logger.Error("unable to make a PURGE request to Varnish targets, header purge-path must be set!",
 			zap.String("requestMethod", "PURGE"))
 		http.Error(w, "Header purge-path must be set!", http.StatusBadRequest)
 		return
@@ -22,38 +25,44 @@ func purgeHandler(w http.ResponseWriter, r *http.Request) {
 	purgeDomain := r.Header.Get("purge-domain")
 	if purgeDomain == "" {
-		logger.Error("Unable to make a PURGE request to Varnish targets, header purge-domain must be set!")
+		logger.Error("unable to make a PURGE request to Varnish targets, header purge-domain must be set!")
 		http.Error(w, "Header purge-domain must be set!", http.StatusBadRequest)
 		return
 	}
 
 	for _, v := range options.VarnishInstances {
+		// each v already holds the full pod base url (http://<ip>:<port>)
+		logger.Debug("dispatching PURGE", zap.String("instance", *v))
 		fullUrl := fmt.Sprintf("%s%s", *v, purgePath)
 		req, _ := http.NewRequest("PURGE", fullUrl, nil)
 		req.Host = purgeDomain
 
-		logger.Info("Making PURGE request", zap.String("targetHost", *v))
+		logger.Info("making PURGE request", zap.String("url", fullUrl))
 		res, err := client.Do(req)
 		if err != nil {
-			logger.Error("An error occurred while making PURGE request", zap.String("targetHost", *v),
+			logger.Error("an error occurred while making PURGE request", zap.String("url", fullUrl),
 				zap.String("error", err.Error()))
+			failureCount++
 		}
 
 		if res != nil && res.StatusCode == http.StatusOK {
 			successCount++
 		}
 	}
 
 	if successCount == len(options.VarnishInstances) {
-		logger.Info("All PURGE requests succeeded on Varnish pods!", zap.Int("successCount", successCount))
+		logger.Info("all PURGE requests succeeded on Varnish pods!", zap.Int("successCount", successCount),
+			zap.Int("failureCount", failureCount))
+		httpResponse = fmt.Sprintf("All PURGE requests succeeded on Varnish pods!\nSucceeded request = %d\n"+
+			"Failed request = %d\n", successCount, failureCount)
 		w.WriteHeader(http.StatusOK)
 	} else {
-		logger.Warn("One or more Varnish PURGE requests failed", zap.Int("successCount", successCount),
+		logger.Warn("one or more Varnish PURGE requests failed", zap.Int("successCount", successCount),
 			zap.Int("failureCount", len(options.VarnishInstances)-successCount))
-		response = fmt.Sprintf("One or more Varnish PURGE requests failed, check the logs!\nSucceeded request = %d\n"+
-			"Failed request = %d", successCount, len(options.VarnishInstances)-successCount)
+		httpResponse = fmt.Sprintf("One or more Varnish PURGE requests failed, check the logs!\nSucceeded request = %d\n"+
+			"Failed request = %d\n", successCount, failureCount)
 		w.WriteHeader(http.StatusBadRequest)
 	}
 
-	writeResponse(w, response)
+	writeResponse(w, httpResponse)
 }