diff --git a/api/v1alpha1/k8sgpt_types.go b/api/v1alpha1/k8sgpt_types.go
index 3fc6bd8c..86fb996d 100644
--- a/api/v1alpha1/k8sgpt_types.go
+++ b/api/v1alpha1/k8sgpt_types.go
@@ -88,10 +88,12 @@ type AISpec struct {
 	BaseUrl string `json:"baseUrl,omitempty"`
 	Region  string `json:"region,omitempty"`
 	// +kubebuilder:default:=gpt-3.5-turbo
-	Model   string     `json:"model,omitempty"`
-	Engine  string     `json:"engine,omitempty"`
-	Secret  *SecretRef `json:"secret,omitempty"`
-	Enabled bool       `json:"enabled,omitempty"`
+	Model  string `json:"model,omitempty"`
+	Engine string `json:"engine,omitempty"`
+	// Interval, in seconds, between repeated backend analysis requests.
+	// 0 (the default) disables the repeat loop.
+	// +kubebuilder:default:=0
+	Interval int        `json:"interval,omitempty"`
+	Secret   *SecretRef `json:"secret,omitempty"`
+	Enabled  bool       `json:"enabled,omitempty"`
 	// +kubebuilder:default:=true
 	Anonymize *bool `json:"anonymized,omitempty"`
 	// +kubebuilder:default:=english
diff --git a/config/crd/bases/core.k8sgpt.ai_k8sgpts.yaml b/config/crd/bases/core.k8sgpt.ai_k8sgpts.yaml
index 4a26a524..c8ab99e5 100644
--- a/config/crd/bases/core.k8sgpt.ai_k8sgpts.yaml
+++ b/config/crd/bases/core.k8sgpt.ai_k8sgpts.yaml
@@ -74,6 +74,9 @@ spec:
                 type: boolean
               engine:
                 type: string
+              interval:
+                default: 0
+                type: integer
               language:
                 default: english
                 type: string
diff --git a/controllers/k8sgpt_controller.go b/controllers/k8sgpt_controller.go
index ae1eef2e..06687de1 100644
--- a/controllers/k8sgpt_controller.go
+++ b/controllers/k8sgpt_controller.go
@@ -34,6 +34,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/metrics"
 
 	kclient "github.com/k8sgpt-ai/k8sgpt-operator/pkg/client"
+	"github.com/k8sgpt-ai/k8sgpt-operator/pkg/common"
 	"github.com/k8sgpt-ai/k8sgpt-operator/pkg/integrations"
 	"github.com/k8sgpt-ai/k8sgpt-operator/pkg/resources"
 	"github.com/k8sgpt-ai/k8sgpt-operator/pkg/sinks"
@@ -77,6 +78,8 @@ var (
 	analysisRetryCount int
 	// allowBackendAIRequest a circuit breaker that switching on/off backend AI calls
 	allowBackendAIRequest = true
+	calledOnce            = false
+	latestResponse        = &common.K8sGPTReponse{}
 )
 
 // K8sGPTReconciler reconciles a K8sGPT object
@@ -88,6 +91,26 @@ type K8sGPTReconciler struct {
 	K8sGPTClient *kclient.Client
 }
 
+// repeatBackendRequest runs ProcessAnalysis against the backend once per
+// interval, re-arming a one-shot timer after each successful call. On the
+// first error it logs, closes the client and stops the loop.
+// NOTE(review): latestResponse and calledOnce are package-level and written
+// from this timer goroutine without synchronization -- confirm reconciler
+// reads are race-free, and that the loop is stoppable on CR deletion.
+func repeatBackendRequest(interval time.Duration, k8sgptClient *kclient.Client, k8sgptConfig *corev1alpha1.K8sGPT) {
+	time.AfterFunc(interval, func() {
+		response, err := k8sgptClient.ProcessAnalysis(k8sgptConfig, allowBackendAIRequest)
+		if err != nil {
+			fmt.Printf("error: %s\n", err)
+			k8sgptClient.Close()
+			return
+		}
+
+		latestResponse = response
+		fmt.Println("Number of results", len(latestResponse.Results))
+		repeatBackendRequest(interval, k8sgptClient, k8sgptConfig)
+	})
+}
+
 // +kubebuilder:rbac:groups=core.k8sgpt.ai,resources=k8sgpts,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=core.k8sgpt.ai,resources=k8sgpts/status,verbs=get;update;patch
 // +kubebuilder:rbac:groups=core.k8sgpt.ai,resources=k8sgpts/finalizers,verbs=update
@@ -221,8 +244,6 @@ func (r *K8sGPTReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 		return r.finishReconcile(err, false)
 	}
 
-	defer k8sgptClient.Close()
-
 	// Configure the k8sgpt deployment if required
 	if k8sgptConfig.Spec.RemoteCache != nil {
 		err = k8sgptClient.AddConfig(k8sgptConfig)
@@ -243,7 +264,7 @@ func (r *K8sGPTReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 		}
 	}
 
-	response, err := k8sgptClient.ProcessAnalysis(deployment, k8sgptConfig, allowBackendAIRequest)
+	response, err := k8sgptClient.ProcessAnalysis(k8sgptConfig, allowBackendAIRequest)
 	if err != nil {
 		if k8sgptConfig.Spec.AI.Enabled {
 			k8sgptNumberOfFailedBackendAICalls.With(prometheus.Labels{
@@ -270,6 +291,16 @@ func (r *K8sGPTReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 	// Reset analysisRetryCount
 	analysisRetryCount = 0
 
+	interval := time.Duration(k8sgptConfig.Spec.AI.Interval) * time.Second
+	if interval >= ReconcileSuccessInterval && !calledOnce {
+		calledOnce = true
+		repeatBackendRequest(interval, k8sgptClient, k8sgptConfig)
+	} else {
+		// If backend request interval is not set, close the client as soon
+		// as the reconciler call ends.
+		defer k8sgptClient.Close()
+	}
+
 	// Update metrics count
 	if k8sgptConfig.Spec.AI.Enabled && len(response.Results) > 0 {
 		k8sgptNumberOfBackendAICalls.With(prometheus.Labels{
diff --git a/pkg/client/analysis.go b/pkg/client/analysis.go
index 01816fb3..82ca58d2 100644
--- a/pkg/client/analysis.go
+++ b/pkg/client/analysis.go
@@ -9,10 +9,9 @@ import (
 
 	schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
 	"github.com/k8sgpt-ai/k8sgpt-operator/api/v1alpha1"
 	"github.com/k8sgpt-ai/k8sgpt-operator/pkg/common"
-	v1 "k8s.io/api/apps/v1"
 )
 
-func (c *Client) ProcessAnalysis(deployment v1.Deployment, config *v1alpha1.K8sGPT, allowAIRequest bool) (*common.K8sGPTReponse, error) {
+func (c *Client) ProcessAnalysis(config *v1alpha1.K8sGPT, allowAIRequest bool) (*common.K8sGPTReponse, error) {
 	client := rpc.NewServerServiceClient(c.conn)
 	req := &schemav1.AnalyzeRequest{