diff --git a/README.md b/README.md
index 7f405b6b87..0f4b9de464 100644
--- a/README.md
+++ b/README.md
@@ -343,7 +343,7 @@ Unused:
 > huggingface
 > noopai
 > googlevertexai
-> watsonxai
+> ibmwatsonxai
 ```
 
 For detailed documentation on how to configure and use each provider see [here](https://docs.k8sgpt.ai/reference/providers/backend/).
diff --git a/cmd/auth/add.go b/cmd/auth/add.go
index e4afde81c2..67370cc1b0 100644
--- a/cmd/auth/add.go
+++ b/cmd/auth/add.go
@@ -48,7 +48,7 @@ var addCmd = &cobra.Command{
 		if strings.ToLower(backend) == "amazonbedrock" {
 			_ = cmd.MarkFlagRequired("providerRegion")
 		}
-		if strings.ToLower(backend) == "watsonxai" {
+		if strings.ToLower(backend) == "ibmwatsonxai" {
 			_ = cmd.MarkFlagRequired("providerId")
 		}
 	},
@@ -180,7 +180,7 @@ func init() {
 	//add flag for amazonbedrock region name
 	addCmd.Flags().StringVarP(&providerRegion, "providerRegion", "r", "", "Provider Region name (only for amazonbedrock, googlevertexai backend)")
 	//add flag for vertexAI/WatsonxAI Project ID
-	addCmd.Flags().StringVarP(&providerId, "providerId", "i", "", "Provider specific ID for e.g. project (only for googlevertexai/watsonxai backend)")
+	addCmd.Flags().StringVarP(&providerId, "providerId", "i", "", "Provider specific ID for e.g. project (only for googlevertexai/ibmwatsonxai backend)")
 	//add flag for OCI Compartment ID
 	addCmd.Flags().StringVarP(&compartmentId, "compartmentId", "k", "", "Compartment ID for generative AI model (only for oci backend)")
 	// add flag for openai organization
diff --git a/pkg/ai/iai.go b/pkg/ai/iai.go
index e1f1c41e10..e9a5618dca 100644
--- a/pkg/ai/iai.go
+++ b/pkg/ai/iai.go
@@ -32,7 +32,7 @@ var (
 		&HuggingfaceClient{},
 		&GoogleVertexAIClient{},
 		&OCIGenAIClient{},
-		&WatsonxAIClient{},
+		&IBMWatsonxAIClient{},
 	}
 	Backends = []string{
 		openAIClientName,
@@ -47,7 +47,7 @@ var (
 		huggingfaceAIClientName,
 		googleVertexAIClientName,
 		ociClientName,
-		watsonxAIClientName,
+		ibmWatsonxAIClientName,
 	}
 )
 
diff --git a/pkg/ai/watsonxai.go b/pkg/ai/watsonxai.go
index 15bbfdc922..784d802c9b 100644
--- a/pkg/ai/watsonxai.go
+++ b/pkg/ai/watsonxai.go
@@ -4,12 +4,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+
 	wx "github.com/IBM/watsonx-go/pkg/models"
 )
 
-const watsonxAIClientName = "watsonxai"
+const ibmWatsonxAIClientName = "ibmwatsonxai"
 
-type WatsonxAIClient struct {
+type IBMWatsonxAIClient struct {
 	nopCloser
 
 	client *wx.Client
@@ -25,7 +26,7 @@ const (
 	maxTokens = 2048
 )
 
-func (c *WatsonxAIClient) Configure(config IAIConfig) error {
+func (c *IBMWatsonxAIClient) Configure(config IAIConfig) error {
 	if config.GetModel() == "" {
 		c.model = modelMetallama
 	} else {
@@ -62,7 +63,7 @@
 	return nil
 }
 
-func (c *WatsonxAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
+func (c *IBMWatsonxAIClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
 	result, err := c.client.GenerateText(
 		c.model,
 		prompt,
@@ -80,6 +81,6 @@
 	return result.Text, nil
 }
 
-func (c *WatsonxAIClient) GetName() string {
-	return watsonxAIClientName
+func (c *IBMWatsonxAIClient) GetName() string {
+	return ibmWatsonxAIClientName
 }