SDK regeneration
fern-api[bot] committed Sep 18, 2024
1 parent abe8044 commit 0ee1044
Showing 4 changed files with 25 additions and 6 deletions.
11 changes: 9 additions & 2 deletions client/client.go
@@ -7,6 +7,7 @@ import (
context "context"
json "encoding/json"
errors "errors"
+ fmt "fmt"
v2 "github.com/cohere-ai/cohere-go/v2"
connectors "github.com/cohere-ai/cohere-go/v2/connectors"
core "github.com/cohere-ai/cohere-go/v2/core"
@@ -55,7 +56,7 @@ func NewClient(opts ...option.RequestOption) *Client {
}

// Generates a text response to a user message.
- // To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
+ // To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
func (c *Client) ChatStream(
ctx context.Context,
request *v2.ChatStreamRequest,
@@ -73,6 +74,9 @@ func (c *Client) ChatStream(
endpointURL := baseURL + "/v1/chat"

headers := core.MergeHeaders(c.header.Clone(), options.ToHeader())
+ if request.Accepts != nil {
+ headers.Add("Accepts", fmt.Sprintf("%v", *request.Accepts))
+ }

errorDecoder := func(statusCode int, body io.Reader) error {
raw, err := io.ReadAll(body)
@@ -179,7 +183,7 @@ func (c *Client) ChatStream(
}

// Generates a text response to a user message.
- // To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
+ // To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
func (c *Client) Chat(
ctx context.Context,
request *v2.ChatRequest,
@@ -197,6 +201,9 @@ func (c *Client) Chat(
endpointURL := baseURL + "/v1/chat"

headers := core.MergeHeaders(c.header.Clone(), options.ToHeader())
+ if request.Accepts != nil {
+ headers.Add("Accepts", fmt.Sprintf("%v", *request.Accepts))
+ }

errorDecoder := func(statusCode int, body io.Reader) error {
raw, err := io.ReadAll(body)
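
The practical effect of the client.go change is that Chat and ChatStream now forward an optional Accepts request field as an "Accepts" header, letting callers ask for server-sent events instead of the default newline-delimited stream. The sketch below shows how that field might be used; it assumes the client.WithToken constructor option, the Message request field, and the Recv/Close streaming helpers from the SDK's usual usage examples, none of which appear in this diff.

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"os"

	cohere "github.com/cohere-ai/cohere-go/v2"
	client "github.com/cohere-ai/cohere-go/v2/client"
)

func main() {
	co := client.NewClient(client.WithToken(os.Getenv("CO_API_KEY")))

	// New in this commit: Accepts is forwarded as the "Accepts" header.
	accepts := "text/event-stream"

	stream, err := co.ChatStream(context.TODO(), &cohere.ChatStreamRequest{
		Message: "Summarize server-sent events in one sentence.",
		Accepts: &accepts,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()

	for {
		event, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break // server closed the stream
		}
		if err != nil {
			log.Fatal(err)
		}
		// Assumed event shape: incremental text arrives as TextGeneration events.
		if event.TextGeneration != nil {
			fmt.Print(event.TextGeneration.Text)
		}
	}
}
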
2 changes: 1 addition & 1 deletion core/request_option.go
@@ -56,7 +56,7 @@ func (r *RequestOptions) cloneHeader() http.Header {
headers := r.HTTPHeader.Clone()
headers.Set("X-Fern-Language", "Go")
headers.Set("X-Fern-SDK-Name", "github.com/cohere-ai/cohere-go/v2")
- headers.Set("X-Fern-SDK-Version", "v2.11.0")
+ headers.Set("X-Fern-SDK-Version", "v2.11.1")
return headers
}

6 changes: 3 additions & 3 deletions finetuning/types.go
@@ -61,7 +61,7 @@ func (b *BaseModel) String() string {
// The possible types of fine-tuned models.
//
// - BASE_TYPE_UNSPECIFIED: Unspecified model.
- // - BASE_TYPE_GENERATIVE: Generative model.
+ // - BASE_TYPE_GENERATIVE: Deprecated: Generative model.
// - BASE_TYPE_CLASSIFICATION: Classification model.
// - BASE_TYPE_RERANK: Rerank model.
// - BASE_TYPE_CHAT: Chat model.
@@ -546,7 +546,7 @@ type Settings struct {
Hyperparameters *Hyperparameters `json:"hyperparameters,omitempty" url:"hyperparameters,omitempty"`
// read-only. Whether the model is single-label or multi-label (only for classification).
MultiLabel *bool `json:"multi_label,omitempty" url:"multi_label,omitempty"`
- // The Weights & Biases configuration.
+ // The Weights & Biases configuration (Chat fine-tuning only).
Wandb *WandbConfig `json:"wandb,omitempty" url:"wandb,omitempty"`

extraProperties map[string]interface{}
@@ -644,7 +644,7 @@ func (s Status) Ptr() *Status {
// The possible strategies used to serve a fine-tuned model.
//
// - STRATEGY_UNSPECIFIED: Unspecified strategy.
- // - STRATEGY_VANILLA: Serve the fine-tuned model on a dedicated GPU.
+ // - STRATEGY_VANILLA: Deprecated: Serve the fine-tuned model on a dedicated GPU.
// - STRATEGY_TFEW: Serve the fine-tuned model on a shared GPU.
type Strategy string

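
Given that BASE_TYPE_GENERATIVE and STRATEGY_VANILLA are now flagged as deprecated and the Weights & Biases settings apply to Chat fine-tuning only, new fine-tunes would typically be configured along the lines of the sketch below. Everything beyond the Settings.Wandb field shown in this hunk is an assumption: the BaseModel field, the BaseTypeBaseTypeChat / StrategyStrategyTfew constant names, and the WandbConfig field names follow the Fern naming pattern seen elsewhere in the SDK but are not confirmed by this diff, and required fields such as the training dataset are omitted.

package main

import (
	"os"

	finetuning "github.com/cohere-ai/cohere-go/v2/finetuning"
)

// chatFinetuneSettings is a sketch, not the SDK's documented usage.
func chatFinetuneSettings() *finetuning.Settings {
	entity := "my-wandb-team"
	// Prefer the TFEW serving strategy; STRATEGY_VANILLA is deprecated per this commit.
	strategy := finetuning.StrategyStrategyTfew
	return &finetuning.Settings{
		BaseModel: &finetuning.BaseModel{
			// Prefer a chat base type; BASE_TYPE_GENERATIVE is deprecated per this commit.
			BaseType: finetuning.BaseTypeBaseTypeChat,
			Strategy: &strategy,
		},
		// Wandb is honoured for Chat fine-tuning only, per the updated comment above.
		Wandb: &finetuning.WandbConfig{
			Project: "my-chat-finetune",
			ApiKey:  os.Getenv("WANDB_API_KEY"),
			Entity:  &entity,
		},
	}
}
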
12 changes: 12 additions & 0 deletions types.go
@@ -10,6 +10,8 @@ import (
)

type ChatRequest struct {
+ // Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
+ Accepts *string `json:"-" url:"-"`
// Text input for the model to respond to.
//
// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
@@ -237,6 +239,8 @@ func (c *ChatRequest) MarshalJSON() ([]byte, error) {
}

type ChatStreamRequest struct {
+ // Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
+ Accepts *string `json:"-" url:"-"`
// Text input for the model to respond to.
//
// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
@@ -491,6 +495,10 @@ type DetokenizeRequest struct {
type EmbedRequest struct {
// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
Texts []string `json:"texts,omitempty" url:"-"`
+ // An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
+ //
+ // The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and have a maximum size of 5MB.
+ Images []string `json:"images,omitempty" url:"-"`
// Defaults to embed-english-v2.0
//
// The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
@@ -3174,13 +3182,15 @@ func (e *EmbedFloatsResponse) String() string {
// - `"search_query"`: Used for embeddings of search queries run against a vector DB to find relevant documents.
// - `"classification"`: Used for embeddings passed through a text classifier.
// - `"clustering"`: Used for the embeddings run through a clustering algorithm.
+ // - `"image"`: Used for embeddings with image input.
type EmbedInputType string

const (
EmbedInputTypeSearchDocument EmbedInputType = "search_document"
EmbedInputTypeSearchQuery EmbedInputType = "search_query"
EmbedInputTypeClassification EmbedInputType = "classification"
EmbedInputTypeClustering EmbedInputType = "clustering"
+ EmbedInputTypeImage EmbedInputType = "image"
)

func NewEmbedInputTypeFromString(s string) (EmbedInputType, error) {
@@ -3193,6 +3203,8 @@ func NewEmbedInputTypeFromString(s string) (EmbedInputType, error) {
return EmbedInputTypeClassification, nil
case "clustering":
return EmbedInputTypeClustering, nil
+ case "image":
+ return EmbedInputTypeImage, nil
}
var t EmbedInputType
return "", fmt.Errorf("%s is not a valid %T", s, t)
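
The types.go additions also enable image embeddings: EmbedRequest gains an Images field that takes a single data URI, and EmbedInputType gains an "image" value. A sketch of an image embed call follows; the Embed method, the InputType request field, and the embed-english-v3.0 model name come from the SDK's existing embed usage rather than from this diff, so treat them as assumptions.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	cohere "github.com/cohere-ai/cohere-go/v2"
	client "github.com/cohere-ai/cohere-go/v2/client"
)

func main() {
	co := client.NewClient(client.WithToken(os.Getenv("CO_API_KEY")))

	model := "embed-english-v3.0"           // assumed model name with image support
	inputType := cohere.EmbedInputTypeImage // new constant from this commit

	resp, err := co.Embed(context.TODO(), &cohere.EmbedRequest{
		// New in this commit: at most one image per call, passed as a data URI.
		// The base64 payload here is a truncated placeholder, not real image data.
		Images:    []string{"data:image/png;base64,iVBORw0KGgo..."},
		Model:     &model,
		InputType: &inputType,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", resp)
}
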
