Commit

fix lint
l-winston committed Sep 25, 2024
1 parent 3f1af62 commit 498d335
Showing 3 changed files with 20 additions and 20 deletions.
18 changes: 9 additions & 9 deletions chat.go
@@ -210,15 +210,15 @@ type ChatCompletionRequest struct {
 	// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
 	// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
 	MaxCompletionTokens int                           `json:"max_completion_tokens,omitempty"`
-	Temperature      float32                       `json:"temperature,omitempty"`
-	TopP             float32                       `json:"top_p,omitempty"`
-	N                int                           `json:"n,omitempty"`
-	Stream           bool                          `json:"stream,omitempty"`
-	Stop             []string                      `json:"stop,omitempty"`
-	PresencePenalty  float32                       `json:"presence_penalty,omitempty"`
-	ResponseFormat   *ChatCompletionResponseFormat `json:"response_format,omitempty"`
-	Seed             *int                          `json:"seed,omitempty"`
-	FrequencyPenalty float32                       `json:"frequency_penalty,omitempty"`
+	Temperature         float32                       `json:"temperature,omitempty"`
+	TopP                float32                       `json:"top_p,omitempty"`
+	N                   int                           `json:"n,omitempty"`
+	Stream              bool                          `json:"stream,omitempty"`
+	Stop                []string                      `json:"stop,omitempty"`
+	PresencePenalty     float32                       `json:"presence_penalty,omitempty"`
+	ResponseFormat      *ChatCompletionResponseFormat `json:"response_format,omitempty"`
+	Seed                *int                          `json:"seed,omitempty"`
+	FrequencyPenalty    float32                       `json:"frequency_penalty,omitempty"`
 	// LogitBias must be keyed by token ID strings (the token's ID in the tokenizer), not by word strings.
 	// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
 	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
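
For context, the two commented fields in this struct are the easiest to misuse. Below is a minimal sketch of a request that sets both, assuming the standard go-openai client setup; the openai.GPT4o constant, the placeholder API key, and the prompt are illustrative assumptions, while the "1639" logit-bias key is the tokenizer token ID taken from the comment above.

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	// MaxCompletionTokens bounds visible output plus hidden reasoning tokens;
	// LogitBias keys are tokenizer token IDs ("1639"), never words ("You").
	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model:               openai.GPT4o,
		MaxCompletionTokens: 1000,
		LogitBias:           map[string]int{"1639": 6},
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Say hello."},
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}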
20 changes: 10 additions & 10 deletions chat_test.go
@@ -101,16 +101,16 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 			name: "log_probs_unsupported",
 			in: openai.ChatCompletionRequest{
 				MaxCompletionTokens: 1000,
-				LogProbs:  true,
-				Model:     openai.O1Preview,
+				LogProbs:            true,
+				Model:               openai.O1Preview,
 			},
 			expectedError: openai.ErrO1BetaLimitationsLogprobs,
 		},
 		{
 			name: "message_type_unsupported",
 			in: openai.ChatCompletionRequest{
 				MaxCompletionTokens: 1000,
-				Model:     openai.O1Mini,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleSystem,
@@ -123,7 +123,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 			name: "tool_unsupported",
 			in: openai.ChatCompletionRequest{
 				MaxCompletionTokens: 1000,
-				Model:     openai.O1Mini,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -144,7 +144,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 			name: "set_temperature_unsupported",
 			in: openai.ChatCompletionRequest{
 				MaxCompletionTokens: 1000,
-				Model:     openai.O1Mini,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -161,7 +161,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 			name: "set_top_unsupported",
 			in: openai.ChatCompletionRequest{
 				MaxCompletionTokens: 1000,
-				Model:     openai.O1Mini,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -179,7 +179,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 			name: "set_n_unsupported",
 			in: openai.ChatCompletionRequest{
 				MaxCompletionTokens: 1000,
-				Model:     openai.O1Mini,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -198,7 +198,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 			name: "set_presence_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
 				MaxCompletionTokens: 1000,
-				Model:     openai.O1Mini,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -215,7 +215,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 			name: "set_frequency_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
 				MaxCompletionTokens: 1000,
-				Model:     openai.O1Mini,
+				Model:               openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -296,7 +296,7 @@ func TestO1ModelChatCompletions(t *testing.T) {
 	defer teardown()
 	server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
 	_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
-		Model:     openai.O1Preview,
+		Model:               openai.O1Preview,
 		MaxCompletionTokens: 1000,
 		Messages: []openai.ChatCompletionMessage{
 			{
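
These table-driven cases double as usage documentation: each unsupported option maps to a dedicated sentinel error. Below is a sketch of what a caller sees for the first case, reusing identifiers from the tests above; since the limitation cases need no registered HTTP handler, the validation appears to run client-side, and the placeholder key and printed message are illustrative.

package main

import (
	"context"
	"errors"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		MaxCompletionTokens: 1000,
		LogProbs:            true, // not supported for o1 models
		Model:               openai.O1Preview,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
	})
	if errors.Is(err, openai.ErrO1BetaLimitationsLogprobs) {
		fmt.Println("rejected before any request was sent:", err)
	}
}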
2 changes: 1 addition & 1 deletion completion.go
@@ -7,7 +7,7 @@ import (
 )
 
 var (
-	ErrO1MaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens") //nolint:lll
+	ErrO1MaxTokensDeprecated                   = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens") //nolint:lll
 	ErrCompletionUnsupportedModel              = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll
 	ErrCompletionStreamNotSupported            = errors.New("streaming is not supported with this method, please use CreateCompletionStream") //nolint:lll
 	ErrCompletionRequestPromptTypeNotSupported = errors.New("the type of CompletionRequest.Prompt only supports string and []string") //nolint:lll
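
Callers can branch on these sentinels with errors.Is. A minimal sketch, assuming, as the variable name and the o1 tests above suggest, that the chat endpoint rejects a MaxTokens value for o1-family models in favor of MaxCompletionTokens; the placeholder key and the retry logic are illustrative.

package main

import (
	"context"
	"errors"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	req := openai.ChatCompletionRequest{
		Model:     openai.O1Mini,
		MaxTokens: 100, // deprecated for o1 models
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
	}
	_, err := client.CreateChatCompletion(context.Background(), req)
	if errors.Is(err, openai.ErrO1MaxTokensDeprecated) {
		// Migrate to the replacement field and retry.
		req.MaxTokens = 0
		req.MaxCompletionTokens = 100
		_, err = client.CreateChatCompletion(context.Background(), req)
	}
	fmt.Println(err)
}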
