From 94e84e04651125d5004fe0a962e6338c9a7fa2c7 Mon Sep 17 00:00:00 2001 From: Tanner Kvarfordt Date: Sun, 14 Jan 2024 01:23:04 -0700 Subject: [PATCH] Deprecations and Fine Tuning (#12) * Updated model for completions test * Removed obsolete edits package * Added support for fine-tuning endpoint * Removed accidentally committed binary files * Updated .gitignore * Deprecated the completions endpoint --- .gitignore | 3 +- README.md | 4 +- common/common.go | 4 + completions/completions.go | 1 + completions/completions_test.go | 2 +- edits/README.md | 7 - edits/edits.go | 98 ------- edits/edits_test.go | 31 --- examples/edits/edits-example.go | 33 --- .../finetuning-example.go} | 4 +- finetunes/README.md | 9 - finetunes/finetunes.go | 255 ------------------ finetunes/finetunes_test.go | 26 -- finetuning/README.md | 9 + finetuning/finetuning.go | 241 +++++++++++++++++ finetuning/finetuning_test.go | 48 ++++ 16 files changed, 308 insertions(+), 467 deletions(-) delete mode 100644 edits/README.md delete mode 100644 edits/edits.go delete mode 100644 edits/edits_test.go delete mode 100644 examples/edits/edits-example.go rename examples/{finetunes/finetunes-example.go => finetuning/finetuning-example.go} (82%) delete mode 100644 finetunes/README.md delete mode 100644 finetunes/finetunes.go delete mode 100644 finetunes/finetunes_test.go create mode 100644 finetuning/README.md create mode 100644 finetuning/finetuning.go create mode 100644 finetuning/finetuning_test.go diff --git a/.gitignore b/.gitignore index 26e3bbd..8a116b7 100644 --- a/.gitignore +++ b/.gitignore @@ -27,10 +27,9 @@ dist/ examples/audio/audio examples/chat/chat examples/completions/completions -examples/edits/edits examples/embeddings/embeddings examples/files/files -examples/finetunes/finetunes +examples/finetuning/finetuning examples/images/images examples/models/models examples/moderations/moderations diff --git a/README.md b/README.md index cde7bce..eff8dd9 100644 --- a/README.md +++ b/README.md @@ -12,11 +12,9 @@ The links below lead to examples of how to use each library package. - [x] [Audio](./audio/README.md) - [x] [Chat](./chat/README.md) - [x] [Completions](./completions/README.md) -- [x] ~~[Edits](./edits/README.md)~~ (Deprecated) - [x] [Embeddings](./embeddings/README.md) -- [ ] Fine-Tuning - [x] [Files](./files/README.md) -- [x] ~~[Fine-Tunes](./finetunes/README.md)~~ (Deprecated) +- [x] [Fine-Tuning](./finetuning/README.md) - [x] [Images](./images/README.md) - [x] [Models](./models/README.md) - [x] [Moderations](./moderations/README.md) diff --git a/common/common.go b/common/common.go index bc79690..c7df461 100644 --- a/common/common.go +++ b/common/common.go @@ -33,8 +33,12 @@ type responseErrorWrapper struct { type ResponseError struct { // The error message. Message string `json:"message"` + // The error type. Type string `json:"type"` + + // The parameter that was invalid. + Param string `json:"param"` } func (e *ResponseError) Error() string { diff --git a/completions/completions.go b/completions/completions.go index 8a76f42..52f6774 100644 --- a/completions/completions.go +++ b/completions/completions.go @@ -1,3 +1,4 @@ +// Deprecated: Replaced by Chat Completions. See https://platform.openai.com/docs/api-reference/completions. // Package completions provides bindings for the [completions] [endpoint]. // Given a prompt, the model will return one or more predicted completions, // and can also return the probabilities of alternative tokens at each position. 
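The new Param field on common.ResponseError records which request parameter the API objected to. Below is a minimal sketch (not part of this patch) of how a caller might surface it, assuming the endpoint wrappers return the *common.ResponseError itself as the error value when the API reports one, as the helpers later in this patch do; the job ID is a placeholder and the finetuning package is the one introduced further down.

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/Kardbord/gopenai/authentication"
	"github.com/Kardbord/gopenai/common"
	"github.com/Kardbord/gopenai/finetuning"
)

func main() {
	authentication.SetAPIKey(os.Getenv("OPENAI_API_KEY"))

	// Deliberately retrieve a job that does not exist so the API reports an error.
	_, err := finetuning.MakeRetrieveRequest("ftjob-placeholder", nil)
	if err == nil {
		return
	}

	var respErr *common.ResponseError
	if errors.As(err, &respErr) {
		// Param names the offending request parameter when the API provides one.
		fmt.Printf("type=%s param=%q message=%s\n", respErr.Type, respErr.Param, respErr.Message)
		return
	}
	fmt.Println(err)
}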
diff --git a/completions/completions_test.go b/completions/completions_test.go index 152e015..3e6612a 100644 --- a/completions/completions_test.go +++ b/completions/completions_test.go @@ -18,7 +18,7 @@ func init() { func TestCompletions(t *testing.T) { resp, err := completions.MakeRequest(&completions.Request{ - Model: "text-ada-001", + Model: "gpt-3.5-turbo-instruct", Prompt: []string{"So long, and thanks for all the"}, MaxTokens: 5, Echo: true, diff --git a/edits/README.md b/edits/README.md deleted file mode 100644 index b76e869..0000000 --- a/edits/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Edits - -Bindings for the [edits](https://beta.openai.com/docs/api-reference/edits) [endpoint](https://api.openai.com/v1/edits). - -## Example - -See [edits-example.go](../examples/edits/edits-example.go). diff --git a/edits/edits.go b/edits/edits.go deleted file mode 100644 index e098662..0000000 --- a/edits/edits.go +++ /dev/null @@ -1,98 +0,0 @@ -// Deprecated: Replaced by GPT-3.5 Turbo. See https://community.openai.com/t/openai-deprecation-summary/289539 -// -// Package edits provides bindings for the [edits] [endpoint]. -// Given a prompt and an instruction, the model will return -// an edited version of the prompt. -// -// [edits]: https://beta.openai.com/docs/api-reference/edits -// [endpoint]: https://api.openai.com/v1/edits -package edits - -import ( - "errors" - "net/http" - - "github.com/Kardbord/gopenai/common" - "github.com/Kardbord/gopenai/moderations" -) - -const Endpoint = common.BaseURL + "edits" - -// Request structure for the edits API endpoint. -type Request struct { - // ID of the model to use. You can use the List models API - // to see all of your available models, or see our Model - // overview for descriptions of them. - Model string `json:"model"` - - // The input text to use as a starting point for the edit. - Input string `json:"input"` - - // The instruction that tells the model how to edit the prompt. - Instruction string `json:"instruction"` - - // How many edits to generate for the input and instruction. - N *int64 `json:"n"` - - // What sampling temperature to use. Higher values means the model - // will take more risks. Try 0.9 for more creative applications, - // and 0 (argmax sampling) for ones with a well-defined answer. - // We generally recommend altering this or top_p but not both. - Temperature *float64 `json:"temperature"` - - // An alternative to sampling with temperature, called nucleus sampling, - // where the model considers the results of the tokens with top_p - // probability mass. So 0.1 means only the tokens comprising the top 10% - // probability mass are considered. - // We generally recommend altering this or temperature but not both. - TopP *float64 `json:"top_p"` -} - -// Response structure for the edits API endpoint. -type Response struct { - Object string `json:"object"` - Created int64 `json:"created"` - Choices []struct { - Text string `json:"text"` - Index int64 `json:"index"` - } `json:"choices"` - Usage common.ResponseUsage `json:"usage"` - Error *common.ResponseError `json:"error,omitempty"` -} - -// Make an edits request. 
-func MakeRequest(request *Request, organizationID *string) (*Response, error) { - r, err := common.MakeRequest[Request, Response](request, Endpoint, http.MethodPost, organizationID) - if err != nil { - return nil, err - } - if r == nil { - return nil, errors.New("nil response received") - } - if r.Error != nil { - return r, r.Error - } - if len(r.Choices) == 0 { - return r, errors.New("no choices in response") - } - return r, nil -} - -// Runs request inputs through the moderations endpoint prior to making the request. -// Returns a moderations.ModerationFlagError prior to making the request if the -// inputs are flagged by the moderations endpoint. -func MakeModeratedRequest(request *Request, organizationID *string) (*Response, *moderations.Response, error) { - modr, err := moderations.MakeModeratedRequest(&moderations.Request{ - Input: []string{request.Input}, - Model: moderations.ModelLatest, - }, organizationID) - if err != nil { - return nil, modr, err - } - - r, err := MakeRequest(request, organizationID) - if err != nil { - return nil, modr, err - } - return r, modr, nil -} diff --git a/edits/edits_test.go b/edits/edits_test.go deleted file mode 100644 index 00295db..0000000 --- a/edits/edits_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package edits_test - -import ( - "os" - "testing" - - "github.com/Kardbord/gopenai/authentication" - "github.com/Kardbord/gopenai/edits" - _ "github.com/joho/godotenv/autoload" -) - -const OpenAITokenEnv = "OPENAI_API_KEY" - -func init() { - key := os.Getenv(OpenAITokenEnv) - authentication.SetAPIKey(key) -} - -func TestEdits(t *testing.T) { - resp, err := edits.MakeRequest(&edits.Request{ - Model: "text-davinci-edit-001", - Input: "What day of the wek is it?", - Instruction: "Fix the spelling mistakes", - }, nil) - if err != nil { - t.Fatal(err) - } - if len(resp.Choices) < 1 { - t.Fatal("No choices received") - } -} diff --git a/examples/edits/edits-example.go b/examples/edits/edits-example.go deleted file mode 100644 index 5467a63..0000000 --- a/examples/edits/edits-example.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/Kardbord/gopenai/authentication" - "github.com/Kardbord/gopenai/edits" - _ "github.com/joho/godotenv/autoload" -) - -const OpenAITokenEnv = "OPENAI_API_KEY" - -func init() { - key := os.Getenv(OpenAITokenEnv) - authentication.SetAPIKey(key) -} - -func main() { - input := "What day of the wek is it?" - resp, _, err := edits.MakeModeratedRequest(&edits.Request{ - Model: "text-davinci-edit-001", - Input: input, - Instruction: "Fix the spelling mistakes", - }, nil) - if err != nil { - fmt.Println(err) - return - } - - fmt.Printf("Input=%s\n", input) - fmt.Printf("Result=%s\n", resp.Choices[0].Text) -} diff --git a/examples/finetunes/finetunes-example.go b/examples/finetuning/finetuning-example.go similarity index 82% rename from examples/finetunes/finetunes-example.go rename to examples/finetuning/finetuning-example.go index 95b5063..e5bc61c 100644 --- a/examples/finetunes/finetunes-example.go +++ b/examples/finetuning/finetuning-example.go @@ -5,7 +5,7 @@ import ( "os" "github.com/Kardbord/gopenai/authentication" - "github.com/Kardbord/gopenai/finetunes" + "github.com/Kardbord/gopenai/finetuning" _ "github.com/joho/godotenv/autoload" ) @@ -19,7 +19,7 @@ func init() { func main() { // TODO: build a more comprehensive example of how to use this endpoint. 
- resp, err := finetunes.MakeListRequest(nil) + resp, err := finetuning.MakeListRequest(nil, nil, nil) if err != nil { fmt.Println(err) return diff --git a/finetunes/README.md b/finetunes/README.md deleted file mode 100644 index 8a9f652..0000000 --- a/finetunes/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Fine-Tunes - -Bindings for the [fine-tunes](https://beta.openai.com/docs/api-reference/fine-tunes) [endpoint](https://api.openai.com/v1/fine-tunes). - -## Example - -See [Open AI's guide](https://beta.openai.com/docs/guides/fine-tuning) to fine-tunes. - -See [finetunes-example.go](../examples/finetunes/finetunes-example.go). diff --git a/finetunes/finetunes.go b/finetunes/finetunes.go deleted file mode 100644 index 9b309cb..0000000 --- a/finetunes/finetunes.go +++ /dev/null @@ -1,255 +0,0 @@ -// Deprecated: See https://platform.openai.com/docs/deprecations/2023-08-22-fine-tunes-endpoint -// -// Package finetunes provides bindings for the [finetunes] [endpoint]. -// Manage fine-tuning jobs to tailor a model to your specific training data. -// Related guide: [Fine-tune models]. -// -// [finetunes]: https://beta.openai.com/docs/api-reference/finetunes -// [endpoint]: https://api.openai.com/v1/finetunes -// [Fine-tune models]: https://beta.openai.com/docs/guides/fine-tuning -package finetunes - -import ( - "errors" - "fmt" - "net/http" - - "github.com/Kardbord/gopenai/common" - "github.com/Kardbord/gopenai/files" - "github.com/Kardbord/gopenai/models" -) - -const Endpoint = common.BaseURL + "fine-tunes" - -// Request structure for the "create" fine-tune endpoint. -type CreationRequest struct { - - // The ID of an uploaded file that contains training data. - // - // See [upload file] for how to upload a file. - // - // Your dataset must be formatted as a JSONL file, where each training example - // is a JSON object with the keys "prompt" and "completion". Additionally, you - // must upload your file with the purpose fine-tune. - // - // See the [fine-tuning guide] for more details. - // - // [upload file]: https://beta.openai.com/docs/api-reference/files/upload - // [fine-tuning guide]: https://beta.openai.com/docs/guides/fine-tuning/creating-training-data - TrainingFile string `json:"training_file,omitempty"` - // The ID of an uploaded file that contains validation data. - // - // If you provide this file, the data is used to generate validation metrics periodically - // during fine-tuning. These metrics can be viewed in the [fine-tuning results] file. - // Your train and validation data should be mutually exclusive. - // - // Your dataset must be formatted as a JSONL file, where each validation example is a - // JSON object with the keys "prompt" and "completion". Additionally, you must upload - // your file with the purpose fine-tune. - // - // See the [fine-tuning guide] for more details. - // - // [fine-tuning results]: https://beta.openai.com/docs/guides/fine-tuning/analyzing-your-fine-tuned-model - // [fine-tuning guide]: https://beta.openai.com/docs/guides/fine-tuning/creating-training-data - ValidationFile *string `json:"validation_file,omitempty"` - // The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", - // "davinci", or a fine-tuned model created after 2022-04-21. To learn more about these models, - // see the [Models] documentation. - // - // Defaults to "curie". - // - // [Models]: https://beta.openai.com/docs/models - Model *string `json:"model,omitempty"` - // The number of epochs to train the model for. 
An epoch refers to one full cycle through the - // training dataset. - NEpochs *uint64 `json:"n_epochs,omitempty"` - // The batch size to use for training. The batch size is the number of training examples used - // to train a single forward and backward pass. - // - // By default, the batch size will be dynamically configured to be ~0.2% of the number of - // examples in the training set, capped at 256 - in general, we've found that larger batch - // sizes tend to work better for larger datasets. - BatchSize *uint64 `json:"batch_size,omitempty"` - // The learning rate multiplier to use for training. The fine-tuning learning rate is the - // original learning rate used for pretraining multiplied by this value. - // - // By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final - // batch_size (larger learning rates tend to perform better with larger batch sizes). - // We recommend experimenting with values in the range 0.02 to 0.2 to see what produces - // the best results. - LearningRateMultiplier *float64 `json:"learning_rate_multiplier,omitempty"` - // The weight to use for loss on the prompt tokens. This controls how much the model tries - // to learn to generate the prompt (as compared to the completion which always has a weight - // of 1.0), and can add a stabilizing effect to training when completions are short. - // - // If prompts are extremely long (relative to completions), it may make sense to reduce - // this weight so as to avoid over-prioritizing learning the prompt. - PromptLossWeight *float64 `json:"prompt_loss_weight,omitempty"` - // If set, we calculate classification-specific metrics such as accuracy and F-1 score using - // the validation set at the end of every epoch. These metrics can be viewed in the results file. - // - // In order to compute classification metrics, you must provide a validation_file. Additionally, - // you must specify classification_n_classes for multiclass classification or classification_positive_class - // for binary classification. - ComputeClassificationMetrics bool `json:"compute_classification_metrics,omitempty"` - // The number of classes in a classification task. This parameter is required for multiclass classification. - ClassificationNClasses *uint64 `json:"classification_n_classes,omitempty"` - // The positive class in binary classification. - // - // This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification. - ClassificationPositiveClass *string `json:"classification_positive_class,omitempty"` - // A string of up to 40 characters that will be added to your fine-tuned model name. - // - // For example, a suffix of "custom-model-name" would produce a model name like - // ada:ft-your-org:custom-model-name-2022-02-15-04-21-04. - Suffix *string `json:"suffix,omitempty"` - // TODO: Add support for classification_betas: https://beta.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_betas - //ClassificationBetas -} - -type FineTuneEvent struct { - Object string `json:"object"` - CreatedAt uint64 `json:"created_at"` - Level string `json:"level"` - Message string `json:"message"` -} - -// Response structure for the "create" fine-tune endpoint. 
-type FineTune struct { - ID string `json:"id"` - Object string `json:"object"` - Model string `json:"model"` - CreatedAt uint64 `json:"created_at"` - Events []FineTuneEvent `json:"events"` - FineTunedModel string `json:"fine_tuned_model"` - Hyperparams struct { - BatchSize uint64 `json:"batch_size"` - LearningRateMultiplier float64 `json:"learning_rate_multiplier"` - NEpochs uint64 `json:"n_epochs"` - PromptLossWeight float64 `json:"prompt_loss_weight"` - } `json:"hyperparams"` - OrganizationID string `json:"organization_id"` - ResultsFiles []files.UploadedFile `json:"results_files"` - Status string `json:"status"` - ValidationFiles []files.UploadedFile `json:"validation_files"` - TrainingFiles []files.UploadedFile `json:"training_files"` - UpdatedAt uint64 `json:"updated_at"` - Error *common.ResponseError `json:"error,omitempty"` -} - -// Creates a job that fine-tunes a specified model from a given dataset. -// -// Response includes details of the enqueued job including job status and -// the name of the fine-tuned models once complete. -// -// [Learn more about Fine-tuning] -// -// [Learn more about Fine-tuning]: https://beta.openai.com/docs/guides/fine-tuning -func MakeCreationRequest(request *CreationRequest, organizationID *string) (*FineTune, error) { - r, err := common.MakeRequest[CreationRequest, FineTune](request, Endpoint, http.MethodPost, organizationID) - if err != nil { - return nil, err - } - if r == nil { - return nil, errors.New("nil response received") - } - if r.Error != nil { - return r, r.Error - } - return r, nil -} - -type ListResponse struct { - Object string `json:"object"` - Data []FineTune `json:"data"` - Error *common.ResponseError `json:"error,omitempty"` -} - -// List your organization's fine-tuning jobs -func MakeListRequest(organizationID *string) (*ListResponse, error) { - r, err := common.MakeRequest[any, ListResponse](nil, Endpoint, http.MethodGet, organizationID) - if err != nil { - return nil, err - } - if r == nil { - return nil, errors.New("nil response received") - } - if r.Error != nil { - return r, r.Error - } - return r, nil -} - -// Gets info about the fine-tune job. -func MakeRetrieveRequest(fineTuneID string, organizationID *string) (*FineTune, error) { - r, err := common.MakeRequest[any, FineTune](nil, fmt.Sprintf("%s/%s", Endpoint, fineTuneID), http.MethodGet, organizationID) - if err != nil { - return nil, err - } - if r == nil { - return nil, errors.New("nil response received") - } - if r.Error != nil { - return r, r.Error - } - return r, nil -} - -// Immediately cancel a fine-tune job. -func MakeCancelRequest(fineTuneID string, organizationID *string) (*FineTune, error) { - r, err := common.MakeRequest[any, FineTune](nil, fmt.Sprintf("%s/%s/cancel", Endpoint, fineTuneID), http.MethodPost, organizationID) - if err != nil { - return nil, err - } - if r == nil { - return nil, errors.New("nil response received") - } - if r.Error != nil { - return r, r.Error - } - return r, nil -} - -type ListEventsResponse struct { - Object string `json:"object"` - Data []FineTuneEvent `json:"data"` - Error *common.ResponseError `json:"error,omitempty"` -} - -// Get fine-grained status updates for a fine-tune job. 
-func MakeListEventsRequest(fineTuneID string, organizationID *string) (*ListEventsResponse, error) {
-	// TODO: support streaming: https://beta.openai.com/docs/api-reference/fine-tunes/events#fine-tunes/events-stream
-	r, err := common.MakeRequest[any, ListEventsResponse](nil, fmt.Sprintf("%s/%s/events", Endpoint, fineTuneID), http.MethodGet, organizationID)
-	if err != nil {
-		return nil, err
-	}
-	if r == nil {
-		return nil, errors.New("nil response received")
-	}
-	if r.Error != nil {
-		return r, r.Error
-	}
-	return r, nil
-}
-
-type DeleteResponse struct {
-	ID      int64                  `json:"id"`
-	Object  string                 `json:"object"`
-	Deleted bool                   `json:"deleted"`
-	Error   *common.ResponseError  `json:"error,omitempty"`
-}
-
-// Delete a fine-tuned model. You must have the Owner role in your organization.
-func MakeDeleteRequest(fineTuneModel string, organizationID *string) (*DeleteResponse, error) {
-	r, err := common.MakeRequest[any, DeleteResponse](nil, fmt.Sprintf("%s/%s", models.Endpoint, fineTuneModel), http.MethodDelete, organizationID)
-	if err != nil {
-		return nil, err
-	}
-	if r == nil {
-		return nil, errors.New("nil response received")
-	}
-	if r.Error != nil {
-		return r, r.Error
-	}
-	return r, nil
-}
diff --git a/finetunes/finetunes_test.go b/finetunes/finetunes_test.go
deleted file mode 100644
index 55e2c38..0000000
--- a/finetunes/finetunes_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package finetunes_test
-
-import (
-	"os"
-	"testing"
-
-	"github.com/Kardbord/gopenai/authentication"
-	"github.com/Kardbord/gopenai/finetunes"
-	_ "github.com/joho/godotenv/autoload"
-)
-
-const OpenAITokenEnv = "OPENAI_API_KEY"
-
-func init() {
-	key := os.Getenv(OpenAITokenEnv)
-	authentication.SetAPIKey(key)
-}
-
-func TestFinetunes(t *testing.T) {
-	// TODO: build a more comprehensive tests for the finetunes endpoint.
-
-	_, err := finetunes.MakeListRequest(nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
diff --git a/finetuning/README.md b/finetuning/README.md
new file mode 100644
index 0000000..62dbe20
--- /dev/null
+++ b/finetuning/README.md
@@ -0,0 +1,9 @@
+# Fine-Tuning
+
+Bindings for the [fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning) [endpoint](https://api.openai.com/v1/fine_tuning/jobs).
+
+## Example
+
+See [OpenAI's guide](https://platform.openai.com/docs/guides/fine-tuning) to fine-tuning.
+
+See [finetuning-example.go](../examples/finetuning/finetuning-example.go).
diff --git a/finetuning/finetuning.go b/finetuning/finetuning.go
new file mode 100644
index 0000000..403bf75
--- /dev/null
+++ b/finetuning/finetuning.go
@@ -0,0 +1,241 @@
+// Package finetuning provides bindings for the [fine-tuning] [endpoint].
+// Manage fine-tuning jobs to tailor a model to your specific training data.
+// Related guide: [Fine-tune models].
+//
+// [fine-tuning]: https://platform.openai.com/docs/api-reference/fine-tuning
+// [endpoint]: https://api.openai.com/v1/fine_tuning/jobs
+// [Fine-tune models]: https://platform.openai.com/docs/guides/fine-tuning
+package finetuning
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/Kardbord/gopenai/common"
+	"github.com/Kardbord/gopenai/models"
+)
+
+const Endpoint = common.BaseURL + "fine_tuning/jobs"
+
+type HyperParameters struct {
+	// Number of examples in each batch. A larger batch size means that model
+	// parameters are updated less frequently, but with lower variance.
+	BatchSize uint64 `json:"batch_size"`
+
+	// Scaling factor for the learning rate. A smaller learning rate may be
+	// useful to avoid overfitting.
+ LearningRateMultiplier float64 `json:"learning_rate_multiplier"` + + // The number of epochs to train the model for. An epoch refers to one + // full cycle through the training dataset. + NEpochs uint64 `json:"n_epochs"` +} + +// Request structure for the "create" fine-tune endpoint. +type CreationRequest struct { + // The name of the model to fine-tune. You can select one of the supported [models]. + // + // [models]: https://platform.openai.com/docs/models/overview + Model string `json:"model,omitempty"` + + // The ID of an uploaded file that contains training data. + // + // See [upload file] for how to upload a file. + // + // Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose fine-tune. + // + // See the [fine-tuning guide] for more details. + // + // [upload file]: https://beta.openai.com/docs/api-reference/files/upload + // [fine-tuning guide]: https://beta.openai.com/docs/guides/fine-tuning/creating-training-data + TrainingFile string `json:"training_file,omitempty"` + + // The hyperparameters used for the fine-tuning job. + Hyperparams HyperParameters `json:"hyperparameters"` + + // A string of up to 18 characters that will be added to your fine-tuned model name. + // + // For example, a suffix of "custom-model-name" would produce a model name like + // ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel. + Suffix *string `json:"suffix,omitempty"` + + // The ID of an uploaded file that contains validation data. + // + // If you provide this file, the data is used to generate validation metrics periodically + // during fine-tuning. These metrics can be viewed in the fine-tuning results file. The + // same data should not be present in both train and validation files. + // + // Your dataset must be formatted as a JSONL file. You must upload your file with the + // purpose fine-tune. + // + // See the [fine-tuning guide] for more details. + // + // [fine-tuning results]: https://beta.openai.com/docs/guides/fine-tuning/analyzing-your-fine-tuned-model + // [fine-tuning guide]: https://beta.openai.com/docs/guides/fine-tuning/creating-training-data + ValidationFile *string `json:"validation_file,omitempty"` +} + +type FineTuneEvent struct { + ID string `json:"id"` + CreatedAt uint64 `json:"created_at"` + Level string `json:"level"` + Message string `json:"message"` + Object string `json:"object"` + Type string `json:"type"` +} + +// Response structure for the "create" fine-tune endpoint. +type FineTune struct { + ID string `json:"id"` + CreatedAt uint64 `json:"created_at"` + Error *common.ResponseError `json:"error,omitempty"` + FineTunedModel string `json:"fine_tuned_model"` + FinishedAt uint64 `json:"finished_at"` + Hyperparams HyperParameters `json:"hyperparameters"` + Model string `json:"model"` + Object string `json:"object"` + OrganizationID string `json:"organization_id"` + ResultFiles []string `json:"result_files"` + Status string `json:"status"` + TrainedTokens uint64 `json:"trained_tokens"` + TrainingFile string `json:"training_file"` + ValidationFile string `json:"validation_file"` +} + +// Creates a job that fine-tunes a specified model from a given dataset. +// +// Response includes details of the enqueued job including job status and +// the name of the fine-tuned models once complete. 
+// +// [Learn more about Fine-tuning] +// +// [Learn more about Fine-tuning]: https://beta.openai.com/docs/guides/fine-tuning +func MakeCreationRequest(request *CreationRequest, organizationID *string) (*FineTune, error) { + r, err := common.MakeRequest[CreationRequest, FineTune](request, Endpoint, http.MethodPost, organizationID) + if err != nil { + return nil, err + } + if r == nil { + return nil, errors.New("nil response received") + } + if r.Error != nil { + return r, r.Error + } + return r, nil +} + +type ListResponse struct { + Object string `json:"object"` + Data []FineTune `json:"data"` + Error *common.ResponseError `json:"error,omitempty"` + HasMore bool `json:"has_more"` +} + +// List your organization's fine-tuning jobs +func MakeListRequest(limit *uint64, after, organizationID *string) (*ListResponse, error) { + endpoint := Endpoint + if after != nil && limit != nil { + endpoint = fmt.Sprintf("%s?after=%s&limit=%d", endpoint, *after, *limit) + } else if after != nil { + endpoint = fmt.Sprintf("%s?after=%s", endpoint, *after) + } else if limit != nil { + endpoint = fmt.Sprintf("%s?limit=%d", endpoint, *limit) + } + r, err := common.MakeRequest[any, ListResponse](nil, endpoint, http.MethodGet, organizationID) + if err != nil { + return nil, err + } + if r == nil { + return nil, errors.New("nil response received") + } + if r.Error != nil { + return r, r.Error + } + return r, nil +} + +// Gets info about the fine-tune job. +func MakeRetrieveRequest(fineTuneID string, organizationID *string) (*FineTune, error) { + r, err := common.MakeRequest[any, FineTune](nil, fmt.Sprintf("%s/%s", Endpoint, fineTuneID), http.MethodGet, organizationID) + if err != nil { + return nil, err + } + if r == nil { + return nil, errors.New("nil response received") + } + if r.Error != nil { + return r, r.Error + } + return r, nil +} + +// Immediately cancel a fine-tune job. +func MakeCancelRequest(fineTuneID string, organizationID *string) (*FineTune, error) { + r, err := common.MakeRequest[any, FineTune](nil, fmt.Sprintf("%s/%s/cancel", Endpoint, fineTuneID), http.MethodPost, organizationID) + if err != nil { + return nil, err + } + if r == nil { + return nil, errors.New("nil response received") + } + if r.Error != nil { + return r, r.Error + } + return r, nil +} + +type ListEventsResponse struct { + Object string `json:"object"` + Data []FineTuneEvent `json:"data"` + Error *common.ResponseError `json:"error,omitempty"` + HasMore bool `json:"has_more"` +} + +// Get fine-grained status updates for a fine-tune job. 
+func MakeListEventsRequest(fineTuneID string, limit *uint64, after, organizationID *string) (*ListEventsResponse, error) { + // TODO: support streaming: https://beta.openai.com/docs/api-reference/fine-tunes/events#fine-tunes/events-stream + + endpoint := fmt.Sprintf("%s/%s/events", Endpoint, fineTuneID) + if after != nil && limit != nil { + endpoint = fmt.Sprintf("%s?after=%s&limit=%d", endpoint, *after, *limit) + } else if after != nil { + endpoint = fmt.Sprintf("%s?after=%s", endpoint, *after) + } else if limit != nil { + endpoint = fmt.Sprintf("%s?limit=%d", endpoint, *limit) + } + + r, err := common.MakeRequest[any, ListEventsResponse](nil, endpoint, http.MethodGet, organizationID) + if err != nil { + return nil, err + } + if r == nil { + return nil, errors.New("nil response received") + } + if r.Error != nil { + return r, r.Error + } + return r, nil +} + +type DeleteResponse struct { + ID int64 `json:"id"` + Object string `json:"object"` + Deleted bool `json:"deleted"` + Error *common.ResponseError `json:"error,omitempty"` +} + +// Delete a fine-tuned model. You must have the Owner role in your organization. +func MakeDeleteRequest(fineTuneModel string, organizationID *string) (*DeleteResponse, error) { + r, err := common.MakeRequest[any, DeleteResponse](nil, fmt.Sprintf("%s/%s", models.Endpoint, fineTuneModel), http.MethodDelete, organizationID) + if err != nil { + return nil, err + } + if r == nil { + return nil, errors.New("nil response received") + } + if r.Error != nil { + return r, r.Error + } + return r, nil +} diff --git a/finetuning/finetuning_test.go b/finetuning/finetuning_test.go new file mode 100644 index 0000000..bcd2cd1 --- /dev/null +++ b/finetuning/finetuning_test.go @@ -0,0 +1,48 @@ +package finetuning_test + +import ( + "os" + "testing" + + "github.com/Kardbord/gopenai/authentication" + "github.com/Kardbord/gopenai/finetuning" + _ "github.com/joho/godotenv/autoload" +) + +const OpenAITokenEnv = "OPENAI_API_KEY" + +func init() { + key := os.Getenv(OpenAITokenEnv) + authentication.SetAPIKey(key) +} + +func TestFinetunes(t *testing.T) { + // TODO: build more comprehensive tests for the finetunes endpoint. + limit := uint64(2) + after := "ft-event-TjX0lMfOniCZX64t9PUQT5hn" + + { // Simple request + _, err := finetuning.MakeListRequest(nil, nil, nil) + if err != nil { + t.Fatal(err) + } + } + { // After + _, err := finetuning.MakeListRequest(nil, &after, nil) + if err != nil { + t.Fatal(err) + } + } + { // Limit + _, err := finetuning.MakeListRequest(&limit, nil, nil) + if err != nil { + t.Fatal(err) + } + } + { // After and Limit + _, err := finetuning.MakeListRequest(&limit, &after, nil) + if err != nil { + t.Fatal(err) + } + } +}
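Beyond the list calls exercised in the test above, a typical flow with the new package is to create a job and then follow its events. Below is a minimal sketch under stated assumptions: the model name, training-file ID, and hyperparameter values are placeholders only, and the job is assumed to be accepted by the API; only MakeCreationRequest and MakeListEventsRequest from this patch are used.

package main

import (
	"fmt"
	"os"

	"github.com/Kardbord/gopenai/authentication"
	"github.com/Kardbord/gopenai/finetuning"
	_ "github.com/joho/godotenv/autoload"
)

func main() {
	authentication.SetAPIKey(os.Getenv("OPENAI_API_KEY"))

	// "file-abc123" is a hypothetical ID of a JSONL file previously uploaded
	// with purpose "fine-tune"; the hyperparameter values are illustrative only.
	job, err := finetuning.MakeCreationRequest(&finetuning.CreationRequest{
		Model:        "gpt-3.5-turbo",
		TrainingFile: "file-abc123",
		Hyperparams: finetuning.HyperParameters{
			BatchSize:              1,
			LearningRateMultiplier: 0.1,
			NEpochs:                3,
		},
	}, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("job=%s status=%s\n", job.ID, job.Status)

	// Follow the most recent status updates for the job.
	limit := uint64(10)
	events, err := finetuning.MakeListEventsRequest(job.ID, &limit, nil, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, e := range events.Data {
		fmt.Printf("%d [%s] %s\n", e.CreatedAt, e.Level, e.Message)
	}
}

For long job histories, MakeListRequest's limit and after parameters page through results: HasMore on the response indicates whether another page exists, and the ID of the last job returned can be passed as the after cursor for the next call (cursor semantics assumed from the OpenAI API documentation, not defined in this patch).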