feat: Add support for Structured Outputs in openai_dart (#525)
davidmigloz authored Aug 17, 2024
1 parent 79db87a commit c757407
Showing 30 changed files with 3,911 additions and 1,770 deletions.
51 changes: 48 additions & 3 deletions packages/openai_dart/README.md
@@ -165,7 +165,7 @@ await for (final res in stream) {
// 789
```

**Multi-modal prompt:**
**Multi-modal prompt:** ([docs](https://platform.openai.com/docs/guides/vision))

```dart
final res = await client.createChatCompletion(
@@ -198,7 +198,7 @@ print(res.choices.first.message.content);
// The fruit in the image is an apple.
```

**JSON mode:**
**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode))

```dart
final res = await client.createChatCompletion(
@@ -227,7 +227,52 @@ final res = await client.createChatCompletion(
// { "names": ["John", "Mary", "Peter"] }
```

**Tools:**
**Structured output:** ([docs](https://platform.openai.com/docs/guides/structured-outputs))

```dart
final res = await client.createChatCompletion(
request: CreateChatCompletionRequest(
model: ChatCompletionModel.model(
ChatCompletionModels.gpt4oMini,
),
messages: [
ChatCompletionMessage.system(
content:
'You are a helpful assistant that extracts names from text.',
),
ChatCompletionMessage.user(
content: ChatCompletionUserMessageContent.string(
'John, Mary, and Peter.',
),
),
],
temperature: 0,
responseFormat: ResponseFormat.jsonSchema(
jsonSchema: JsonSchemaObject(
name: 'Names',
description: 'A list of names',
strict: true,
schema: {
'type': 'object',
'properties': {
'names': {
'type': 'array',
'items': {
'type': 'string',
},
},
},
'additionalProperties': false,
'required': ['names'],
},
),
),
),
);
// {"names":["John","Mary","Peter"]}
```
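
A minimal decoding sketch (assuming `res` is the response from the call above and that `message.content` holds the JSON string shown in the comment), using `dart:convert`:

```dart
import 'dart:convert';

// With a strict JSON schema, the message content is guaranteed to match the
// schema supplied above, so it can be decoded straight into a map.
final content = res.choices.first.message.content;
final data = jsonDecode(content!) as Map<String, dynamic>;
final names = (data['names'] as List).cast<String>();
print(names); // [John, Mary, Peter]
```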

**Tools:** ([docs](https://platform.openai.com/docs/guides/function-calling))

```dart
const function = FunctionObject(
27 changes: 13 additions & 14 deletions packages/openai_dart/lib/src/generated/schema/assistant_object.dart
@@ -60,9 +60,10 @@ class AssistantObject with _$AssistantObject {
/// We generally recommend altering this or temperature but not both.
@JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP,

/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
/// Specifies the format that the model must output. Compatible with
/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
/// since `gpt-4o-mini-1106`.
/// since `gpt-3.5-turbo-1106`.
///
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees
/// the model will match your supplied JSON schema. Learn more in the
@@ -177,8 +178,6 @@ enum AssistantObjectObject {

/// `auto` is the default value
enum AssistantResponseFormatMode {
@JsonValue('none')
none,
@JsonValue('auto')
auto,
}
@@ -187,9 +186,10 @@ enum AssistantResponseFormatMode {
// CLASS: AssistantObjectResponseFormat
// ==========================================

/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
/// Specifies the format that the model must output. Compatible with
/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
/// since `gpt-4o-mini-1106`.
/// since `gpt-3.5-turbo-1106`.
///
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees
/// the model will match your supplied JSON schema. Learn more in the
Expand All @@ -209,14 +209,14 @@ sealed class AssistantObjectResponseFormat
const AssistantObjectResponseFormat._();

/// `auto` is the default value
const factory AssistantObjectResponseFormat.enumeration(
const factory AssistantObjectResponseFormat.mode(
AssistantResponseFormatMode value,
) = AssistantObjectResponseFormatEnumeration;

/// No Description
const factory AssistantObjectResponseFormat.assistantsResponseFormat(
AssistantsResponseFormat value,
) = AssistantObjectResponseFormatAssistantsResponseFormat;
const factory AssistantObjectResponseFormat.responseFormat(
ResponseFormat value,
) = AssistantObjectResponseFormatResponseFormat;

/// Object construction from a JSON representation
factory AssistantObjectResponseFormat.fromJson(Map<String, dynamic> json) =>
@@ -243,8 +243,8 @@ class _AssistantObjectResponseFormatConverter
}
if (data is Map<String, dynamic>) {
try {
return AssistantObjectResponseFormatAssistantsResponseFormat(
AssistantsResponseFormat.fromJson(data),
return AssistantObjectResponseFormatResponseFormat(
ResponseFormat.fromJson(data),
);
} catch (e) {}
}
@@ -258,8 +258,7 @@ class _AssistantObjectResponseFormatConverter
return switch (data) {
AssistantObjectResponseFormatEnumeration(value: final v) =>
_$AssistantResponseFormatModeEnumMap[v]!,
AssistantObjectResponseFormatAssistantsResponseFormat(value: final v) =>
v.toJson(),
AssistantObjectResponseFormatResponseFormat(value: final v) => v.toJson(),
null => null,
};
}

@@ -84,7 +84,7 @@ class AssistantToolsFileSearchFileSearch
/// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive.
///
/// Note that the file search tool may output fewer than `max_num_results` results. See the [file search
/// tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information.
/// tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) for more information.
@JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults,
}) = _AssistantToolsFileSearchFileSearch;


This file was deleted.

@@ -16,7 +16,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs {
/// Factory constructor for ChatCompletionLogprobs
const factory ChatCompletionLogprobs({
/// A list of message content tokens with log probability information.
required List<ChatCompletionTokenLogprob>? content,
@JsonKey(includeIfNull: false) List<ChatCompletionTokenLogprob>? content,
}) = _ChatCompletionLogprobs;

/// Object construction from a JSON representation

@@ -59,6 +59,9 @@ sealed class ChatCompletionMessage with _$ChatCompletionMessage {
/// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.
@JsonKey(includeIfNull: false) String? content,

/// The refusal message by the assistant.
@JsonKey(includeIfNull: false) String? refusal,

/// An optional name for the participant. Provides the model information to differentiate between participants of the same role.
@JsonKey(includeIfNull: false) String? name,

@@ -140,7 +143,7 @@ sealed class ChatCompletionUserMessageContent
List<ChatCompletionMessageContentPart> value,
) = ChatCompletionMessageContentParts;

/// The text contents of the message.
/// The text contents of the user message.
const factory ChatCompletionUserMessageContent.string(
String value,
) = ChatCompletionUserMessageContentString;
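
The hunk above adds a `refusal` field to the assistant message. A hedged usage sketch (assuming the response message exposes `refusal` alongside `content`, and reusing the request types from the README examples):

```dart
final res = await client.createChatCompletion(
  request: CreateChatCompletionRequest(
    model: ChatCompletionModel.model(ChatCompletionModels.gpt4oMini),
    messages: [
      ChatCompletionMessage.user(
        content: ChatCompletionUserMessageContent.string('Hello!'),
      ),
    ],
  ),
);

// When the model declines a request, `refusal` is populated instead of `content`.
final message = res.choices.first.message;
if (message.refusal != null) {
  print('Refused: ${message.refusal}');
} else {
  print(message.content);
}
```
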
@@ -18,7 +18,7 @@ sealed class ChatCompletionMessageContentPart
// UNION: ChatCompletionMessageContentPartText
// ------------------------------------------

/// A text content part of a user message.
/// A text content part of a message.
const factory ChatCompletionMessageContentPart.text({
/// The type of the content part, in this case `text`.
@Default(ChatCompletionMessageContentPartType.text)
@@ -32,8 +32,7 @@ sealed class ChatCompletionMessageContentPart
// UNION: ChatCompletionMessageContentPartImage
// ------------------------------------------

/// Union constructor for [ChatCompletionMessageContentPartImage]
@FreezedUnionValue('image_url')
/// An image content part of a user message.
const factory ChatCompletionMessageContentPart.image({
/// The type of the content part, in this case `image_url`.
@Default(ChatCompletionMessageContentPartType.imageUrl)
@@ -43,6 +42,20 @@ sealed class ChatCompletionMessageContentPart
@JsonKey(name: 'image_url') required ChatCompletionMessageImageUrl imageUrl,
}) = ChatCompletionMessageContentPartImage;

// ------------------------------------------
// UNION: ChatCompletionMessageContentPartRefusal
// ------------------------------------------

/// A refusal content part of a message.
const factory ChatCompletionMessageContentPart.refusal({
/// The type of the content part, in this case `refusal`.
@Default(ChatCompletionMessageContentPartType.refusal)
ChatCompletionMessageContentPartType type,

/// The refusal message generated by the model.
required String refusal,
}) = ChatCompletionMessageContentPartRefusal;

/// Object construction from a JSON representation
factory ChatCompletionMessageContentPart.fromJson(
Map<String, dynamic> json) =>
@@ -58,6 +71,8 @@ enum ChatCompletionMessageContentPartEnumType {
text,
@JsonValue('image_url')
imageUrl,
@JsonValue('refusal')
refusal,
}

// ==========================================
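
For illustration, the new refusal variant can be constructed like the existing text and image parts; a brief sketch based on the constructor shown above (the exact JSON layout depends on the generated serializers):

```dart
// Construct the refusal content part introduced in this commit; `type`
// defaults to ChatCompletionMessageContentPartType.refusal.
const part = ChatCompletionMessageContentPart.refusal(
  refusal: 'I cannot help with that request.',
);
print(part.toJson());
```
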
@@ -14,4 +14,6 @@ enum ChatCompletionMessageContentPartType {
text,
@JsonValue('image_url')
imageUrl,
@JsonValue('refusal')
refusal,
}

@@ -79,7 +79,7 @@ class ChatCompletionStreamResponseChoiceLogprobs
/// Factory constructor for ChatCompletionStreamResponseChoiceLogprobs
const factory ChatCompletionStreamResponseChoiceLogprobs({
/// A list of message content tokens with log probability information.
required List<ChatCompletionTokenLogprob>? content,
@JsonKey(includeIfNull: false) List<ChatCompletionTokenLogprob>? content,
}) = _ChatCompletionStreamResponseChoiceLogprobs;

/// Object construction from a JSON representation

@@ -19,6 +19,9 @@ class ChatCompletionStreamResponseDelta
/// The contents of the chunk message.
@JsonKey(includeIfNull: false) String? content,

/// The refusal message generated by the model.
@JsonKey(includeIfNull: false) String? refusal,

/// The name and arguments of a function that should be called, as generated by the model.
@JsonKey(name: 'function_call', includeIfNull: false)
ChatCompletionStreamMessageFunctionCall? functionCall,
@@ -43,6 +46,7 @@ class ChatCompletionStreamResponseDelta
/// List of all property names of schema
static const List<String> propertyNames = [
'content',
'refusal',
'function_call',
'tool_calls',
'role'
@@ -57,6 +61,7 @@ class ChatCompletionStreamResponseDelta
Map<String, dynamic> toMap() {
return {
'content': content,
'refusal': refusal,
'function_call': functionCall,
'tool_calls': toolCalls,
'role': role,
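
Since the delta now carries `refusal` next to `content`, a streamed refusal can be surfaced as it arrives. A hedged sketch, assuming the `createChatCompletionStream` call and the choice/delta shape used in the package README:

```dart
final stream = client.createChatCompletionStream(
  request: CreateChatCompletionRequest(
    model: ChatCompletionModel.model(ChatCompletionModels.gpt4oMini),
    messages: [
      ChatCompletionMessage.user(
        content: ChatCompletionUserMessageContent.string('Tell me a joke.'),
      ),
    ],
  ),
);

await for (final res in stream) {
  if (res.choices.isEmpty) continue;
  final delta = res.choices.first.delta;
  // Each chunk carries either incremental content or a refusal message.
  if (delta.refusal != null) {
    print('Refusal chunk: ${delta.refusal}');
  } else if (delta.content != null) {
    print(delta.content);
  }
}
```
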
@@ -51,9 +51,10 @@ class CreateAssistantRequest with _$CreateAssistantRequest {
/// We generally recommend altering this or temperature but not both.
@JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP,

/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
/// Specifies the format that the model must output. Compatible with
/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
/// since `gpt-4o-mini-1106`.
/// since `gpt-3.5-turbo-1106`.
///
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees
/// the model will match your supplied JSON schema. Learn more in the
@@ -263,8 +264,6 @@ class _AssistantModelConverter

/// `auto` is the default value
enum CreateAssistantResponseFormatMode {
@JsonValue('none')
none,
@JsonValue('auto')
auto,
}
@@ -273,9 +272,10 @@ enum CreateAssistantResponseFormatMode {
// CLASS: CreateAssistantRequestResponseFormat
// ==========================================

/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
/// Specifies the format that the model must output. Compatible with
/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
/// since `gpt-4o-mini-1106`.
/// since `gpt-3.5-turbo-1106`.
///
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees
/// the model will match your supplied JSON schema. Learn more in the
@@ -300,9 +300,9 @@ sealed class CreateAssistantRequestResponseFormat
) = CreateAssistantRequestResponseFormatEnumeration;

/// No Description
const factory CreateAssistantRequestResponseFormat.format(
AssistantsResponseFormat value,
) = CreateAssistantRequestResponseFormatAssistantsResponseFormat;
const factory CreateAssistantRequestResponseFormat.responseFormat(
ResponseFormat value,
) = CreateAssistantRequestResponseFormatResponseFormat;

/// Object construction from a JSON representation
factory CreateAssistantRequestResponseFormat.fromJson(
@@ -332,8 +332,8 @@ class _CreateAssistantRequestResponseFormatConverter
}
if (data is Map<String, dynamic>) {
try {
return CreateAssistantRequestResponseFormatAssistantsResponseFormat(
AssistantsResponseFormat.fromJson(data),
return CreateAssistantRequestResponseFormatResponseFormat(
ResponseFormat.fromJson(data),
);
} catch (e) {}
}
@@ -347,9 +347,7 @@ class _CreateAssistantRequestResponseFormatConverter
return switch (data) {
CreateAssistantRequestResponseFormatEnumeration(value: final v) =>
_$CreateAssistantResponseFormatModeEnumMap[v]!,
CreateAssistantRequestResponseFormatAssistantsResponseFormat(
value: final v
) =>
CreateAssistantRequestResponseFormatResponseFormat(value: final v) =>
v.toJson(),
null => null,
};
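
The same renaming applies on the assistants side: the `response_format` union now wraps the shared `ResponseFormat` type. A hedged sketch of building such a value with the new `responseFormat` constructor (how it is attached to a `CreateAssistantRequest` is assumed, since that part of the file is collapsed above):

```dart
// Build an assistants response_format value from the renamed constructor,
// reusing the shared ResponseFormat and JsonSchemaObject types.
final responseFormat = CreateAssistantRequestResponseFormat.responseFormat(
  ResponseFormat.jsonSchema(
    jsonSchema: JsonSchemaObject(
      name: 'Names',
      description: 'A list of names',
      strict: true,
      schema: {
        'type': 'object',
        'properties': {
          'names': {
            'type': 'array',
            'items': {'type': 'string'},
          },
        },
        'additionalProperties': false,
        'required': ['names'],
      },
    ),
  ),
);
```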