Skip to content

Commit

Permalink
feat: Sync OpenAI API (#347)
Browse files Browse the repository at this point in the history
- Removed `instance_id` in `CreateChatCompletionRequest`
- Increased `top_logprobs` max limit to 20
- Handle `invalid_prompt` error in Assistant runs
- Updated some descriptions
  • Loading branch information
davidmigloz authored Mar 27, 2024
1 parent 0d73aee commit f296eef
Show file tree
Hide file tree
Showing 12 changed files with 366 additions and 159 deletions.
2 changes: 1 addition & 1 deletion packages/openai_dart/lib/src/generated/client.dart
Original file line number Diff line number Diff line change
Expand Up @@ -663,7 +663,7 @@ class OpenAIClient {
// METHOD: createModeration
// ------------------------------------------

/// Classifies if text violates OpenAI's Content Policy.
/// Classifies if text is potentially harmful.
///
/// `request`: Request object for the Create moderation endpoint.
///
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ class ChatCompletionTokenLogprob with _$ChatCompletionTokenLogprob {
/// The token.
required String token,

/// The log probability of this token.
/// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
required double logprob,

/// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ class ChatCompletionTokenTopLogprob with _$ChatCompletionTokenTopLogprob {
/// The token.
required String token,

/// The log probability of this token.
/// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
required double logprob,

/// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
@Default(0.0)
double? frequencyPenalty,

/// A unique identifier of a custom instance to execute the request. The requesting organization is required to have access to the instance.
@JsonKey(name: 'instance_id', includeIfNull: false) String? instanceId,

/// Modify the likelihood of specified tokens appearing in the completion.
///
/// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
Expand All @@ -40,7 +37,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
/// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model.
@JsonKey(includeIfNull: false) bool? logprobs,

/// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.
/// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.
@JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs,

/// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion.
Expand Down Expand Up @@ -132,7 +129,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
'model',
'messages',
'frequency_penalty',
'instance_id',
'logit_bias',
'logprobs',
'top_logprobs',
Expand All @@ -157,7 +153,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
static const frequencyPenaltyMinValue = -2.0;
static const frequencyPenaltyMaxValue = 2.0;
static const topLogprobsMinValue = 0;
static const topLogprobsMaxValue = 5;
static const topLogprobsMaxValue = 20;
static const nDefaultValue = 1;
static const nMinValue = 1;
static const nMaxValue = 128;
Expand Down Expand Up @@ -220,7 +216,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
'model': model,
'messages': messages,
'frequency_penalty': frequencyPenalty,
'instance_id': instanceId,
'logit_bias': logitBias,
'logprobs': logprobs,
'top_logprobs': topLogprobs,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ class CreateImageRequest with _$CreateImageRequest {
/// The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`.
@Default(ImageQuality.standard) ImageQuality quality,

/// The format in which the generated images are returned. Must be one of `url` or `b64_json`.
/// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated.
@JsonKey(
name: 'response_format',
includeIfNull: false,
Expand Down Expand Up @@ -194,7 +194,7 @@ enum ImageQuality {
// ENUM: ImageResponseFormat
// ==========================================

/// The format in which the generated images are returned. Must be one of `url` or `b64_json`.
/// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated.
enum ImageResponseFormat {
@JsonValue('url')
url,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ part of open_a_i_schema;
// CLASS: CreateModerationResponse
// ==========================================

/// Represents policy compliance report by OpenAI's content moderation model against a given input.
/// Represents if a given text input is potentially harmful.
@freezed
class CreateModerationResponse with _$CreateModerationResponse {
const CreateModerationResponse._();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ class Moderation with _$Moderation {

/// Factory constructor for Moderation
const factory Moderation({
/// Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies).
/// Whether any of the below categories are flagged.
required bool flagged,

/// A list of the categories, and whether they are flagged or not.
Expand Down
6 changes: 4 additions & 2 deletions packages/openai_dart/lib/src/generated/schema/run_object.dart
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,7 @@ class RunLastError with _$RunLastError {

/// Factory constructor for RunLastError
const factory RunLastError({
/// One of `server_error` or `rate_limit_exceeded`.
/// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
required RunLastErrorCode code,

/// A human-readable description of the error.
Expand Down Expand Up @@ -294,10 +294,12 @@ class RunSubmitToolOutputs with _$RunSubmitToolOutputs {
// ENUM: RunLastErrorCode
// ==========================================

/// One of `server_error` or `rate_limit_exceeded`.
/// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
enum RunLastErrorCode {
@JsonValue('server_error')
serverError,
@JsonValue('rate_limit_exceeded')
rateLimitExceeded,
@JsonValue('invalid_prompt')
invalidPrompt,
}
Loading

0 comments on commit f296eef

Please sign in to comment.