From 60a970455cd4b5c9ca310a246421cc1fdb8d2e9c Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:55:10 +0100 Subject: [PATCH] feat(api): updates (#874) --- .stats.yml | 2 +- src/resources/batches.ts | 6 +- src/resources/beta/assistants.ts | 99 +++++++++++++++++++ src/resources/beta/threads/threads.ts | 90 +++++++++++++++++ .../beta/vector-stores/file-batches.ts | 47 +++++++++ src/resources/beta/vector-stores/files.ts | 90 +++++++++++++++++ .../beta/vector-stores/vector-stores.ts | 43 ++++++++ src/resources/chat/completions.ts | 6 +- src/resources/files.ts | 12 ++- src/resources/fine-tuning/jobs/jobs.ts | 5 + src/resources/shared.ts | 8 +- tests/api-resources/beta/assistants.test.ts | 4 +- .../beta/threads/threads.test.ts | 12 ++- .../beta/vector-stores/file-batches.test.ts | 5 +- .../beta/vector-stores/files.test.ts | 5 +- 15 files changed, 416 insertions(+), 18 deletions(-) diff --git a/.stats.yml b/.stats.yml index 2e5c705a0..11d2b0b18 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml diff --git a/src/resources/batches.ts b/src/resources/batches.ts index 399c931e1..d23c059dc 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -37,7 +37,9 @@ export class Batches extends APIResource { } /** - * Cancels an in-progress batch. + * Cancels an in-progress batch. The batch will be in status `cancelling` for up to + * 10 minutes, before changing to `cancelled`, where it will have partial results + * (if any) available in the output file. */ cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.post(`/batches/${batchId}/cancel`, options); @@ -228,7 +230,7 @@ export interface BatchCreateParams { * for how to upload a file. * * Your input file must be formatted as a - * [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), + * [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), * and must be uploaded with the purpose `batch`. The file can contain up to 50,000 * requests, and can be up to 100 MB in size. */ diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 120e63773..cdea09266 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -258,6 +258,7 @@ export type AssistantStreamEvent = | AssistantStreamEvent.ThreadRunInProgress | AssistantStreamEvent.ThreadRunRequiresAction | AssistantStreamEvent.ThreadRunCompleted + | AssistantStreamEvent.ThreadRunIncomplete | AssistantStreamEvent.ThreadRunFailed | AssistantStreamEvent.ThreadRunCancelling | AssistantStreamEvent.ThreadRunCancelled @@ -362,6 +363,20 @@ export namespace AssistantStreamEvent { event: 'thread.run.completed'; } + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * ends with status `incomplete`. + */ + export interface ThreadRunIncomplete { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.incomplete'; + } + /** * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) * fails. 
@@ -618,6 +633,30 @@ export interface FileSearchTool { * The type of tool being defined: `file_search` */ type: 'file_search'; + + /** + * Overrides for the file search tool. + */ + file_search?: FileSearchTool.FileSearch; +} + +export namespace FileSearchTool { + /** + * Overrides for the file search tool. + */ + export interface FileSearch { + /** + * The maximum number of results the file search tool should output. The default is + * 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1 + * and 50 inclusive. + * + * Note that the file search tool may output fewer than `max_num_results` results. + * See the + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + * for more information. + */ + max_num_results?: number; + } } export interface FunctionTool { @@ -843,6 +882,7 @@ export type RunStreamEvent = | RunStreamEvent.ThreadRunInProgress | RunStreamEvent.ThreadRunRequiresAction | RunStreamEvent.ThreadRunCompleted + | RunStreamEvent.ThreadRunIncomplete | RunStreamEvent.ThreadRunFailed | RunStreamEvent.ThreadRunCancelling | RunStreamEvent.ThreadRunCancelled @@ -919,6 +959,20 @@ export namespace RunStreamEvent { event: 'thread.run.completed'; } + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * ends with status `incomplete`. + */ + export interface ThreadRunIncomplete { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.incomplete'; + } + /** * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) * fails. @@ -1140,6 +1194,12 @@ export namespace AssistantCreateParams { export namespace FileSearch { export interface VectorStore { + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. + */ + chunking_strategy?: VectorStore.Auto | VectorStore.Static; + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to * add to the vector store. There can be a maximum of 10000 files in a vector @@ -1155,6 +1215,45 @@ export namespace AssistantCreateParams { */ metadata?: unknown; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index bbcb92450..fa2208fcb 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -345,6 +345,12 @@ export namespace ThreadCreateParams { export namespace FileSearch { export interface VectorStore { + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. 
+ */ + chunking_strategy?: VectorStore.Auto | VectorStore.Static; + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to * add to the vector store. There can be a maximum of 10000 files in a vector @@ -360,6 +366,45 @@ export namespace ThreadCreateParams { */ metadata?: unknown; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -687,6 +732,12 @@ export namespace ThreadCreateAndRunParams { export namespace FileSearch { export interface VectorStore { + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. + */ + chunking_strategy?: VectorStore.Auto | VectorStore.Static; + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to * add to the vector store. There can be a maximum of 10000 files in a vector @@ -702,6 +753,45 @@ export namespace ThreadCreateAndRunParams { */ metadata?: unknown; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index ba48c4d16..0c7f7566f 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -157,6 +157,53 @@ export interface FileBatchCreateParams { * files. */ file_ids: Array; + + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. + */ + chunking_strategy?: + | FileBatchCreateParams.AutoChunkingStrategyRequestParam + | FileBatchCreateParams.StaticChunkingStrategyRequestParam; +} + +export namespace FileBatchCreateParams { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface AutoChunkingStrategyRequestParam { + /** + * Always `auto`. 
+ */ + type: 'auto'; + } + + export interface StaticChunkingStrategyRequestParam { + static: StaticChunkingStrategyRequestParam.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace StaticChunkingStrategyRequestParam { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } } export interface FileBatchListFilesParams extends CursorPageParams { diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index 466fcee0f..4e5551d8c 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -130,6 +130,11 @@ export interface VectorStoreFile { * attached to. */ vector_store_id: string; + + /** + * The strategy used to chunk the file. + */ + chunking_strategy?: VectorStoreFile.Static | VectorStoreFile.Other; } export namespace VectorStoreFile { @@ -148,6 +153,44 @@ export namespace VectorStoreFile { */ message: string; } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + + /** + * This is returned when the chunking strategy is unknown. Typically, this is + * because the file was indexed before the `chunking_strategy` concept was + * introduced in the API. + */ + export interface Other { + /** + * Always `other`. + */ + type: 'other'; + } } export interface VectorStoreFileDeleted { @@ -165,6 +208,53 @@ export interface FileCreateParams { * files. */ file_id: string; + + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. + */ + chunking_strategy?: + | FileCreateParams.AutoChunkingStrategyRequestParam + | FileCreateParams.StaticChunkingStrategyRequestParam; +} + +export namespace FileCreateParams { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface AutoChunkingStrategyRequestParam { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface StaticChunkingStrategyRequestParam { + static: StaticChunkingStrategyRequestParam.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace StaticChunkingStrategyRequestParam { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. 
+ */ + max_chunk_size_tokens: number; + } + } } export interface FileListParams extends CursorPageParams { diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 3f5df1fc5..d2d4c7d39 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -200,6 +200,12 @@ export interface VectorStoreDeleted { } export interface VectorStoreCreateParams { + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. Only applicable if `file_ids` is non-empty. + */ + chunking_strategy?: VectorStoreCreateParams.Auto | VectorStoreCreateParams.Static; + /** * The expiration policy for a vector store. */ @@ -227,6 +233,43 @@ export interface VectorStoreCreateParams { } export namespace VectorStoreCreateParams { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + /** * The expiration policy for a vector store. */ diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 07b75debe..cbf7bcc2c 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -137,7 +137,7 @@ export interface ChatCompletionAssistantMessageParam { * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of * a function that should be called, as generated by the model. */ - function_call?: ChatCompletionAssistantMessageParam.FunctionCall; + function_call?: ChatCompletionAssistantMessageParam.FunctionCall | null; /** * An optional name for the participant. Provides the model information to @@ -885,8 +885,8 @@ export namespace ChatCompletionCreateParams { /** * The parameters the functions accepts, described as a JSON Schema object. See the - * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) - * for examples, and the + * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + * and the * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for * documentation about the format. * diff --git a/src/resources/files.ts b/src/resources/files.ts index 7f36fc368..0d8ca0797 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -19,9 +19,15 @@ export class Files extends APIResource { * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for * details. * - * The Fine-tuning API only supports `.jsonl` files. + * The Fine-tuning API only supports `.jsonl` files. The input also has certain + * required formats for fine-tuning + * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + * models. 
* - * The Batch API only supports `.jsonl` files up to 100 MB in size. + * The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + * has a specific required + * [format](https://platform.openai.com/docs/api-reference/batch/request-input). * * Please [contact us](https://help.openai.com/) if you need to increase these * storage limits. @@ -166,7 +172,7 @@ export interface FileCreateParams { * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). */ - purpose: 'assistants' | 'batch' | 'fine-tune'; + purpose: 'assistants' | 'batch' | 'fine-tune' | 'vision'; } export interface FileListParams { diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 403e0069f..12990c6fc 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -312,6 +312,11 @@ export interface JobCreateParams { * Your dataset must be formatted as a JSONL file. Additionally, you must upload * your file with the purpose `fine-tune`. * + * The contents of the file should differ depending on if the model uses the + * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + * format. + * * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) * for more details. */ diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 93fa05fa4..45969ea65 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -25,8 +25,8 @@ export interface FunctionDefinition { /** * The parameters the functions accepts, described as a JSON Schema object. See the - * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) - * for examples, and the + * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + * and the * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for * documentation about the format. * @@ -37,8 +37,8 @@ export interface FunctionDefinition { /** * The parameters the functions accepts, described as a JSON Schema object. See the - * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) - * for examples, and the + * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + * and the * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for * documentation about the format. 
* diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index 56ce8446a..4049f09b3 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -33,7 +33,9 @@ describe('resource assistants', () => { code_interpreter: { file_ids: ['string', 'string', 'string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }], + vector_stores: [ + { file_ids: ['string', 'string', 'string'], chunking_strategy: { type: 'auto' }, metadata: {} }, + ], }, }, tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 4c4256258..ebc78f357 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -132,7 +132,13 @@ describe('resource threads', () => { code_interpreter: { file_ids: ['string', 'string', 'string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }], + vector_stores: [ + { + file_ids: ['string', 'string', 'string'], + chunking_strategy: { type: 'auto' }, + metadata: {}, + }, + ], }, }, }, @@ -310,7 +316,9 @@ describe('resource threads', () => { code_interpreter: { file_ids: ['string', 'string', 'string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }], + vector_stores: [ + { file_ids: ['string', 'string', 'string'], chunking_strategy: { type: 'auto' }, metadata: {} }, + ], }, }, metadata: {}, diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/beta/vector-stores/file-batches.test.ts index 782b33a0c..b8ff697b7 100644 --- a/tests/api-resources/beta/vector-stores/file-batches.test.ts +++ b/tests/api-resources/beta/vector-stores/file-batches.test.ts @@ -23,7 +23,10 @@ describe('resource fileBatches', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'] }); + const response = await openai.beta.vectorStores.fileBatches.create('vs_abc123', { + file_ids: ['string'], + chunking_strategy: { type: 'auto' }, + }); }); test('retrieve', async () => { diff --git a/tests/api-resources/beta/vector-stores/files.test.ts b/tests/api-resources/beta/vector-stores/files.test.ts index 03340753c..60906dac3 100644 --- a/tests/api-resources/beta/vector-stores/files.test.ts +++ b/tests/api-resources/beta/vector-stores/files.test.ts @@ -21,7 +21,10 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.vectorStores.files.create('vs_abc123', { file_id: 'string' }); + const response = await openai.beta.vectorStores.files.create('vs_abc123', { + file_id: 'string', + chunking_strategy: { type: 'auto' }, + }); }); test('retrieve', async () => {
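
A minimal usage sketch of the `chunking_strategy` parameter this patch introduces, mirroring the test calls above. The client setup, the `vs_abc123` vector store ID, and the `file-abc123` file ID are placeholders rather than values taken from this change, and the call assumes an async (ESM) context:

    import OpenAI from 'openai';

    const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

    // Attach a file to a vector store with an explicit static chunking strategy.
    // Omitting `chunking_strategy` falls back to the `auto` strategy (800-token
    // chunks with a 400-token overlap, per the docs added in this patch).
    const vectorStoreFile = await openai.beta.vectorStores.files.create('vs_abc123', {
      file_id: 'file-abc123',
      chunking_strategy: {
        type: 'static',
        static: {
          max_chunk_size_tokens: 800, // allowed range: 100-4096
          chunk_overlap_tokens: 400, // must not exceed half of max_chunk_size_tokens
        },
      },
    });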