Skip to content

Commit

Permalink
feat(api): updates (#874)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-bot authored Jun 3, 2024
1 parent 2c51038 commit 60a9704
Show file tree
Hide file tree
Showing 15 changed files with 416 additions and 18 deletions.
2 changes: 1 addition & 1 deletion .stats.yml
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
configured_endpoints: 64
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml
6 changes: 4 additions & 2 deletions src/resources/batches.ts
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,9 @@ export class Batches extends APIResource {
}

/**
* Cancels an in-progress batch.
* Cancels an in-progress batch. The batch will be in status `cancelling` for up to
* 10 minutes, before changing to `cancelled`, where it will have partial results
* (if any) available in the output file.
*/
cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {
return this._client.post(`/batches/${batchId}/cancel`, options);
Expand Down Expand Up @@ -228,7 +230,7 @@ export interface BatchCreateParams {
* for how to upload a file.
*
* Your input file must be formatted as a
* [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
* [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
* and must be uploaded with the purpose `batch`. The file can contain up to 50,000
* requests, and can be up to 100 MB in size.
*/
Expand Down
99 changes: 99 additions & 0 deletions src/resources/beta/assistants.ts
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,7 @@ export type AssistantStreamEvent =
| AssistantStreamEvent.ThreadRunInProgress
| AssistantStreamEvent.ThreadRunRequiresAction
| AssistantStreamEvent.ThreadRunCompleted
| AssistantStreamEvent.ThreadRunIncomplete
| AssistantStreamEvent.ThreadRunFailed
| AssistantStreamEvent.ThreadRunCancelling
| AssistantStreamEvent.ThreadRunCancelled
Expand Down Expand Up @@ -362,6 +363,20 @@ export namespace AssistantStreamEvent {
event: 'thread.run.completed';
}

/**
 * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
 * ends with status `incomplete`.
 */
export interface ThreadRunIncomplete {
/**
 * Represents an execution run on a
 * [thread](https://platform.openai.com/docs/api-reference/threads).
 */
data: RunsAPI.Run;

/**
 * The literal server-sent event name. Acts as the discriminant for this
 * variant within the `AssistantStreamEvent` union.
 */
event: 'thread.run.incomplete';
}

/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
* fails.
Expand Down Expand Up @@ -618,6 +633,30 @@ export interface FileSearchTool {
* The type of tool being defined: `file_search`
*/
type: 'file_search';

/**
* Overrides for the file search tool.
*/
file_search?: FileSearchTool.FileSearch;
}

/**
 * Companion namespace for the `FileSearchTool` interface; holds the shape of
 * its optional `file_search` overrides object.
 */
export namespace FileSearchTool {
/**
 * Overrides for the file search tool.
 */
export interface FileSearch {
/**
 * The maximum number of results the file search tool should output. The default is
 * 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
 * and 50 inclusive.
 *
 * Note that the file search tool may output fewer than `max_num_results` results.
 * See the
 * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned)
 * for more information.
 */
max_num_results?: number;
}
}

export interface FunctionTool {
Expand Down Expand Up @@ -843,6 +882,7 @@ export type RunStreamEvent =
| RunStreamEvent.ThreadRunInProgress
| RunStreamEvent.ThreadRunRequiresAction
| RunStreamEvent.ThreadRunCompleted
| RunStreamEvent.ThreadRunIncomplete
| RunStreamEvent.ThreadRunFailed
| RunStreamEvent.ThreadRunCancelling
| RunStreamEvent.ThreadRunCancelled
Expand Down Expand Up @@ -919,6 +959,20 @@ export namespace RunStreamEvent {
event: 'thread.run.completed';
}

/**
 * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
 * ends with status `incomplete`.
 */
export interface ThreadRunIncomplete {
/**
 * Represents an execution run on a
 * [thread](https://platform.openai.com/docs/api-reference/threads).
 */
data: RunsAPI.Run;

/**
 * The literal server-sent event name. Acts as the discriminant for this
 * variant within the `RunStreamEvent` union.
 */
event: 'thread.run.incomplete';
}

/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
* fails.
Expand Down Expand Up @@ -1140,6 +1194,12 @@ export namespace AssistantCreateParams {

export namespace FileSearch {
export interface VectorStore {
/**
* The chunking strategy used to chunk the file(s). If not set, will use the `auto`
* strategy.
*/
chunking_strategy?: VectorStore.Auto | VectorStore.Static;

/**
* A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
* add to the vector store. There can be a maximum of 10000 files in a vector
Expand All @@ -1155,6 +1215,45 @@ export namespace AssistantCreateParams {
*/
metadata?: unknown;
}

/**
 * Companion namespace for the `VectorStore` interface; holds the two
 * chunking-strategy variants accepted by its `chunking_strategy` property.
 */
export namespace VectorStore {
/**
 * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
 * `800` and `chunk_overlap_tokens` of `400`.
 */
export interface Auto {
/**
 * Always `auto`.
 */
type: 'auto';
}

/**
 * A caller-configured chunking strategy; sizes are supplied explicitly in
 * the nested `static` object.
 */
export interface Static {
/**
 * The explicit chunk-size configuration for this strategy.
 */
static: Static.Static;

/**
 * Always `static`.
 */
type: 'static';
}

/**
 * Companion namespace for the `Static` variant; holds its configuration shape.
 */
export namespace Static {
export interface Static {
/**
 * The number of tokens that overlap between chunks. The default value is `400`.
 *
 * Note that the overlap must not exceed half of `max_chunk_size_tokens`.
 */
chunk_overlap_tokens: number;

/**
 * The maximum number of tokens in each chunk. The default value is `800`. The
 * minimum value is `100` and the maximum value is `4096`.
 */
max_chunk_size_tokens: number;
}
}
}
}
}
}
Expand Down
90 changes: 90 additions & 0 deletions src/resources/beta/threads/threads.ts
Original file line number Diff line number Diff line change
Expand Up @@ -345,6 +345,12 @@ export namespace ThreadCreateParams {

export namespace FileSearch {
export interface VectorStore {
/**
* The chunking strategy used to chunk the file(s). If not set, will use the `auto`
* strategy.
*/
chunking_strategy?: VectorStore.Auto | VectorStore.Static;

/**
* A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
* add to the vector store. There can be a maximum of 10000 files in a vector
Expand All @@ -360,6 +366,45 @@ export namespace ThreadCreateParams {
*/
metadata?: unknown;
}

/**
 * Companion namespace for the `VectorStore` interface; holds the two
 * chunking-strategy variants accepted by its `chunking_strategy` property.
 */
export namespace VectorStore {
/**
 * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
 * `800` and `chunk_overlap_tokens` of `400`.
 */
export interface Auto {
/**
 * Always `auto`.
 */
type: 'auto';
}

/**
 * A caller-configured chunking strategy; sizes are supplied explicitly in
 * the nested `static` object.
 */
export interface Static {
/**
 * The explicit chunk-size configuration for this strategy.
 */
static: Static.Static;

/**
 * Always `static`.
 */
type: 'static';
}

/**
 * Companion namespace for the `Static` variant; holds its configuration shape.
 */
export namespace Static {
export interface Static {
/**
 * The number of tokens that overlap between chunks. The default value is `400`.
 *
 * Note that the overlap must not exceed half of `max_chunk_size_tokens`.
 */
chunk_overlap_tokens: number;

/**
 * The maximum number of tokens in each chunk. The default value is `800`. The
 * minimum value is `100` and the maximum value is `4096`.
 */
max_chunk_size_tokens: number;
}
}
}
}
}
}
Expand Down Expand Up @@ -687,6 +732,12 @@ export namespace ThreadCreateAndRunParams {

export namespace FileSearch {
export interface VectorStore {
/**
* The chunking strategy used to chunk the file(s). If not set, will use the `auto`
* strategy.
*/
chunking_strategy?: VectorStore.Auto | VectorStore.Static;

/**
* A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
* add to the vector store. There can be a maximum of 10000 files in a vector
Expand All @@ -702,6 +753,45 @@ export namespace ThreadCreateAndRunParams {
*/
metadata?: unknown;
}

/**
 * Companion namespace for the `VectorStore` interface; holds the two
 * chunking-strategy variants accepted by its `chunking_strategy` property.
 */
export namespace VectorStore {
/**
 * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
 * `800` and `chunk_overlap_tokens` of `400`.
 */
export interface Auto {
/**
 * Always `auto`.
 */
type: 'auto';
}

/**
 * A caller-configured chunking strategy; sizes are supplied explicitly in
 * the nested `static` object.
 */
export interface Static {
/**
 * The explicit chunk-size configuration for this strategy.
 */
static: Static.Static;

/**
 * Always `static`.
 */
type: 'static';
}

/**
 * Companion namespace for the `Static` variant; holds its configuration shape.
 */
export namespace Static {
export interface Static {
/**
 * The number of tokens that overlap between chunks. The default value is `400`.
 *
 * Note that the overlap must not exceed half of `max_chunk_size_tokens`.
 */
chunk_overlap_tokens: number;

/**
 * The maximum number of tokens in each chunk. The default value is `800`. The
 * minimum value is `100` and the maximum value is `4096`.
 */
max_chunk_size_tokens: number;
}
}
}
}
}
}
Expand Down
47 changes: 47 additions & 0 deletions src/resources/beta/vector-stores/file-batches.ts
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,53 @@ export interface FileBatchCreateParams {
* files.
*/
file_ids: Array<string>;

/**
* The chunking strategy used to chunk the file(s). If not set, will use the `auto`
* strategy.
*/
chunking_strategy?:
| FileBatchCreateParams.AutoChunkingStrategyRequestParam
| FileBatchCreateParams.StaticChunkingStrategyRequestParam;
}

/**
 * Companion namespace for `FileBatchCreateParams`; holds the chunking-strategy
 * variants accepted by its `chunking_strategy` property.
 */
export namespace FileBatchCreateParams {
/**
 * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
 * `800` and `chunk_overlap_tokens` of `400`.
 */
export interface AutoChunkingStrategyRequestParam {
/**
 * Always `auto`.
 */
type: 'auto';
}

/**
 * A caller-configured chunking strategy; sizes are supplied explicitly in
 * the nested `static` object.
 */
export interface StaticChunkingStrategyRequestParam {
/**
 * The explicit chunk-size configuration for this strategy.
 */
static: StaticChunkingStrategyRequestParam.Static;

/**
 * Always `static`.
 */
type: 'static';
}

/**
 * Companion namespace for the static variant; holds its configuration shape.
 */
export namespace StaticChunkingStrategyRequestParam {
export interface Static {
/**
 * The number of tokens that overlap between chunks. The default value is `400`.
 *
 * Note that the overlap must not exceed half of `max_chunk_size_tokens`.
 */
chunk_overlap_tokens: number;

/**
 * The maximum number of tokens in each chunk. The default value is `800`. The
 * minimum value is `100` and the maximum value is `4096`.
 */
max_chunk_size_tokens: number;
}
}
}

export interface FileBatchListFilesParams extends CursorPageParams {
Expand Down
Loading

0 comments on commit 60a9704

Please sign in to comment.