From 1e4ddd6b849a5f55d3f4ffdfd4e44993a5e6dfb3 Mon Sep 17 00:00:00 2001
From: Travis Fischer
Date: Wed, 1 Mar 2023 23:32:13 -0600
Subject: [PATCH] fix: openai types

---
 package.json       |   1 -
 pnpm-lock.yaml     |  66 -----------
 src/chatgpt-api.ts |   2 +-
 src/types.ts       | 271 ++++++++++++++++++++++++++++++++++++++++++---
 4 files changed, 254 insertions(+), 86 deletions(-)

diff --git a/package.json b/package.json
index 04e9fa382..831f4dbb0 100644
--- a/package.json
+++ b/package.json
@@ -57,7 +57,6 @@
     "husky": "^8.0.2",
     "lint-staged": "^13.1.2",
     "npm-run-all": "^4.1.5",
-    "openai": "^3.2.1",
     "ora": "^6.1.2",
     "prettier": "^2.8.4",
     "tsup": "^6.6.3",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 5605a8072..9bc099dfb 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -15,7 +15,6 @@ specifiers:
   keyv: ^4.5.2
   lint-staged: ^13.1.2
   npm-run-all: ^4.1.5
-  openai: ^3.2.1
   ora: ^6.1.2
   p-timeout: ^6.1.1
   prettier: ^2.8.4
@@ -49,7 +48,6 @@ devDependencies:
   husky: 8.0.3
   lint-staged: 13.1.2
   npm-run-all: 4.1.5
-  openai: 3.2.1
   ora: 6.1.2
   prettier: 2.8.4
   tsup: 6.6.3_typescript@4.9.5
@@ -563,10 +561,6 @@
     engines: {node: '>=8'}
     dev: true

-  /asynckit/0.4.0:
-    resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}
-    dev: true
-
   /atomically/2.0.1:
     resolution: {integrity: sha512-sxBhVZUFBFhqSAsYMM3X2oaUi2NVDJ8U026FsIusM8gYXls9AYs/eXzgGrufs1Qjpkxi9zunds+75QUFz+m7UQ==}
     dependencies:
     engines: {node: '>= 0.4'}
     dev: true

-  /axios/0.26.1:
-    resolution: {integrity: sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==}
-    dependencies:
-      follow-redirects: 1.15.2
-    transitivePeerDependencies:
-      - debug
-    dev: true
-
   /balanced-match/1.0.2:
     resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
     dev: true
     resolution: {integrity: sha512-3tlv/dIP7FWvj3BsbHrGLJ6l/oKh1O3TcgBqMn+yyCagOxc23fyzDS6HypQbgxWbkpDnf52p1LuR4eWDQ/K9WQ==}
     dev: true

-  /combined-stream/1.0.8:
-    resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==}
-    engines: {node: '>= 0.8'}
-    dependencies:
-      delayed-stream: 1.0.0
-    dev: true
-
   /commander/4.1.1:
     resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==}
     engines: {node: '>= 6'}
     dev: true
       slash: 4.0.0
     dev: true

-  /delayed-stream/1.0.0:
-    resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
-    engines: {node: '>=0.4.0'}
-    dev: true
-
   /denque/2.1.0:
     resolution: {integrity: sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==}
     engines: {node: '>=0.10'}
       path-exists: 5.0.0
     dev: false

-  /follow-redirects/1.15.2:
-    resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==}
-    engines: {node: '>=4.0'}
-    peerDependencies:
-      debug: '*'
-    peerDependenciesMeta:
-      debug:
-        optional: true
-    dev: true
-
   /for-each/0.3.3:
     resolution: {integrity: sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==}
     dependencies:
       is-callable: 1.2.7
     dev: true

-  /form-data/4.0.0:
-    resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==}
-    engines: {node: '>= 6'}
-    dependencies:
-      asynckit: 0.4.0
-      combined-stream: 1.0.8
-      mime-types: 2.1.35
-    dev: true
-
   /fs.realpath/1.0.0:
     resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
     dev: true
       picomatch: 2.3.1
     dev: true

-  /mime-db/1.52.0:
-    resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==}
-    engines: {node: '>= 0.6'}
-    dev: true
-
-  /mime-types/2.1.35:
-    resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==}
-    engines: {node: '>= 0.6'}
-    dependencies:
-      mime-db: 1.52.0
-    dev: true
-
   /mimic-fn/2.1.0:
     resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==}
     engines: {node: '>=6'}
       mimic-fn: 4.0.0
     dev: true

-  /openai/3.2.1:
-    resolution: {integrity: sha512-762C9BNlJPbjjlWZi4WYK9iM2tAVAv0uUp1UmI34vb0CN5T2mjB/qM6RYBmNKMh/dN9fC+bxqPwWJZUTWW052A==}
-    dependencies:
-      axios: 0.26.1
-      form-data: 4.0.0
-    transitivePeerDependencies:
-      - debug
-    dev: true
-
   /ora/6.1.2:
     resolution: {integrity: sha512-EJQ3NiP5Xo94wJXIzAyOtSb0QEIAUu7m8t6UZ9krbz0vAJqr92JpcK/lEXg91q6B9pEGqrykkd2EQplnifDSBw==}
     engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
diff --git a/src/chatgpt-api.ts b/src/chatgpt-api.ts
index 5f8f3ae57..53302cca8 100644
--- a/src/chatgpt-api.ts
+++ b/src/chatgpt-api.ts
@@ -227,7 +227,7 @@ export class ChatGPTAPI {
         }

         try {
-          const response: types.CreateChatCompletionDeltaResponse =
+          const response: types.openai.CreateChatCompletionDeltaResponse =
             JSON.parse(data)

           if (response.id) {
diff --git a/src/types.ts b/src/types.ts
index 12e4239cb..23beb1f64 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -1,7 +1,3 @@
-import type * as openai from 'openai'
-
-export { openai }
-
 export type Role = 'user' | 'assistant' | 'system'

 export type FetchFn = typeof fetch
@@ -144,19 +140,258 @@ export type MessageContent = {

 export type MessageMetadata = any

-export interface CreateChatCompletionDeltaResponse {
-  id: string
-  object: 'chat.completion.chunk'
-  created: number
-  model: string
-  choices: [
-    {
-      delta: {
-        role: Role
-        content?: string
+export namespace openai {
+  export interface CreateChatCompletionDeltaResponse {
+    id: string
+    object: 'chat.completion.chunk'
+    created: number
+    model: string
+    choices: [
+      {
+        delta: {
+          role: Role
+          content?: string
+        }
+        index: number
+        finish_reason: string | null
       }
-      index: number
-      finish_reason: string | null
-    }
-  ]
+    ]
+  }
+
+  /**
+   *
+   * @export
+   * @interface ChatCompletionRequestMessage
+   */
+  export interface ChatCompletionRequestMessage {
+    /**
+     * The role of the author of this message.
+     * @type {string}
+     * @memberof ChatCompletionRequestMessage
+     */
+    role: ChatCompletionRequestMessageRoleEnum
+    /**
+     * The contents of the message
+     * @type {string}
+     * @memberof ChatCompletionRequestMessage
+     */
+    content: string
+    /**
+     * The name of the user in a multi-user chat
+     * @type {string}
+     * @memberof ChatCompletionRequestMessage
+     */
+    name?: string
+  }
+  export declare const ChatCompletionRequestMessageRoleEnum: {
+    readonly System: 'system'
+    readonly User: 'user'
+    readonly Assistant: 'assistant'
+  }
+  export declare type ChatCompletionRequestMessageRoleEnum =
+    (typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]
+  /**
+   *
+   * @export
+   * @interface ChatCompletionResponseMessage
+   */
+  export interface ChatCompletionResponseMessage {
+    /**
+     * The role of the author of this message.
+     * @type {string}
+     * @memberof ChatCompletionResponseMessage
+     */
+    role: ChatCompletionResponseMessageRoleEnum
+    /**
+     * The contents of the message
+     * @type {string}
+     * @memberof ChatCompletionResponseMessage
+     */
+    content: string
+  }
+  export declare const ChatCompletionResponseMessageRoleEnum: {
+    readonly System: 'system'
+    readonly User: 'user'
+    readonly Assistant: 'assistant'
+  }
+  export declare type ChatCompletionResponseMessageRoleEnum =
+    (typeof ChatCompletionResponseMessageRoleEnum)[keyof typeof ChatCompletionResponseMessageRoleEnum]
+  /**
+   *
+   * @export
+   * @interface CreateChatCompletionRequest
+   */
+  export interface CreateChatCompletionRequest {
+    /**
+     * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
+     * @type {string}
+     * @memberof CreateChatCompletionRequest
+     */
+    model: string
+    /**
+     * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
+     * @type {Array<ChatCompletionRequestMessage>}
+     * @memberof CreateChatCompletionRequest
+     */
+    messages: Array<ChatCompletionRequestMessage>
+    /**
+     * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    temperature?: number | null
+    /**
+     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    top_p?: number | null
+    /**
+     * How many chat completion choices to generate for each input message.
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    n?: number | null
+    /**
+     * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
+     * @type {boolean}
+     * @memberof CreateChatCompletionRequest
+     */
+    stream?: boolean | null
+    /**
+     *
+     * @type {CreateChatCompletionRequestStop}
+     * @memberof CreateChatCompletionRequest
+     */
+    stop?: CreateChatCompletionRequestStop
+    /**
+     * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    max_tokens?: number
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    presence_penalty?: number | null
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    frequency_penalty?: number | null
+    /**
+     * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+     * @type {object}
+     * @memberof CreateChatCompletionRequest
+     */
+    logit_bias?: object | null
+    /**
+     * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+     * @type {string}
+     * @memberof CreateChatCompletionRequest
+     */
+    user?: string
+  }
+  /**
+   * @type CreateChatCompletionRequestStop
+   * Up to 4 sequences where the API will stop generating further tokens.
+   * @export
+   */
+  export declare type CreateChatCompletionRequestStop = Array<string> | string
+  /**
+   *
+   * @export
+   * @interface CreateChatCompletionResponse
+   */
+  export interface CreateChatCompletionResponse {
+    /**
+     *
+     * @type {string}
+     * @memberof CreateChatCompletionResponse
+     */
+    id: string
+    /**
+     *
+     * @type {string}
+     * @memberof CreateChatCompletionResponse
+     */
+    object: string
+    /**
+     *
+     * @type {number}
+     * @memberof CreateChatCompletionResponse
+     */
+    created: number
+    /**
+     *
+     * @type {string}
+     * @memberof CreateChatCompletionResponse
+     */
+    model: string
+    /**
+     *
+     * @type {Array<CreateChatCompletionResponseChoicesInner>}
+     * @memberof CreateChatCompletionResponse
+     */
+    choices: Array<CreateChatCompletionResponseChoicesInner>
+    /**
+     *
+     * @type {CreateCompletionResponseUsage}
+     * @memberof CreateChatCompletionResponse
+     */
+    usage?: CreateCompletionResponseUsage
+  }
+  /**
+   *
+   * @export
+   * @interface CreateChatCompletionResponseChoicesInner
+   */
+  export interface CreateChatCompletionResponseChoicesInner {
+    /**
+     *
+     * @type {number}
+     * @memberof CreateChatCompletionResponseChoicesInner
+     */
+    index?: number
+    /**
+     *
+     * @type {ChatCompletionResponseMessage}
+     * @memberof CreateChatCompletionResponseChoicesInner
+     */
+    message?: ChatCompletionResponseMessage
+    /**
+     *
+     * @type {string}
+     * @memberof CreateChatCompletionResponseChoicesInner
+     */
+    finish_reason?: string
+  }
+  /**
+   *
+   * @export
+   * @interface CreateCompletionResponseUsage
+   */
+  export interface CreateCompletionResponseUsage {
+    /**
+     *
+     * @type {number}
+     * @memberof CreateCompletionResponseUsage
+     */
+    prompt_tokens: number
+    /**
+     *
+     * @type {number}
+     * @memberof CreateCompletionResponseUsage
+     */
+    completion_tokens: number
+    /**
+     *
+     * @type {number}
+     * @memberof CreateCompletionResponseUsage
+     */
+    total_tokens: number
+  }
 }
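
Usage sketch for the vendored types (not part of the patch). A minimal consumer of the new `types.openai` namespace, mirroring the `src/chatgpt-api.ts` hunk above; the import path, the `handleChunk` helper, and the console output are illustrative assumptions, not code from this commit:

import * as types from './src/types'

// A typed, streaming request body. Per the vendored docs, `stream: true`
// makes the API send data-only server-sent events terminated by a
// `data: [DONE]` message.
const body: types.openai.CreateChatCompletionRequest = {
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Hello!' }],
  temperature: 0.7,
  stream: true
}

// Each SSE `data:` payload (other than the `[DONE]` sentinel) parses into a
// CreateChatCompletionDeltaResponse, exactly as chatgpt-api.ts now does.
function handleChunk(data: string): void {
  if (data === '[DONE]') return
  const response: types.openai.CreateChatCompletionDeltaResponse =
    JSON.parse(data)
  const { delta, finish_reason } = response.choices[0]
  if (delta.content) process.stdout.write(delta.content)
  if (finish_reason) process.stdout.write('\n')
}

Since the namespace contains only type declarations (the role enums are `declare const`), vendoring it drops the `openai` package from devDependencies, as the package.json and pnpm-lock.yaml hunks show, while keeping the same type names for consumers.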