Skip to content

Commit

Permalink
chore(docs): fix incorrect client var names (#955)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-app[bot] authored Jul 26, 2024
1 parent 38fa3f8 commit ea4a68c
Show file tree
Hide file tree
Showing 24 changed files with 182 additions and 182 deletions.
32 changes: 16 additions & 16 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ const client = new OpenAI({
});

async function main() {
const chatCompletion = await openai.chat.completions.create({
const chatCompletion = await client.chat.completions.create({
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'gpt-3.5-turbo',
});
Expand All @@ -53,7 +53,7 @@ import OpenAI from 'openai';

const client = new OpenAI();

const stream = await openai.chat.completions.create({
const stream = await client.chat.completions.create({
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'gpt-3.5-turbo',
stream: true,
Expand Down Expand Up @@ -83,7 +83,7 @@ async function main() {
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'gpt-3.5-turbo',
};
const chatCompletion: OpenAI.Chat.ChatCompletion = await openai.chat.completions.create(params);
const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params);
}

main();
Expand All @@ -108,20 +108,20 @@ import OpenAI, { toFile } from 'openai';
const client = new OpenAI();

// If you have access to Node `fs` we recommend using `fs.createReadStream()`:
await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
await client.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });

// Or if you have the web `File` API you can pass a `File` instance:
await openai.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });
await client.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });

// You can also pass a `fetch` `Response`:
await openai.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });
await client.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });

// Finally, if none of the above are convenient, you can use our `toFile` helper:
await openai.files.create({
await client.files.create({
file: await toFile(Buffer.from('my bytes'), 'input.jsonl'),
purpose: 'fine-tune',
});
await openai.files.create({
await client.files.create({
file: await toFile(new Uint8Array([0, 1, 2]), 'input.jsonl'),
purpose: 'fine-tune',
});
Expand All @@ -136,7 +136,7 @@ a subclass of `APIError` will be thrown:
<!-- prettier-ignore -->
```ts
async function main() {
const job = await openai.fineTuning.jobs
const job = await client.fineTuning.jobs
.create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
.catch(async (err) => {
if (err instanceof OpenAI.APIError) {
Expand Down Expand Up @@ -181,7 +181,7 @@ const client = new OpenAI({
});

// Or, configure per-request:
await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
maxRetries: 5,
});
```
Expand All @@ -198,7 +198,7 @@ const client = new OpenAI({
});

// Override per-request:
await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
timeout: 5 * 1000,
});
```
Expand All @@ -216,7 +216,7 @@ You can use `for await … of` syntax to iterate through items across all pages:
async function fetchAllFineTuningJobs(params) {
const allFineTuningJobs = [];
// Automatically fetches more pages as needed.
for await (const fineTuningJob of openai.fineTuning.jobs.list({ limit: 20 })) {
for await (const fineTuningJob of client.fineTuning.jobs.list({ limit: 20 })) {
allFineTuningJobs.push(fineTuningJob);
}
return allFineTuningJobs;
Expand All @@ -226,7 +226,7 @@ async function fetchAllFineTuningJobs(params) {
Alternatively, you can request a single page at a time:

```ts
let page = await openai.fineTuning.jobs.list({ limit: 20 });
let page = await client.fineTuning.jobs.list({ limit: 20 });
for (const fineTuningJob of page.data) {
console.log(fineTuningJob);
}
Expand All @@ -250,13 +250,13 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
```ts
const client = new OpenAI();

const response = await openai.chat.completions
const response = await client.chat.completions
.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
.asResponse();
console.log(response.headers.get('X-My-Header'));
console.log(response.statusText); // access the underlying Response object

const { data: chatCompletion, response: raw } = await openai.chat.completions
const { data: chatCompletion, response: raw } = await client.chat.completions
.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
.withResponse();
console.log(raw.headers.get('X-My-Header'));
Expand Down Expand Up @@ -364,7 +364,7 @@ const client = new OpenAI({
});

// Override per-request:
await openai.models.list({
await client.models.list({
httpAgent: new http.Agent({ keepAlive: false }),
});
```
Expand Down
4 changes: 2 additions & 2 deletions tests/api-resources/audio/speech.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,15 @@

import OpenAI from 'openai';

const openai = new OpenAI({
const client = new OpenAI({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

describe('resource speech', () => {
// binary tests are currently broken
test.skip('create: required and optional params', async () => {
const response = await openai.audio.speech.create({
const response = await client.audio.speech.create({
input: 'input',
model: 'string',
voice: 'alloy',
Expand Down
6 changes: 3 additions & 3 deletions tests/api-resources/audio/transcriptions.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,14 @@
import OpenAI, { toFile } from 'openai';
import { Response } from 'node-fetch';

const openai = new OpenAI({
const client = new OpenAI({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

describe('resource transcriptions', () => {
test('create: only required params', async () => {
const responsePromise = openai.audio.transcriptions.create({
const responsePromise = client.audio.transcriptions.create({
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
model: 'whisper-1',
});
Expand All @@ -24,7 +24,7 @@ describe('resource transcriptions', () => {
});

test('create: required and optional params', async () => {
const response = await openai.audio.transcriptions.create({
const response = await client.audio.transcriptions.create({
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
model: 'whisper-1',
language: 'language',
Expand Down
6 changes: 3 additions & 3 deletions tests/api-resources/audio/translations.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,14 @@
import OpenAI, { toFile } from 'openai';
import { Response } from 'node-fetch';

const openai = new OpenAI({
const client = new OpenAI({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

describe('resource translations', () => {
test('create: only required params', async () => {
const responsePromise = openai.audio.translations.create({
const responsePromise = client.audio.translations.create({
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
model: 'whisper-1',
});
Expand All @@ -24,7 +24,7 @@ describe('resource translations', () => {
});

test('create: required and optional params', async () => {
const response = await openai.audio.translations.create({
const response = await client.audio.translations.create({
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
model: 'whisper-1',
prompt: 'prompt',
Expand Down
20 changes: 10 additions & 10 deletions tests/api-resources/batches.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,14 @@
import OpenAI from 'openai';
import { Response } from 'node-fetch';

const openai = new OpenAI({
const client = new OpenAI({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

describe('resource batches', () => {
test('create: only required params', async () => {
const responsePromise = openai.batches.create({
const responsePromise = client.batches.create({
completion_window: '24h',
endpoint: '/v1/chat/completions',
input_file_id: 'input_file_id',
Expand All @@ -25,7 +25,7 @@ describe('resource batches', () => {
});

test('create: required and optional params', async () => {
const response = await openai.batches.create({
const response = await client.batches.create({
completion_window: '24h',
endpoint: '/v1/chat/completions',
input_file_id: 'input_file_id',
Expand All @@ -34,7 +34,7 @@ describe('resource batches', () => {
});

test('retrieve', async () => {
const responsePromise = openai.batches.retrieve('batch_id');
const responsePromise = client.batches.retrieve('batch_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
Expand All @@ -46,13 +46,13 @@ describe('resource batches', () => {

test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(openai.batches.retrieve('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
await expect(client.batches.retrieve('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
OpenAI.NotFoundError,
);
});

test('list', async () => {
const responsePromise = openai.batches.list();
const responsePromise = client.batches.list();
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
Expand All @@ -64,20 +64,20 @@ describe('resource batches', () => {

test('list: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(openai.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
await expect(client.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
OpenAI.NotFoundError,
);
});

test('list: request options and params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
openai.batches.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }),
client.batches.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }),
).rejects.toThrow(OpenAI.NotFoundError);
});

test('cancel', async () => {
const responsePromise = openai.batches.cancel('batch_id');
const responsePromise = client.batches.cancel('batch_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
Expand All @@ -89,7 +89,7 @@ describe('resource batches', () => {

test('cancel: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(openai.batches.cancel('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
await expect(client.batches.cancel('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
OpenAI.NotFoundError,
);
});
Expand Down
22 changes: 11 additions & 11 deletions tests/api-resources/beta/assistants.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,14 @@
import OpenAI from 'openai';
import { Response } from 'node-fetch';

const openai = new OpenAI({
const client = new OpenAI({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

describe('resource assistants', () => {
test('create: only required params', async () => {
const responsePromise = openai.beta.assistants.create({ model: 'gpt-4-turbo' });
const responsePromise = client.beta.assistants.create({ model: 'gpt-4-turbo' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
Expand All @@ -21,7 +21,7 @@ describe('resource assistants', () => {
});

test('create: required and optional params', async () => {
const response = await openai.beta.assistants.create({
const response = await client.beta.assistants.create({
model: 'gpt-4-turbo',
description: 'description',
instructions: 'instructions',
Expand All @@ -44,7 +44,7 @@ describe('resource assistants', () => {
});

test('retrieve', async () => {
const responsePromise = openai.beta.assistants.retrieve('assistant_id');
const responsePromise = client.beta.assistants.retrieve('assistant_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
Expand All @@ -57,12 +57,12 @@ describe('resource assistants', () => {
test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
openai.beta.assistants.retrieve('assistant_id', { path: '/_stainless_unknown_path' }),
client.beta.assistants.retrieve('assistant_id', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(OpenAI.NotFoundError);
});

test('update', async () => {
const responsePromise = openai.beta.assistants.update('assistant_id', {});
const responsePromise = client.beta.assistants.update('assistant_id', {});
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
Expand All @@ -73,7 +73,7 @@ describe('resource assistants', () => {
});

test('list', async () => {
const responsePromise = openai.beta.assistants.list();
const responsePromise = client.beta.assistants.list();
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
Expand All @@ -85,23 +85,23 @@ describe('resource assistants', () => {

test('list: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(openai.beta.assistants.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
await expect(client.beta.assistants.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
OpenAI.NotFoundError,
);
});

test('list: request options and params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
openai.beta.assistants.list(
client.beta.assistants.list(
{ after: 'after', before: 'before', limit: 0, order: 'asc' },
{ path: '/_stainless_unknown_path' },
),
).rejects.toThrow(OpenAI.NotFoundError);
});

test('del', async () => {
const responsePromise = openai.beta.assistants.del('assistant_id');
const responsePromise = client.beta.assistants.del('assistant_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
Expand All @@ -114,7 +114,7 @@ describe('resource assistants', () => {
test('del: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
openai.beta.assistants.del('assistant_id', { path: '/_stainless_unknown_path' }),
client.beta.assistants.del('assistant_id', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(OpenAI.NotFoundError);
});
});
Loading

0 comments on commit ea4a68c

Please sign in to comment.