Skip to content

Commit

Permalink
feat(chat-models)!: Move all model config options to ChatVertexAIOptions (#242)
Browse files Browse the repository at this point in the history
  • Loading branch information
davidmigloz authored Nov 21, 2023
1 parent a714882 commit 89bef8a
Show file tree
Hide file tree
Showing 3 changed files with 26 additions and 26 deletions.
18 changes: 18 additions & 0 deletions packages/langchain_google/lib/src/chat_models/models/models.dart
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ import 'package:langchain/langchain.dart';
class ChatVertexAIOptions extends ChatModelOptions {
/// {@macro chat_vertex_ai_options}
const ChatVertexAIOptions({
this.publisher = 'google',
this.model = 'chat-bison',
this.maxOutputTokens = 1024,
this.temperature = 0.2,
this.topP = 0.95,
Expand All @@ -15,6 +17,22 @@ class ChatVertexAIOptions extends ChatModelOptions {
this.examples,
});

/// The publisher of the model.
///
/// Use `google` for first-party models.
final String publisher;

/// The text model to use.
///
/// To use the latest model version, specify the model name without a version
/// number (e.g. `chat-bison`).
/// To use a stable model version, specify the model version number
/// (e.g. `chat-bison@001`).
///
/// You can find a list of available models here:
/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models
final String model;

/// Maximum number of tokens that can be generated in the response. A token
/// is approximately four characters. 100 tokens correspond to roughly
/// 60-80 words.
Expand Down
24 changes: 3 additions & 21 deletions packages/langchain_google/lib/src/chat_models/vertex_ai.dart
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,6 @@ class ChatVertexAI extends BaseChatModel<ChatVertexAIOptions> {
required final String project,
final String location = 'us-central1',
final String? rootUrl,
this.publisher = 'google',
this.model = 'chat-bison',
this.defaultOptions = const ChatVertexAIOptions(),
}) : client = VertexAIGenAIClient(
httpClient: httpClient,
Expand All @@ -129,22 +127,6 @@ class ChatVertexAI extends BaseChatModel<ChatVertexAIOptions> {
/// A client for interacting with Vertex AI API.
final VertexAIGenAIClient client;

/// The publisher of the model.
///
/// Use `google` for first-party models.
final String publisher;

/// The text model to use.
///
/// To use the latest model version, specify the model name without a version
/// number (e.g. `chat-bison`).
/// To use a stable model version, specify the model version number
/// (e.g. `chat-bison@001`).
///
/// You can find a list of available models here:
/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models
final String model;

/// The default options to use when calling the model.
final ChatVertexAIOptions defaultOptions;

Expand Down Expand Up @@ -181,8 +163,8 @@ class ChatVertexAI extends BaseChatModel<ChatVertexAIOptions> {
context: context,
examples: examples,
messages: vertexMessages,
publisher: publisher,
model: model,
publisher: options?.publisher ?? defaultOptions.publisher,
model: options?.model ?? defaultOptions.model,
parameters: VertexAITextChatModelRequestParams(
maxOutputTokens:
options?.maxOutputTokens ?? defaultOptions.maxOutputTokens,
Expand All @@ -194,7 +176,7 @@ class ChatVertexAI extends BaseChatModel<ChatVertexAIOptions> {
options?.candidateCount ?? defaultOptions.candidateCount,
),
);
return result.toChatResult(id, model);
return result.toChatResult(id, options?.model ?? defaultOptions.model);
}

/// Tokenizes the given prompt using tiktoken.
Expand Down
10 changes: 5 additions & 5 deletions packages/langchain_google/test/chat_models/vertex_ai_test.dart
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@ void main() async {
project: Platform.environment['VERTEX_AI_PROJECT_ID']!,
location: 'us-central1',
rootUrl: 'https://us-central1-aiplatform.googleapis.com/',
publisher: 'google',
model: 'text-bison@001',
defaultOptions: const ChatVertexAIOptions(
publisher: 'google',
model: 'chat-bison@001',
maxOutputTokens: 10,
temperature: 0.1,
topP: 0.1,
Expand All @@ -32,11 +32,11 @@ void main() async {
);
expect(llm.client.project, Platform.environment['VERTEX_AI_PROJECT_ID']);
expect(llm.client.location, 'us-central1');
expect(llm.publisher, 'google');
expect(llm.model, 'text-bison@001');
expect(
llm.defaultOptions,
const ChatVertexAIOptions(
publisher: 'google',
model: 'chat-bison@001',
maxOutputTokens: 10,
temperature: 0.1,
topP: 0.1,
Expand Down Expand Up @@ -83,7 +83,7 @@ void main() async {
[ChatMessage.humanText('Hello, how are you?')],
);
expect(res.modelOutput, isNotNull);
expect(res.modelOutput!['model'], chat.model);
expect(res.modelOutput!['model'], chat.defaultOptions.model);
expect(res.usage?.promptTokens, isNotNull);
expect(res.usage?.promptBillableCharacters, isNotNull);
expect(res.usage?.responseTokens, isNotNull);
Expand Down

0 comments on commit 89bef8a

Please sign in to comment.