From 0f32a5f228ca891b01736d9f9386713eeaeef331 Mon Sep 17 00:00:00 2001 From: Christina Holland Date: Mon, 15 Jul 2024 14:53:26 -0700 Subject: [PATCH 1/5] Remove internal-only property from countTokens sample (#210) --- samples/count_tokens.js | 2 -- 1 file changed, 2 deletions(-) diff --git a/samples/count_tokens.js b/samples/count_tokens.js index d0fc41241..7aec5b183 100644 --- a/samples/count_tokens.js +++ b/samples/count_tokens.js @@ -43,8 +43,6 @@ async function tokensTextOnly() { ); console.log(countResult.totalTokens); // 11 - console.log(countResult.contentTokens[0]); - // { partTokens: [ 10 ], roleTokens: 1 } const generateResult = await model.generateContent( "The quick brown fox jumps over the lazy dog.", From 9eabe6cef8471babfd120e3f2f2ba79dc2b393d7 Mon Sep 17 00:00:00 2001 From: Christina Holland Date: Mon, 22 Jul 2024 06:14:20 -0700 Subject: [PATCH 2/5] Fix sample comment describing video/audio processing (#212) Comment now reads: "A video or audio file is converted to tokens at a fixed rate of tokens per second." --- samples/count_tokens.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/samples/count_tokens.js b/samples/count_tokens.js index 7aec5b183..99a98ae7b 100644 --- a/samples/count_tokens.js +++ b/samples/count_tokens.js @@ -230,7 +230,8 @@ async function tokensMultimodalVideoAudioFileApi() { // Call `countTokens` to get the input token count // of the combined text and file (`totalTokens`). - // An video or audio file's display or file size does not affect its token count. + // A video or audio file is converted to tokens at a fixed rate of tokens + // per second. // Optionally, you can call `countTokens` for the text and file separately. const countResult = await model.countTokens([prompt, videoPart]); From d2d42ca5985535c80bee31bca02c6f95e9829bf8 Mon Sep 17 00:00:00 2001 From: DellaBitta Date: Mon, 22 Jul 2024 10:27:39 -0400 Subject: [PATCH 3/5] AbortSignal support (#144) Add `SingleRequestOptions` with `AbortSignal` support to most of the asynchronous methods of: - `GenerativeModel` - `GoogleAIFileManager` - `ChatSession` **NOTE:** `AbortSignal` is a client-only operation. Using it to cancel an operation will not cancel the request in the service. You will still be charged usage for any applicable operations. Some methods are not supported as they would leave the server in an ambiguous state, namely `GoogleAIFileManager.uploadFile()` and `GoogleAIFileManager.deleteFile()`. 
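For illustration, a minimal sketch of cancelling a `generateContent()` call with the new `AbortSignal` support. The model name, prompt, and 5-second budget are placeholders; `GEMINI_API_KEY` mirrors the integration tests:

```typescript
import { GoogleGenerativeAI } from "@google/generative-ai";

async function main(): Promise<void> {
  const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });

  const controller = new AbortController();
  // Give up on the client side after 5 seconds. Per the note above, the
  // service may still complete the request and applicable usage is charged.
  const timer = setTimeout(() => controller.abort(), 5000);

  try {
    const result = await model.generateContent("Tell me a short story.", {
      signal: controller.signal,
    });
    console.log(result.response.text());
  } finally {
    clearTimeout(timer);
  }
}

main().catch(console.error);
```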
--- .changeset/tough-beds-serve.md | 5 + common/api-review/generative-ai-server.api.md | 5 +- common/api-review/generative-ai.api.md | 25 +- ...ative-ai.googleaifilemanager.deletefile.md | 7 +- ...nerative-ai.googleaifilemanager.getfile.md | 7 +- ...rative-ai.googleaifilemanager.listfiles.md | 7 +- .../generative-ai.googleaifilemanager.md | 8 +- ...ative-ai.googleaifilemanager.uploadfile.md | 7 +- docs/reference/files/generative-ai.md | 1 + .../generative-ai.singlerequestoptions.md | 21 ++ ...nerative-ai.singlerequestoptions.signal.md | 13 + ...generative-ai.chatsession._constructor_.md | 4 +- .../main/generative-ai.chatsession.md | 7 +- ...enerative-ai.chatsession.requestoptions.md | 11 - .../generative-ai.chatsession.sendmessage.md | 7 +- ...rative-ai.chatsession.sendmessagestream.md | 5 +- ...rative-ai.generativemodel._constructor_.md | 4 +- ...e-ai.generativemodel.batchembedcontents.md | 5 +- ...nerative-ai.generativemodel.counttokens.md | 5 +- ...erative-ai.generativemodel.embedcontent.md | 5 +- ...tive-ai.generativemodel.generatecontent.md | 5 +- ...i.generativemodel.generatecontentstream.md | 5 +- .../main/generative-ai.generativemodel.md | 12 +- docs/reference/main/generative-ai.md | 1 + .../generative-ai.singlerequestoptions.md | 21 ++ ...nerative-ai.singlerequestoptions.signal.md | 15 ++ ...ative-ai.googleaifilemanager.deletefile.md | 2 +- ...nerative-ai.googleaifilemanager.getfile.md | 7 +- ...rative-ai.googleaifilemanager.listfiles.md | 7 +- .../generative-ai.googleaifilemanager.md | 8 +- ...ative-ai.googleaifilemanager.uploadfile.md | 2 +- packages/main/src/methods/chat-session.ts | 27 +- packages/main/src/methods/count-tokens.ts | 6 +- packages/main/src/methods/generate-content.ts | 6 +- packages/main/src/models/generative-model.ts | 67 ++++- packages/main/src/requests/request.ts | 24 +- packages/main/src/server/file-manager.ts | 42 ++- packages/main/src/server/request.ts | 25 +- .../node/abort-signal.test.ts | 250 ++++++++++++++++++ packages/main/types/requests.ts | 16 ++ 40 files changed, 590 insertions(+), 117 deletions(-) create mode 100644 .changeset/tough-beds-serve.md create mode 100644 docs/reference/files/generative-ai.singlerequestoptions.md create mode 100644 docs/reference/files/generative-ai.singlerequestoptions.signal.md delete mode 100644 docs/reference/main/generative-ai.chatsession.requestoptions.md create mode 100644 docs/reference/main/generative-ai.singlerequestoptions.md create mode 100644 docs/reference/main/generative-ai.singlerequestoptions.signal.md create mode 100644 packages/main/test-integration/node/abort-signal.test.ts diff --git a/.changeset/tough-beds-serve.md b/.changeset/tough-beds-serve.md new file mode 100644 index 000000000..2b35de938 --- /dev/null +++ b/.changeset/tough-beds-serve.md @@ -0,0 +1,5 @@ +--- +"@google/generative-ai": minor +--- + +Adds `SingleRequestOptions` with `AbortSignal` support to most of the asynchronous methods of `GenerativeModel`, `GoogleAIFileManager` and `ChatSession`. 
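A sketch of the intended precedence (both timeout values are illustrative): fields set in the per-call `SingleRequestOptions` override the `RequestOptions` given at construction, while unset fields fall back to the construction-time values.

```typescript
import {
  GoogleGenerativeAI,
  SingleRequestOptions,
} from "@google/generative-ai";

// Run with Node 18+ as an ES module (top-level await).
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
// Construction-time default: every request times out after 30 seconds.
const model = genAI.getGenerativeModel(
  { model: "gemini-1.5-flash" },
  { timeout: 30_000 },
);

// Per-call override: this one request gets a 2-second budget and a
// user-supplied AbortSignal; other fields keep the constructor defaults.
const controller = new AbortController();
const perCall: SingleRequestOptions = {
  timeout: 2_000,
  signal: controller.signal,
};
const countResult = await model.countTokens("How many tokens?", perCall);
console.log(countResult.totalTokens);
```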
diff --git a/common/api-review/generative-ai-server.api.md b/common/api-review/generative-ai-server.api.md index ae98439cf..456c52735 100644 --- a/common/api-review/generative-ai-server.api.md +++ b/common/api-review/generative-ai-server.api.md @@ -355,8 +355,9 @@ export class GoogleAIFileManager { // (undocumented) apiKey: string; deleteFile(fileId: string): Promise; - getFile(fileId: string): Promise; - listFiles(listParams?: ListParams): Promise; + getFile(fileId: string, requestOptions?: SingleRequestOptions): Promise; + // Warning: (ae-forgotten-export) The symbol "SingleRequestOptions" needs to be exported by the entry point index.d.ts + listFiles(listParams?: ListParams, requestOptions?: SingleRequestOptions): Promise; uploadFile(filePath: string, fileMetadata: FileMetadata): Promise; } diff --git a/common/api-review/generative-ai.api.md b/common/api-review/generative-ai.api.md index 83f151fcd..2275a977e 100644 --- a/common/api-review/generative-ai.api.md +++ b/common/api-review/generative-ai.api.md @@ -62,16 +62,14 @@ export interface CachedContentBase { // @public export class ChatSession { - constructor(apiKey: string, model: string, params?: StartChatParams, requestOptions?: RequestOptions); + constructor(apiKey: string, model: string, params?: StartChatParams, _requestOptions?: RequestOptions); getHistory(): Promise; // (undocumented) model: string; // (undocumented) params?: StartChatParams; - // (undocumented) - requestOptions?: RequestOptions; - sendMessage(request: string | Array): Promise; - sendMessageStream(request: string | Array): Promise; + sendMessage(request: string | Array, requestOptions?: SingleRequestOptions): Promise; + sendMessageStream(request: string | Array, requestOptions?: SingleRequestOptions): Promise; } // @public @@ -462,16 +460,16 @@ export interface GenerativeContentBlob { // @public export class GenerativeModel { - constructor(apiKey: string, modelParams: ModelParams, requestOptions?: RequestOptions); + constructor(apiKey: string, modelParams: ModelParams, _requestOptions?: RequestOptions); // (undocumented) apiKey: string; - batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest): Promise; + batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest, requestOptions?: SingleRequestOptions): Promise; // (undocumented) cachedContent: CachedContent; - countTokens(request: CountTokensRequest | string | Array): Promise; - embedContent(request: EmbedContentRequest | string | Array): Promise; - generateContent(request: GenerateContentRequest | string | Array): Promise; - generateContentStream(request: GenerateContentRequest | string | Array): Promise; + countTokens(request: CountTokensRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; + embedContent(request: EmbedContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; + generateContent(request: GenerateContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; + generateContentStream(request: GenerateContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; // (undocumented) generationConfig: GenerationConfig; // (undocumented) @@ -667,6 +665,11 @@ export interface Schema { type?: FunctionDeclarationSchemaType; } +// @public +export interface SingleRequestOptions extends RequestOptions { + signal?: AbortSignal; +} + // @public export interface StartChatParams extends BaseParams { cachedContent?: string; diff --git 
a/docs/reference/files/generative-ai.googleaifilemanager.deletefile.md b/docs/reference/files/generative-ai.googleaifilemanager.deletefile.md index e5ecfdade..0fae42e61 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.deletefile.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.deletefile.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.deleteFile() method -Delete file with given ID +Delete file with given ID. + +Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. **Signature:** ```typescript -deleteFile(fileId: string): Promise; +deleteFile(fileId: string, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ deleteFile(fileId: string): Promise; | Parameter | Type | Description | | --- | --- | --- | | fileId | string | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/files/generative-ai.googleaifilemanager.getfile.md b/docs/reference/files/generative-ai.googleaifilemanager.getfile.md index 613d6b6c3..a8a08472e 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.getfile.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.getfile.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.getFile() method -Get metadata for file with given ID +Get metadata for file with given ID. + +Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. **Signature:** ```typescript -getFile(fileId: string): Promise; +getFile(fileId: string, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ getFile(fileId: string): Promise; | Parameter | Type | Description | | --- | --- | --- | | fileId | string | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/files/generative-ai.googleaifilemanager.listfiles.md b/docs/reference/files/generative-ai.googleaifilemanager.listfiles.md index ef116fb68..1c229fbb3 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.listfiles.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.listfiles.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.listFiles() method -List all uploaded files +List all uploaded files. + +Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. 
**Signature:** ```typescript -listFiles(listParams?: ListParams): Promise; +listFiles(listParams?: ListParams, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ listFiles(listParams?: ListParams): Promise; | Parameter | Type | Description | | --- | --- | --- | | listParams | [ListParams](./generative-ai.listparams.md) | _(Optional)_ | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/files/generative-ai.googleaifilemanager.md b/docs/reference/files/generative-ai.googleaifilemanager.md index 655c8d0c4..e0f3144c6 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.md @@ -28,8 +28,8 @@ export declare class GoogleAIFileManager | Method | Modifiers | Description | | --- | --- | --- | -| [deleteFile(fileId)](./generative-ai.googleaifilemanager.deletefile.md) | | Delete file with given ID | -| [getFile(fileId)](./generative-ai.googleaifilemanager.getfile.md) | | Get metadata for file with given ID | -| [listFiles(listParams)](./generative-ai.googleaifilemanager.listfiles.md) | | List all uploaded files | -| [uploadFile(filePath, fileMetadata)](./generative-ai.googleaifilemanager.uploadfile.md) | | Upload a file | +| [deleteFile(fileId, requestOptions)](./generative-ai.googleaifilemanager.deletefile.md) | |

Delete file with given ID.

Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [getFile(fileId, requestOptions)](./generative-ai.googleaifilemanager.getfile.md) | |

Get metadata for file with given ID.

Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [listFiles(listParams, requestOptions)](./generative-ai.googleaifilemanager.listfiles.md) | |

List all uploaded files.

Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [uploadFile(filePath, fileMetadata, requestOptions)](./generative-ai.googleaifilemanager.uploadfile.md) | |

Upload a file.

Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| diff --git a/docs/reference/files/generative-ai.googleaifilemanager.uploadfile.md b/docs/reference/files/generative-ai.googleaifilemanager.uploadfile.md index 90648e904..0b29cb685 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.uploadfile.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.uploadfile.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.uploadFile() method -Upload a file +Upload a file. + +Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. **Signature:** ```typescript -uploadFile(filePath: string, fileMetadata: FileMetadata): Promise; +uploadFile(filePath: string, fileMetadata: FileMetadata, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -18,6 +20,7 @@ uploadFile(filePath: string, fileMetadata: FileMetadata): Promise + +[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [SingleRequestOptions](./generative-ai.singlerequestoptions.md) + +## SingleRequestOptions interface + +Params passed to atomic asynchronous operations. + +**Signature:** + +```typescript +export interface SingleRequestOptions extends RequestOptions +``` +**Extends:** [RequestOptions](./generative-ai.requestoptions.md) + +## Properties + +| Property | Modifiers | Type | Description | +| --- | --- | --- | --- | +| [signal?](./generative-ai.singlerequestoptions.signal.md) | | AbortSignal | _(Optional)_ An object that may be used to abort asynchronous requests. The request may also be aborted due to the expiration of the timeout value, if provided, and if the timeout occurs first. | + diff --git a/docs/reference/files/generative-ai.singlerequestoptions.signal.md b/docs/reference/files/generative-ai.singlerequestoptions.signal.md new file mode 100644 index 000000000..9f0672b70 --- /dev/null +++ b/docs/reference/files/generative-ai.singlerequestoptions.signal.md @@ -0,0 +1,13 @@ + + +[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [SingleRequestOptions](./generative-ai.singlerequestoptions.md) > [signal](./generative-ai.singlerequestoptions.signal.md) + +## SingleRequestOptions.signal property + +An object that may be used to abort asynchronous requests. The request may also be aborted due to the expiration of the timeout value, if provided, and if the timeout occurs first. 
+ +**Signature:** + +```typescript +signal?: AbortSignal; +``` diff --git a/docs/reference/main/generative-ai.chatsession._constructor_.md b/docs/reference/main/generative-ai.chatsession._constructor_.md index 918fd3107..8bd24a6e8 100644 --- a/docs/reference/main/generative-ai.chatsession._constructor_.md +++ b/docs/reference/main/generative-ai.chatsession._constructor_.md @@ -9,7 +9,7 @@ Constructs a new instance of the `ChatSession` class **Signature:** ```typescript -constructor(apiKey: string, model: string, params?: StartChatParams, requestOptions?: RequestOptions); +constructor(apiKey: string, model: string, params?: StartChatParams, _requestOptions?: RequestOptions); ``` ## Parameters @@ -19,5 +19,5 @@ constructor(apiKey: string, model: string, params?: StartChatParams, requestOpti | apiKey | string | | | model | string | | | params | [StartChatParams](./generative-ai.startchatparams.md) | _(Optional)_ | -| requestOptions | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | +| \_requestOptions | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | diff --git a/docs/reference/main/generative-ai.chatsession.md b/docs/reference/main/generative-ai.chatsession.md index 948cd4cd4..360a0ef9c 100644 --- a/docs/reference/main/generative-ai.chatsession.md +++ b/docs/reference/main/generative-ai.chatsession.md @@ -16,7 +16,7 @@ export declare class ChatSession | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(apiKey, model, params, requestOptions)](./generative-ai.chatsession._constructor_.md) | | Constructs a new instance of the ChatSession class | +| [(constructor)(apiKey, model, params, \_requestOptions)](./generative-ai.chatsession._constructor_.md) | | Constructs a new instance of the ChatSession class | ## Properties @@ -24,13 +24,12 @@ export declare class ChatSession | --- | --- | --- | --- | | [model](./generative-ai.chatsession.model.md) | | string | | | [params?](./generative-ai.chatsession.params.md) | | [StartChatParams](./generative-ai.startchatparams.md) | _(Optional)_ | -| [requestOptions?](./generative-ai.chatsession.requestoptions.md) | | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | ## Methods | Method | Modifiers | Description | | --- | --- | --- | | [getHistory()](./generative-ai.chatsession.gethistory.md) | | Gets the chat history so far. Blocked prompts are not added to history. Blocked candidates are not added to history, nor are the prompts that generated them. | -| [sendMessage(request)](./generative-ai.chatsession.sendmessage.md) | | Sends a chat message and receives a non-streaming [GenerateContentResult](./generative-ai.generatecontentresult.md) | -| [sendMessageStream(request)](./generative-ai.chatsession.sendmessagestream.md) | | Sends a chat message and receives the response as a [GenerateContentStreamResult](./generative-ai.generatecontentstreamresult.md) containing an iterable stream and a response promise. | +| [sendMessage(request, requestOptions)](./generative-ai.chatsession.sendmessage.md) | |

Sends a chat message and receives a non-streaming [GenerateContentResult](./generative-ai.generatecontentresult.md).

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [ChatSession](./generative-ai.chatsession.md) initialization.

| +| [sendMessageStream(request, requestOptions)](./generative-ai.chatsession.sendmessagestream.md) | |

Sends a chat message and receives the response as a [GenerateContentStreamResult](./generative-ai.generatecontentstreamresult.md) containing an iterable stream and a response promise.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [ChatSession](./generative-ai.chatsession.md) initialization.

| diff --git a/docs/reference/main/generative-ai.chatsession.requestoptions.md b/docs/reference/main/generative-ai.chatsession.requestoptions.md deleted file mode 100644 index bc7402c4d..000000000 --- a/docs/reference/main/generative-ai.chatsession.requestoptions.md +++ /dev/null @@ -1,11 +0,0 @@ - - -[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [ChatSession](./generative-ai.chatsession.md) > [requestOptions](./generative-ai.chatsession.requestoptions.md) - -## ChatSession.requestOptions property - -**Signature:** - -```typescript -requestOptions?: RequestOptions; -``` diff --git a/docs/reference/main/generative-ai.chatsession.sendmessage.md b/docs/reference/main/generative-ai.chatsession.sendmessage.md index 08a5ff6c1..dfa667135 100644 --- a/docs/reference/main/generative-ai.chatsession.sendmessage.md +++ b/docs/reference/main/generative-ai.chatsession.sendmessage.md @@ -4,12 +4,14 @@ ## ChatSession.sendMessage() method -Sends a chat message and receives a non-streaming [GenerateContentResult](./generative-ai.generatecontentresult.md) +Sends a chat message and receives a non-streaming [GenerateContentResult](./generative-ai.generatecontentresult.md). + +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. **Signature:** ```typescript -sendMessage(request: string | Array): Promise; +sendMessage(request: string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ sendMessage(request: string | Array): Promise> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.chatsession.sendmessagestream.md b/docs/reference/main/generative-ai.chatsession.sendmessagestream.md index e6f7cbe33..ac409f774 100644 --- a/docs/reference/main/generative-ai.chatsession.sendmessagestream.md +++ b/docs/reference/main/generative-ai.chatsession.sendmessagestream.md @@ -6,10 +6,12 @@ Sends a chat message and receives the response as a [GenerateContentStreamResult](./generative-ai.generatecontentstreamresult.md) containing an iterable stream and a response promise. +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. 
+ **Signature:** ```typescript -sendMessageStream(request: string | Array): Promise; +sendMessageStream(request: string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ sendMessageStream(request: string | Array): Promise> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel._constructor_.md b/docs/reference/main/generative-ai.generativemodel._constructor_.md index 47030ab76..1a410e65a 100644 --- a/docs/reference/main/generative-ai.generativemodel._constructor_.md +++ b/docs/reference/main/generative-ai.generativemodel._constructor_.md @@ -9,7 +9,7 @@ Constructs a new instance of the `GenerativeModel` class **Signature:** ```typescript -constructor(apiKey: string, modelParams: ModelParams, requestOptions?: RequestOptions); +constructor(apiKey: string, modelParams: ModelParams, _requestOptions?: RequestOptions); ``` ## Parameters @@ -18,5 +18,5 @@ constructor(apiKey: string, modelParams: ModelParams, requestOptions?: RequestOp | --- | --- | --- | | apiKey | string | | | modelParams | [ModelParams](./generative-ai.modelparams.md) | | -| requestOptions | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | +| \_requestOptions | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | diff --git a/docs/reference/main/generative-ai.generativemodel.batchembedcontents.md b/docs/reference/main/generative-ai.generativemodel.batchembedcontents.md index cd3ccadc0..76d31f98c 100644 --- a/docs/reference/main/generative-ai.generativemodel.batchembedcontents.md +++ b/docs/reference/main/generative-ai.generativemodel.batchembedcontents.md @@ -6,10 +6,12 @@ Embeds an array of [EmbedContentRequest](./generative-ai.embedcontentrequest.md)s. +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. + **Signature:** ```typescript -batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest): Promise; +batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest): Promise | Parameter | Type | Description | | --- | --- | --- | | batchEmbedContentRequest | [BatchEmbedContentsRequest](./generative-ai.batchembedcontentsrequest.md) | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.counttokens.md b/docs/reference/main/generative-ai.generativemodel.counttokens.md index 1e3b982d8..d81236a22 100644 --- a/docs/reference/main/generative-ai.generativemodel.counttokens.md +++ b/docs/reference/main/generative-ai.generativemodel.counttokens.md @@ -6,10 +6,12 @@ Counts the tokens in the provided request. +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. 
+ **Signature:** ```typescript -countTokens(request: CountTokensRequest | string | Array): Promise; +countTokens(request: CountTokensRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ countTokens(request: CountTokensRequest | string | Array): Promis | Parameter | Type | Description | | --- | --- | --- | | request | [CountTokensRequest](./generative-ai.counttokensrequest.md) \| string \| Array<string \| [Part](./generative-ai.part.md)> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.embedcontent.md b/docs/reference/main/generative-ai.generativemodel.embedcontent.md index 445d130d7..8c2105d82 100644 --- a/docs/reference/main/generative-ai.generativemodel.embedcontent.md +++ b/docs/reference/main/generative-ai.generativemodel.embedcontent.md @@ -6,10 +6,12 @@ Embeds the provided content. +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. + **Signature:** ```typescript -embedContent(request: EmbedContentRequest | string | Array): Promise; +embedContent(request: EmbedContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ embedContent(request: EmbedContentRequest | string | Array): Prom | Parameter | Type | Description | | --- | --- | --- | | request | [EmbedContentRequest](./generative-ai.embedcontentrequest.md) \| string \| Array<string \| [Part](./generative-ai.part.md)> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.generatecontent.md b/docs/reference/main/generative-ai.generativemodel.generatecontent.md index 86d0ac70b..8cc74496c 100644 --- a/docs/reference/main/generative-ai.generativemodel.generatecontent.md +++ b/docs/reference/main/generative-ai.generativemodel.generatecontent.md @@ -6,10 +6,12 @@ Makes a single non-streaming call to the model and returns an object containing a single [GenerateContentResponse](./generative-ai.generatecontentresponse.md). +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. 
+ **Signature:** ```typescript -generateContent(request: GenerateContentRequest | string | Array): Promise; +generateContent(request: GenerateContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ generateContent(request: GenerateContentRequest | string | Array) | Parameter | Type | Description | | --- | --- | --- | | request | [GenerateContentRequest](./generative-ai.generatecontentrequest.md) \| string \| Array<string \| [Part](./generative-ai.part.md)> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.generatecontentstream.md b/docs/reference/main/generative-ai.generativemodel.generatecontentstream.md index 5288ad821..6cfd125c9 100644 --- a/docs/reference/main/generative-ai.generativemodel.generatecontentstream.md +++ b/docs/reference/main/generative-ai.generativemodel.generatecontentstream.md @@ -6,10 +6,12 @@ Makes a single streaming call to the model and returns an object containing an iterable stream that iterates over all chunks in the streaming response as well as a promise that returns the final aggregated response. +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. + **Signature:** ```typescript -generateContentStream(request: GenerateContentRequest | string | Array): Promise; +generateContentStream(request: GenerateContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ generateContentStream(request: GenerateContentRequest | string | Array> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.md b/docs/reference/main/generative-ai.generativemodel.md index f78e6b8a2..c822694ce 100644 --- a/docs/reference/main/generative-ai.generativemodel.md +++ b/docs/reference/main/generative-ai.generativemodel.md @@ -16,7 +16,7 @@ export declare class GenerativeModel | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(apiKey, modelParams, requestOptions)](./generative-ai.generativemodel._constructor_.md) | | Constructs a new instance of the GenerativeModel class | +| [(constructor)(apiKey, modelParams, \_requestOptions)](./generative-ai.generativemodel._constructor_.md) | | Constructs a new instance of the GenerativeModel class | ## Properties @@ -36,10 +36,10 @@ export declare class GenerativeModel | Method | Modifiers | Description | | --- | --- | --- | -| [batchEmbedContents(batchEmbedContentRequest)](./generative-ai.generativemodel.batchembedcontents.md) | | Embeds an array of [EmbedContentRequest](./generative-ai.embedcontentrequest.md)s. | -| [countTokens(request)](./generative-ai.generativemodel.counttokens.md) | | Counts the tokens in the provided request. | -| [embedContent(request)](./generative-ai.generativemodel.embedcontent.md) | | Embeds the provided content. | -| [generateContent(request)](./generative-ai.generativemodel.generatecontent.md) | | Makes a single non-streaming call to the model and returns an object containing a single [GenerateContentResponse](./generative-ai.generatecontentresponse.md). 
| -| [generateContentStream(request)](./generative-ai.generativemodel.generatecontentstream.md) | | Makes a single streaming call to the model and returns an object containing an iterable stream that iterates over all chunks in the streaming response as well as a promise that returns the final aggregated response. | +| [batchEmbedContents(batchEmbedContentRequest, requestOptions)](./generative-ai.generativemodel.batchembedcontents.md) | |

Embeds an array of [EmbedContentRequest](./generative-ai.embedcontentrequest.md)s.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| +| [countTokens(request, requestOptions)](./generative-ai.generativemodel.counttokens.md) | |

Counts the tokens in the provided request.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| +| [embedContent(request, requestOptions)](./generative-ai.generativemodel.embedcontent.md) | |

Embeds the provided content.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| +| [generateContent(request, requestOptions)](./generative-ai.generativemodel.generatecontent.md) | |

Makes a single non-streaming call to the model and returns an object containing a single [GenerateContentResponse](./generative-ai.generatecontentresponse.md).

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| +| [generateContentStream(request, requestOptions)](./generative-ai.generativemodel.generatecontentstream.md) | |

Makes a single streaming call to the model and returns an object containing an iterable stream that iterates over all chunks in the streaming response as well as a promise that returns the final aggregated response.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| | [startChat(startChatParams)](./generative-ai.generativemodel.startchat.md) | | Gets a new [ChatSession](./generative-ai.chatsession.md) instance which can be used for multi-turn chats. | diff --git a/docs/reference/main/generative-ai.md b/docs/reference/main/generative-ai.md index b4a67bb90..8c02731c2 100644 --- a/docs/reference/main/generative-ai.md +++ b/docs/reference/main/generative-ai.md @@ -81,6 +81,7 @@ | [SafetyRating](./generative-ai.safetyrating.md) | A safety rating associated with a [GenerateContentCandidate](./generative-ai.generatecontentcandidate.md) | | [SafetySetting](./generative-ai.safetysetting.md) | Safety setting that can be sent as part of request parameters. | | [Schema](./generative-ai.schema.md) | Schema is used to define the format of input/output data. Represents a select subset of an OpenAPI 3.0 schema object. More fields may be added in the future as needed. | +| [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | Params passed to atomic asynchronous operations. | | [StartChatParams](./generative-ai.startchatparams.md) | Params for [GenerativeModel.startChat()](./generative-ai.generativemodel.startchat.md). | | [TextPart](./generative-ai.textpart.md) | Content part interface if the part represents a text string. | | [ToolConfig](./generative-ai.toolconfig.md) | Tool config. This config is shared for all tools provided in the request. | diff --git a/docs/reference/main/generative-ai.singlerequestoptions.md b/docs/reference/main/generative-ai.singlerequestoptions.md new file mode 100644 index 000000000..4d9a23d31 --- /dev/null +++ b/docs/reference/main/generative-ai.singlerequestoptions.md @@ -0,0 +1,21 @@ + + +[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [SingleRequestOptions](./generative-ai.singlerequestoptions.md) + +## SingleRequestOptions interface + +Params passed to atomic asynchronous operations. + +**Signature:** + +```typescript +export interface SingleRequestOptions extends RequestOptions +``` +**Extends:** [RequestOptions](./generative-ai.requestoptions.md) + +## Properties + +| Property | Modifiers | Type | Description | +| --- | --- | --- | --- | +| [signal?](./generative-ai.singlerequestoptions.signal.md) | | AbortSignal |

_(Optional)_ An object that may be used to abort asynchronous requests. The request may also be aborted due to the expiration of the timeout value, if provided.

NOTE: AbortSignal is a client-only operation. Using it to cancel an operation will not cancel the request in the service. You will still be charged usage for any applicable operations.

| + diff --git a/docs/reference/main/generative-ai.singlerequestoptions.signal.md b/docs/reference/main/generative-ai.singlerequestoptions.signal.md new file mode 100644 index 000000000..ac064709e --- /dev/null +++ b/docs/reference/main/generative-ai.singlerequestoptions.signal.md @@ -0,0 +1,15 @@ + + +[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [SingleRequestOptions](./generative-ai.singlerequestoptions.md) > [signal](./generative-ai.singlerequestoptions.signal.md) + +## SingleRequestOptions.signal property + +An object that may be used to abort asynchronous requests. The request may also be aborted due to the expiration of the timeout value, if provided. + +NOTE: AbortSignal is a client-only operation. Using it to cancel an operation will not cancel the request in the service. You will still be charged usage for any applicable operations. + +**Signature:** + +```typescript +signal?: AbortSignal; +``` diff --git a/docs/reference/server/generative-ai.googleaifilemanager.deletefile.md b/docs/reference/server/generative-ai.googleaifilemanager.deletefile.md index e5ecfdade..acaefb74b 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.deletefile.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.deletefile.md @@ -4,7 +4,7 @@ ## GoogleAIFileManager.deleteFile() method -Delete file with given ID +Delete file with given ID. **Signature:** diff --git a/docs/reference/server/generative-ai.googleaifilemanager.getfile.md b/docs/reference/server/generative-ai.googleaifilemanager.getfile.md index 613d6b6c3..79f467533 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.getfile.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.getfile.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.getFile() method -Get metadata for file with given ID +Get metadata for file with given ID. + +Any fields set in the optional parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. **Signature:** ```typescript -getFile(fileId: string): Promise; +getFile(fileId: string, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ getFile(fileId: string): Promise; | Parameter | Type | Description | | --- | --- | --- | | fileId | string | | +| requestOptions | SingleRequestOptions | _(Optional)_ | **Returns:** diff --git a/docs/reference/server/generative-ai.googleaifilemanager.listfiles.md b/docs/reference/server/generative-ai.googleaifilemanager.listfiles.md index ef116fb68..f8449bd07 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.listfiles.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.listfiles.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.listFiles() method -List all uploaded files +List all uploaded files. + +Any fields set in the optional parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. 
**Signature:** ```typescript -listFiles(listParams?: ListParams): Promise; +listFiles(listParams?: ListParams, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ listFiles(listParams?: ListParams): Promise; | Parameter | Type | Description | | --- | --- | --- | | listParams | [ListParams](./generative-ai.listparams.md) | _(Optional)_ | +| requestOptions | SingleRequestOptions | _(Optional)_ | **Returns:** diff --git a/docs/reference/server/generative-ai.googleaifilemanager.md b/docs/reference/server/generative-ai.googleaifilemanager.md index 655c8d0c4..04d222325 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.md @@ -28,8 +28,8 @@ export declare class GoogleAIFileManager | Method | Modifiers | Description | | --- | --- | --- | -| [deleteFile(fileId)](./generative-ai.googleaifilemanager.deletefile.md) | | Delete file with given ID | -| [getFile(fileId)](./generative-ai.googleaifilemanager.getfile.md) | | Get metadata for file with given ID | -| [listFiles(listParams)](./generative-ai.googleaifilemanager.listfiles.md) | | List all uploaded files | -| [uploadFile(filePath, fileMetadata)](./generative-ai.googleaifilemanager.uploadfile.md) | | Upload a file | +| [deleteFile(fileId)](./generative-ai.googleaifilemanager.deletefile.md) | | Delete file with given ID. | +| [getFile(fileId, requestOptions)](./generative-ai.googleaifilemanager.getfile.md) | |

Get metadata for file with given ID.

Any fields set in the optional `requestOptions` parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [listFiles(listParams, requestOptions)](./generative-ai.googleaifilemanager.listfiles.md) | |

List all uploaded files.

Any fields set in the optional `requestOptions` parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [uploadFile(filePath, fileMetadata)](./generative-ai.googleaifilemanager.uploadfile.md) | | Upload a file. | diff --git a/docs/reference/server/generative-ai.googleaifilemanager.uploadfile.md b/docs/reference/server/generative-ai.googleaifilemanager.uploadfile.md index 90648e904..71e4f76bd 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.uploadfile.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.uploadfile.md @@ -4,7 +4,7 @@ ## GoogleAIFileManager.uploadFile() method -Upload a file +Upload a file. **Signature:** diff --git a/packages/main/src/methods/chat-session.ts b/packages/main/src/methods/chat-session.ts index fd469bfe3..2d1c7e6b3 100644 --- a/packages/main/src/methods/chat-session.ts +++ b/packages/main/src/methods/chat-session.ts @@ -22,6 +22,7 @@ import { GenerateContentStreamResult, Part, RequestOptions, + SingleRequestOptions, StartChatParams, } from "../../types"; import { formatNewContent } from "../requests/request-helpers"; @@ -49,7 +50,7 @@ export class ChatSession { apiKey: string, public model: string, public params?: StartChatParams, - public requestOptions?: RequestOptions, + private _requestOptions: RequestOptions = {}, ) { this._apiKey = apiKey; if (params?.history) { @@ -70,10 +71,15 @@ export class ChatSession { /** * Sends a chat message and receives a non-streaming - * {@link GenerateContentResult} + * {@link GenerateContentResult}. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async sendMessage( request: string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { await this._sendPromise; const newContent = formatNewContent(request); @@ -86,6 +92,10 @@ export class ChatSession { cachedContent: this.params?.cachedContent, contents: [...this._history, newContent], }; + const chatSessionRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; let finalResult; // Add onto the chain. this._sendPromise = this._sendPromise @@ -94,7 +104,7 @@ export class ChatSession { this._apiKey, this.model, generateContentRequest, - this.requestOptions, + chatSessionRequestOptions, ), ) .then((result) => { @@ -128,9 +138,14 @@ export class ChatSession { * Sends a chat message and receives the response as a * {@link GenerateContentStreamResult} containing an iterable stream * and a response promise. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async sendMessageStream( request: string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { await this._sendPromise; const newContent = formatNewContent(request); @@ -143,11 +158,15 @@ export class ChatSession { cachedContent: this.params?.cachedContent, contents: [...this._history, newContent], }; + const chatSessionRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; const streamPromise = generateContentStream( this._apiKey, this.model, generateContentRequest, - this.requestOptions, + chatSessionRequestOptions, ); // Add onto the chain. 
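The spread-merge above (`{ ...this._requestOptions, ...requestOptions }`) is what gives per-message options precedence over the session's construction-time options. A hedged sketch of cancelling a streamed chat reply mid-flight (the prompt and one-second delay are illustrative):

```typescript
import { GoogleGenerativeAI } from "@google/generative-ai";

async function demo(): Promise<void> {
  const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
  const chat = model.startChat();

  const controller = new AbortController();
  // Stop reading the stream after one second; this aborts only the
  // client-side fetch, so server-side work (and billing) may still occur.
  setTimeout(() => controller.abort(), 1000);

  try {
    const { stream } = await chat.sendMessageStream("Write a long poem.", {
      signal: controller.signal,
    });
    for await (const chunk of stream) {
      process.stdout.write(chunk.text());
    }
  } catch (e) {
    // An abort surfaces as a rejected fetch (AbortError in Node 18+).
    console.error("Request was aborted or failed:", e);
  }
}

demo().catch(console.error);
```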
diff --git a/packages/main/src/methods/count-tokens.ts b/packages/main/src/methods/count-tokens.ts index a8a38e930..49d5ba1cc 100644 --- a/packages/main/src/methods/count-tokens.ts +++ b/packages/main/src/methods/count-tokens.ts @@ -18,7 +18,7 @@ import { CountTokensRequest, CountTokensResponse, - RequestOptions, + SingleRequestOptions, } from "../../types"; import { Task, makeModelRequest } from "../requests/request"; @@ -26,7 +26,7 @@ export async function countTokens( apiKey: string, model: string, params: CountTokensRequest, - requestOptions?: RequestOptions, + singleRequestOptions: SingleRequestOptions, ): Promise { const response = await makeModelRequest( model, @@ -34,7 +34,7 @@ export async function countTokens( apiKey, false, JSON.stringify(params), - requestOptions, + singleRequestOptions, ); return response.json(); } diff --git a/packages/main/src/methods/generate-content.ts b/packages/main/src/methods/generate-content.ts index 327cd96e9..8c1e13934 100644 --- a/packages/main/src/methods/generate-content.ts +++ b/packages/main/src/methods/generate-content.ts @@ -20,7 +20,7 @@ import { GenerateContentResponse, GenerateContentResult, GenerateContentStreamResult, - RequestOptions, + SingleRequestOptions, } from "../../types"; import { Task, makeModelRequest } from "../requests/request"; import { addHelpers } from "../requests/response-helpers"; @@ -30,7 +30,7 @@ export async function generateContentStream( apiKey: string, model: string, params: GenerateContentRequest, - requestOptions?: RequestOptions, + requestOptions: SingleRequestOptions, ): Promise { const response = await makeModelRequest( model, @@ -47,7 +47,7 @@ export async function generateContent( apiKey: string, model: string, params: GenerateContentRequest, - requestOptions?: RequestOptions, + requestOptions?: SingleRequestOptions, ): Promise { const response = await makeModelRequest( model, diff --git a/packages/main/src/models/generative-model.ts b/packages/main/src/models/generative-model.ts index bbc17601b..375d9ddff 100644 --- a/packages/main/src/models/generative-model.ts +++ b/packages/main/src/models/generative-model.ts @@ -36,6 +36,7 @@ import { Part, RequestOptions, SafetySetting, + SingleRequestOptions, StartChatParams, Tool, ToolConfig, @@ -67,7 +68,7 @@ export class GenerativeModel { constructor( public apiKey: string, modelParams: ModelParams, - requestOptions?: RequestOptions, + private _requestOptions: RequestOptions = {}, ) { if (modelParams.model.includes("/")) { // Models may be named "models/model-name" or "tunedModels/model-name" @@ -84,17 +85,25 @@ export class GenerativeModel { modelParams.systemInstruction, ); this.cachedContent = modelParams.cachedContent; - this.requestOptions = requestOptions || {}; } /** * Makes a single non-streaming call to the model * and returns an object containing a single {@link GenerateContentResponse}. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. 
*/ async generateContent( request: GenerateContentRequest | string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { const formattedParams = formatGenerateContentInput(request); + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return generateContent( this.apiKey, this.model, @@ -107,20 +116,29 @@ export class GenerativeModel { cachedContent: this.cachedContent?.name, ...formattedParams, }, - this.requestOptions, + generativeModelRequestOptions, ); } /** - * Makes a single streaming call to the model - * and returns an object containing an iterable stream that iterates - * over all chunks in the streaming response as well as - * a promise that returns the final aggregated response. + * Makes a single streaming call to the model and returns an object + * containing an iterable stream that iterates over all chunks in the + * streaming response as well as a promise that returns the final + * aggregated response. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async generateContentStream( request: GenerateContentRequest | string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { const formattedParams = formatGenerateContentInput(request); + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return generateContentStream( this.apiKey, this.model, @@ -133,7 +151,7 @@ export class GenerativeModel { cachedContent: this.cachedContent?.name, ...formattedParams, }, - this.requestOptions, + generativeModelRequestOptions, ); } @@ -160,9 +178,14 @@ export class GenerativeModel { /** * Counts the tokens in the provided request. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async countTokens( request: CountTokensRequest | string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { const formattedParams = formatCountTokensInput(request, { model: this.model, @@ -173,40 +196,62 @@ export class GenerativeModel { systemInstruction: this.systemInstruction, cachedContent: this.cachedContent, }); + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return countTokens( this.apiKey, this.model, formattedParams, - this.requestOptions, + generativeModelRequestOptions, ); } /** * Embeds the provided content. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async embedContent( request: EmbedContentRequest | string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { const formattedParams = formatEmbedContentInput(request); + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return embedContent( this.apiKey, this.model, formattedParams, - this.requestOptions, + generativeModelRequestOptions, ); } /** * Embeds an array of {@link EmbedContentRequest}s. 
+ * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async batchEmbedContents( batchEmbedContentRequest: BatchEmbedContentsRequest, + requestOptions: SingleRequestOptions = {}, ): Promise { + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return batchEmbedContents( this.apiKey, this.model, batchEmbedContentRequest, - this.requestOptions, + generativeModelRequestOptions, ); } } diff --git a/packages/main/src/requests/request.ts b/packages/main/src/requests/request.ts index 9249aba05..0828b65a4 100644 --- a/packages/main/src/requests/request.ts +++ b/packages/main/src/requests/request.ts @@ -15,7 +15,7 @@ * limitations under the License. */ -import { RequestOptions } from "../../types"; +import { RequestOptions, SingleRequestOptions } from "../../types"; import { GoogleGenerativeAIError, GoogleGenerativeAIFetchError, @@ -116,7 +116,7 @@ export async function constructModelRequest( apiKey: string, stream: boolean, body: string, - requestOptions?: RequestOptions, + requestOptions: SingleRequestOptions, ): Promise<{ url: string; fetchOptions: RequestInit }> { const url = new RequestUrl(model, task, apiKey, stream, requestOptions); return { @@ -136,7 +136,7 @@ export async function makeModelRequest( apiKey: string, stream: boolean, body: string, - requestOptions?: RequestOptions, + requestOptions: SingleRequestOptions = {}, // Allows this to be stubbed for tests fetchFn = fetch, ): Promise { @@ -217,13 +217,19 @@ async function handleResponseNotOk( * @param requestOptions - The user-defined request options. * @returns The generated request options. */ -function buildFetchOptions(requestOptions?: RequestOptions): RequestInit { +function buildFetchOptions(requestOptions?: SingleRequestOptions): RequestInit { const fetchOptions = {} as RequestInit; - if (requestOptions?.timeout >= 0) { - const abortController = new AbortController(); - const signal = abortController.signal; - setTimeout(() => abortController.abort(), requestOptions.timeout); - fetchOptions.signal = signal; + if (requestOptions?.signal !== undefined || requestOptions?.timeout >= 0) { + const controller = new AbortController(); + if (requestOptions?.timeout >= 0) { + setTimeout(() => controller.abort(), requestOptions.timeout); + } + if (requestOptions?.signal) { + requestOptions.signal.addEventListener("abort", () => { + controller.abort(); + }); + } + fetchOptions.signal = controller.signal; } return fetchOptions; } diff --git a/packages/main/src/server/file-manager.ts b/packages/main/src/server/file-manager.ts index e839b8f70..c34abb2b0 100644 --- a/packages/main/src/server/file-manager.ts +++ b/packages/main/src/server/file-manager.ts @@ -15,7 +15,7 @@ * limitations under the License. */ -import { RequestOptions } from "../../types"; +import { RequestOptions, SingleRequestOptions } from "../../types"; import { readFileSync } from "fs"; import { FilesRequestUrl, getHeaders, makeServerRequest } from "./request"; import { @@ -44,11 +44,11 @@ export interface UploadMetadata { export class GoogleAIFileManager { constructor( public apiKey: string, - private _requestOptions?: RequestOptions, + private _requestOptions: RequestOptions = {}, ) {} /** - * Upload a file + * Upload a file. 
   */
  async uploadFile(
    filePath: string,
@@ -93,13 +93,24 @@
  }

  /**
-   * List all uploaded files
+   * List all uploaded files.
+   *
+   * Any fields set in the optional {@link SingleRequestOptions} parameter will take
+   * precedence over the {@link RequestOptions} values provided at the time of the
+   * {@link GoogleAIFileManager} initialization.
   */
-  async listFiles(listParams?: ListParams): Promise<ListFilesResponse> {
+  async listFiles(
+    listParams?: ListParams,
+    requestOptions: SingleRequestOptions = {},
+  ): Promise<ListFilesResponse> {
+    const filesRequestOptions: SingleRequestOptions = {
+      ...this._requestOptions,
+      ...requestOptions,
+    };
    const url = new FilesRequestUrl(
      RpcTask.LIST,
      this.apiKey,
-      this._requestOptions,
+      filesRequestOptions,
    );
    if (listParams?.pageSize) {
      url.appendParam("pageSize", listParams.pageSize.toString());
@@ -113,13 +124,24 @@
  }

  /**
-   * Get metadata for file with given ID
+   * Get metadata for the file with the given ID.
+   *
+   * Any fields set in the optional {@link SingleRequestOptions} parameter will take
+   * precedence over the {@link RequestOptions} values provided at the time of the
+   * {@link GoogleAIFileManager} initialization.
   */
-  async getFile(fileId: string): Promise<FileMetadataResponse> {
+  async getFile(
+    fileId: string,
+    requestOptions: SingleRequestOptions = {},
+  ): Promise<FileMetadataResponse> {
+    const filesRequestOptions: SingleRequestOptions = {
+      ...this._requestOptions,
+      ...requestOptions,
+    };
    const url = new FilesRequestUrl(
      RpcTask.GET,
      this.apiKey,
-      this._requestOptions,
+      filesRequestOptions,
    );
    url.appendPath(parseFileId(fileId));
    const uploadHeaders = getHeaders(url);
@@ -128,7 +150,7 @@
  }

  /**
-   * Delete file with given ID
+   * Delete the file with the given ID.
   */
  async deleteFile(fileId: string): Promise<void> {
    const url = new FilesRequestUrl(
diff --git a/packages/main/src/server/request.ts b/packages/main/src/server/request.ts
index 1c8a2339d..464c1d2e2 100644
--- a/packages/main/src/server/request.ts
+++ b/packages/main/src/server/request.ts
@@ -21,7 +21,7 @@ import {
  getClientHeaders,
  makeRequest,
} from "../requests/request";
-import { RequestOptions } from "../../types";
+import { RequestOptions, SingleRequestOptions } from "../../types";
 import { RpcTask } from "./constants";

 const taskToMethod = {
@@ -38,7 +38,7 @@ export class ServerRequestUrl {
   constructor(
     public task: RpcTask,
     public apiKey: string,
-    public requestOptions?: RequestOptions,
+    public requestOptions?: SingleRequestOptions,
   ) {}

   appendPath(path: string): void {
@@ -118,13 +118,23 @@ export async function makeServerRequest(
 }

 /**
- * Get AbortSignal if timeout is specified
+ * Create an AbortSignal based on the timeout and signal in the
+ * RequestOptions.
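+ *
+ * If both a signal and a timeout are provided, they are funneled into a
+ * single AbortController: whichever fires first aborts the request.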
 */
-function getSignal(requestOptions?: RequestOptions): AbortSignal | null {
-  if (requestOptions?.timeout >= 0) {
-    const abortController = new AbortController();
-    const signal = abortController.signal;
-    setTimeout(() => abortController.abort(), requestOptions.timeout);
-    return signal;
+function getSignal(requestOptions?: SingleRequestOptions): AbortSignal | null {
+  if (requestOptions?.signal !== undefined || requestOptions?.timeout >= 0) {
+    const controller = new AbortController();
+    if (requestOptions?.timeout >= 0) {
+      setTimeout(() => controller.abort(), requestOptions.timeout);
+    }
+    if (requestOptions?.signal) {
+      requestOptions.signal.addEventListener("abort", () => {
+        controller.abort();
+      });
+    }
+    return controller.signal;
  }
}
diff --git a/packages/main/test-integration/node/abort-signal.test.ts b/packages/main/test-integration/node/abort-signal.test.ts
new file mode 100644
index 000000000..d540b2df3
--- /dev/null
+++ b/packages/main/test-integration/node/abort-signal.test.ts
@@ -0,0 +1,250 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { expect, use } from "chai";
+import * as chaiAsPromised from "chai-as-promised";
+import {
+  GoogleGenerativeAI,
+  RequestOptions,
+  SingleRequestOptions,
+} from "../..";
+import { GoogleAIFileManager } from "../../dist/server";
+
+use(chaiAsPromised);
+
+/**
+ * Integration tests against live backend.
+ */
+describe("signal", function () {
+  this.timeout(60e3);
+  this.slow(10e3);
+  /* GoogleAIFileManager */
+  it("GoogleAIFileManager getFile() SingleRequestOptions.timeout", async () => {
+    // Ensure SingleRequestOptions.timeout takes precedence over the
+    // RequestOptions.timeout configured at construction. This also serves as
+    // a control test: timeout still works without an AbortSignal present.
+    const requestOptions: RequestOptions = { timeout: 9000 };
+    const fileManager = new GoogleAIFileManager(
+      process.env.GEMINI_API_KEY,
+      requestOptions,
+    );
+    // Ensure the file isn't hosted on the service.
+    try {
+      await fileManager.deleteFile("files/signal");
+    } catch (error) {}
+    const singleRequestOptions: SingleRequestOptions = { timeout: 1 };
+    // Use getFile, which would otherwise fail with a fetch error since the
+    // file doesn't exist; this lets us discern whether the rejection came
+    // from the timeout abort or from an ordinary fetch failure.
+    const promise = fileManager.getFile("signal.jpg", singleRequestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("GoogleAIFileManager getFile() aborted", async () => {
+    const fileManager = new GoogleAIFileManager(process.env.GEMINI_API_KEY);
+    const signal = AbortSignal.timeout(1);
+    const requestOptions: SingleRequestOptions = { signal };
+    const promise = fileManager.getFile("signal.jpg", requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("GoogleAIFileManager getFile() timeout before signal aborts", async () => {
+    // Ensure the manually configured timeout works in conjunction with the
+    // AbortSignal timeout.
+    const fileManager = new GoogleAIFileManager(process.env.GEMINI_API_KEY);
+    // Ensure the file isn't hosted on the service.
+    try {
+      await fileManager.deleteFile("files/signal");
+    } catch (error) {}
+    const signal = AbortSignal.timeout(9000);
+    const requestOptions: SingleRequestOptions = { timeout: 1, signal };
+    const promise = fileManager.getFile("signal.jpg", requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("GoogleAIFileManager listFiles() aborted", async () => {
+    const fileManager = new GoogleAIFileManager(process.env.GEMINI_API_KEY);
+    const signal = AbortSignal.timeout(1);
+    const requestOptions: SingleRequestOptions = { signal };
+    const promise = fileManager.listFiles(/* listParams= */ {}, requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("GoogleAIFileManager listFiles() timeout before signal aborts", async () => {
+    const fileManager = new GoogleAIFileManager(process.env.GEMINI_API_KEY);
+    const signal = AbortSignal.timeout(9000);
+    const requestOptions: SingleRequestOptions = { timeout: 1, signal };
+    const promise = fileManager.listFiles(/* listParams= */ {}, requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+
+  /* GenerativeModel */
+  it("GenerativeModel generateContent() SingleRequestOptions.timeout", async () => {
+    // Ensure SingleRequestOptions.timeout takes precedence over the
+    // RequestOptions.timeout configured at construction. This also serves as
+    // a control test: timeout still works without an AbortSignal present.
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const requestOptions: RequestOptions = {
+      timeout: 9000, // Much longer than a generateContent request should take.
+    };
+    const model = genAI.getGenerativeModel(
+      {
+        model: "gemini-1.5-flash-latest",
+      },
+      requestOptions,
+    );
+    const singleRequestOptions: SingleRequestOptions = { timeout: 1 };
+    const promise = model.generateContent(
+      "This is not an image",
+      singleRequestOptions,
+    );
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("GenerativeModel generateContent() aborted", async () => {
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const model = genAI.getGenerativeModel({
+      model: "gemini-1.5-flash-latest",
+    });
+    const signal = AbortSignal.timeout(1);
+    const requestOptions: SingleRequestOptions = { signal };
+    const promise = model.generateContent(
+      "This is not an image",
+      requestOptions,
+    );
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("GenerativeModel generateContent() timeout before signal aborts", async () => {
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const model = genAI.getGenerativeModel({
+      model: "gemini-1.5-flash-latest",
+    });
+    const signal = AbortSignal.timeout(9000);
+    const requestOptions: SingleRequestOptions = { timeout: 1, signal };
+    const promise = model.generateContent(
+      "This is not an image",
+      requestOptions,
+    );
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("GenerativeModel countTokens() aborted", async () => {
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const model = genAI.getGenerativeModel({
+      model: "gemini-1.5-flash-latest",
+    });
+    const signal = AbortSignal.timeout(1);
+    const requestOptions: SingleRequestOptions = { signal };
+    const promise = model.countTokens("This is not an image", requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("GenerativeModel embedContent() aborted", async () => {
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const model = genAI.getGenerativeModel({
+      model: "gemini-1.5-flash-latest",
+    });
+    const signal = AbortSignal.timeout(1);
+    const requestOptions: SingleRequestOptions = { signal };
+    const promise = model.embedContent("This is not an image", requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("GenerativeModel batchEmbedContents() aborted", async () => {
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const model = genAI.getGenerativeModel({
+      model: "gemini-1.5-flash-latest",
+    });
+    const signal = AbortSignal.timeout(1);
+    const requestOptions: SingleRequestOptions = { signal };
+    const content1 = {
+      content: { role: "user", parts: [{ text: "embed me" }] },
+    };
+    const content2 = {
+      content: { role: "user", parts: [{ text: "embed me" }] },
+    };
+    const promise = model.batchEmbedContents(
+      {
+        requests: [content1, content2],
+      },
+      requestOptions,
+    );
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+
+  /* ChatSession */
+  it("ChatSession sendMessage() SingleRequestOptions.timeout", async () => {
+    // Ensure SingleRequestOptions.timeout takes precedence over the
+    // RequestOptions.timeout configured at construction. This also serves as
+    // a control test: timeout still works without an AbortSignal present.
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const requestOptions: RequestOptions = {
+      timeout: 9000, // Much longer than a sendMessage request should take.
+    };
+    const model = genAI.getGenerativeModel(
+      {
+        model: "gemini-1.5-flash-latest",
+      },
+      requestOptions,
+    );
+    const question1 = "What is the capital of Oregon?";
+    const chat = model.startChat();
+    const singleRequestOptions: SingleRequestOptions = { timeout: 1 };
+    const promise = chat.sendMessage(question1, singleRequestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("ChatSession sendMessage() aborted", async () => {
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const model = genAI.getGenerativeModel({
+      model: "gemini-1.5-flash-latest",
+    });
+    const question1 = "What is the capital of Oregon?";
+    const chat = model.startChat();
+    const signal = AbortSignal.timeout(1);
+    const requestOptions: SingleRequestOptions = { signal };
+    const promise = chat.sendMessage(question1, requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("ChatSession sendMessage() timeout before signal aborts", async () => {
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const model = genAI.getGenerativeModel({
+      model: "gemini-1.5-flash-latest",
+    });
+    const question1 = "What is the capital of Oregon?";
+    const chat = model.startChat();
+    const signal = AbortSignal.timeout(9000);
+    const requestOptions: SingleRequestOptions = { timeout: 1, signal };
+    const promise = chat.sendMessage(question1, requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("ChatSession sendMessageStream() aborted", async () => {
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const model = genAI.getGenerativeModel({
+      model: "gemini-1.5-flash-latest",
+    });
+    const question1 = "What is the capital of Oregon?";
+    const chat = model.startChat();
+    const signal = AbortSignal.timeout(1);
+    const requestOptions: SingleRequestOptions = { signal };
+    const promise = chat.sendMessageStream(question1, requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+  it("ChatSession sendMessageStream() timeout before signal aborts", async () => {
+    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
+    const model = genAI.getGenerativeModel({
+      model: "gemini-1.5-flash-latest",
+    });
+    const question1 = "What is the capital of Oregon?";
+    const chat = model.startChat();
+    const signal = AbortSignal.timeout(9000);
+    const requestOptions: SingleRequestOptions = { timeout: 1, signal };
+    const promise = chat.sendMessageStream(question1, requestOptions);
+    await expect(promise).to.be.rejectedWith("This operation was aborted");
+  });
+});
diff --git a/packages/main/types/requests.ts b/packages/main/types/requests.ts
index 9fefde233..4eec1e9cb 100644
--- a/packages/main/types/requests.ts
+++ b/packages/main/types/requests.ts
@@ -189,6 +189,32 @@
   customHeaders?: Headers | Record<string, string>;
 }

+/**
+ * Params passed to atomic asynchronous operations.
+ * @public
+ */
+export interface SingleRequestOptions extends RequestOptions {
+  /**
+   * An object that may be used to abort asynchronous requests. The request
+   * may also be aborted due to the expiration of the timeout value, if provided.
+   *
+   * NOTE: Aborting via an AbortSignal happens only on the client; it will not
+   * cancel the operation in the service. You will still be charged usage for
+   * any applicable operations.
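+   *
+   * @example
+   * // Illustrative sketch: cancel an in-flight call from the caller's side
+   * // (assumes `model` is an initialized GenerativeModel).
+   * const controller = new AbortController();
+   * const promise = model.generateContent("Tell me a story.", {
+   *   signal: controller.signal,
+   * });
+   * controller.abort(); // e.g. when the user clicks "cancel"
+   * // The promise rejects, but billable server-side work may still complete.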
+ */ + signal?: AbortSignal; +} + /** * Defines a tool that model can call to access external knowledge. * @public From fd3965fb1aaf4c5318e2103497d9e6ce56b827c1 Mon Sep 17 00:00:00 2001 From: Christina Holland Date: Wed, 24 Jul 2024 10:40:23 -0700 Subject: [PATCH 4/5] Update count_tokens to show before/after on SI and tools (#215) --- samples/count_tokens.js | 62 +++++++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 24 deletions(-) diff --git a/samples/count_tokens.js b/samples/count_tokens.js index 99a98ae7b..979cf5090 100644 --- a/samples/count_tokens.js +++ b/samples/count_tokens.js @@ -53,7 +53,8 @@ async function tokensTextOnly() { // (`promptTokenCount` and `candidatesTokenCount`, respectively), // as well as the combined token count (`totalTokenCount`). console.log(generateResult.response.usageMetadata); - // { promptTokenCount: 11, candidatesTokenCount: 131, totalTokenCount: 142 } + // candidatesTokenCount and totalTokenCount depend on response, may vary + // { promptTokenCount: 11, candidatesTokenCount: 124, totalTokenCount: 135 } // [END tokens_text_only] } @@ -93,7 +94,8 @@ async function tokensChat() { // (`promptTokenCount` and `candidatesTokenCount`, respectively), // as well as the combined token count (`totalTokenCount`). console.log(chatResult.response.usageMetadata); - // { promptTokenCount: 25, candidatesTokenCount: 22, totalTokenCount: 47 } + // candidatesTokenCount and totalTokenCount depend on response, may vary + // { promptTokenCount: 25, candidatesTokenCount: 25, totalTokenCount: 50 } // [END tokens_chat] } @@ -136,6 +138,7 @@ async function tokensMultimodalImageInline() { // (`promptTokenCount` and `candidatesTokenCount`, respectively), // as well as the combined token count (`totalTokenCount`). console.log(generateResult.response.usageMetadata); + // candidatesTokenCount and totalTokenCount depend on response, may vary // { promptTokenCount: 265, candidatesTokenCount: 157, totalTokenCount: 422 } // [END tokens_multimodal_image_inline] } @@ -181,6 +184,7 @@ async function tokensMultimodalImageFileApi() { // (`promptTokenCount` and `candidatesTokenCount`, respectively), // as well as the combined token count (`totalTokenCount`). console.log(generateResult.response.usageMetadata); + // candidatesTokenCount and totalTokenCount depend on response, may vary // { promptTokenCount: 265, candidatesTokenCount: 157, totalTokenCount: 422 } // [END tokens_multimodal_image_file_api] await fileManager.deleteFile(uploadResult.file.name); @@ -244,6 +248,7 @@ async function tokensMultimodalVideoAudioFileApi() { // (`promptTokenCount` and `candidatesTokenCount`, respectively), // as well as the combined token count (`totalTokenCount`). 
console.log(generateResult.response.usageMetadata); + // candidatesTokenCount and totalTokenCount depend on response, may vary // { promptTokenCount: 302, candidatesTokenCount: 46, totalTokenCount: 348 } // [END tokens_multimodal_video_audio_file_api] await fileManager.deleteFile(uploadVideoResult.file.name); @@ -306,7 +311,7 @@ async function tokensCachedContent() { console.log(generateResult.response.usageMetadata); // { // promptTokenCount: 323396, - // candidatesTokenCount: 113, + // candidatesTokenCount: 113, (depends on response, may vary) // totalTokenCount: 323509, // cachedContentTokenCount: 323386 // } @@ -320,21 +325,26 @@ async function tokensSystemInstruction() { // Make sure to include these imports: // import { GoogleGenerativeAI } from "@google/generative-ai"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); - const model = genAI.getGenerativeModel({ + const prompt = "The quick brown fox jumps over the lazy dog."; + const modelNoInstructions = genAI.getGenerativeModel({ + model: "models/gemini-1.5-flash", + }); + + const resultNoInstructions = await modelNoInstructions.countTokens(prompt); + + console.log(resultNoInstructions); + // { totalTokens: 11 } + + const modelWithInstructions = genAI.getGenerativeModel({ model: "models/gemini-1.5-flash", systemInstruction: "You are a cat. Your name is Neko.", }); - const result = await model.countTokens( - "The quick brown fox jumps over the lazy dog.", - ); + const resultWithInstructions = + await modelWithInstructions.countTokens(prompt); - console.log(result); - // { - // totalTokens: 23, - // systemInstructionsTokens: { partTokens: [ 11 ], roleTokens: 1 }, - // contentTokens: [ { partTokens: [Array], roleTokens: 1 } ] - // } + console.log(resultWithInstructions); + // { totalTokens: 23 } // [END tokens_system_instruction] } @@ -343,6 +353,17 @@ async function tokensTools() { // Make sure to include these imports: // import { GoogleGenerativeAI } from "@google/generative-ai"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); + const prompt = + "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"; + + const modelNoTools = genAI.getGenerativeModel({ + model: "models/gemini-1.5-flash", + }); + + const resultNoTools = await modelNoTools.countTokens(prompt); + + console.log(resultNoTools); + // { totalTokens: 23 } const functionDeclarations = [ { name: "add" }, @@ -351,22 +372,15 @@ async function tokensTools() { { name: "divide" }, ]; - const model = genAI.getGenerativeModel({ + const modelWithTools = genAI.getGenerativeModel({ model: "models/gemini-1.5-flash", tools: [{ functionDeclarations }], }); - const result = await model.countTokens( - "I have 57 cats, each owns 44 mittens, how many mittens is that in total?", - ); + const resultWithTools = await modelWithTools.countTokens(prompt); - console.log(result); - // { - // totalTokens: 99, - // systemInstructionsTokens: {}, - // contentTokens: [ { partTokens: [Array], roleTokens: 1 } ], - // toolTokens: [ { functionDeclarationTokens: [Array] } ] - // } + console.log(resultWithTools); + // { totalTokens: 99 } // [END tokens_tools] } From 71905ad51000bff3c5f20104cdfcb9967d48972f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 24 Jul 2024 13:51:11 -0700 Subject: [PATCH 5/5] Version Packages (#213) Co-authored-by: github-actions[bot] --- .changeset/tough-beds-serve.md | 5 ----- packages/main/CHANGELOG.md | 6 ++++++ packages/main/package.json | 2 +- 3 files changed, 7 
insertions(+), 6 deletions(-) delete mode 100644 .changeset/tough-beds-serve.md diff --git a/.changeset/tough-beds-serve.md b/.changeset/tough-beds-serve.md deleted file mode 100644 index 2b35de938..000000000 --- a/.changeset/tough-beds-serve.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"@google/generative-ai": minor ---- - -Adds `SingleRequestOptions` with `AbortSignal` support to most of the asynchronous methods of `GenerativeModel`, `GoogleAIFileManager` and `ChatSession`. diff --git a/packages/main/CHANGELOG.md b/packages/main/CHANGELOG.md index d03f429a2..71eb80a12 100644 --- a/packages/main/CHANGELOG.md +++ b/packages/main/CHANGELOG.md @@ -1,5 +1,11 @@ # @google/generative-ai +## 0.16.0 + +### Minor Changes + +- d2d42ca: Adds `SingleRequestOptions` with `AbortSignal` support to most of the asynchronous methods of `GenerativeModel`, `GoogleAIFileManager` and `ChatSession`. + ## 0.15.0 ### Minor Changes diff --git a/packages/main/package.json b/packages/main/package.json index e815b4097..3a202b190 100644 --- a/packages/main/package.json +++ b/packages/main/package.json @@ -1,6 +1,6 @@ { "name": "@google/generative-ai", - "version": "0.15.0", + "version": "0.16.0", "description": "Google AI JavaScript SDK", "main": "dist/index.js", "module": "dist/index.mjs",