From ebe67e90b28184948690d8c4438324f21931c8d2 Mon Sep 17 00:00:00 2001
From: Lars Grammel
Date: Wed, 2 Apr 2025 17:30:30 +0200
Subject: [PATCH 0001/1307] chore (ui/solid): remove solid js support (#5511)

---
 .prettierignore | 1 -
 .../80-send-custom-body-from-use-chat.mdx | 2 +-
 .../01-navigating-the-library.mdx | 27 +-
 content/docs/02-guides/20-sonnet-3-7.mdx | 2 +-
 content/docs/02-guides/21-llama-3_1.mdx | 2 +-
 content/docs/02-guides/22-gpt-4-5.mdx | 2 +-
 content/docs/02-guides/23-o1.mdx | 2 +-
 content/docs/02-guides/24-o3.mdx | 2 +-
 content/docs/02-guides/25-r1.mdx | 2 +-
 content/docs/04-ai-sdk-ui/01-overview.mdx | 14 +-
 .../07-reference/02-ai-sdk-ui/01-use-chat.mdx | 17 +-
 .../02-ai-sdk-ui/02-use-completion.mdx | 12 +-
 .../02-ai-sdk-ui/03-use-object.mdx | 3 +-
 .../docs/07-reference/02-ai-sdk-ui/index.mdx | 14 +-
 examples/solidstart-openai/.env.example | 1 -
 examples/solidstart-openai/.gitignore | 28 -
 examples/solidstart-openai/README.md | 47 -
 examples/solidstart-openai/app.config.ts | 3 -
 examples/solidstart-openai/package.json | 31 -
 examples/solidstart-openai/postcss.config.cjs | 6 -
 examples/solidstart-openai/public/favicon.ico | Bin 664 -> 0 bytes
 examples/solidstart-openai/src/app.css | 3 -
 examples/solidstart-openai/src/app.tsx | 19 -
 .../solidstart-openai/src/entry-client.tsx | 4 -
 .../solidstart-openai/src/entry-server.tsx | 21 -
 examples/solidstart-openai/src/global.d.ts | 1 -
 .../src/routes/api/chat/index.ts | 21 -
 .../src/routes/api/completion/index.ts | 17 -
 .../src/routes/api/use-chat-request/index.ts | 24 -
 .../routes/api/use-chat-streamdata/index.ts | 39 -
 .../src/routes/api/use-chat-tools/index.ts | 46 -
 .../src/routes/api/use-chat-vision/index.ts | 31 -
 .../src/routes/api/use-object/index.ts | 19 -
 .../src/routes/api/use-object/schema.ts | 16 -
 .../src/routes/completion/index.tsx | 41 -
 .../solidstart-openai/src/routes/index.tsx | 71 -
 .../src/routes/use-chat-attachments/index.tsx | 146 --
 .../src/routes/use-chat-request/index.tsx | 80 -
 .../src/routes/use-chat-streamdata/index.tsx | 88 --
 .../src/routes/use-chat-tools/index.tsx | 208 ---
 .../src/routes/use-chat-vision/index.tsx | 39 -
 .../src/routes/use-object/index.tsx | 71 -
 .../solidstart-openai/tailwind.config.cjs | 8 -
 examples/solidstart-openai/tsconfig.json | 19 -
 packages/solid/.eslintrc.js | 7 -
 packages/solid/CHANGELOG.md | 875 -----------
 packages/solid/README.md | 9 -
 packages/solid/package.json | 79 -
 packages/solid/src/index.ts | 4 -
 packages/solid/src/package.json | 10 -
 packages/solid/src/use-assistant.ts | 288 ----
 packages/solid/src/use-assistant.ui.test.tsx | 330 ----
 packages/solid/src/use-chat.ts | 554 ------
 packages/solid/src/use-chat.ui.test.tsx | 1329 -----------------
 packages/solid/src/use-completion.ts | 199 ---
 packages/solid/src/use-completion.ui.test.tsx | 132 --
 packages/solid/src/use-object.ts | 250 ----
 packages/solid/src/use-object.ui.test.tsx | 275 ----
 .../src/utils/convert-to-accessor-options.ts | 20 -
 packages/solid/src/utils/reactive-lru.ts | 150 --
 packages/solid/tsconfig.json | 14 -
 packages/solid/tsup.config.ts | 13 -
 packages/solid/turbo.json | 12 -
 packages/solid/vitest.config.js | 22 -
 pnpm-lock.yaml | 1085 +------------
 65 files changed, 52 insertions(+), 6855 deletions(-)
 delete mode 100644 examples/solidstart-openai/.env.example
 delete mode 100644 examples/solidstart-openai/.gitignore
 delete mode 100644 examples/solidstart-openai/README.md
 delete mode 100644 examples/solidstart-openai/app.config.ts
 delete mode 100644 examples/solidstart-openai/package.json
 delete mode 100644 examples/solidstart-openai/postcss.config.cjs
 delete mode 100644 examples/solidstart-openai/public/favicon.ico
 delete mode 100644 examples/solidstart-openai/src/app.css
 delete mode 100644 examples/solidstart-openai/src/app.tsx
 delete mode 100644 examples/solidstart-openai/src/entry-client.tsx
 delete mode 100644 examples/solidstart-openai/src/entry-server.tsx
 delete mode 100644 examples/solidstart-openai/src/global.d.ts
 delete mode 100644 examples/solidstart-openai/src/routes/api/chat/index.ts
 delete mode 100644 examples/solidstart-openai/src/routes/api/completion/index.ts
 delete mode 100644 examples/solidstart-openai/src/routes/api/use-chat-request/index.ts
 delete mode 100644 examples/solidstart-openai/src/routes/api/use-chat-streamdata/index.ts
 delete mode 100644 examples/solidstart-openai/src/routes/api/use-chat-tools/index.ts
 delete mode 100644 examples/solidstart-openai/src/routes/api/use-chat-vision/index.ts
 delete mode 100644 examples/solidstart-openai/src/routes/api/use-object/index.ts
 delete mode 100644 examples/solidstart-openai/src/routes/api/use-object/schema.ts
 delete mode 100644 examples/solidstart-openai/src/routes/completion/index.tsx
 delete mode 100644 examples/solidstart-openai/src/routes/index.tsx
 delete mode 100644 examples/solidstart-openai/src/routes/use-chat-attachments/index.tsx
 delete mode 100644 examples/solidstart-openai/src/routes/use-chat-request/index.tsx
 delete mode 100644 examples/solidstart-openai/src/routes/use-chat-streamdata/index.tsx
 delete mode 100644 examples/solidstart-openai/src/routes/use-chat-tools/index.tsx
 delete mode 100644 examples/solidstart-openai/src/routes/use-chat-vision/index.tsx
 delete mode 100644 examples/solidstart-openai/src/routes/use-object/index.tsx
 delete mode 100644 examples/solidstart-openai/tailwind.config.cjs
 delete mode 100644 examples/solidstart-openai/tsconfig.json
 delete mode 100644 packages/solid/.eslintrc.js
 delete mode 100644 packages/solid/CHANGELOG.md
 delete mode 100644 packages/solid/README.md
 delete mode 100644 packages/solid/package.json
 delete mode 100644 packages/solid/src/index.ts
 delete mode 100644 packages/solid/src/package.json
 delete mode 100644 packages/solid/src/use-assistant.ts
 delete mode 100644 packages/solid/src/use-assistant.ui.test.tsx
 delete mode 100644 packages/solid/src/use-chat.ts
 delete mode 100644 packages/solid/src/use-chat.ui.test.tsx
 delete mode 100644 packages/solid/src/use-completion.ts
 delete mode 100644 packages/solid/src/use-completion.ui.test.tsx
 delete mode 100644 packages/solid/src/use-object.ts
 delete mode 100644 packages/solid/src/use-object.ui.test.tsx
 delete mode 100644 packages/solid/src/utils/convert-to-accessor-options.ts
 delete mode 100644 packages/solid/src/utils/reactive-lru.ts
 delete mode 100644 packages/solid/tsconfig.json
 delete mode 100644 packages/solid/tsup.config.ts
 delete mode 100644 packages/solid/turbo.json
 delete mode 100644 packages/solid/vitest.config.js

diff --git a/.prettierignore b/.prettierignore
index c74a64043bc3..ee60870ab39e 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -3,6 +3,5 @@
 node_modules
 dist
 .svelte-kit
-.solid
 _nuxt
 __testfixtures__
diff --git a/content/cookbook/01-next/80-send-custom-body-from-use-chat.mdx b/content/cookbook/01-next/80-send-custom-body-from-use-chat.mdx
index 0c270f36c782..85f270c95d67 100644
--- a/content/cookbook/01-next/80-send-custom-body-from-use-chat.mdx
+++ b/content/cookbook/01-next/80-send-custom-body-from-use-chat.mdx
@@ -8,7 +8,7 @@ tags: ['next', 'chat']
 
   `experimental_prepareRequestBody` is an experimental feature and only
-  available in React, Solid and Vue.
+  available in React, Svelte and Vue.
 
 By default, `useChat` sends all messages as well as information from the request to the server.
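For orientation, the pattern this cookbook page documents looks roughly like the following in React. This is an illustrative sketch, not part of the diff: the `/api/chat` path is an assumption, and the request body shape mirrors the deleted `use-chat-request` example further down in this patch.

```tsx
import { useChat } from '@ai-sdk/react';

export default function Chat() {
  const { messages, input, handleInputChange, handleSubmit } = useChat({
    api: '/api/chat',
    // Send only the latest message to the server instead of the full
    // history; the server is then responsible for loading prior messages.
    experimental_prepareRequestBody({ messages }) {
      return { message: messages[messages.length - 1] };
    },
  });

  return null; // render `messages` and an input form bound to `input` here
}
```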
diff --git a/content/docs/02-getting-started/01-navigating-the-library.mdx b/content/docs/02-getting-started/01-navigating-the-library.mdx
index ec2de45b6abf..c0d28d67d9a8 100644
--- a/content/docs/02-getting-started/01-navigating-the-library.mdx
+++ b/content/docs/02-getting-started/01-navigating-the-library.mdx
@@ -17,11 +17,11 @@ Let’s start with a quick overview of the AI SDK, which is comprised of three p
 
 When deciding which part of the AI SDK to use, your first consideration should be the environment and existing stack you are working with. Different components of the SDK are tailored to specific frameworks and environments.
 
-| Library | Purpose | Environment Compatibility |
-| --- | --- | --- |
-| [AI SDK Core](/docs/ai-sdk-core/overview) | Call any LLM with unified API (e.g. [generateText](/docs/reference/ai-sdk-core/generate-text) and [generateObject](/docs/reference/ai-sdk-core/generate-object)) | Any JS environment (e.g. Node.js, Deno, Browser) |
-| [AI SDK UI](/docs/ai-sdk-ui/overview) | Build streaming chat and generative UIs (e.g. [useChat](/docs/reference/ai-sdk-ui/use-chat)) | React & Next.js, Vue & Nuxt, Svelte & SvelteKit, Solid.js & SolidStart |
-| [AI SDK RSC](/docs/ai-sdk-rsc/overview) | Stream generative UIs from Server to Client (e.g. [streamUI](/docs/reference/ai-sdk-rsc/stream-ui)). Development is currently experimental and we recommend using [AI SDK UI](/docs/ai-sdk-ui/overview). | Any framework that supports React Server Components (e.g. Next.js) |
+| Library | Purpose | Environment Compatibility |
+| --- | --- | --- |
+| [AI SDK Core](/docs/ai-sdk-core/overview) | Call any LLM with unified API (e.g. [generateText](/docs/reference/ai-sdk-core/generate-text) and [generateObject](/docs/reference/ai-sdk-core/generate-object)) | Any JS environment (e.g. Node.js, Deno, Browser) |
+| [AI SDK UI](/docs/ai-sdk-ui/overview) | Build streaming chat and generative UIs (e.g. [useChat](/docs/reference/ai-sdk-ui/use-chat)) | React & Next.js, Vue & Nuxt, Svelte & SvelteKit |
+| [AI SDK RSC](/docs/ai-sdk-rsc/overview) | Stream generative UIs from Server to Client (e.g. [streamUI](/docs/reference/ai-sdk-rsc/stream-ui)). Development is currently experimental and we recommend using [AI SDK UI](/docs/ai-sdk-ui/overview). | Any framework that supports React Server Components (e.g. Next.js) |
 
 ## Environment Compatibility
 
@@ -34,7 +34,6 @@ The following table outlines AI SDK compatibility based on environment:
 | None / Node.js / Deno | | | |
 | Vue / Nuxt | | | |
 | Svelte / SvelteKit | | | |
-| Solid.js / SolidStart | | | |
 | Next.js Pages Router | | | |
 | Next.js App Router | | | |
 
@@ -49,15 +48,15 @@ AI SDK UI provides a set of framework-agnostic hooks for quickly building **prod
 
 ## AI SDK UI Framework Compatibility
 
-AI SDK UI supports the following frameworks: [React](https://react.dev/), [Svelte](https://svelte.dev/), [Vue.js](https://vuejs.org/), and [SolidJS](https://www.solidjs.com/). Here is a comparison of the supported functions across these frameworks:
+AI SDK UI supports the following frameworks: [React](https://react.dev/), [Svelte](https://svelte.dev/), and [Vue.js](https://vuejs.org/). Here is a comparison of the supported functions across these frameworks:
 
-| Function | React | Svelte | Vue.js | SolidJS |
-| --- | --- | --- | --- | --- |
-| [useChat](/docs/reference/ai-sdk-ui/use-chat) | | | | |
-| [useChat](/docs/reference/ai-sdk-ui/use-chat) tool calling | | | | |
-| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | | | |
-| [useObject](/docs/reference/ai-sdk-ui/use-object) | | | | |
-| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | | |
+| Function | React | Svelte | Vue.js |
+| --- | --- | --- | --- |
+| [useChat](/docs/reference/ai-sdk-ui/use-chat) | | | |
+| [useChat](/docs/reference/ai-sdk-ui/use-chat) tool calling | | | |
+| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | | |
+| [useObject](/docs/reference/ai-sdk-ui/use-object) | | | |
+| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | |
 
 [Contributions](https://github.com/vercel/ai/blob/main/CONTRIBUTING.md) are
diff --git a/content/docs/02-guides/20-sonnet-3-7.mdx b/content/docs/02-guides/20-sonnet-3-7.mdx
index 1f5c0650f3f8..5e6b4032c920 100644
--- a/content/docs/02-guides/20-sonnet-3-7.mdx
+++ b/content/docs/02-guides/20-sonnet-3-7.mdx
@@ -69,7 +69,7 @@ console.log(text); // text response
 
 ### Building Interactive Interfaces
 
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
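The guide hunks above and below all make the same point: AI SDK Core on the server pairs with AI SDK UI hooks on the client. A minimal sketch of the server half of that pairing, assuming a Next.js-style route handler and an arbitrary model choice (both assumptions, not taken from this patch):

```tsx
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

export async function POST(req: Request) {
  const { messages } = await req.json();

  // AI SDK Core: call the model and stream the result.
  const result = streamText({
    model: openai('gpt-4o'),
    messages,
  });

  // AI SDK UI hooks such as useChat consume this data stream on the client.
  return result.toDataStreamResponse();
}
```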
diff --git a/content/docs/02-guides/21-llama-3_1.mdx b/content/docs/02-guides/21-llama-3_1.mdx
index 1e78c2913ba9..e56bffd47b5c 100644
--- a/content/docs/02-guides/21-llama-3_1.mdx
+++ b/content/docs/02-guides/21-llama-3_1.mdx
@@ -186,7 +186,7 @@ In this example, the agent can use the calculator tool multiple times if needed,
 
 ### Building Interactive Interfaces
 
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
diff --git a/content/docs/02-guides/22-gpt-4-5.mdx b/content/docs/02-guides/22-gpt-4-5.mdx
index db23cfed35dc..76958f923047 100644
--- a/content/docs/02-guides/22-gpt-4-5.mdx
+++ b/content/docs/02-guides/22-gpt-4-5.mdx
@@ -103,7 +103,7 @@ In this example, the `getWeather` tool allows the model to fetch real-time weath
 
 ### Building Interactive Interfaces
 
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
diff --git a/content/docs/02-guides/23-o1.mdx b/content/docs/02-guides/23-o1.mdx
index 675ea86997ae..d98e569d8ae2 100644
--- a/content/docs/02-guides/23-o1.mdx
+++ b/content/docs/02-guides/23-o1.mdx
@@ -174,7 +174,7 @@ In this example, the `getWeather` tool allows the model to fetch real-time weath
 
 ### Building Interactive Interfaces
 
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
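The GPT-4.5 and o1 hunk headers above reference a `getWeather` tool. A hedged sketch of what such a tool looks like with AI SDK Core, adapted from the `use-chat-tools` route deleted later in this patch (the randomized weather value is a stub, not a real data source):

```tsx
import { openai } from '@ai-sdk/openai';
import { streamText, tool } from 'ai';
import { z } from 'zod';

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'What is the weather in Berlin?',
  maxSteps: 5, // allow a follow-up step that uses the tool result
  tools: {
    getWeather: tool({
      description: 'Show the weather in a given city to the user.',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => {
        // Stub: pick a random condition instead of calling a weather API.
        const options = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy'];
        return {
          city,
          weather: options[Math.floor(Math.random() * options.length)],
        };
      },
    }),
  },
});
```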
diff --git a/content/docs/02-guides/24-o3.mdx b/content/docs/02-guides/24-o3.mdx
index a8eba54d350d..12762bc2bd08 100644
--- a/content/docs/02-guides/24-o3.mdx
+++ b/content/docs/02-guides/24-o3.mdx
@@ -143,7 +143,7 @@ In this example, the `getWeather` tool allows the model to fetch real-time weath
 
 ### Building Interactive Interfaces
 
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
diff --git a/content/docs/02-guides/25-r1.mdx b/content/docs/02-guides/25-r1.mdx
index 6ccafaa089d1..a4ca99eff4ff 100644
--- a/content/docs/02-guides/25-r1.mdx
+++ b/content/docs/02-guides/25-r1.mdx
@@ -125,7 +125,7 @@ You can use DeepSeek R1 with the AI SDK through various providers. Here's a comp
 
 ### Building Interactive Interfaces
 
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
diff --git a/content/docs/04-ai-sdk-ui/01-overview.mdx b/content/docs/04-ai-sdk-ui/01-overview.mdx
index 3f76066ea0ac..ed42daa2daaf 100644
--- a/content/docs/04-ai-sdk-ui/01-overview.mdx
+++ b/content/docs/04-ai-sdk-ui/01-overview.mdx
@@ -18,15 +18,15 @@ These hooks are designed to reduce the complexity and time required to implement
 
 ## UI Framework Support
 
-AI SDK UI supports the following frameworks: [React](https://react.dev/), [Svelte](https://svelte.dev/), [Vue.js](https://vuejs.org/), and [SolidJS](https://www.solidjs.com/) (deprecated).
+AI SDK UI supports the following frameworks: [React](https://react.dev/), [Svelte](https://svelte.dev/), and [Vue.js](https://vuejs.org/).
 Here is a comparison of the supported functions across these frameworks:
 
-| Function | React | Svelte | Vue.js | SolidJS (deprecated) |
-| --- | --- | --- | --- | --- |
-| [useChat](/docs/reference/ai-sdk-ui/use-chat) | | Chat | | |
-| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | Completion | | |
-| [useObject](/docs/reference/ai-sdk-ui/use-object) | | StructuredObject | | |
-| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | | |
+| Function | React | Svelte | Vue.js |
+| --- | --- | --- | --- |
+| [useChat](/docs/reference/ai-sdk-ui/use-chat) | | Chat | |
+| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | Completion | |
+| [useObject](/docs/reference/ai-sdk-ui/use-object) | | StructuredObject | |
+| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | |
 
 [Contributions](https://github.com/vercel/ai/blob/main/CONTRIBUTING.md) are
diff --git a/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx b/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx
index da64eb04b609..b9718c725c6a 100644
--- a/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx
+++ b/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx
@@ -9,7 +9,7 @@ Allows you to easily create a conversational user interface for your chatbot app
 
 ## Import
 
-
+
 
-
+
 
-
-
-
 
 ## API Signature
 
@@ -196,7 +185,7 @@ Allows you to easily create a conversational user interface for your chatbot app
       type: '(options: { messages: UIMessage[]; requestData?: JSONValue; requestBody?: object, id: string }) => unknown',
       isOptional: true,
       description:
-        'Experimental (React, Solid & Vue only). When a function is provided, it will be used to prepare the request body for the chat API. This can be useful for customizing the request body based on the messages and data in the chat.',
+        'Experimental. When a function is provided, it will be used to prepare the request body for the chat API. This can be useful for customizing the request body based on the messages and data in the chat.',
     },
     {
       name: 'experimental_throttle',
diff --git a/content/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx b/content/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx
index a9d3a77e7002..acf196033a11 100644
--- a/content/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx
+++ b/content/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx
@@ -9,7 +9,7 @@ Allows you to create text completion based capabilities for your application. It
 
 ## Import
 
-
+
 
@@ -31,13 +31,7 @@ Allows you to create text completion based capabilities for your application. It
   prompt={false}
 />
 
-
-
-
+
 
 ## API Signature
 
diff --git a/content/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx b/content/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx
index 28dcc9b72e30..a47a6b0f5b20 100644
--- a/content/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx
+++ b/content/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx
@@ -6,8 +6,7 @@ description: API reference for the useObject hook.
 
 # `experimental_useObject()`
 
-  `useObject` is an experimental feature and only available in React and
-  SolidJS.
+  `useObject` is an experimental feature and only available in React and Svelte.
 
 Allows you to consume text streams that represent a JSON object and parse them into a complete object based on a schema.
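Since the Solid `useObject` example is removed below, the equivalent React usage is sketched here for reference; the schema and the `/api/use-object` path are copied from the deleted example's own files, while the component shell is illustrative:

```tsx
import { experimental_useObject as useObject } from '@ai-sdk/react';
import { z } from 'zod';

// Schema from the deleted examples/solidstart-openai use-object/schema.ts.
const notificationSchema = z.object({
  notifications: z.array(
    z.object({
      name: z.string().describe('Name of a fictional person.'),
      message: z.string().describe('Message. Do not use emojis or links.'),
      minutesAgo: z.number(),
    }),
  ),
});

export default function Page() {
  const { submit, object, isLoading } = useObject({
    api: '/api/use-object',
    schema: notificationSchema,
  });

  // submit('Messages during finals week.') streams partial objects into `object`.
  return null; // render object?.notifications as they stream in
}
```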
diff --git a/content/docs/07-reference/02-ai-sdk-ui/index.mdx b/content/docs/07-reference/02-ai-sdk-ui/index.mdx
index 0d919dc66d7a..b28a073d885e 100644
--- a/content/docs/07-reference/02-ai-sdk-ui/index.mdx
+++ b/content/docs/07-reference/02-ai-sdk-ui/index.mdx
@@ -93,15 +93,15 @@ It also contains the following helper functions:
 
 ## UI Framework Support
 
-AI SDK UI supports the following frameworks: [React](https://react.dev/), [Svelte](https://svelte.dev/), [Vue.js](https://vuejs.org/), and [SolidJS](https://www.solidjs.com/) (deprecated).
+AI SDK UI supports the following frameworks: [React](https://react.dev/), [Svelte](https://svelte.dev/), and [Vue.js](https://vuejs.org/).
 
 Here is a comparison of the supported functions across these frameworks:
 
-| Function | React | Svelte | Vue.js | SolidJS (deprecated) |
-| --- | --- | --- | --- | --- |
-| [useChat](/docs/reference/ai-sdk-ui/use-chat) | | Chat | | |
-| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | Completion | | |
-| [useObject](/docs/reference/ai-sdk-ui/use-object) | | StructuredObject | | |
-| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | | |
+| Function | React | Svelte | Vue.js |
+| --- | --- | --- | --- |
+| [useChat](/docs/reference/ai-sdk-ui/use-chat) | | Chat | |
+| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | Completion | |
+| [useObject](/docs/reference/ai-sdk-ui/use-object) | | StructuredObject | |
+| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | |
 
 [Contributions](https://github.com/vercel/ai/blob/main/CONTRIBUTING.md) are
diff --git a/examples/solidstart-openai/.env.example b/examples/solidstart-openai/.env.example
deleted file mode 100644
index a180c7596809..000000000000
--- a/examples/solidstart-openai/.env.example
+++ /dev/null
@@ -1 +0,0 @@
-OPENAI_API_KEY=xxxxxxx
diff --git a/examples/solidstart-openai/.gitignore b/examples/solidstart-openai/.gitignore
deleted file mode 100644
index ff04496935f1..000000000000
--- a/examples/solidstart-openai/.gitignore
+++ /dev/null
@@ -1,28 +0,0 @@
-
-dist
-.solid
-.output
-.vercel
-.netlify
-.vinxi
-
-# Environment
-.env
-.env*.local
-
-# dependencies
-/node_modules
-
-# IDEs and editors
-/.idea
-.project
-.classpath
-*.launch
-.settings/
-
-# Temp
-gitignore
-
-# System Files
-.DS_Store
-Thumbs.db
diff --git a/examples/solidstart-openai/README.md b/examples/solidstart-openai/README.md
deleted file mode 100644
index 427d8f67d37d..000000000000
--- a/examples/solidstart-openai/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# AI SDK, Solid.js, SolidStart and OpenAI Chat Example
-
-This example shows how to use the [AI SDK](https://sdk.vercel.ai/docs) with [Solid](https://solidjs.com/), [SolidStart](https://start.solidjs.com), and [OpenAI](https://openai.com) to create a ChatGPT-like AI-powered streaming chat bot.
-
-## Deploy your own
-
-Deploy the example using [Vercel](https://vercel.com?utm_source=github&utm_medium=readme&utm_campaign=ai-sdk-example):
-
-[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fvercel%2Fai%2Ftree%2Fmain%2Fexamples%2Fsolidstart-openai&env=OPENAI_API_KEY&envDescription=OpenAI%20API%20Key&envLink=https%3A%2F%2Fplatform.openai.com%2Faccount%2Fapi-keys&project-name=ai-chat&repository-name=solid-ai-chat)
-
-## How to use
-
-## Creating a project
-
-```bash
-# create a new project in the current directory
-npm init solid@latest
-
-# create a new project in my-app
-npm init solid@latest my-app
-```
-
-## Developing
-
-Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server:
-
-```bash
-npm run dev
-
-# or start the server and open the app in a new browser tab
-npm run dev -- --open
-```
-
-## Building
-
-Solid apps are built with _adapters_, which optimise your project for deployment to different environments.
-
-By default, `npm run build` will generate a Node app that you can run with `npm start`. To use a different adapter, add it to the `devDependencies` in `package.json` and specify in your `vite.config.js`.
-
-## Learn More
-
-To learn more about OpenAI, SolidStart, and the AI SDK take a look at the following resources:
-
-- [AI SDK docs](https://sdk.vercel.ai/docs) - learn more about the AI SDK
-- [Vercel AI Playground](https://play.vercel.ai) - compare and tune 20+ AI models side-by-side
-- [OpenAI Documentation](https://platform.openai.com/docs) - learn about OpenAI features and API.
-- [SolidStart Documentation](https://start.solidjs.com) - learn about SolidStart.
diff --git a/examples/solidstart-openai/app.config.ts b/examples/solidstart-openai/app.config.ts
deleted file mode 100644
index b3c737efe5ba..000000000000
--- a/examples/solidstart-openai/app.config.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-import { defineConfig } from '@solidjs/start/config';
-
-export default defineConfig({});
diff --git a/examples/solidstart-openai/package.json b/examples/solidstart-openai/package.json
deleted file mode 100644
index f364905d9791..000000000000
--- a/examples/solidstart-openai/package.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-  "name": "solidstart-openai",
-  "private": true,
-  "type": "module",
-  "scripts": {
-    "dev": "vinxi dev",
-    "build": "vinxi build",
-    "start": "vinxi start"
-  },
-  "devDependencies": {
-    "autoprefixer": "^10.4.19",
-    "postcss": "^8.4.49",
-    "tailwindcss": "^3.4.15",
-    "vinxi": "^0.4.3"
-  },
-  "dependencies": {
-    "@ai-sdk/openai": "1.3.6",
-    "@ai-sdk/solid": "1.2.6",
-    "@ai-sdk/ui-utils": "1.2.4",
-    "@solidjs/meta": "0.29.4",
-    "@solidjs/router": "^0.15.1",
-    "@solidjs/start": "^1.0.10",
-    "ai": "4.2.10",
-    "solid-js": "^1.9.3",
-    "zod": "^3.23.8"
-  },
-  "engines": {
-    "node": ">=18"
-  },
-  "version": "0.0.0"
-}
diff --git a/examples/solidstart-openai/postcss.config.cjs b/examples/solidstart-openai/postcss.config.cjs
deleted file mode 100644
index 33ad091d26d8..000000000000
--- a/examples/solidstart-openai/postcss.config.cjs
+++ /dev/null
@@ -1,6 +0,0 @@
-module.exports = {
-  plugins: {
-    tailwindcss: {},
-    autoprefixer: {},
-  },
-}
diff --git a/examples/solidstart-openai/public/favicon.ico b/examples/solidstart-openai/public/favicon.ico
deleted file mode 100644
index fb282da0719ef6ab4c1732df93be6216b0d85520..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 664
zcmV;J0%!e+P)m9ebk1R
zejT~~6f_`?;`cEd!+`7(hw@%%2;?RN8gX-L?z6cM( zKoG@&w+0}f@Pfvwc+deid)qgE!L$ENKYjViZC_Zcr>L(`2oXUT8f0mRQ(6-=HN_Ai zeBBEz3WP+1Cw`m!49Wf!MnZzp5bH8VkR~BcJ1s-j90TAS2Yo4j!J|KodxYR%3Numw zA?gq6e`5@!W~F$_De3yt&uspo&2yLb$(NwcPPI-4LGc!}HdY%jfq@AFs8LiZ4k(p} zZ!c9o+qbWYs-Mg zgdyTALzJX&7QXHdI_DPTFL33;w}88{e6Zk)MX0kN{3DX9uz#O_L58&XRH$Nvvu;fO zf&)7@?C~$z1K<>j0ga$$MIg+5xN;eQ?1-CA=`^Y169@Ab6!vcaNP=hxfKN%@Ly^R* zK1iv*s1Yl6_dVyz8>ZqYhz6J4|3fQ@2LQeX@^%W(B~8>=MoEmBEGGD1;gHXlpX>!W ym)!leA2L@`cpb^hy)P75=I!`pBYxP7<2VfQ3j76qLgzIA0000 ( - <> - {props.children} - - )} - > - - - ); -} diff --git a/examples/solidstart-openai/src/entry-client.tsx b/examples/solidstart-openai/src/entry-client.tsx deleted file mode 100644 index 7dac7a59f16b..000000000000 --- a/examples/solidstart-openai/src/entry-client.tsx +++ /dev/null @@ -1,4 +0,0 @@ -// @refresh reload -import { mount, StartClient } from '@solidjs/start/client'; - -mount(() => , document.getElementById('app')!); diff --git a/examples/solidstart-openai/src/entry-server.tsx b/examples/solidstart-openai/src/entry-server.tsx deleted file mode 100644 index c10a6f0f6c41..000000000000 --- a/examples/solidstart-openai/src/entry-server.tsx +++ /dev/null @@ -1,21 +0,0 @@ -// @refresh reload -import { createHandler, StartServer } from '@solidjs/start/server'; - -export default createHandler(() => ( - ( - - - - - - {assets} - - -
{children}
- {scripts} - - - )} - /> -)); diff --git a/examples/solidstart-openai/src/global.d.ts b/examples/solidstart-openai/src/global.d.ts deleted file mode 100644 index dc6f10c226c0..000000000000 --- a/examples/solidstart-openai/src/global.d.ts +++ /dev/null @@ -1 +0,0 @@ -/// diff --git a/examples/solidstart-openai/src/routes/api/chat/index.ts b/examples/solidstart-openai/src/routes/api/chat/index.ts deleted file mode 100644 index da5490c56e3c..000000000000 --- a/examples/solidstart-openai/src/routes/api/chat/index.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { openai } from '@ai-sdk/openai'; -import { APIEvent } from '@solidjs/start/server'; -import { streamText } from 'ai'; - -export const POST = async (event: APIEvent) => { - // Extract the `messages` from the body of the request - const { messages } = await event.request.json(); - - // Call the language model - const result = streamText({ - model: openai('gpt-4-turbo'), - messages, - async onFinish({ text, toolCalls, toolResults, usage, finishReason }) { - // implement your own logic here, e.g. for storing messages - // or recording token usage - }, - }); - - // Respond with the stream - return result.toDataStreamResponse(); -}; diff --git a/examples/solidstart-openai/src/routes/api/completion/index.ts b/examples/solidstart-openai/src/routes/api/completion/index.ts deleted file mode 100644 index 8702a54695d4..000000000000 --- a/examples/solidstart-openai/src/routes/api/completion/index.ts +++ /dev/null @@ -1,17 +0,0 @@ -import { openai } from '@ai-sdk/openai'; -import { APIEvent } from '@solidjs/start/server'; -import { streamText } from 'ai'; - -export const POST = async (event: APIEvent) => { - // Extract the `prompt` from the body of the request - const { prompt } = await event.request.json(); - - // Ask OpenAI for a streaming chat completion given the prompt - const result = streamText({ - model: openai('gpt-4o'), - prompt, - }); - - // Respond with the stream - return result.toDataStreamResponse(); -}; diff --git a/examples/solidstart-openai/src/routes/api/use-chat-request/index.ts b/examples/solidstart-openai/src/routes/api/use-chat-request/index.ts deleted file mode 100644 index 4847f9755bbd..000000000000 --- a/examples/solidstart-openai/src/routes/api/use-chat-request/index.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { openai } from '@ai-sdk/openai'; -import { streamText, Message } from 'ai'; -import { APIEvent } from '@solidjs/start/server'; - -export const POST = async (event: APIEvent) => { - // Extract the `messages` from the body of the request - const { message } = await event.request.json(); - - // Implement your own logic here to add message history - const previousMessages: Message[] = []; - const messages = [...previousMessages, message]; - - // Call the language model - const result = streamText({ - model: openai('gpt-4o-mini'), - messages, - async onFinish({ text, toolCalls, toolResults, usage, finishReason }) { - // Implement your own logic here, e.g. 
for storing messages - }, - }); - - // Respond with the stream - return result.toDataStreamResponse(); -}; diff --git a/examples/solidstart-openai/src/routes/api/use-chat-streamdata/index.ts b/examples/solidstart-openai/src/routes/api/use-chat-streamdata/index.ts deleted file mode 100644 index b01632e964a1..000000000000 --- a/examples/solidstart-openai/src/routes/api/use-chat-streamdata/index.ts +++ /dev/null @@ -1,39 +0,0 @@ -import { openai } from '@ai-sdk/openai'; -import { APIEvent } from '@solidjs/start/server'; -import { createDataStreamResponse, generateId, streamText } from 'ai'; - -export const POST = async (event: APIEvent) => { - const { messages } = await event.request.json(); - - // immediately start streaming (solves RAG issues with status, etc.) - return createDataStreamResponse({ - execute: dataStream => { - dataStream.writeData('initialized call'); - - const result = streamText({ - model: openai('gpt-4o'), - messages, - onChunk() { - dataStream.writeMessageAnnotation({ chunk: '123' }); - }, - onFinish() { - // message annotation: - dataStream.writeMessageAnnotation({ - id: generateId(), // e.g. id from saved DB record - other: 'information', - }); - - // call annotation: - dataStream.writeData('call completed'); - }, - }); - - result.mergeIntoDataStream(dataStream); - }, - onError: error => { - // Error messages are masked by default for security reasons. - // If you want to expose the error message to the client, you can do so here: - return error instanceof Error ? error.message : String(error); - }, - }); -}; diff --git a/examples/solidstart-openai/src/routes/api/use-chat-tools/index.ts b/examples/solidstart-openai/src/routes/api/use-chat-tools/index.ts deleted file mode 100644 index 65be7b857ec3..000000000000 --- a/examples/solidstart-openai/src/routes/api/use-chat-tools/index.ts +++ /dev/null @@ -1,46 +0,0 @@ -import { openai } from '@ai-sdk/openai'; -import { APIEvent } from '@solidjs/start/server'; -import { streamText } from 'ai'; -import { z } from 'zod'; - -export const POST = async (event: APIEvent) => { - const { messages } = await event.request.json(); - - const result = streamText({ - model: openai('gpt-4o'), - messages, - toolCallStreaming: true, - maxSteps: 5, // multi-steps for server-side tools - tools: { - // server-side tool with execute function: - getWeatherInformation: { - description: 'show the weather in a given city to the user', - parameters: z.object({ city: z.string() }), - execute: async ({ city }: { city: string }) => { - // Add artificial delay of 2 seconds - await new Promise(resolve => setTimeout(resolve, 2000)); - - const weatherOptions = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy']; - return weatherOptions[ - Math.floor(Math.random() * weatherOptions.length) - ]; - }, - }, - // client-side tool that starts user interaction: - askForConfirmation: { - description: 'Ask the user for confirmation.', - parameters: z.object({ - message: z.string().describe('The message to ask for confirmation.'), - }), - }, - // client-side tool that is automatically executed on the client: - getLocation: { - description: - 'Get the user location. 
Always ask for confirmation before using this tool.', - parameters: z.object({}), - }, - }, - }); - - return result.toDataStreamResponse(); -}; diff --git a/examples/solidstart-openai/src/routes/api/use-chat-vision/index.ts b/examples/solidstart-openai/src/routes/api/use-chat-vision/index.ts deleted file mode 100644 index f280a3a3a404..000000000000 --- a/examples/solidstart-openai/src/routes/api/use-chat-vision/index.ts +++ /dev/null @@ -1,31 +0,0 @@ -import { openai } from '@ai-sdk/openai'; -import { APIEvent } from '@solidjs/start/server'; -import { streamText } from 'ai'; - -export const POST = async (event: APIEvent) => { - // 'data' contains the additional data that you have sent: - const { messages, data } = await event.request.json(); - - const initialMessages = messages.slice(0, -1); - const currentMessage = messages[messages.length - 1]; - - const result = streamText({ - model: openai('gpt-4o-mini'), - messages: [ - ...initialMessages, - { - ...currentMessage, - content: [ - { type: 'text', text: currentMessage.content }, - { - type: 'image', - image: new URL(data.imageUrl), - }, - ], - }, - ], - }); - - // Respond with the stream - return result.toDataStreamResponse(); -}; diff --git a/examples/solidstart-openai/src/routes/api/use-object/index.ts b/examples/solidstart-openai/src/routes/api/use-object/index.ts deleted file mode 100644 index a6eeba077dbe..000000000000 --- a/examples/solidstart-openai/src/routes/api/use-object/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -import { openai } from '@ai-sdk/openai'; -import { streamObject } from 'ai'; -import { notificationSchema } from './schema'; -import { APIHandler } from '@solidjs/start/server'; - -// Allow streaming responses up to 30 seconds -export const maxDuration = 30; - -export const POST: APIHandler = async ({ request }) => { - const context = await request.json(); - - const result = streamObject({ - model: openai('gpt-4o'), - prompt: `Generate 3 notifications for a messages app in this context: ${context}`, - schema: notificationSchema, - }); - - return result.toTextStreamResponse(); -}; diff --git a/examples/solidstart-openai/src/routes/api/use-object/schema.ts b/examples/solidstart-openai/src/routes/api/use-object/schema.ts deleted file mode 100644 index e63dc963f8cf..000000000000 --- a/examples/solidstart-openai/src/routes/api/use-object/schema.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { DeepPartial } from 'ai'; -import { z } from 'zod'; - -// define a schema for the notifications -export const notificationSchema = z.object({ - notifications: z.array( - z.object({ - name: z.string().describe('Name of a fictional person.'), - message: z.string().describe('Message. Do not use emojis or links.'), - minutesAgo: z.number(), - }), - ), -}); - -// define a type for the partial notifications during generation -export type PartialNotification = DeepPartial; diff --git a/examples/solidstart-openai/src/routes/completion/index.tsx b/examples/solidstart-openai/src/routes/completion/index.tsx deleted file mode 100644 index 19263bb1149f..000000000000 --- a/examples/solidstart-openai/src/routes/completion/index.tsx +++ /dev/null @@ -1,41 +0,0 @@ -import { useCompletion } from '@ai-sdk/solid'; -import { JSX } from 'solid-js'; - -export default function Chat() { - const { completion, input, setInput, handleSubmit, error, data } = - useCompletion(); - - const handleInputChange: JSX.ChangeEventHandlerUnion< - HTMLInputElement, - Event - > = e => { - setInput(e.target.value); - }; - - return ( -
- {data() && ( -
-          {JSON.stringify(data(), null, 2)}
-        
- )} - - {error() && ( -
- {error()?.message} -
- )} - - {completion()} - -
- -
-
- ); -} diff --git a/examples/solidstart-openai/src/routes/index.tsx b/examples/solidstart-openai/src/routes/index.tsx deleted file mode 100644 index 334ac1fccfeb..000000000000 --- a/examples/solidstart-openai/src/routes/index.tsx +++ /dev/null @@ -1,71 +0,0 @@ -import { For, Show } from 'solid-js'; -import { useChat } from '@ai-sdk/solid'; - -export default function Chat() { - const { - error, - input, - status, - handleInputChange, - handleSubmit, - messages, - reload, - stop, - } = useChat({ - onFinish(message, { usage, finishReason }) { - console.log('Usage', usage); - console.log('FinishReason', finishReason); - }, - }); - - return ( -
- - {m => ( -
- {m.role === 'user' ? 'User: ' : 'AI: '} - {m.content} -
- )} -
- - -
- -
Loading...
-
- -
-
- - -
-
An error occurred.
- -
-
- -
- -
-
- ); -} diff --git a/examples/solidstart-openai/src/routes/use-chat-attachments/index.tsx b/examples/solidstart-openai/src/routes/use-chat-attachments/index.tsx deleted file mode 100644 index cf07481a35c9..000000000000 --- a/examples/solidstart-openai/src/routes/use-chat-attachments/index.tsx +++ /dev/null @@ -1,146 +0,0 @@ -/* eslint-disable @next/next/no-img-element */ -import { For, Show, createSignal } from 'solid-js'; -import { useChat } from '@ai-sdk/solid'; -import { getTextFromDataUrl } from '@ai-sdk/ui-utils'; - -export default function Chat() { - const [files, setFiles] = createSignal(undefined); - let fileInputRef: HTMLInputElement | undefined; - - const { - error, - input, - status, - handleInputChange, - handleSubmit, - messages, - reload, - stop, - } = useChat({ - onFinish(message, { usage, finishReason }) { - console.log('Usage', usage); - console.log('FinishReason', finishReason); - }, - }); - - const onSend = async (e: Event) => { - e.preventDefault(); - handleSubmit(e, { - experimental_attachments: files(), - }); - - setFiles(undefined); - if (fileInputRef) { - fileInputRef.value = ''; - } - }; - - return ( -
-
- - {message => ( -
-
- {`${message.role}: `} -
- -
- {message.content} - -
- - {(attachment, index) => - attachment.contentType?.includes('image/') ? ( - {attachment.name} - ) : attachment.contentType?.includes('text/') ? ( -
- {getTextFromDataUrl(attachment.url)} -
- ) : null - } -
-
-
-
- )} -
-
- - -
- -
Loading...
-
- -
-
- - {error() && ( -
-
An error occurred.
- -
- )} - -
-
- - {attachment => { - const type = attachment.type; - return type.startsWith('image/') ? ( -
- {attachment.name} - {attachment.name} -
- ) : type.startsWith('text/') ? ( -
-
- {attachment.name} -
- ) : null; - }} - -
- - setFiles(e.currentTarget.files ?? undefined)} - multiple - ref={fileInputRef} - /> - - - -
- ); -} diff --git a/examples/solidstart-openai/src/routes/use-chat-request/index.tsx b/examples/solidstart-openai/src/routes/use-chat-request/index.tsx deleted file mode 100644 index 6489843ac629..000000000000 --- a/examples/solidstart-openai/src/routes/use-chat-request/index.tsx +++ /dev/null @@ -1,80 +0,0 @@ -/* eslint-disable @next/next/no-img-element */ -import { For, Show } from 'solid-js'; -import { useChat } from '@ai-sdk/solid'; -import { createIdGenerator } from 'ai'; - -export default function Chat() { - const { - input, - messages, - handleInputChange, - handleSubmit, - status, - error, - stop, - reload, - } = useChat({ - api: '/api/use-chat-request', - sendExtraMessageFields: true, - generateId: createIdGenerator({ prefix: 'msgc', size: 16 }), - - experimental_prepareRequestBody({ messages }) { - return { - message: messages[messages.length - 1], - }; - }, - }); - - return ( -
-
- - {message => ( -
- {message.role === 'user' ? 'User: ' : 'AI: '} - {message.content} -
- )} -
-
- - -
- -
Loading...
-
- -
-
- - {error() && ( -
-
An error occurred.
- -
- )} - -
- -
-
- ); -} diff --git a/examples/solidstart-openai/src/routes/use-chat-streamdata/index.tsx b/examples/solidstart-openai/src/routes/use-chat-streamdata/index.tsx deleted file mode 100644 index 1a4b318ed559..000000000000 --- a/examples/solidstart-openai/src/routes/use-chat-streamdata/index.tsx +++ /dev/null @@ -1,88 +0,0 @@ -import { For, Show } from 'solid-js'; -import { useChat } from '@ai-sdk/solid'; - -export default function Chat() { - const { - messages, - input, - handleInputChange, - handleSubmit, - data, - setData, - status, - error, - stop, - reload, - } = useChat({ api: '/api/use-chat-streamdata' }); - - return ( -
- -
-          {JSON.stringify(data(), null, 2)}
-        
- -
- - - {m => ( -
- {`${m.role}: `} - {m.content} -
- - Annotations: -
-                {JSON.stringify(m.annotations, null, 2)}
-              
-
-
-
-
- )} -
- - -
- -
Loading...
-
- -
-
- - -
-
An error occurred.
- -
-
- -
- -
-
- ); -} diff --git a/examples/solidstart-openai/src/routes/use-chat-tools/index.tsx b/examples/solidstart-openai/src/routes/use-chat-tools/index.tsx deleted file mode 100644 index a63e166338dc..000000000000 --- a/examples/solidstart-openai/src/routes/use-chat-tools/index.tsx +++ /dev/null @@ -1,208 +0,0 @@ -/* eslint-disable react/jsx-key */ -import { useChat } from '@ai-sdk/solid'; -import { TextUIPart, ToolInvocationUIPart } from '@ai-sdk/ui-utils'; -import { For, Show } from 'solid-js'; - -export default function Chat() { - const { messages, input, handleInputChange, handleSubmit, addToolResult } = - useChat({ - api: '/api/use-chat-tools', - maxSteps: 5, - - // run client-side tools that are automatically executed: - async onToolCall({ toolCall }) { - if (toolCall.toolName === 'getLocation') { - const cities = [ - 'New York', - 'Los Angeles', - 'Chicago', - 'San Francisco', - ]; - return cities[Math.floor(Math.random() * cities.length)]; - } - }, - }); - - return ( -
- - {message => ( -
- {`${message.role}: `} - - {part => ( - <> - - {(part as TextUIPart).text} - - - { - <> - - -
- { - (part as ToolInvocationUIPart).toolInvocation - .args.message - } -
- - -
-
-
- -
- Location access allowed:{' '} - {(part as any).toolInvocation.result} -
-
-
- - - -
Getting location...
-
- -
- Location: {(part as any).toolInvocation.result} -
-
-
- - - -
-                              {JSON.stringify(
-                                (part as ToolInvocationUIPart).toolInvocation,
-                                null,
-                                2,
-                              )}
-                            
-
- -
- Getting weather information for{' '} - { - (part as ToolInvocationUIPart).toolInvocation - .args.city - } - ... -
-
- - -
- Weather in{' '} - {(part as any).toolInvocation.args.city}:{' '} - {(part as any).toolInvocation.result} -
-
-
- - } -
- - )} -
-
-
- )} -
- -
- -
-
- ); -} diff --git a/examples/solidstart-openai/src/routes/use-chat-vision/index.tsx b/examples/solidstart-openai/src/routes/use-chat-vision/index.tsx deleted file mode 100644 index c9c77316fdf3..000000000000 --- a/examples/solidstart-openai/src/routes/use-chat-vision/index.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import { useChat } from '@ai-sdk/solid'; -import { For } from 'solid-js'; - -export default function Chat() { - const { messages, input, handleInputChange, handleSubmit } = useChat(() => ({ - api: '/api/use-chat-vision', - })); - - return ( -
- - {m => ( -
- {m.role === 'user' ? 'User: ' : 'AI: '} - {m.content} -
- )} -
- -
- handleSubmit(e, { - data: { - imageUrl: - 'https://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Field_sparrow_in_CP_%2841484%29_%28cropped%29.jpg/733px-Field_sparrow_in_CP_%2841484%29_%28cropped%29.jpg', - }, - }) - } - > - -
-
- ); -} diff --git a/examples/solidstart-openai/src/routes/use-object/index.tsx b/examples/solidstart-openai/src/routes/use-object/index.tsx deleted file mode 100644 index ca38c97555d4..000000000000 --- a/examples/solidstart-openai/src/routes/use-object/index.tsx +++ /dev/null @@ -1,71 +0,0 @@ -import { experimental_useObject as useObject } from '@ai-sdk/solid'; -import { notificationSchema } from '../api/use-object/schema'; -import { For, Show } from 'solid-js'; - -export default function Page() { - const { submit, isLoading, object, stop, error } = useObject({ - api: '/api/use-object', - schema: notificationSchema, - }); - - return ( -
- - - - {error => ( -
- An error occurred. {error()?.message} -
- )} -
- - -
-
Loading...
- -
-
- -
- - {(notification, index) => ( -
-
-
-

- {notification?.name} -

-

- {notification?.minutesAgo} - {notification?.minutesAgo != null ? ' minutes ago' : ''} -

-
-

- {notification?.message} -

-
-
- )} -
-
-
- ); -} diff --git a/examples/solidstart-openai/tailwind.config.cjs b/examples/solidstart-openai/tailwind.config.cjs deleted file mode 100644 index 14d3e24e3ba5..000000000000 --- a/examples/solidstart-openai/tailwind.config.cjs +++ /dev/null @@ -1,8 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: ["./src/**/*.{html,js,jsx,ts,tsx}"], - theme: { - extend: {} - }, - plugins: [] -}; diff --git a/examples/solidstart-openai/tsconfig.json b/examples/solidstart-openai/tsconfig.json deleted file mode 100644 index 3ad477f77c1e..000000000000 --- a/examples/solidstart-openai/tsconfig.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "compilerOptions": { - "target": "ESNext", - "module": "ESNext", - "moduleResolution": "bundler", - "allowSyntheticDefaultImports": true, - "esModuleInterop": true, - "jsx": "preserve", - "jsxImportSource": "solid-js", - "allowJs": true, - "noEmit": true, - "strict": true, - "types": ["vinxi/types/client"], - "isolatedModules": true, - "paths": { - "~/*": ["./src/*"] - } - } -} diff --git a/packages/solid/.eslintrc.js b/packages/solid/.eslintrc.js deleted file mode 100644 index df54bdc52cd0..000000000000 --- a/packages/solid/.eslintrc.js +++ /dev/null @@ -1,7 +0,0 @@ -module.exports = { - root: true, - extends: ['vercel-ai'], - rules: { - 'react-hooks/rules-of-hooks': 'off', - }, -}; diff --git a/packages/solid/CHANGELOG.md b/packages/solid/CHANGELOG.md deleted file mode 100644 index 0e72353d950d..000000000000 --- a/packages/solid/CHANGELOG.md +++ /dev/null @@ -1,875 +0,0 @@ -# @ai-sdk/solid - -## 1.2.6 - -### Patch Changes - -- a043b14: fix (ui): prevent early addToolResult submission -- Updated dependencies [28be004] - - @ai-sdk/provider-utils@2.2.3 - - @ai-sdk/ui-utils@1.2.4 - -## 1.2.5 - -### Patch Changes - -- Updated dependencies [b01120e] - - @ai-sdk/provider-utils@2.2.2 - - @ai-sdk/ui-utils@1.2.3 - -## 1.2.4 - -### Patch Changes - -- Updated dependencies [65243ce] - - @ai-sdk/ui-utils@1.2.2 - -## 1.2.3 - -### Patch Changes - -- d92fa29: feat: add credentials support to experimental useObject and StructuredObject - -## 1.2.2 - -### Patch Changes - -- 5185bce: chore (ui/solid): deprecate ai-sdk/solid - -## 1.2.1 - -### Patch Changes - -- Updated dependencies [f10f0fa] - - @ai-sdk/provider-utils@2.2.1 - - @ai-sdk/ui-utils@1.2.1 - -## 1.2.0 - -### Minor Changes - -- 5bc638d: AI SDK 4.2 - -### Patch Changes - -- Updated dependencies [5bc638d] - - @ai-sdk/provider-utils@2.2.0 - - @ai-sdk/ui-utils@1.2.0 - -## 1.1.25 - -### Patch Changes - -- Updated dependencies [d0c4659] - - @ai-sdk/provider-utils@2.1.15 - - @ai-sdk/ui-utils@1.1.21 - -## 1.1.24 - -### Patch Changes - -- @ai-sdk/provider-utils@2.1.14 -- @ai-sdk/ui-utils@1.1.20 - -## 1.1.23 - -### Patch Changes - -- @ai-sdk/provider-utils@2.1.13 -- @ai-sdk/ui-utils@1.1.19 - -## 1.1.22 - -### Patch Changes - -- Updated dependencies [1531959] - - @ai-sdk/provider-utils@2.1.12 - - @ai-sdk/ui-utils@1.1.18 - -## 1.1.21 - -### Patch Changes - -- @ai-sdk/provider-utils@2.1.11 -- @ai-sdk/ui-utils@1.1.17 - -## 1.1.20 - -### Patch Changes - -- Updated dependencies [ddf9740] - - @ai-sdk/ui-utils@1.1.16 - - @ai-sdk/provider-utils@2.1.10 - -## 1.1.19 - -### Patch Changes - -- @ai-sdk/provider-utils@2.1.9 -- @ai-sdk/ui-utils@1.1.15 - -## 1.1.18 - -### Patch Changes - -- 60c3220: fix (ui): set status to ready after stream was aborted - -## 1.1.17 - -### Patch Changes - -- c43df41: feat (ui): add useChat status - -## 1.1.16 - -### Patch Changes - -- Updated dependencies [2e898b4] - - @ai-sdk/provider-utils@2.1.8 
- - @ai-sdk/ui-utils@1.1.14 - -## 1.1.15 - -### Patch Changes - -- Updated dependencies [3ff4ef8] - - @ai-sdk/provider-utils@2.1.7 - - @ai-sdk/ui-utils@1.1.13 - -## 1.1.14 - -### Patch Changes - -- Updated dependencies [166e09e] - - @ai-sdk/ui-utils@1.1.12 - -## 1.1.13 - -### Patch Changes - -- Updated dependencies [318b351] - - @ai-sdk/ui-utils@1.1.11 - -## 1.1.12 - -### Patch Changes - -- 244dd77: feat (ui/solid): add support for prepareRequestBody - -## 1.1.11 - -### Patch Changes - -- bcc61d4: feat (ui): introduce message parts for useChat -- Updated dependencies [bcc61d4] - - @ai-sdk/ui-utils@1.1.10 - -## 1.1.10 - -### Patch Changes - -- Updated dependencies [6b8cc14] - - @ai-sdk/ui-utils@1.1.9 - -## 1.1.9 - -### Patch Changes - -- @ai-sdk/provider-utils@2.1.6 -- @ai-sdk/ui-utils@1.1.8 - -## 1.1.8 - -### Patch Changes - -- 0d2d9bf: fix (ui): empty submits (with allowEmptySubmit) create user messages -- 0d2d9bf: fix (ui): single assistant message with multiple tool steps -- Updated dependencies [0d2d9bf] - - @ai-sdk/ui-utils@1.1.7 - -## 1.1.7 - -### Patch Changes - -- c73423e: feat (ui/solid): experimental attachment support - -## 1.1.6 - -### Patch Changes - -- Updated dependencies [3a602ca] - - @ai-sdk/provider-utils@2.1.5 - - @ai-sdk/ui-utils@1.1.6 - -## 1.1.5 - -### Patch Changes - -- Updated dependencies [066206e] - - @ai-sdk/provider-utils@2.1.4 - - @ai-sdk/ui-utils@1.1.5 - -## 1.1.4 - -### Patch Changes - -- Updated dependencies [39e5c1f] - - @ai-sdk/provider-utils@2.1.3 - - @ai-sdk/ui-utils@1.1.4 - -## 1.1.3 - -### Patch Changes - -- Updated dependencies [9ce598c] - - @ai-sdk/ui-utils@1.1.3 - -## 1.1.2 - -### Patch Changes - -- Updated dependencies [ed012d2] - - @ai-sdk/provider-utils@2.1.2 - - @ai-sdk/ui-utils@1.1.2 - -## 1.1.1 - -### Patch Changes - -- Updated dependencies [e7a9ec9] -- Updated dependencies [0a699f1] - - @ai-sdk/ui-utils@1.1.1 - - @ai-sdk/provider-utils@2.1.1 - -## 1.1.0 - -### Minor Changes - -- 62ba5ad: release: AI SDK 4.1 - -### Patch Changes - -- Updated dependencies [62ba5ad] - - @ai-sdk/provider-utils@2.1.0 - - @ai-sdk/ui-utils@1.1.0 - -## 1.0.13 - -### Patch Changes - -- Updated dependencies [33592d2] - - @ai-sdk/ui-utils@1.0.12 - -## 1.0.12 - -### Patch Changes - -- Updated dependencies [00114c5] -- Updated dependencies [00114c5] - - @ai-sdk/provider-utils@2.0.8 - - @ai-sdk/ui-utils@1.0.11 - -## 1.0.11 - -### Patch Changes - -- 37f4510: feat (ui): expose useChat id and send it to the server -- Updated dependencies [37f4510] - - @ai-sdk/ui-utils@1.0.10 - -## 1.0.10 - -### Patch Changes - -- Updated dependencies [2495973] -- Updated dependencies [2495973] - - @ai-sdk/ui-utils@1.0.9 - -## 1.0.9 - -### Patch Changes - -- Updated dependencies [90fb95a] -- Updated dependencies [e6dfef4] -- Updated dependencies [6636db6] - - @ai-sdk/provider-utils@2.0.7 - - @ai-sdk/ui-utils@1.0.8 - -## 1.0.8 - -### Patch Changes - -- Updated dependencies [19a2ce7] -- Updated dependencies [6337688] - - @ai-sdk/provider-utils@2.0.6 - - @ai-sdk/ui-utils@1.0.7 - -## 1.0.7 - -### Patch Changes - -- Updated dependencies [5ed5e45] - - @ai-sdk/provider-utils@2.0.5 - - @ai-sdk/ui-utils@1.0.6 - -## 1.0.6 - -### Patch Changes - -- @ai-sdk/provider-utils@2.0.4 -- @ai-sdk/ui-utils@1.0.5 - -## 1.0.5 - -### Patch Changes - -- Updated dependencies [0984f0b] - - @ai-sdk/provider-utils@2.0.3 - - @ai-sdk/ui-utils@1.0.4 - -## 1.0.4 - -### Patch Changes - -- Updated dependencies [953469c] -- Updated dependencies [a3dd2ed] - - @ai-sdk/ui-utils@1.0.3 - -## 1.0.3 - -### Patch Changes - -- 630ac31: 
fix (ui): set tool invocation state to "result" when calling addToolResult - -## 1.0.2 - -### Patch Changes - -- 88b364b: feat (ui/solid): add useObject -- 88b364b: feat (ui/solid): add useAssistant -- 88b364b: fix (ui/solid): fix useChat deep object updates -- Updated dependencies [88b364b] - - @ai-sdk/ui-utils@1.0.2 - - @ai-sdk/provider-utils@2.0.2 - -## 1.0.1 - -### Patch Changes - -- Updated dependencies [c3ab5de] - - @ai-sdk/provider-utils@2.0.1 - - @ai-sdk/ui-utils@1.0.1 - -## 1.0.0 - -### Major Changes - -- e117b54: chore (ui): remove deprecated useChat roundtrip options -- 8bf5756: chore: remove legacy function/tool calling -- 7814c4b: chore (ui): remove streamMode setting from useChat & useCompletion -- fe4f109: chore (ui): set default value of useChat keepLastMessageOnError to true -- 84edae5: chore (release): bump ui package versions for 4.0 release - -### Patch Changes - -- 79c6dd9: fix (ui): remove unnecessary calls to mutateStreamData in useChat -- Updated dependencies [8bf5756] -- Updated dependencies [b469a7e] -- Updated dependencies [9f81e66] -- Updated dependencies [70f28f6] -- Updated dependencies [dce4158] -- Updated dependencies [7814c4b] -- Updated dependencies [fe4f109] -- Updated dependencies [b1da952] -- Updated dependencies [04d3747] -- Updated dependencies [dce4158] -- Updated dependencies [7e89ccb] -- Updated dependencies [8426f55] -- Updated dependencies [db46ce5] -- Updated dependencies [b053413] - - @ai-sdk/ui-utils@1.0.0 - - @ai-sdk/provider-utils@2.0.0 - -## 1.0.0-canary.9 - -### Patch Changes - -- 79c6dd9: fix (ui): remove unnecessary calls to mutateStreamData in useChat -- Updated dependencies [04d3747] - - @ai-sdk/ui-utils@1.0.0-canary.9 - -## 1.0.0-canary.8 - -### Patch Changes - -- Updated dependencies [b053413] - - @ai-sdk/ui-utils@1.0.0-canary.8 - -## 1.0.0-canary.7 - -### Major Changes - -- fe4f109: chore (ui): set default value of useChat keepLastMessageOnError to true - -### Patch Changes - -- Updated dependencies [fe4f109] - - @ai-sdk/ui-utils@1.0.0-canary.7 - -## 1.0.0-canary.6 - -### Patch Changes - -- Updated dependencies [70f28f6] - - @ai-sdk/ui-utils@1.0.0-canary.6 - -## 1.0.0-canary.5 - -### Patch Changes - -- Updated dependencies [9f81e66] -- Updated dependencies [8426f55] - - @ai-sdk/ui-utils@1.0.0-canary.5 - - @ai-sdk/provider-utils@2.0.0-canary.3 - -## 1.0.0-canary.4 - -### Patch Changes - -- Updated dependencies [dce4158] -- Updated dependencies [dce4158] - - @ai-sdk/provider-utils@2.0.0-canary.2 - - @ai-sdk/ui-utils@1.0.0-canary.4 - -## 1.0.0-canary.3 - -### Patch Changes - -- Updated dependencies [b1da952] - - @ai-sdk/provider-utils@2.0.0-canary.1 - - @ai-sdk/ui-utils@1.0.0-canary.3 - -## 1.0.0-canary.2 - -### Major Changes - -- e117b54: chore (ui): remove deprecated useChat roundtrip options -- 7814c4b: chore (ui): remove streamMode setting from useChat & useCompletion - -### Patch Changes - -- Updated dependencies [b469a7e] -- Updated dependencies [7814c4b] -- Updated dependencies [db46ce5] - - @ai-sdk/provider-utils@2.0.0-canary.0 - - @ai-sdk/ui-utils@1.0.0-canary.2 - -## 1.0.0-canary.1 - -### Major Changes - -- 8bf5756: chore: remove legacy function/tool calling - -### Patch Changes - -- Updated dependencies [8bf5756] - - @ai-sdk/ui-utils@1.0.0-canary.1 - -## 1.0.0-canary.0 - -### Major Changes - -- 84edae5: chore (release): bump ui package versions for 4.0 release - -### Patch Changes - -- Updated dependencies [7e89ccb] - - @ai-sdk/ui-utils@1.0.0-canary.0 - -## 0.0.54 - -### Patch Changes - -- Updated dependencies [a85c965] - - 
@ai-sdk/ui-utils@0.0.50 - -## 0.0.53 - -### Patch Changes - -- Updated dependencies [3bf8da0] - - @ai-sdk/ui-utils@0.0.49 - -## 0.0.52 - -### Patch Changes - -- Updated dependencies [aa98cdb] -- Updated dependencies [7b937c5] -- Updated dependencies [811a317] - - @ai-sdk/provider-utils@1.0.22 - - @ai-sdk/ui-utils@0.0.48 - -## 0.0.51 - -### Patch Changes - -- @ai-sdk/provider-utils@1.0.21 -- @ai-sdk/ui-utils@0.0.47 - -## 0.0.50 - -### Patch Changes - -- caedcda: feat (ai/ui): add setData helper to useChat - -## 0.0.49 - -### Patch Changes - -- @ai-sdk/provider-utils@1.0.20 -- @ai-sdk/ui-utils@0.0.46 - -## 0.0.48 - -### Patch Changes - -- Updated dependencies [cd77c5d] - - @ai-sdk/ui-utils@0.0.45 - -## 0.0.47 - -### Patch Changes - -- Updated dependencies [273f696] - - @ai-sdk/provider-utils@1.0.19 - - @ai-sdk/ui-utils@0.0.44 - -## 0.0.46 - -### Patch Changes - -- Updated dependencies [1f590ef] - - @ai-sdk/ui-utils@0.0.43 - -## 0.0.45 - -### Patch Changes - -- Updated dependencies [14210d5] - - @ai-sdk/ui-utils@0.0.42 - -## 0.0.44 - -### Patch Changes - -- Updated dependencies [03313cd] - - @ai-sdk/provider-utils@1.0.18 - - @ai-sdk/ui-utils@0.0.41 - -## 0.0.43 - -### Patch Changes - -- Updated dependencies [aa2dc58] - - @ai-sdk/ui-utils@0.0.40 - -## 0.0.42 - -### Patch Changes - -- @ai-sdk/provider-utils@1.0.17 -- @ai-sdk/ui-utils@0.0.39 - -## 0.0.41 - -### Patch Changes - -- Updated dependencies [d151349] - - @ai-sdk/ui-utils@0.0.38 - -## 0.0.40 - -### Patch Changes - -- Updated dependencies [09f895f] - - @ai-sdk/provider-utils@1.0.16 - - @ai-sdk/ui-utils@0.0.37 - -## 0.0.39 - -### Patch Changes - -- Updated dependencies [b5a82b7] - - @ai-sdk/ui-utils@0.0.36 - -## 0.0.38 - -### Patch Changes - -- Updated dependencies [d67fa9c] - - @ai-sdk/provider-utils@1.0.15 - - @ai-sdk/ui-utils@0.0.35 - -## 0.0.37 - -### Patch Changes - -- @ai-sdk/provider-utils@1.0.14 -- @ai-sdk/ui-utils@0.0.34 - -## 0.0.36 - -### Patch Changes - -- @ai-sdk/provider-utils@1.0.13 -- @ai-sdk/ui-utils@0.0.33 - -## 0.0.35 - -### Patch Changes - -- Updated dependencies [dd712ac] - - @ai-sdk/provider-utils@1.0.12 - - @ai-sdk/ui-utils@0.0.32 - -## 0.0.34 - -### Patch Changes - -- @ai-sdk/provider-utils@1.0.11 -- @ai-sdk/ui-utils@0.0.31 - -## 0.0.33 - -### Patch Changes - -- Updated dependencies [e9c891d] -- Updated dependencies [4bd27a9] -- Updated dependencies [845754b] - - @ai-sdk/ui-utils@0.0.30 - - @ai-sdk/provider-utils@1.0.10 - -## 0.0.32 - -### Patch Changes - -- Updated dependencies [e5b58f3] - - @ai-sdk/ui-utils@0.0.29 - -## 0.0.31 - -### Patch Changes - -- @ai-sdk/provider-utils@1.0.9 -- @ai-sdk/ui-utils@0.0.28 - -## 0.0.30 - -### Patch Changes - -- @ai-sdk/provider-utils@1.0.8 -- @ai-sdk/ui-utils@0.0.27 - -## 0.0.29 - -### Patch Changes - -- @ai-sdk/provider-utils@1.0.7 -- @ai-sdk/ui-utils@0.0.26 - -## 0.0.28 - -### Patch Changes - -- Updated dependencies [9614584] -- Updated dependencies [0762a22] - - @ai-sdk/provider-utils@1.0.6 - - @ai-sdk/ui-utils@0.0.25 - -## 0.0.27 - -### Patch Changes - -- Updated dependencies [5be25124] - - @ai-sdk/ui-utils@0.0.24 - -## 0.0.26 - -### Patch Changes - -- Updated dependencies [fea7b604] - - @ai-sdk/ui-utils@0.0.23 - -## 0.0.25 - -### Patch Changes - -- Updated dependencies [1d93d716] - - @ai-sdk/ui-utils@0.0.22 - -## 0.0.24 - -### Patch Changes - -- c450fcf7: feat (ui): invoke useChat onFinish with finishReason and tokens -- e4a1719f: chore (ai/ui): rename streamMode to streamProtocol -- Updated dependencies [c450fcf7] -- Updated dependencies [e4a1719f] - - 
@ai-sdk/ui-utils@0.0.21 - -## 0.0.23 - -### Patch Changes - -- b2bee4c5: fix (ai/ui): send data, body, headers in useChat().reload - -## 0.0.22 - -### Patch Changes - -- @ai-sdk/ui-utils@0.0.20 - -## 0.0.21 - -### Patch Changes - -- @ai-sdk/ui-utils@0.0.19 - -## 0.0.20 - -### Patch Changes - -- @ai-sdk/ui-utils@0.0.18 - -## 0.0.19 - -### Patch Changes - -- f63829fe: feat (ai/ui): add allowEmptySubmit flag to handleSubmit -- 4b2c09d9: feat (ai/ui): add mutator function support to useChat / setMessages -- Updated dependencies [f63829fe] - - @ai-sdk/ui-utils@0.0.17 - -## 0.0.18 - -### Patch Changes - -- Updated dependencies [5b7b3bbe] - - @ai-sdk/ui-utils@0.0.16 - -## 0.0.17 - -### Patch Changes - -- Updated dependencies [1f67fe49] - - @ai-sdk/ui-utils@0.0.15 - -## 0.0.16 - -### Patch Changes - -- Updated dependencies [99ddbb74] - - @ai-sdk/ui-utils@0.0.14 - -## 0.0.15 - -### Patch Changes - -- a6cb2c8b: feat (ai/ui): add keepLastMessageOnError option to useChat -- Updated dependencies [a6cb2c8b] - - @ai-sdk/ui-utils@0.0.13 - -## 0.0.14 - -### Patch Changes - -- 56bbc2a7: feat (ai/ui): set body and headers directly on options for handleSubmit and append -- Updated dependencies [56bbc2a7] - - @ai-sdk/ui-utils@0.0.12 - -## 0.0.13 - -### Patch Changes - -- @ai-sdk/ui-utils@0.0.11 - -## 0.0.12 - -### Patch Changes - -- 3db90c3d: allow empty handleSubmit submissions for useChat - - @ai-sdk/ui-utils@0.0.10 - -## 0.0.11 - -### Patch Changes - -- Updated dependencies [1894f811] - - @ai-sdk/ui-utils@0.0.9 - -## 0.0.10 - -### Patch Changes - -- d3100b9c: feat (ai/ui): support custom fetch function in useChat, useCompletion, useAssistant, useObject -- Updated dependencies [d3100b9c] - - @ai-sdk/ui-utils@0.0.8 - -## 0.0.9 - -### Patch Changes - -- @ai-sdk/ui-utils@0.0.7 - -## 0.0.8 - -### Patch Changes - -- c908f741: chore (ui/solid): update solidjs useChat and useCompletion to feature parity with React - -## 0.0.7 - -### Patch Changes - -- Updated dependencies [54bf4083] - - @ai-sdk/ui-utils@0.0.6 - -## 0.0.6 - -### Patch Changes - -- d42b8907: feat (ui): make event in handleSubmit optional - -## 0.0.5 - -### Patch Changes - -- @ai-sdk/ui-utils@0.0.5 - -## 0.0.4 - -### Patch Changes - -- Updated dependencies [008725ec] - - @ai-sdk/ui-utils@0.0.4 - -## 0.0.3 - -### Patch Changes - -- @ai-sdk/ui-utils@0.0.3 - -## 0.0.2 - -### Patch Changes - -- @ai-sdk/ui-utils@0.0.2 - -## 0.0.1 - -### Patch Changes - -- 85f209a4: chore: extracted ui library support into separate modules -- Updated dependencies [85f209a4] - - @ai-sdk/ui-utils@0.0.1 diff --git a/packages/solid/README.md b/packages/solid/README.md deleted file mode 100644 index 66daa903ae3f..000000000000 --- a/packages/solid/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# AI SDK: Solid.js provider - -> **Warning** `@ai-sdk/solid` has been deprecated and will be removed in AI SDK 5 - -[Solid.js](https://www.solidjs.com/) UI components for the [AI SDK](https://sdk.vercel.ai/docs): - -- [`useChat`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-chat) hook -- [`useCompletion`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-completion) hook -- [`useObject`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-object) hook diff --git a/packages/solid/package.json b/packages/solid/package.json deleted file mode 100644 index 166f376f324c..000000000000 --- a/packages/solid/package.json +++ /dev/null @@ -1,79 +0,0 @@ -{ - "name": "@ai-sdk/solid", - "version": "1.2.6", - "license": "Apache-2.0", - "sideEffects": false, - "main": "./dist/index.js", - "module": 
"./dist/index.mjs", - "types": "./dist/index.d.ts", - "scripts": { - "build": "tsup", - "build:watch": "tsup --watch", - "clean": "rm -rf dist", - "lint": "eslint \"./**/*.ts*\"", - "type-check": "tsc --noEmit", - "prettier-check": "prettier --check \"./**/*.ts*\"", - "test": "vitest --config vitest.config.js --run", - "test:watch": "vitest --config vitest.config.js" - }, - "exports": { - "./package.json": "./package.json", - ".": { - "types": "./dist/index.d.ts", - "import": "./dist/index.mjs", - "require": "./dist/index.js" - } - }, - "files": [ - "dist/**/*", - "CHANGELOG.md" - ], - "dependencies": { - "@ai-sdk/provider-utils": "2.2.3", - "@ai-sdk/ui-utils": "1.2.4", - "@solid-primitives/trigger": "^1.1.0", - "zod": "^3.23.8" - }, - "devDependencies": { - "@solidjs/testing-library": "0.8.10", - "@testing-library/jest-dom": "^6.6.3", - "@testing-library/user-event": "^14.5.2", - "@types/node": "20.17.24", - "@vercel/ai-tsconfig": "workspace:*", - "@vitejs/plugin-vue": "5.2.0", - "eslint": "8.57.1", - "eslint-config-vercel-ai": "workspace:*", - "jsdom": "^24.0.0", - "msw": "2.6.4", - "tsup": "^7.2.0", - "typescript": "5.6.3", - "vite-plugin-solid": "2.7.2", - "vitest": "2.1.4" - }, - "peerDependencies": { - "solid-js": "^1.7.7" - }, - "peerDependenciesMeta": { - "solid-js": { - "optional": true - } - }, - "engines": { - "node": ">=18" - }, - "publishConfig": { - "access": "public" - }, - "homepage": "https://sdk.vercel.ai/docs", - "repository": { - "type": "git", - "url": "git+https://github.com/vercel/ai.git" - }, - "bugs": { - "url": "https://github.com/vercel/ai/issues" - }, - "keywords": [ - "ai", - "solid" - ] -} diff --git a/packages/solid/src/index.ts b/packages/solid/src/index.ts deleted file mode 100644 index f727f17d3ac3..000000000000 --- a/packages/solid/src/index.ts +++ /dev/null @@ -1,4 +0,0 @@ -export * from './use-chat'; -export * from './use-completion'; -export * from './use-object'; -export * from './use-assistant'; diff --git a/packages/solid/src/package.json b/packages/solid/src/package.json deleted file mode 100644 index 747dbe273fb6..000000000000 --- a/packages/solid/src/package.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "main": "./dist/index.js", - "module": "./dist/index.mjs", - "types": "./dist/index.d.ts", - "exports": "./dist/index.mjs", - "private": true, - "peerDependencies": { - "solid-js": "*" - } -} diff --git a/packages/solid/src/use-assistant.ts b/packages/solid/src/use-assistant.ts deleted file mode 100644 index 5eeb81c1a7ed..000000000000 --- a/packages/solid/src/use-assistant.ts +++ /dev/null @@ -1,288 +0,0 @@ -import { isAbortError } from '@ai-sdk/provider-utils'; -import { - AssistantStatus, - CreateMessage, - generateId, - Message, - processAssistantStream, - UseAssistantOptions, -} from '@ai-sdk/ui-utils'; -import { Accessor, createMemo, createSignal, JSX, Setter } from 'solid-js'; -import { createStore, SetStoreFunction, Store } from 'solid-js/store'; -import { convertToAccessorOptions } from './utils/convert-to-accessor-options'; - -// use function to allow for mocking in tests: -const getOriginalFetch = () => fetch; - -export type UseAssistantHelpers = { - /** - * The current array of chat messages. - */ - messages: Store; - - /** - * Update the message store with a new array of messages. - */ - setMessages: SetStoreFunction; - - /** - * The current thread ID. - */ - threadId: Accessor; - - /** - * Set the current thread ID. Specifying a thread ID will switch to that thread, if it exists. If set to 'undefined', a new thread will be created. 
For both cases, `threadId` will be updated with the new value and `messages` will be cleared. - */ - setThreadId: (threadId: string | undefined) => void; - - /** - * The current value of the input field. - */ - input: Accessor; - - /** - * Append a user message to the chat list. This triggers the API call to fetch - * the assistant's response. - * @param message The message to append - * @param requestOptions Additional options to pass to the API call - */ - append: ( - message: Message | CreateMessage, - requestOptions?: { - data?: Record; - }, - ) => Promise; - - /** -Abort the current request immediately, keep the generated tokens if any. - */ - stop: () => void; - - /** - * setState-powered method to update the input value. - */ - setInput: Setter; - - /** - * Handler for the `onChange` event of the input field to control the input's value. - */ - handleInputChange: JSX.ChangeEventHandlerUnion< - HTMLInputElement | HTMLTextAreaElement, - Event - >; - - /** - * Form submission handler that automatically resets the input field and appends a user message. - */ - submitMessage: ( - event?: SubmitEvent, - requestOptions?: { - data?: Record; - }, - ) => Promise; - - /** - * The current status of the assistant. This can be used to show a loading indicator. - */ - status: Accessor; - - /** - * The error thrown during the assistant message processing, if any. - */ - error: Accessor; -}; - -/** - * @deprecated `@ai-sdk/solid` has been deprecated and will be removed in AI SDK 5. - */ -export function useAssistant( - rawUseAssistantOptions: UseAssistantOptions | Accessor, -): UseAssistantHelpers { - const useAssistantOptions = createMemo(() => - convertToAccessorOptions(rawUseAssistantOptions), - ); - - const [messages, setMessages] = createStore([]); - const [input, setInput] = createSignal(''); - const [currentThreadId, setCurrentThreadId] = createSignal(); - const [status, setStatus] = createSignal('awaiting_message'); - const [error, setError] = createSignal(); - - const handleInputChange: JSX.ChangeEventHandlerUnion< - HTMLInputElement | HTMLTextAreaElement, - Event - > = event => { - setInput(event.target.value); - }; - - // Abort controller to cancel the current API call. - let abortControllerRef: AbortController | null = null; - - const stop = () => { - if (abortControllerRef) { - abortControllerRef?.abort(); - abortControllerRef = null; - } - }; - - const append = async ( - message: Message | CreateMessage, - requestOptions?: { - data?: Record; - }, - ) => { - setStatus('in_progress'); - - setMessages(messages => [ - ...messages, - { - ...message, - id: message.id ?? generateId(), - }, - ]); - - setInput(''); - - const abortController = new AbortController(); - - try { - abortControllerRef = abortController; - - const actualFetch = fetch ?? getOriginalFetch(); - const response = await actualFetch(useAssistantOptions().api(), { - method: 'POST', - credentials: useAssistantOptions().credentials?.(), - signal: abortController.signal, - headers: { - 'Content-Type': 'application/json', - ...useAssistantOptions().headers?.(), - }, - body: JSON.stringify({ - ...useAssistantOptions().body?.(), - // always use user-provided threadId when available: - threadId: useAssistantOptions().threadId?.() ?? currentThreadId(), - message: message.content, - - // optional request data: - data: requestOptions?.data, - }), - }); - - if (!response.ok) { - throw new Error( - (await response.text()) ?? 
'Failed to fetch the assistant response.', - ); - } - - if (response.body == null) { - throw new Error('The response body is empty.'); - } - - await processAssistantStream({ - stream: response.body, - onAssistantMessagePart(value) { - setMessages(messages => [ - ...messages, - { - id: value.id, - role: value.role, - content: value.content[0].text.value, - parts: [], - }, - ]); - }, - onTextPart(value) { - // text delta - add to last message: - setMessages(messages => { - const lastMessage = messages[messages.length - 1]; - return [ - ...messages.slice(0, messages.length - 1), - { - id: lastMessage.id, - role: lastMessage.role, - content: lastMessage.content + value, - parts: lastMessage.parts, - }, - ]; - }); - }, - onAssistantControlDataPart(value) { - setCurrentThreadId(value.threadId); - - // set id of last message: - setMessages(messages => { - const lastMessage = messages[messages.length - 1]; - lastMessage.id = value.messageId; - return [...messages.slice(0, messages.length - 1), lastMessage]; - }); - }, - onDataMessagePart(value) { - setMessages(messages => [ - ...messages, - { - id: value.id ?? generateId(), - role: 'data', - content: '', - data: value.data, - parts: [], - }, - ]); - }, - onErrorPart(value) { - setError(new Error(value)); - }, - }); - } catch (error) { - // Ignore abort errors as they are expected when the user cancels the request: - if (isAbortError(error) && abortController.signal.aborted) { - abortControllerRef = null; - return; - } - - const onError = useAssistantOptions().onError?.(); - if (onError && error instanceof Error) { - onError(error); - } - - setError(error as Error); - } finally { - abortControllerRef = null; - setStatus('awaiting_message'); - } - }; - - const submitMessage = async ( - event?: SubmitEvent, - requestOptions?: { - data?: Record; - }, - ) => { - event?.preventDefault?.(); - - if (input() === '') { - return; - } - - append({ role: 'user', content: input(), parts: [] }, requestOptions); - }; - - const setThreadId = (threadId: string | undefined) => { - setCurrentThreadId(threadId); - setMessages([]); - }; - - return { - append, - messages, - setMessages, - threadId: currentThreadId, - setThreadId, - input, - setInput, - handleInputChange, - submitMessage, - status, - error, - stop, - }; -} diff --git a/packages/solid/src/use-assistant.ui.test.tsx b/packages/solid/src/use-assistant.ui.test.tsx deleted file mode 100644 index e5dfd5ba5b79..000000000000 --- a/packages/solid/src/use-assistant.ui.test.tsx +++ /dev/null @@ -1,330 +0,0 @@ -import { formatAssistantStreamPart } from '@ai-sdk/ui-utils'; -import { - mockFetchDataStream, - mockFetchDataStreamWithGenerator, - mockFetchError, -} from '@ai-sdk/ui-utils/test'; -import '@testing-library/jest-dom/vitest'; -import { cleanup, findByText, render, screen } from '@solidjs/testing-library'; -import userEvent from '@testing-library/user-event'; -import { useAssistant } from './use-assistant'; -import { For, Show } from 'solid-js'; - -describe('stream data stream', () => { - const TestComponent = () => { - const { status, messages, error, append } = useAssistant({ - api: '/api/assistant', - }); - - return ( -
-      <div>
-        <div data-testid="status">{status()}</div>
-        <Show when={error()}>
-          {error => <div data-testid="error">{error().toString()}</div>}
-        </Show>
-        <For each={messages}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              {m.role === 'user' ? 'User: ' : 'AI: '}
-              {m.content}
-            </div>
-          )}
-        </For>
-        <button
-          data-testid="do-append"
-          onClick={() => append({ role: 'user', content: 'hi' })}
-        />
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it('should show streamed response', async () => { - const { requestBody } = mockFetchDataStream({ - url: 'https://example.com/api/assistant', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't0', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - // check that correct information was sent to the server: - expect(await requestBody).toStrictEqual(JSON.stringify({ message: 'hi' })); - }); - - it('should show error response', async () => { - mockFetchError({ statusCode: 500, errorMessage: 'Internal Error' }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('error'); - expect(screen.getByTestId('error')).toHaveTextContent( - 'Error: Internal Error', - ); - }); - - describe('loading state', () => { - it('should show loading state', async () => { - let finishGeneration: ((value?: unknown) => void) | undefined; - const finishGenerationPromise = new Promise(resolve => { - finishGeneration = resolve; - }); - - mockFetchDataStreamWithGenerator({ - url: 'https://example.com/api/chat', - chunkGenerator: (async function* generate() { - const encoder = new TextEncoder(); - - yield encoder.encode( - formatAssistantStreamPart('assistant_control_data', { - threadId: 't0', - messageId: 'm1', - }), - ); - - yield encoder.encode( - formatAssistantStreamPart('assistant_message', { - id: 'm1', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - ); - - yield encoder.encode('0:"Hello"\n'); - - await finishGenerationPromise; - })(), - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('status'); - expect(screen.getByTestId('status')).toHaveTextContent('in_progress'); - - finishGeneration?.(); - - await findByText(await screen.findByTestId('status'), 'awaiting_message'); - expect(screen.getByTestId('status')).toHaveTextContent( - 'awaiting_message', - ); - }); - }); -}); - -describe('thread management', () => { - const TestComponent = () => { - const { status, messages, error, append, setThreadId, threadId } = - useAssistant({ - api: '/api/assistant', - }); - - return ( -
-      <div>
-        <div data-testid="status">{status()}</div>
-        <div data-testid="thread-id">{threadId() || 'undefined'}</div>
-        <Show when={error()}>
-          {error => <div data-testid="error">{error().toString()}</div>}
-        </Show>
-        <For each={messages}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              {m.role === 'user' ? 'User: ' : 'AI: '}
-              {m.content}
-            </div>
-          )}
-        </For>
-        <button
-          data-testid="do-append"
-          onClick={() => append({ role: 'user', content: 'hi' })}
-        />
-        <button
-          data-testid="do-new-thread"
-          onClick={() => setThreadId(undefined)}
-        />
-        <button data-testid="do-thread-3" onClick={() => setThreadId('t3')} />
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it('create new thread', async () => { - await screen.findByTestId('thread-id'); - expect(screen.getByTestId('thread-id')).toHaveTextContent('undefined'); - }); - - it('should show streamed response', async () => { - const { requestBody } = mockFetchDataStream({ - url: 'https://example.com/api/assistant', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't0', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(screen.getByTestId('thread-id')).toHaveTextContent('t0'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - // check that correct information was sent to the server: - expect(await requestBody).toStrictEqual(JSON.stringify({ message: 'hi' })); - }); - - it('should switch to new thread on setting undefined threadId', async () => { - await userEvent.click(screen.getByTestId('do-new-thread')); - - expect(screen.queryByTestId('message-0')).toBeNull(); - expect(screen.queryByTestId('message-1')).toBeNull(); - - const { requestBody } = mockFetchDataStream({ - url: 'https://example.com/api/assistant', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't1', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(screen.getByTestId('thread-id')).toHaveTextContent('t1'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - // check that correct information was sent to the server: - expect(await requestBody).toStrictEqual(JSON.stringify({ message: 'hi' })); - }); - - it('should switch to thread on setting previously created threadId', async () => { - await userEvent.click(screen.getByTestId('do-thread-3')); - - expect(screen.queryByTestId('message-0')).toBeNull(); - expect(screen.queryByTestId('message-1')).toBeNull(); - - const { requestBody } = mockFetchDataStream({ - url: 'https://example.com/api/assistant', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't3', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(screen.getByTestId('thread-id')).toHaveTextContent('t3'); - - await screen.findByTestId('message-1'); - 
expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - // check that correct information was sent to the server: - expect(await requestBody).toStrictEqual( - JSON.stringify({ - threadId: 't3', - message: 'hi', - }), - ); - }); -}); diff --git a/packages/solid/src/use-chat.ts b/packages/solid/src/use-chat.ts deleted file mode 100644 index 09cb7e469861..000000000000 --- a/packages/solid/src/use-chat.ts +++ /dev/null @@ -1,554 +0,0 @@ -import { FetchFunction } from '@ai-sdk/provider-utils'; -import type { - ChatRequest, - ChatRequestOptions, - CreateMessage, - JSONValue, - Message, - UseChatOptions as SharedUseChatOptions, - UIMessage, -} from '@ai-sdk/ui-utils'; -import { - callChatApi, - extractMaxToolInvocationStep, - fillMessageParts, - generateId as generateIdFunc, - getMessageParts, - isAssistantMessageWithCompletedToolCalls, - prepareAttachmentsForRequest, - shouldResubmitMessages, - updateToolCallResult, -} from '@ai-sdk/ui-utils'; -import { - Accessor, - createEffect, - createMemo, - createSignal, - JSX, - Setter, -} from 'solid-js'; -import { createStore, reconcile, Store } from 'solid-js/store'; -import { convertToAccessorOptions } from './utils/convert-to-accessor-options'; -import { ReactiveLRU } from './utils/reactive-lru'; - -export type { CreateMessage, Message }; - -export type UseChatHelpers = { - /** - * Current messages in the chat as a SolidJS store. - */ - messages: () => Store; - - /** The error object of the API request */ - error: Accessor; - /** - * Append a user message to the chat list. This triggers the API call to fetch - * the assistant's response. - * @param message The message to append - * @param options Additional options to pass to the API call - */ - append: ( - message: Message | CreateMessage, - chatRequestOptions?: ChatRequestOptions, - ) => Promise; - /** - * Reload the last AI chat response for the given chat history. If the last - * message isn't from the assistant, it will request the API to generate a - * new response. - */ - reload: ( - chatRequestOptions?: ChatRequestOptions, - ) => Promise; - /** - * Abort the current request immediately, keep the generated tokens if any. - */ - stop: () => void; - /** - * Update the `messages` state locally. This is useful when you want to - * edit the messages on the client, and then trigger the `reload` method - * manually to regenerate the AI response. - */ - setMessages: ( - messages: Message[] | ((messages: Message[]) => Message[]), - ) => void; - /** The current value of the input */ - input: Accessor; - /** Signal setter to update the input value */ - setInput: Setter; - /** An input/textarea-ready onChange handler to control the value of the input */ - handleInputChange: JSX.ChangeEventHandlerUnion< - HTMLInputElement | HTMLTextAreaElement, - Event - >; - /** Form submission handler to automatically reset input and append a user message */ - handleSubmit: ( - event?: { preventDefault?: () => void }, - chatRequestOptions?: ChatRequestOptions, - ) => void; - - /** - * Whether the API request is in progress - * - * @deprecated use `status` instead - */ - isLoading: Accessor; - - /** - * Hook status: - * - * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream. - * - `streaming`: The response is actively streaming in from the API, receiving chunks of data. - * - `ready`: The full response has been received and processed; a new user message can be submitted. 
- * - `error`: An error occurred during the API request, preventing successful completion. - */ - status: Accessor<'submitted' | 'streaming' | 'ready' | 'error'>; - - /** Additional data added on the server via StreamData */ - data: Accessor; - /** Set the data of the chat. You can use this to transform or clear the chat data. */ - setData: ( - data: - | JSONValue[] - | undefined - | ((data: JSONValue[] | undefined) => JSONValue[] | undefined), - ) => void; - - /** -Custom fetch implementation. You can use it as a middleware to intercept requests, -or to provide a custom fetch implementation for e.g. testing. - */ - fetch?: FetchFunction; - - addToolResult: ({ - toolCallId, - result, - }: { - toolCallId: string; - result: any; - }) => void; - - /** The id of the chat */ - id: string; -}; - -const chatCache = new ReactiveLRU(); - -export type UseChatOptions = SharedUseChatOptions & { - /** -Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1. - -A maximum number is required to prevent infinite loops in the case of misconfigured tools. - -By default, it's set to 1, which means that only a single LLM call is made. -*/ - maxSteps?: number; - - /** - * Experimental (SolidJS only). When a function is provided, it will be used - * to prepare the request body for the chat API. This can be useful for - * customizing the request body based on the messages and data in the chat. - * - * @param id The chat ID - * @param messages The current messages in the chat - * @param requestData The data object passed in the chat request - * @param requestBody The request body object passed in the chat request - */ - experimental_prepareRequestBody?: (options: { - id: string; - messages: UIMessage[]; - requestData?: JSONValue; - requestBody?: object; - }) => unknown; -}; - -/** - * @deprecated `@ai-sdk/solid` has been deprecated and will be removed in AI SDK 5. - */ -export function useChat( - rawUseChatOptions: UseChatOptions | Accessor = {}, -): UseChatHelpers { - const resolvedOptions = createMemo(() => - convertToAccessorOptions(rawUseChatOptions), - ); - const prepareFn = createMemo(() => { - const opts = resolvedOptions(); - return opts.experimental_prepareRequestBody?.(); - }); - const useChatOptions = createMemo(() => ({ - ...resolvedOptions(), - experimental_prepareRequestBody: prepareFn, - })); - - const api = createMemo(() => useChatOptions().api?.() ?? '/api/chat'); - const generateId = createMemo( - () => useChatOptions().generateId?.() ?? generateIdFunc, - ); - const chatId = createMemo(() => useChatOptions().id?.() ?? generateId()()); - const chatKey = createMemo(() => `${api()}|${chatId()}|messages`); - - const _messages = createMemo( - () => - chatCache.get(chatKey()) ?? useChatOptions().initialMessages?.() ?? 
[], - ); - - const [messagesStore, setMessagesStore] = createStore( - fillMessageParts(_messages()), - ); - createEffect(() => { - setMessagesStore(reconcile(fillMessageParts(_messages()), { merge: true })); - }); - - const mutate = (messages: UIMessage[]) => { - chatCache.set(chatKey(), messages); - }; - - const [error, setError] = createSignal(undefined); - const [streamData, setStreamData] = createSignal( - undefined, - ); - const [status, setStatus] = createSignal< - 'submitted' | 'streaming' | 'ready' | 'error' - >('ready'); - - let messagesRef: UIMessage[] = fillMessageParts(_messages()) || []; - createEffect(() => { - messagesRef = fillMessageParts(_messages()) || []; - }); - - let abortController: AbortController | null = null; - - let extraMetadata = { - credentials: useChatOptions().credentials?.(), - headers: useChatOptions().headers?.(), - body: useChatOptions().body?.(), - }; - createEffect(() => { - extraMetadata = { - credentials: useChatOptions().credentials?.(), - headers: useChatOptions().headers?.(), - body: useChatOptions().body?.(), - }; - }); - - const triggerRequest = async (chatRequest: ChatRequest) => { - setError(undefined); - setStatus('submitted'); - - const messageCount = messagesRef.length; - const maxStep = extractMaxToolInvocationStep( - chatRequest.messages[chatRequest.messages.length - 1]?.toolInvocations, - ); - - try { - abortController = new AbortController(); - - const streamProtocol = useChatOptions().streamProtocol?.() ?? 'data'; - - const onFinish = useChatOptions().onFinish?.(); - const onResponse = useChatOptions().onResponse?.(); - const onToolCall = useChatOptions().onToolCall?.(); - - const sendExtraMessageFields = - useChatOptions().sendExtraMessageFields?.(); - - const keepLastMessageOnError = - useChatOptions().keepLastMessageOnError?.() ?? true; - - const experimental_prepareRequestBody = - useChatOptions().experimental_prepareRequestBody?.(); - - // Do an optimistic update to the chat state to show the updated messages - // immediately. - const previousMessages = messagesRef; - const chatMessages = fillMessageParts(chatRequest.messages); - - mutate(chatMessages); - - const existingStreamData = streamData() ?? []; - - const constructedMessagesPayload = sendExtraMessageFields - ? chatMessages - : chatMessages.map( - ({ - role, - content, - experimental_attachments, - data, - annotations, - toolInvocations, - parts, - }) => ({ - role, - content, - ...(experimental_attachments !== undefined && { - experimental_attachments, - }), - ...(data !== undefined && { data }), - ...(annotations !== undefined && { annotations }), - ...(toolInvocations !== undefined && { toolInvocations }), - ...(parts !== undefined && { parts }), - }), - ); - - await callChatApi({ - api: api(), - body: experimental_prepareRequestBody?.({ - id: chatId(), - messages: chatMessages, - requestData: chatRequest.data, - requestBody: chatRequest.body, - }) ?? { - id: chatId(), - messages: constructedMessagesPayload, - data: chatRequest.data, - ...extraMetadata.body, - ...chatRequest.body, - }, - streamProtocol, - credentials: extraMetadata.credentials, - headers: { - ...extraMetadata.headers, - ...chatRequest.headers, - }, - abortController: () => abortController, - restoreMessagesOnFailure() { - if (!keepLastMessageOnError) { - mutate(previousMessages); - } - }, - onResponse, - onUpdate({ message, data, replaceLastMessage }) { - setStatus('streaming'); - - mutate([ - ...(replaceLastMessage - ? 
chatMessages.slice(0, chatMessages.length - 1) - : chatMessages), - message, - ]); - - if (data?.length) { - setStreamData([...existingStreamData, ...data]); - } - }, - onToolCall, - onFinish, - generateId: generateId(), - fetch: useChatOptions().fetch?.(), - lastMessage: chatMessages[chatMessages.length - 1], - }); - - abortController = null; - setStatus('ready'); - } catch (err) { - // Ignore abort errors as they are expected. - if ((err as any).name === 'AbortError') { - abortController = null; - setStatus('ready'); - return null; - } - - const onError = useChatOptions().onError?.(); - if (onError && err instanceof Error) { - onError(err); - } - - setError(err as Error); - setStatus('error'); - } - - const maxSteps = useChatOptions().maxSteps?.() ?? 1; - - // auto-submit when all tool calls in the last assistant message have results: - const messages = messagesRef; - if ( - shouldResubmitMessages({ - originalMaxToolInvocationStep: maxStep, - originalMessageCount: messageCount, - maxSteps, - messages, - }) - ) { - await triggerRequest({ messages }); - } - }; - - const append: UseChatHelpers['append'] = async ( - message, - { data, headers, body, experimental_attachments } = {}, - ) => { - const attachmentsForRequest = await prepareAttachmentsForRequest( - experimental_attachments, - ); - - const messages = messagesRef.concat({ - ...message, - id: message.id ?? generateId()(), - createdAt: message.createdAt ?? new Date(), - experimental_attachments: - attachmentsForRequest.length > 0 ? attachmentsForRequest : undefined, - parts: getMessageParts(message), - }); - - return triggerRequest({ - messages, - headers, - body, - data, - }); - }; - - const reload: UseChatHelpers['reload'] = async ({ - data, - headers, - body, - } = {}) => { - if (messagesRef.length === 0) { - return null; - } - - // Remove last assistant message and retry last user message. - const lastMessage = messagesRef[messagesRef.length - 1]; - return triggerRequest({ - messages: - lastMessage.role === 'assistant' - ? messagesRef.slice(0, -1) - : messagesRef, - headers, - body, - data, - }); - }; - - const stop = () => { - if (abortController) { - abortController.abort(); - abortController = null; - } - }; - - const setMessages = ( - messagesArg: Message[] | ((messages: Message[]) => Message[]), - ) => { - if (typeof messagesArg === 'function') { - messagesArg = messagesArg(messagesRef); - } - - const messagesWithParts = fillMessageParts(messagesArg); - mutate(messagesWithParts); - messagesRef = messagesWithParts; - }; - - const setData = ( - dataArg: - | JSONValue[] - | undefined - | ((data: JSONValue[] | undefined) => JSONValue[] | undefined), - ) => { - if (typeof dataArg === 'function') { - dataArg = dataArg(streamData()); - } - - setStreamData(dataArg); - }; - - const [input, setInput] = createSignal( - useChatOptions().initialInput?.() || '', - ); - - const handleSubmit: UseChatHelpers['handleSubmit'] = async ( - event, - options = {}, - metadata?: Object, - ) => { - event?.preventDefault?.(); - const inputValue = input(); - - if (!inputValue && !options.allowEmptySubmit) return; - - const attachmentsForRequest = await prepareAttachmentsForRequest( - options.experimental_attachments, - ); - - if (metadata) { - extraMetadata = { - ...extraMetadata, - ...metadata, - }; - } - - triggerRequest({ - messages: messagesRef.concat({ - id: generateId()(), - role: 'user', - content: inputValue, - createdAt: new Date(), - experimental_attachments: - attachmentsForRequest.length > 0 ? 
attachmentsForRequest : undefined, - parts: [{ type: 'text', text: inputValue }], - }), - headers: options.headers, - body: options.body, - data: options.data, - }); - - setInput(''); - }; - - const handleInputChange: UseChatHelpers['handleInputChange'] = e => { - setInput(e.target.value); - }; - - const addToolResult = ({ - toolCallId, - result, - }: { - toolCallId: string; - result: any; - }) => { - const currentMessages = messagesRef ?? []; - - updateToolCallResult({ - messages: currentMessages, - toolCallId, - toolResult: result, - }); - - mutate(currentMessages); - - // when the request is ongoing, the auto-submit will be triggered after the request is finished - if (status() === 'submitted' || status() === 'streaming') { - return; - } - - // auto-submit when all tool calls in the last assistant message have results: - const lastMessage = currentMessages[currentMessages.length - 1]; - if (isAssistantMessageWithCompletedToolCalls(lastMessage)) { - triggerRequest({ messages: currentMessages }); - } - }; - - const isLoading = createMemo( - () => status() === 'submitted' || status() === 'streaming', - ); - - return { - // TODO next major release: replace with direct message store access (breaking change) - messages: () => messagesStore, - id: chatId(), - append, - error, - reload, - stop, - setMessages, - input, - setInput, - handleInputChange, - handleSubmit, - isLoading, - status, - data: streamData, - setData, - addToolResult, - }; -} diff --git a/packages/solid/src/use-chat.ui.test.tsx b/packages/solid/src/use-chat.ui.test.tsx deleted file mode 100644 index c17e48a7f2fa..000000000000 --- a/packages/solid/src/use-chat.ui.test.tsx +++ /dev/null @@ -1,1329 +0,0 @@ -/** @jsxImportSource solid-js */ -import { withTestServer } from '@ai-sdk/provider-utils/test'; -import { formatDataStreamPart, Message } from '@ai-sdk/ui-utils'; -import { mockFetchDataStream } from '@ai-sdk/ui-utils/test'; -import { - cleanup, - findByText, - render, - screen, - fireEvent, - waitFor, -} from '@solidjs/testing-library'; -import '@testing-library/jest-dom'; -import userEvent from '@testing-library/user-event'; -import { createSignal, For } from 'solid-js'; -import { useChat } from './use-chat'; - -describe('prepareRequestBody', () => { - let bodyOptions: any; - - const TestComponent = () => { - const { messages, append, status } = useChat({ - experimental_prepareRequestBody: options => { - bodyOptions = options; - return 'test-request-body'; - }, - }); - - return ( -
-      <div>
-        <div data-testid="status">{status()}</div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              {m.role === 'user' ? 'User: ' : 'AI: '}
-              {m.content}
-            </div>
-          )}
-        </For>
-        <button
-          data-testid="do-append"
-          onClick={() =>
-            append(
-              { role: 'user', content: 'hi' },
-              {
-                data: { 'test-data-key': 'test-data-value' },
-                body: { 'request-body-key': 'request-body-value' },
-              },
-            )
-          }
-        />
-      </div>
- ); - }; - - beforeEach(async () => { - await render(() => ); - }); - - afterEach(() => { - bodyOptions = undefined; - vi.restoreAllMocks(); - }); - - it('should use prepared request body', () => - withTestServer( - { - url: '/api/chat', - type: 'stream-values', - content: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], - }, - async ({ call }) => { - fireEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(bodyOptions).toStrictEqual({ - id: expect.any(String), - messages: [ - { - role: 'user', - content: 'hi', - id: expect.any(String), - experimental_attachments: undefined, - createdAt: expect.any(Date), - }, - ], - requestData: { 'test-data-key': 'test-data-value' }, - requestBody: { 'request-body-key': 'request-body-value' }, - }); - - expect(await call(0).getRequestBodyJson()).toBe('test-request-body'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - }, - )); -}); - -describe('file attachments with data url', () => { - const TestComponent = () => { - const { messages, handleSubmit, handleInputChange, isLoading, input } = - useChat(); - - const [attachments, setAttachments] = createSignal(); - let fileInputRef: HTMLInputElement | undefined; - - return ( -
-      <div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              {m.role === 'user' ? 'User: ' : 'AI: '}
-              {m.content}
-              <For each={m.experimental_attachments ?? []}>
-                {attachment =>
-                  attachment.contentType?.startsWith('text/') ? (
-                    <div data-testid={`attachment-${idx()}`}>
-                      {atob(attachment.url.split(',')[1])}
-                    </div>
-                  ) : null
-                }
-              </For>
-            </div>
-          )}
-        </For>
-
-        <form
-          onSubmit={e => {
-            handleSubmit(e, {
-              experimental_attachments: attachments(),
-            });
-            setAttachments(undefined);
-            if (fileInputRef) fileInputRef.value = '';
-          }}
-          data-testid="chat-form"
-        >
-          <input
-            type="file"
-            onChange={e => setAttachments(e.currentTarget.files || undefined)}
-            multiple
-            ref={fileInputRef}
-            data-testid="file-input"
-          />
-          <input
-            value={input()}
-            onInput={handleInputChange}
-            disabled={isLoading()}
-            data-testid="message-input"
-          />
-          <button type="submit" data-testid="submit-button">
-            Send
-          </button>
-        </form>
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it( - 'should handle text file attachment and submission', - withTestServer( - { - url: '/api/chat', - type: 'stream-values', - content: ['0:"Response to message with text attachment"\n'], - }, - async ({ call }) => { - const file = new File(['test file content'], 'test.txt', { - type: 'text/plain', - }); - - const fileInput = screen.getByTestId('file-input'); - await userEvent.upload(fileInput, file); - - const messageInput = screen.getByTestId('message-input'); - await userEvent.type(messageInput, 'Message with text attachment'); - - const submitButton = screen.getByTestId('submit-button'); - await userEvent.click(submitButton); - - await waitFor(() => { - expect(screen.getByTestId('message-0')).toHaveTextContent( - 'User: Message with text attachment', - ); - }); - - await waitFor(() => { - expect(screen.getByTestId('attachment-0')).toHaveTextContent( - 'test file content', - ); - }); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Response to message with text attachment', - ); - }); - - expect(await call(0).getRequestBodyJson()).toStrictEqual({ - id: expect.any(String), - messages: [ - { - role: 'user', - content: 'Message with text attachment', - experimental_attachments: [ - { - name: 'test.txt', - contentType: 'text/plain', - url: 'data:text/plain;base64,dGVzdCBmaWxlIGNvbnRlbnQ=', - }, - ], - parts: [{ text: 'Message with text attachment', type: 'text' }], - }, - ], - }); - }, - ), - ); -}); - -describe('data protocol stream', () => { - let onFinishCalls: Array<{ - message: Message; - options: { - finishReason: string; - usage: { - completionTokens: number; - promptTokens: number; - totalTokens: number; - }; - }; - }> = []; - - const TestComponent = () => { - const [id, setId] = createSignal('first-id'); - const { messages, append, error, data, status, setData } = useChat(() => ({ - id: id(), - onFinish: (message, options) => { - onFinishCalls.push({ message, options }); - }, - })); - - return ( -
-      <div>
-        <div data-testid="status">{status()}</div>
-        <div data-testid="error">{error()?.toString()}</div>
-        <div data-testid="data">
-          {data() != null ? JSON.stringify(data()) : ''}
-        </div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              {m.role === 'user' ? 'User: ' : 'AI: '}
-              {m.content}
-            </div>
-          )}
-        </For>
-        <button
-          data-testid="do-append"
-          onClick={() => append({ role: 'user', content: 'hi' })}
-        />
-        <button data-testid="do-change-id" onClick={() => setId('second-id')} />
-        <button
-          data-testid="do-set-data"
-          onClick={() => setData([{ t1: 'set' }])}
-        />
-        <button data-testid="do-clear-data" onClick={() => setData(undefined)} />
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - onFinishCalls = []; - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - onFinishCalls = []; - }); - - it( - 'should show streamed response', - withTestServer( - { - type: 'stream-values', - url: '/api/chat', - content: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], - }, - async () => { - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - }, - ), - ); - - it( - 'should show streamed response with data', - withTestServer( - { - type: 'stream-values', - url: '/api/chat', - content: ['2:[{"t1":"v1"}]\n', '0:"Hello"\n'], - }, - async () => { - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('data'); - expect(screen.getByTestId('data')).toHaveTextContent('[{"t1":"v1"}]'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent('AI: Hello'); - }, - ), - ); - - describe('setData', () => { - it('should set data', async () => { - await userEvent.click(screen.getByTestId('do-set-data')); - - await screen.findByTestId('data'); - expect(screen.getByTestId('data')).toHaveTextContent('[{"t1":"set"}]'); - }); - - it( - 'should clear data', - withTestServer( - { - type: 'stream-values', - url: '/api/chat', - content: ['2:[{"t1":"v1"}]\n', '0:"Hello"\n'], - }, - async () => { - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('data'); - expect(screen.getByTestId('data')).toHaveTextContent('[{"t1":"v1"}]'); - - await userEvent.click(screen.getByTestId('do-clear-data')); - - await screen.findByTestId('data'); - expect(screen.getByTestId('data')).toHaveTextContent(''); - }, - ), - ); - }); - - it( - 'should show error response', - withTestServer( - { type: 'error', url: '/api/chat', status: 404, content: 'Not found' }, - async () => { - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('error'); - expect(screen.getByTestId('error')).toHaveTextContent( - 'Error: Not found', - ); - }, - ), - ); - - describe('status', () => { - it( - 'should show status', - withTestServer( - { url: '/api/chat', type: 'controlled-stream' }, - async ({ streamController }) => { - await userEvent.click(screen.getByTestId('do-append')); - - await waitFor(() => { - expect(screen.getByTestId('status')).toHaveTextContent('submitted'); - }); - - streamController.enqueue('0:"Hello"\n'); - - await waitFor(() => { - expect(screen.getByTestId('status')).toHaveTextContent('streaming'); - }); - - streamController.close(); - - await waitFor(() => { - expect(screen.getByTestId('status')).toHaveTextContent('ready'); - }); - }, - ), - ); - - it( - 'should set status to error when there is a server error', - withTestServer( - { type: 'error', url: '/api/chat', status: 404, content: 'Not found' }, - async () => { - await userEvent.click(screen.getByTestId('do-append')); - - await waitFor(() => { - expect(screen.getByTestId('status')).toHaveTextContent('error'); - }); - }, - ), - ); - }); - - it( - 'should invoke onFinish when the stream finishes', - withTestServer( - { - url: '/api/chat', - type: 'stream-values', - content: [ - formatDataStreamPart('text', 'Hello'), - formatDataStreamPart('text', ','), - formatDataStreamPart('text', ' world'), - 
formatDataStreamPart('text', '.'), - formatDataStreamPart('finish_message', { - finishReason: 'stop', - usage: { completionTokens: 1, promptTokens: 3 }, - }), - ], - }, - async () => { - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-1'); - - expect(onFinishCalls).toStrictEqual([ - { - message: { - id: expect.any(String), - createdAt: expect.any(Date), - role: 'assistant', - content: 'Hello, world.', - parts: [{ text: 'Hello, world.', type: 'text' }], - }, - options: { - finishReason: 'stop', - usage: { - completionTokens: 1, - promptTokens: 3, - totalTokens: 4, - }, - }, - }, - ]); - }, - ), - ); - - describe('id', () => { - it( - 'should clear out messages when the id changes', - withTestServer( - { - url: '/api/chat', - type: 'stream-values', - content: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], - }, - async () => { - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - await userEvent.click(screen.getByTestId('do-change-id')); - - expect(screen.queryByTestId('message-0')).not.toBeInTheDocument(); - }, - ), - ); - }); -}); - -describe('text stream', () => { - let onFinishCalls: Array<{ - message: Message; - options: { - finishReason: string; - usage: { - completionTokens: number; - promptTokens: number; - totalTokens: number; - }; - }; - }> = []; - - const TestComponent = () => { - const { messages, append } = useChat(() => ({ - streamProtocol: 'text', - onFinish: (message, options) => { - onFinishCalls.push({ message, options }); - }, - })); - - return ( -
-      <div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}-text-stream`}>
-              {m.role === 'user' ? 'User: ' : 'AI: '}
-              {m.content}
-            </div>
-          )}
-        </For>
-        <button
-          data-testid="do-append-text-stream"
-          onClick={() => append({ role: 'user', content: 'hi' })}
-        />
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - onFinishCalls = []; - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - onFinishCalls = []; - }); - - it( - 'should show streamed response', - withTestServer( - { - url: '/api/chat', - type: 'stream-values', - content: ['Hello', ',', ' world', '.'], - }, - async () => { - await userEvent.click(screen.getByTestId('do-append-text-stream')); - - await screen.findByTestId('message-0-text-stream'); - expect(screen.getByTestId('message-0-text-stream')).toHaveTextContent( - 'User: hi', - ); - - await screen.findByTestId('message-1-text-stream'); - expect(screen.getByTestId('message-1-text-stream')).toHaveTextContent( - 'AI: Hello, world.', - ); - }, - ), - ); - - it( - 'should invoke onFinish when the stream finishes', - withTestServer( - { - url: '/api/chat', - type: 'stream-values', - content: ['Hello', ',', ' world', '.'], - }, - async () => { - await userEvent.click(screen.getByTestId('do-append-text-stream')); - - await screen.findByTestId('message-1-text-stream'); - - expect(onFinishCalls).toStrictEqual([ - { - message: { - id: expect.any(String), - createdAt: expect.any(Date), - role: 'assistant', - content: 'Hello, world.', - parts: [{ text: 'Hello, world.', type: 'text' }], - }, - options: { - finishReason: 'unknown', - usage: { - completionTokens: NaN, - promptTokens: NaN, - totalTokens: NaN, - }, - }, - }, - ]); - }, - ), - ); -}); - -describe('onToolCall', () => { - let resolve: () => void; - let toolCallPromise: Promise; - - const TestComponent = () => { - const { messages, append } = useChat(() => ({ - async onToolCall({ toolCall }) { - await toolCallPromise; - return `test-tool-response: ${toolCall.toolName} ${ - toolCall.toolCallId - } ${JSON.stringify(toolCall.args)}`; - }, - })); - - return ( -
-      <div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              <For each={m.toolInvocations ?? []}>
-                {(toolInvocation, toolIdx) => (
-                  <div>{JSON.stringify(toolInvocation)}</div>
-                )}
-              </For>
-            </div>
-          )}
-        </For>
-        <button
-          data-testid="do-append"
-          onClick={() => append({ role: 'user', content: 'hi' })}
-        />
-      </div>
- ); - }; - - beforeEach(() => { - toolCallPromise = new Promise(resolveArg => { - resolve = resolveArg; - }); - - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it("should invoke onToolCall when a tool call is received from the server's response", async () => { - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: [ - formatDataStreamPart('tool_call', { - toolCallId: 'tool-call-0', - toolName: 'test-tool', - args: { testArg: 'test-value' }, - }), - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - `{"state":"call","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"test-value"}}`, - ); - - resolve(); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - `{"state":"result","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"test-value"},"result":"test-tool-response: test-tool tool-call-0 {\\"testArg\\":\\"test-value\\"}"}`, - ); - }); - }); -}); - -describe('tool invocations', () => { - const TestComponent = () => { - const { messages, append, addToolResult } = useChat(); - return ( -
-      <div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              <For each={m.toolInvocations ?? []}>
-                {(toolInvocation, toolIdx) => (
-                  <>
-                    <div>{JSON.stringify(toolInvocation)}</div>
-                    {toolInvocation.state === 'call' && (
-                      <button
-                        data-testid={`add-result-${toolIdx()}`}
-                        onClick={() =>
-                          addToolResult({
-                            toolCallId: toolInvocation.toolCallId,
-                            result: 'test-result',
-                          })
-                        }
-                      />
-                    )}
-                  </>
-                )}
-              </For>
-            </div>
-          )}
-        </For>
-        <button
-          data-testid="do-append"
-          onClick={() => append({ role: 'user', content: 'hi' })}
-        />
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it( - 'should display partial tool call, tool call, and tool result', - withTestServer( - { url: '/api/chat', type: 'controlled-stream' }, - async ({ streamController }) => { - await userEvent.click(screen.getByTestId('do-append')); - - streamController.enqueue( - formatDataStreamPart('tool_call_streaming_start', { - toolCallId: 'tool-call-0', - toolName: 'test-tool', - }), - ); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - '{"state":"partial-call","step":0,"toolCallId":"tool-call-0","toolName":"test-tool"}', - ); - }); - - streamController.enqueue( - formatDataStreamPart('tool_call_delta', { - toolCallId: 'tool-call-0', - argsTextDelta: '{"testArg":"t', - }), - ); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - '{"state":"partial-call","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"t"}}', - ); - }); - - streamController.enqueue( - formatDataStreamPart('tool_call_delta', { - toolCallId: 'tool-call-0', - argsTextDelta: 'est-value"}}', - }), - ); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - '{"state":"partial-call","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"test-value"}}', - ); - }); - - streamController.enqueue( - formatDataStreamPart('tool_call', { - toolCallId: 'tool-call-0', - toolName: 'test-tool', - args: { testArg: 'test-value' }, - }), - ); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - '{"state":"call","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"test-value"}}', - ); - }); - - streamController.enqueue( - formatDataStreamPart('tool_result', { - toolCallId: 'tool-call-0', - result: 'test-result', - }), - ); - streamController.close(); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - '{"state":"result","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"test-value"},"result":"test-result"}', - ); - }); - }, - ), - ); - - it( - 'should display partial tool call and tool result (when there is no tool call streaming)', - withTestServer( - { url: '/api/chat', type: 'controlled-stream' }, - async ({ streamController }) => { - await userEvent.click(screen.getByTestId('do-append')); - - streamController.enqueue( - formatDataStreamPart('tool_call', { - toolCallId: 'tool-call-0', - toolName: 'test-tool', - args: { testArg: 'test-value' }, - }), - ); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - '{"state":"call","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"test-value"}}', - ); - }); - - streamController.enqueue( - formatDataStreamPart('tool_result', { - toolCallId: 'tool-call-0', - result: 'test-result', - }), - ); - streamController.close(); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - '{"state":"result","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"test-value"},"result":"test-result"}', - ); - }); - }, - ), - ); - - it( - 'should update tool call to result when addToolResult is called', - withTestServer( - [ - { - url: '/api/chat', - type: 'stream-values', - content: [ - formatDataStreamPart('tool_call', { - toolCallId: 'tool-call-0', - toolName: 'test-tool', - args: { testArg: 'test-value' 
}, - }), - ], - }, - ], - async () => { - await userEvent.click(screen.getByTestId('do-append')); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - '{"state":"call","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"test-value"}}', - ); - }); - - await userEvent.click(screen.getByTestId('add-result-0')); - - await waitFor(() => { - expect(screen.getByTestId('message-1')).toHaveTextContent( - '{"state":"result","step":0,"toolCallId":"tool-call-0","toolName":"test-tool","args":{"testArg":"test-value"},"result":"test-result"}', - ); - }); - }, - ), - ); -}); - -describe('maxSteps', () => { - describe('two steps with automatic tool call', () => { - const TestComponent = () => { - const { messages, append } = useChat(() => ({ - async onToolCall({ toolCall }) { - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: [formatDataStreamPart('text', 'final result')], - }); - - return `test-tool-response: ${toolCall.toolName} ${ - toolCall.toolCallId - } ${JSON.stringify(toolCall.args)}`; - }, - maxSteps: 5, - })); - - return ( -
-      <div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>{m.content}</div>
-          )}
-        </For>
-        <button
-          data-testid="do-append"
-          onClick={() => append({ role: 'user', content: 'hi' })}
-        />
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it('should automatically call api when tool call gets executed via onToolCall', async () => { - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: [ - formatDataStreamPart('tool_call', { - toolCallId: 'tool-call-0', - toolName: 'test-tool', - args: { testArg: 'test-value' }, - }), - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent('final result'); - }); - }); - - describe('two steps with error response', () => { - const TestComponent = () => { - const { messages, append, error } = useChat(() => ({ - async onToolCall({ toolCall }) { - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: [formatDataStreamPart('error', 'some failure')], - maxCalls: 1, - }); - - return `test-tool-response: ${toolCall.toolName} ${ - toolCall.toolCallId - } ${JSON.stringify(toolCall.args)}`; - }, - maxSteps: 5, - })); - - return ( -
-      <div>
-        <div data-testid="error">{error()?.toString()}</div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              <For each={m.toolInvocations ?? []}>
-                {(toolInvocation, toolIdx) =>
-                  'result' in toolInvocation ? (
-                    <div data-testid={`tool-invocation-${toolIdx()}`}>
-                      {toolInvocation.result}
-                    </div>
-                  ) : null
-                }
-              </For>
-            </div>
-          )}
-        </For>
-        <button
-          data-testid="do-append"
-          onClick={() => append({ role: 'user', content: 'hi' })}
-        />
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it('should automatically call api when tool call gets executed via onToolCall', async () => { - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: [ - formatDataStreamPart('tool_call', { - toolCallId: 'tool-call-0', - toolName: 'test-tool', - args: { testArg: 'test-value' }, - }), - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('error'); - expect(screen.getByTestId('error')).toHaveTextContent( - 'Error: Too many calls', - ); - }); - }); -}); - -describe('form actions', () => { - const TestComponent = () => { - const { messages, handleSubmit, handleInputChange, status, input } = - useChat(); - - return ( -
-      <div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              {m.role === 'user' ? 'User: ' : 'AI: '}
-              {m.content}
-            </div>
-          )}
-        </For>
-        <form onSubmit={handleSubmit}>
-          <input
-            value={input()}
-            placeholder="Send message..."
-            onChange={handleInputChange}
-            data-testid="do-input"
-            disabled={status() !== 'ready'}
-          />
-        </form>
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it('should show streamed response using handleSubmit', async () => { - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: ['Hello', ',', ' world', '.'].map(token => - formatDataStreamPart('text', token), - ), - }); - - const input = screen.getByTestId('do-input'); - await userEvent.type(input, 'hi'); - await userEvent.keyboard('{Enter}'); - expect(input).toHaveValue(''); - - // Wait for the user message to appear - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - // Wait for the AI response to complete - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: ['How', ' can', ' I', ' help', ' you', '?'].map(token => - formatDataStreamPart('text', token), - ), - }); - - await userEvent.click(input); - await userEvent.keyboard('{Enter}'); - - expect(screen.queryByTestId('message-2')).not.toBeInTheDocument(); - }); -}); - -describe('form actions (with options)', () => { - const TestComponent = () => { - const { messages, handleSubmit, handleInputChange, status, input } = - useChat(); - - return ( -
-      <div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              {m.role === 'user' ? 'User: ' : 'AI: '}
-              {m.content}
-            </div>
-          )}
-        </For>
-        <form
-          onSubmit={event => {
-            handleSubmit(event, {
-              allowEmptySubmit: true,
-            });
-          }}
-        >
-          <input
-            value={input()}
-            placeholder="Send message..."
-            onChange={handleInputChange}
-            data-testid="do-input"
-            disabled={status() !== 'ready'}
-          />
-        </form>
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it('allowEmptySubmit', async () => { - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: ['Hello', ',', ' world', '.'].map(token => - formatDataStreamPart('text', token), - ), - }); - - const input = screen.getByTestId('do-input'); - await userEvent.type(input, 'hi'); - await userEvent.keyboard('{Enter}'); - expect(input).toHaveValue(''); - - // Wait for the user message to appear - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - // Wait for the AI response to complete - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: ['How', ' can', ' I', ' help', ' you', '?'].map(token => - formatDataStreamPart('text', token), - ), - }); - - await userEvent.click(input); - await userEvent.keyboard('{Enter}'); - - await screen.findByTestId('message-2'); - expect(screen.getByTestId('message-2')).toHaveTextContent('User:'); - - await screen.findByTestId('message-3'); - expect(screen.getByTestId('message-3')).toHaveTextContent( - 'AI: How can I help you?', - ); - - mockFetchDataStream({ - url: 'https://example.com/api/chat', - chunks: ['The', ' sky', ' is', ' blue.'].map(token => - formatDataStreamPart('text', token), - ), - }); - - await userEvent.type(input, 'what color is the sky?'); - await userEvent.keyboard('{Enter}'); - - await screen.findByTestId('message-4'); - expect(screen.getByTestId('message-4')).toHaveTextContent( - 'User: what color is the sky?', - ); - - await screen.findByTestId('message-5'); - expect(screen.getByTestId('message-5')).toHaveTextContent( - 'AI: The sky is blue.', - ); - }); -}); - -describe('reload', () => { - const TestComponent = () => { - const { messages, append, reload } = useChat(); - - return ( -
-      <div>
-        <For each={messages()}>
-          {(m, idx) => (
-            <div data-testid={`message-${idx()}`}>
-              {m.role === 'user' ? 'User: ' : 'AI: '}
-              {m.content}
-            </div>
-          )}
-        </For>
-        <button
-          data-testid="do-append"
-          onClick={() => append({ role: 'user', content: 'hi' })}
-        />
-        <button
-          data-testid="do-reload"
-          onClick={() =>
-            reload({
-              data: { 'test-data-key': 'test-data-value' },
-              headers: { 'header-key': 'header-value' },
-              body: { 'request-body-key': 'request-body-value' },
-            })
-          }
-        />
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it( - 'should show streamed response', - withTestServer( - [ - { - url: '/api/chat', - type: 'stream-values', - content: ['0:"first response"\n'], - }, - { - url: '/api/chat', - type: 'stream-values', - content: ['0:"second response"\n'], - }, - ], - async ({ call }) => { - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - await screen.findByTestId('message-1'); - - // setup done, click reload: - await userEvent.click(screen.getByTestId('do-reload')); - - expect(await call(1).getRequestBodyJson()).toStrictEqual({ - id: expect.any(String), - messages: [ - { - content: 'hi', - role: 'user', - parts: [{ text: 'hi', type: 'text' }], - }, - ], - data: { 'test-data-key': 'test-data-value' }, - 'request-body-key': 'request-body-value', - }); - - expect(call(1).getRequestHeaders()).toStrictEqual({ - 'content-type': 'application/json', - 'header-key': 'header-value', - }); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: second response', - ); - }, - ), - ); -}); diff --git a/packages/solid/src/use-completion.ts b/packages/solid/src/use-completion.ts deleted file mode 100644 index 9af729731efc..000000000000 --- a/packages/solid/src/use-completion.ts +++ /dev/null @@ -1,199 +0,0 @@ -import { FetchFunction } from '@ai-sdk/provider-utils'; -import type { - JSONValue, - Message, - RequestOptions, - UseCompletionOptions, -} from '@ai-sdk/ui-utils'; -import { callCompletionApi } from '@ai-sdk/ui-utils'; -import { - Accessor, - JSX, - Setter, - createEffect, - createMemo, - createSignal, - createUniqueId, -} from 'solid-js'; -import { ReactiveLRU } from './utils/reactive-lru'; -import { convertToAccessorOptions } from './utils/convert-to-accessor-options'; - -export type { UseCompletionOptions }; - -export type UseCompletionHelpers = { - /** The current completion result */ - completion: Accessor; - /** The error object of the API request */ - error: Accessor; - /** - * Send a new prompt to the API endpoint and update the completion state. - */ - complete: ( - prompt: string, - options?: RequestOptions, - ) => Promise; - /** - * Abort the current API request but keep the generated tokens. - */ - stop: () => void; - /** - * Update the `completion` state locally. - */ - setCompletion: (completion: string) => void; - /** The current value of the input */ - input: Accessor; - /** Signal Setter to update the input value */ - setInput: Setter; - - /** An input/textarea-ready onChange handler to control the value of the input */ - handleInputChange: JSX.ChangeEventHandlerUnion< - HTMLInputElement | HTMLTextAreaElement, - Event - >; - /** - * Form submission handler to automatically reset input and append a user message - * @example - * ```jsx - *
- *   <form onSubmit={handleSubmit}>
- *     <input onChange={handleInputChange} value={input()} />
- *   </form>
- * ``` - */ - handleSubmit: (event?: { preventDefault?: () => void }) => void; - /** Whether the API request is in progress */ - isLoading: Accessor; - /** Additional data added on the server via StreamData */ - data: Accessor; - - /** -Custom fetch implementation. You can use it as a middleware to intercept requests, -or to provide a custom fetch implementation for e.g. testing. - */ - fetch?: FetchFunction; -}; - -const completionCache = new ReactiveLRU(); - -/** - * @deprecated `@ai-sdk/solid` has been deprecated and will be removed in AI SDK 5. - */ -export function useCompletion( - rawUseCompletionOptions: - | UseCompletionOptions - | Accessor = {}, -): UseCompletionHelpers { - const useCompletionOptions = createMemo(() => - convertToAccessorOptions(rawUseCompletionOptions), - ); - - const api = createMemo( - () => useCompletionOptions().api?.() ?? '/api/completion', - ); - // Generate an unique id for the completion if not provided. - const idKey = createMemo( - () => useCompletionOptions().id?.() ?? `completion-${createUniqueId()}`, - ); - const completionKey = createMemo(() => `${api()}|${idKey()}|completion`); - - const completion = createMemo( - () => - completionCache.get(completionKey()) ?? - useCompletionOptions().initialCompletion?.() ?? - '', - ); - - const mutate = (data: string) => { - completionCache.set(completionKey(), data); - }; - - const [error, setError] = createSignal(undefined); - const [streamData, setStreamData] = createSignal( - undefined, - ); - const [isLoading, setIsLoading] = createSignal(false); - - const [abortController, setAbortController] = - createSignal(null); - - let extraMetadata = { - credentials: useCompletionOptions().credentials?.(), - headers: useCompletionOptions().headers?.(), - body: useCompletionOptions().body?.(), - }; - createEffect(() => { - extraMetadata = { - credentials: useCompletionOptions().credentials?.(), - headers: useCompletionOptions().headers?.(), - body: useCompletionOptions().body?.(), - }; - }); - - const complete: UseCompletionHelpers['complete'] = async ( - prompt: string, - options?: RequestOptions, - ) => { - const existingData = streamData() ?? []; - return callCompletionApi({ - api: api(), - prompt, - credentials: useCompletionOptions().credentials?.(), - headers: { ...extraMetadata.headers, ...options?.headers }, - body: { - ...extraMetadata.body, - ...options?.body, - }, - streamProtocol: useCompletionOptions().streamProtocol?.(), - setCompletion: mutate, - setLoading: setIsLoading, - setError, - setAbortController, - onResponse: useCompletionOptions().onResponse?.(), - onFinish: useCompletionOptions().onFinish?.(), - onError: useCompletionOptions().onError?.(), - onData: data => { - setStreamData([...existingData, ...(data ?? [])]); - }, - fetch: useCompletionOptions().fetch?.(), - }); - }; - - const stop = () => { - if (abortController()) { - abortController()!.abort(); - } - }; - - const setCompletion = (completion: string) => { - mutate(completion); - }; - - const [input, setInput] = createSignal( - useCompletionOptions().initialInput?.() ?? '', - ); - - const handleInputChange: UseCompletionHelpers['handleInputChange'] = - event => { - setInput(event.target.value); - }; - - const handleSubmit: UseCompletionHelpers['handleSubmit'] = event => { - event?.preventDefault?.(); - - const inputValue = input(); - return inputValue ? 
complete(inputValue) : undefined; - }; - - return { - completion, - complete, - error, - stop, - setCompletion, - input, - setInput, - handleInputChange, - handleSubmit, - isLoading, - data: streamData, - }; -} diff --git a/packages/solid/src/use-completion.ui.test.tsx b/packages/solid/src/use-completion.ui.test.tsx deleted file mode 100644 index 9404a4d45326..000000000000 --- a/packages/solid/src/use-completion.ui.test.tsx +++ /dev/null @@ -1,132 +0,0 @@ -/** @jsxImportSource solid-js */ -import { - mockFetchDataStream, - mockFetchDataStreamWithGenerator, - mockFetchError, -} from '@ai-sdk/ui-utils/test'; -import { cleanup, findByText, render, screen } from '@solidjs/testing-library'; -import '@testing-library/jest-dom'; -import userEvent from '@testing-library/user-event'; -import { useCompletion } from './use-completion'; - -describe('stream data stream', () => { - const TestComponent = () => { - const { completion, complete, error, isLoading } = useCompletion(); - - return ( -
-      <div>
-        <div data-testid="loading">{isLoading().toString()}</div>
-        <div data-testid="error">{error()?.toString()}</div>
-        <button data-testid="button" onClick={() => complete('hi')} />
-        <div data-testid="completion">{completion()}</div>
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it('should render complex text stream', async () => { - mockFetchDataStream({ - url: 'https://example.com/api/completion', - chunks: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], - }); - - await userEvent.click(screen.getByTestId('button')); - - await screen.findByTestId('completion'); - expect(screen.getByTestId('completion')).toHaveTextContent('Hello, world.'); - }); - - describe('loading state', () => { - it('should show loading state', async () => { - let finishGeneration: ((value?: unknown) => void) | undefined; - const finishGenerationPromise = new Promise(resolve => { - finishGeneration = resolve; - }); - - mockFetchDataStreamWithGenerator({ - url: 'https://example.com/api/chat', - chunkGenerator: (async function* generate() { - const encoder = new TextEncoder(); - yield encoder.encode('0:"Hello"\n'); - await finishGenerationPromise; - })(), - }); - - await userEvent.click(screen.getByTestId('button')); - - await screen.findByTestId('loading'); - expect(screen.getByTestId('loading')).toHaveTextContent('true'); - - finishGeneration?.(); - - await findByText(await screen.findByTestId('loading'), 'false'); - expect(screen.getByTestId('loading')).toHaveTextContent('false'); - }); - - it('should reset loading state on error', async () => { - mockFetchError({ statusCode: 404, errorMessage: 'Not found' }); - - await userEvent.click(screen.getByTestId('button')); - - await screen.findByTestId('loading'); - expect(screen.getByTestId('loading')).toHaveTextContent('false'); - }); - }); -}); - -describe('text stream', () => { - const TestComponent = () => { - const { completion, complete } = useCompletion({ streamProtocol: 'text' }); - - return ( -
-      <div>
-        <div data-testid="completion-text-stream">{completion()}</div>
-        <button
-          data-testid="button-text-stream"
-          onClick={() => complete('hi')}
-        />
-      </div>
- ); - }; - - beforeEach(() => { - render(() => ); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it('should render stream', async () => { - mockFetchDataStream({ - url: 'https://example.com/api/completion', - chunks: ['Hello', ',', ' world', '.'], - }); - - await userEvent.click(screen.getByTestId('button-text-stream')); - - await screen.findByTestId('completion-text-stream'); - expect(screen.getByTestId('completion-text-stream')).toHaveTextContent( - 'Hello, world.', - ); - }); -}); diff --git a/packages/solid/src/use-object.ts b/packages/solid/src/use-object.ts deleted file mode 100644 index 7c94c0f26c55..000000000000 --- a/packages/solid/src/use-object.ts +++ /dev/null @@ -1,250 +0,0 @@ -import { - FetchFunction, - isAbortError, - safeValidateTypes, -} from '@ai-sdk/provider-utils'; -import { - asSchema, - DeepPartial, - isDeepEqualData, - parsePartialJson, - Schema, -} from '@ai-sdk/ui-utils'; -import { Accessor, createMemo, createSignal, createUniqueId } from 'solid-js'; -import z from 'zod'; -import { convertToAccessorOptions } from './utils/convert-to-accessor-options'; -import { ReactiveLRU } from './utils/reactive-lru'; - -// use function to allow for mocking in tests: -const getOriginalFetch = () => fetch; - -export type Experimental_UseObjectOptions = { - /** - * The API endpoint. It should stream JSON that matches the schema as chunked text. - */ - api: string; - - /** - * A Zod schema that defines the shape of the complete object. - */ - schema: z.Schema | Schema; - - /** - * An unique identifier. If not provided, a random one will be - * generated. When provided, the `useObject` hook with the same `id` will - * have shared states across components. - */ - id?: string; - - /** - * An optional value for the initial object. - */ - initialValue?: DeepPartial; - - /** - Custom fetch implementation. You can use it as a middleware to intercept requests, - or to provide a custom fetch implementation for e.g. testing. - */ - fetch?: FetchFunction; - - /** - Callback that is called when the stream has finished. - */ - onFinish?: (event: { - /** - The generated object (typed according to the schema). - Can be undefined if the final object does not match the schema. - */ - object: RESULT | undefined; - - /** - Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema. - */ - error: Error | undefined; - }) => Promise | void; - - /** - * Callback function to be called when an error is encountered. - */ - onError?: (error: Error) => void; - - /** - * Additional HTTP headers to be included in the request. - */ - headers?: Record | Headers; - - /** - * The credentials mode to be used for the fetch request. - * Possible values are: 'omit', 'same-origin', 'include'. - * Defaults to 'same-origin'. - */ - credentials?: RequestCredentials; -}; - -export type Experimental_UseObjectHelpers = { - /** - * Calls the API with the provided input as JSON body. - */ - submit: (input: INPUT) => void; - - /** - * The current value for the generated object. Updated as the API streams JSON chunks. - */ - object: Accessor | undefined>; - - /** - * The error object of the API request if any. - */ - error: Accessor; - - /** - * Flag that indicates whether an API request is in progress. - */ - isLoading: Accessor; - - /** - * Abort the current request immediately, keep the current partial object if any. 
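- *
- * A minimal usage sketch (illustrative only; the endpoint and schema below
- * mirror the test fixtures in this package rather than a required API):
- * ```tsx
- * const { object, submit, stop, isLoading } = experimental_useObject(() => ({
- *   api: '/api/use-object',
- *   schema: z.object({ content: z.string() }),
- * }));
- * submit('test-input'); // POSTs the input as JSON and streams partial objects
- * stop();               // aborts mid-stream; the partial object() is kept
- * ```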
- */ - stop: () => void; -}; - -const objectCache = new ReactiveLRU>(); - -/** - * @deprecated `@ai-sdk/solid` has been deprecated and will be removed in AI SDK 5. - */ -function useObject( - rawUseObjectOptions: - | Experimental_UseObjectOptions - | Accessor>, -): Experimental_UseObjectHelpers { - const useObjectOptions = createMemo(() => - convertToAccessorOptions(rawUseObjectOptions), - ); - - // Generate an unique id for the completion if not provided. - const idKey = createMemo( - () => useObjectOptions().id?.() ?? `object-${createUniqueId()}`, - ); - - const data = createMemo( - () => - (objectCache.get(idKey()) ?? useObjectOptions().initialValue?.()) as - | DeepPartial - | undefined, - ); - - const mutate = (value: DeepPartial | undefined) => { - objectCache.set(idKey(), value); - }; - - const [error, setError] = createSignal(); - const [isLoading, setIsLoading] = createSignal(false); - - // Abort controller to cancel the current API call. - let abortControllerRef: AbortController | null = null; - - const stop = () => { - try { - abortControllerRef?.abort(); - } catch (ignored) { - } finally { - setIsLoading(false); - abortControllerRef = null; - } - }; - - const submit = async (input: INPUT) => { - try { - mutate(undefined); // reset the data - setIsLoading(true); - setError(undefined); - - const abortController = new AbortController(); - abortControllerRef = abortController; - - const actualFetch = fetch ?? getOriginalFetch(); - const response = await actualFetch(useObjectOptions().api(), { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - ...useObjectOptions().headers?.(), - }, - credentials: useObjectOptions().credentials?.(), - signal: abortController.signal, - body: JSON.stringify(input), - }); - - if (!response.ok) { - throw new Error( - (await response.text()) ?? 'Failed to fetch the response.', - ); - } - - if (response.body == null) { - throw new Error('The response body is empty.'); - } - - let accumulatedText = ''; - let latestObject: DeepPartial | undefined = undefined; - - await response.body.pipeThrough(new TextDecoderStream()).pipeTo( - new WritableStream({ - write(chunk) { - accumulatedText += chunk; - - const { value } = parsePartialJson(accumulatedText); - const currentObject = value as DeepPartial; - - if (!isDeepEqualData(latestObject, currentObject)) { - latestObject = currentObject; - - mutate(currentObject); - } - }, - - close() { - setIsLoading(false); - abortControllerRef = null; - - const onFinish = useObjectOptions().onFinish?.(); - if (onFinish != null) { - const validationResult = safeValidateTypes({ - value: latestObject, - schema: asSchema(useObjectOptions().schema()), - }); - - onFinish( - validationResult.success - ? { object: validationResult.value, error: undefined } - : { object: undefined, error: validationResult.error }, - ); - } - }, - }), - ); - } catch (error) { - if (isAbortError(error)) { - return; - } - - const onError = useObjectOptions().onError?.(); - if (onError && error instanceof Error) { - onError(error); - } - - setIsLoading(false); - setError(error instanceof Error ? 
error : new Error(String(error))); - } - }; - - return { - submit, - object: data, - error, - isLoading, - stop, - }; -} - -export const experimental_useObject = useObject; diff --git a/packages/solid/src/use-object.ui.test.tsx b/packages/solid/src/use-object.ui.test.tsx deleted file mode 100644 index fe2d08f7679e..000000000000 --- a/packages/solid/src/use-object.ui.test.tsx +++ /dev/null @@ -1,275 +0,0 @@ -import { - describeWithTestServer, - withTestServer, -} from '@ai-sdk/provider-utils/test'; -import { cleanup, render, screen, waitFor } from '@solidjs/testing-library'; -import '@testing-library/jest-dom/vitest'; -import userEvent from '@testing-library/user-event'; -import { z } from 'zod'; -import { experimental_useObject } from './use-object'; - -describe('text stream', () => { - let onErrorResult: Error | undefined; - let onFinishCalls: Array<{ - object: { content: string } | undefined; - error: Error | undefined; - }> = []; - - const TestComponent = (props: { - headers?: Record | Headers; - credentials?: RequestCredentials; - }) => { - const { object, error, submit, isLoading, stop } = experimental_useObject( - () => ({ - api: '/api/use-object', - schema: z.object({ content: z.string() }), - onError(error) { - onErrorResult = error; - }, - onFinish(event) { - onFinishCalls.push(event); - }, - headers: props.headers, - credentials: props.credentials, - }), - ); - - return ( -
-      <div>
-        <div data-testid="loading">{isLoading().toString()}</div>
-        <div data-testid="object">{JSON.stringify(object())}</div>
-        <div data-testid="error">{error()?.toString()}</div>
-        <button
-          data-testid="submit-button"
-          onClick={() => submit('test-input')}
-        />
-        <button data-testid="stop-button" onClick={stop} />
-      </div>
- ); - }; - - beforeEach(() => { - onErrorResult = undefined; - onFinishCalls = []; - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - onErrorResult = undefined; - onFinishCalls = []; - }); - - describe('basic component', () => { - beforeEach(() => { - render(() => ); - }); - describeWithTestServer( - "when the API returns 'Hello, world!'", - { - url: '/api/use-object', - type: 'stream-values', - content: ['{ ', '"content": "Hello, ', 'world', '!"'], - }, - ({ call }) => { - beforeEach(async () => { - await userEvent.click(screen.getByTestId('submit-button')); - }); - - it('should render stream', async () => { - await screen.findByTestId('object'); - expect(screen.getByTestId('object')).toHaveTextContent( - JSON.stringify({ content: 'Hello, world!' }), - ); - }); - - it("should send 'test' to the API", async () => { - expect(await call(0).getRequestBodyJson()).toBe('test-input'); - }); - - it('should not have an error', async () => { - await screen.findByTestId('error'); - expect(screen.getByTestId('error')).toBeEmptyDOMElement(); - expect(onErrorResult).toBeUndefined(); - }); - }, - ); - - describe('isLoading', async () => { - it( - 'should be true while loading', - withTestServer( - { url: '/api/use-object', type: 'controlled-stream' }, - async ({ streamController }) => { - streamController.enqueue('{"content": '); - - await userEvent.click(screen.getByTestId('submit-button')); - - // wait for element "loading" to have text content "true": - await waitFor(() => { - expect(screen.getByTestId('loading')).toHaveTextContent('true'); - }); - - streamController.enqueue('"Hello, world!"}'); - streamController.close(); - - // wait for element "loading" to have text content "false": - await waitFor(() => { - expect(screen.getByTestId('loading')).toHaveTextContent('false'); - }); - }, - ), - ); - }); - - describe('stop', async () => { - it( - 'should abort the stream and not consume any more data', - withTestServer( - { url: '/api/use-object', type: 'controlled-stream' }, - async ({ streamController }) => { - streamController.enqueue('{"content": "h'); - - userEvent.click(screen.getByTestId('submit-button')); - - // wait for element "loading" and "object" to have text content: - await waitFor(() => { - expect(screen.getByTestId('loading')).toHaveTextContent('true'); - }); - await waitFor(() => { - expect(screen.getByTestId('object')).toHaveTextContent( - '{"content":"h"}', - ); - }); - - // click stop button: - await userEvent.click(screen.getByTestId('stop-button')); - - // wait for element "loading" to have text content "false": - await waitFor(() => { - expect(screen.getByTestId('loading')).toHaveTextContent('false'); - }); - - // this should not be consumed any more: - streamController.enqueue('ello, world!"}'); - streamController.close(); - - // should only show start of object: - expect(screen.getByTestId('object')).toHaveTextContent( - '{"content":"h"}', - ); - }, - ), - ); - }); - - describe('when the API returns a 404', () => { - it( - 'should render error', - withTestServer( - { - url: '/api/use-object', - type: 'error', - status: 404, - content: 'Not found', - }, - async () => { - await userEvent.click(screen.getByTestId('submit-button')); - - await screen.findByTestId('error'); - expect(screen.getByTestId('error')).toHaveTextContent('Not found'); - expect(onErrorResult).toBeInstanceOf(Error); - expect(screen.getByTestId('loading')).toHaveTextContent('false'); - }, - ), - ); - }); - - describe('onFinish', () => { - it( - 'should be called with an object when the stream 
finishes and the object matches the schema', - withTestServer( - { - url: '/api/use-object', - type: 'stream-values', - content: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], - }, - async () => { - await userEvent.click(screen.getByTestId('submit-button')); - - expect(onFinishCalls).toStrictEqual([ - { object: { content: 'Hello, world!' }, error: undefined }, - ]); - }, - ), - ); - }); - - it( - 'should be called with an error when the stream finishes and the object does not match the schema', - withTestServer( - { - url: '/api/use-object', - type: 'stream-values', - content: ['{ ', '"content-wrong": "Hello, ', 'world', '!"', '}'], - }, - async () => { - await userEvent.click(screen.getByTestId('submit-button')); - - expect(onFinishCalls).toStrictEqual([ - { object: undefined, error: expect.any(Error) }, - ]); - }, - ), - ); - }); - - it( - 'should send custom headers', - withTestServer( - { - url: '/api/use-object', - type: 'stream-values', - content: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], - }, - async ({ call }) => { - render(() => ( - - )); - - await userEvent.click(screen.getByTestId('submit-button')); - - expect(call(0).getRequestHeaders()).toStrictEqual({ - 'content-type': 'application/json', - authorization: 'Bearer TEST_TOKEN', - 'x-custom-header': 'CustomValue', - }); - }, - ), - ); - - it( - 'should send custom credentials', - withTestServer( - { - url: '/api/use-object', - type: 'stream-values', - content: ['{ ', '"content": "Authenticated ', 'content', '!"', '}'], - }, - async ({ call }) => { - render(() => ); - await userEvent.click(screen.getByTestId('submit-button')); - expect(call(0).getRequestCredentials()).toBe('include'); - }, - ), - ); -}); diff --git a/packages/solid/src/utils/convert-to-accessor-options.ts b/packages/solid/src/utils/convert-to-accessor-options.ts deleted file mode 100644 index 5051ade2a6db..000000000000 --- a/packages/solid/src/utils/convert-to-accessor-options.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { Accessor, createMemo } from 'solid-js'; - -/** - * Handle reactive and non-reactive useChatOptions - */ -export function convertToAccessorOptions( - options: T | Accessor, -) { - const resolvedOptions = typeof options === 'function' ? options() : options; - - return Object.entries(resolvedOptions).reduce( - (reactiveOptions, [key, value]) => { - reactiveOptions[key as keyof T] = createMemo(() => value) as any; - return reactiveOptions; - }, - {} as { - [K in keyof T]: Accessor; - }, - ); -} diff --git a/packages/solid/src/utils/reactive-lru.ts b/packages/solid/src/utils/reactive-lru.ts deleted file mode 100644 index 51903f47f323..000000000000 --- a/packages/solid/src/utils/reactive-lru.ts +++ /dev/null @@ -1,150 +0,0 @@ -import { batch } from 'solid-js'; -import { TriggerCache } from '@solid-primitives/trigger'; - -const $KEYS = Symbol('track-keys'); - -/** - * A reactive LRU (Least Recently Used) cache implementation based on Map. - * All reads and writes are reactive signals. 
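- *
- * A small sketch of the eviction behavior (illustrative, assuming a capacity of 2):
- * ```ts
- * const lru = new ReactiveLRU<string, number>(2);
- * lru.set('a', 1).set('b', 2); // access order: a, b
- * lru.get('a');                // 'a' becomes most recently used: b, a
- * lru.set('c', 3);             // over capacity: evicts 'b', the LRU key
- * lru.has('b');                // false
- * ```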
- * @param maxSize maximum number of entries to store before evicting least recently used - * @param initial initial entries of the reactive LRU cache - */ -export class ReactiveLRU extends Map { - #keyTriggers = new TriggerCache(); - #valueTriggers = new TriggerCache(); - #maxSize: number; - #accessList: K[] = []; - - constructor(maxSize = 10, initial?: Iterable | null) { - super(); - this.#maxSize = maxSize; - if (initial) { - for (const [key, value] of initial) { - this.set(key, value); - } - } - } - - #recordAccess(key: K) { - const index = this.#accessList.indexOf(key); - if (index > -1) { - this.#accessList.splice(index, 1); - } - this.#accessList.push(key); - if (this.#accessList.length > this.#maxSize) { - const lru = this.#accessList.shift()!; - this.delete(lru); - } - } - - // reads - has(key: K): boolean { - this.#keyTriggers.track(key); - const exists = super.has(key); - if (exists) { - this.#recordAccess(key); - } - return exists; - } - - get(key: K): V | undefined { - this.#valueTriggers.track(key); - const value = super.get(key); - if (value !== undefined) { - this.#recordAccess(key); - } - return value; - } - - get size(): number { - this.#keyTriggers.track($KEYS); - return super.size; - } - - *keys(): MapIterator { - for (const key of super.keys()) { - this.#keyTriggers.track(key); - yield key; - } - this.#keyTriggers.track($KEYS); - } - - *values(): MapIterator { - for (const [key, v] of super.entries()) { - this.#valueTriggers.track(key); - yield v; - } - this.#keyTriggers.track($KEYS); - } - - *entries(): MapIterator<[K, V]> { - for (const entry of super.entries()) { - this.#valueTriggers.track(entry[0]); - yield entry; - } - this.#keyTriggers.track($KEYS); - } - - // writes - set(key: K, value: V): this { - batch(() => { - if (super.has(key)) { - if (super.get(key)! 
=== value) { - this.#recordAccess(key); - return; - } - } else { - this.#keyTriggers.dirty(key); - this.#keyTriggers.dirty($KEYS); - } - this.#valueTriggers.dirty(key); - super.set(key, value); - this.#recordAccess(key); - }); - return this; - } - - delete(key: K): boolean { - const r = super.delete(key); - if (r) { - batch(() => { - this.#keyTriggers.dirty(key); - this.#keyTriggers.dirty($KEYS); - this.#valueTriggers.dirty(key); - const index = this.#accessList.indexOf(key); - if (index > -1) { - this.#accessList.splice(index, 1); - } - }); - } - return r; - } - - clear(): void { - if (super.size) { - batch(() => { - for (const v of super.keys()) { - this.#keyTriggers.dirty(v); - this.#valueTriggers.dirty(v); - } - super.clear(); - this.#accessList = []; - this.#keyTriggers.dirty($KEYS); - }); - } - } - - // callback - forEach(callbackfn: (value: V, key: K, map: this) => void) { - this.#keyTriggers.track($KEYS); - for (const [key, v] of super.entries()) { - this.#valueTriggers.track(key); - this.#recordAccess(key); - callbackfn(v, key, this); - } - } - - [Symbol.iterator](): MapIterator<[K, V]> { - return this.entries(); - } -} diff --git a/packages/solid/tsconfig.json b/packages/solid/tsconfig.json deleted file mode 100644 index 765e1a6f4f9c..000000000000 --- a/packages/solid/tsconfig.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "extends": "./node_modules/@vercel/ai-tsconfig/react-library.json", - "compilerOptions": { - "jsx": "preserve", - "jsxImportSource": "solid-js", - "lib": ["DOM", "DOM.Iterable", "ESNext"], - "target": "ESNext", - "module": "ESNext", - "stripInternal": true, - "moduleResolution": "Bundler" - }, - "include": ["."], - "exclude": ["*/dist", "dist", "build", "node_modules"] -} diff --git a/packages/solid/tsup.config.ts b/packages/solid/tsup.config.ts deleted file mode 100644 index ea9ff789873c..000000000000 --- a/packages/solid/tsup.config.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { defineConfig } from 'tsup'; - -export default defineConfig([ - { - entry: ['src/index.ts'], - outDir: 'dist', - banner: {}, - format: ['cjs', 'esm'], - external: ['vue'], - dts: true, - sourcemap: true, - }, -]); diff --git a/packages/solid/turbo.json b/packages/solid/turbo.json deleted file mode 100644 index 620b8380e744..000000000000 --- a/packages/solid/turbo.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "extends": [ - "//" - ], - "tasks": { - "build": { - "outputs": [ - "**/dist/**" - ] - } - } -} diff --git a/packages/solid/vitest.config.js b/packages/solid/vitest.config.js deleted file mode 100644 index 30d3b059319b..000000000000 --- a/packages/solid/vitest.config.js +++ /dev/null @@ -1,22 +0,0 @@ -import solidPlugin from 'vite-plugin-solid'; -import { defineConfig } from 'vite'; - -// https://vitejs.dev/config/ -export default defineConfig({ - plugins: [solidPlugin()], - server: { port: 3000 }, - build: { target: 'esnext' }, - test: { - environment: 'jsdom', - globals: true, - include: ['src/**/*.ui.test.ts', 'src/**/*.ui.test.tsx'], - deps: { - registerNodeLoader: true, - inline: [/solid-js/], - }, - transformMode: { web: [/\.[jt]sx?$/] }, - }, - resolve: { - conditions: ['development', 'browser'], - }, -}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 2df8ced9166f..f142f577594a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1004,49 +1004,6 @@ importers: specifier: 4.5.0 version: 4.5.0(vue@3.5.13(typescript@5.6.3)) - examples/solidstart-openai: - dependencies: - '@ai-sdk/openai': - specifier: 1.3.6 - version: link:../../packages/openai - '@ai-sdk/solid': - specifier: 1.2.6 - 
version: link:../../packages/solid - '@ai-sdk/ui-utils': - specifier: 1.2.4 - version: link:../../packages/ui-utils - '@solidjs/meta': - specifier: 0.29.4 - version: 0.29.4(solid-js@1.9.3) - '@solidjs/router': - specifier: ^0.15.1 - version: 0.15.1(solid-js@1.9.3) - '@solidjs/start': - specifier: ^1.0.10 - version: 1.0.10(@testing-library/jest-dom@6.6.3)(solid-js@1.9.3)(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3))(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) - ai: - specifier: 4.2.10 - version: link:../../packages/ai - solid-js: - specifier: ^1.9.3 - version: 1.9.3 - zod: - specifier: ^3.23.8 - version: 3.23.8 - devDependencies: - autoprefixer: - specifier: ^10.4.19 - version: 10.4.19(postcss@8.4.49) - postcss: - specifier: ^8.4.49 - version: 8.4.49 - tailwindcss: - specifier: ^3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@22.7.4)(typescript@5.6.3)) - vinxi: - specifier: ^0.4.3 - version: 0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3) - examples/sveltekit-openai: devDependencies: '@ai-sdk/openai': @@ -1859,67 +1816,6 @@ importers: specifier: 3.23.8 version: 3.23.8 - packages/solid: - dependencies: - '@ai-sdk/provider-utils': - specifier: 2.2.3 - version: link:../provider-utils - '@ai-sdk/ui-utils': - specifier: 1.2.4 - version: link:../ui-utils - '@solid-primitives/trigger': - specifier: ^1.1.0 - version: 1.1.0(solid-js@1.8.7) - solid-js: - specifier: ^1.7.7 - version: 1.8.7 - zod: - specifier: ^3.23.8 - version: 3.23.8 - devDependencies: - '@solidjs/testing-library': - specifier: 0.8.10 - version: 0.8.10(@solidjs/router@0.15.1(solid-js@1.8.7))(solid-js@1.8.7) - '@testing-library/jest-dom': - specifier: ^6.6.3 - version: 6.6.3 - '@testing-library/user-event': - specifier: ^14.5.2 - version: 14.5.2(@testing-library/dom@10.4.0) - '@types/node': - specifier: 20.17.24 - version: 20.17.24 - '@vercel/ai-tsconfig': - specifier: workspace:* - version: link:../../tools/tsconfig - '@vitejs/plugin-vue': - specifier: 5.2.0 - version: 5.2.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3)) - eslint: - specifier: 8.57.1 - version: 8.57.1 - eslint-config-vercel-ai: - specifier: workspace:* - version: link:../../tools/eslint-config - jsdom: - specifier: ^24.0.0 - version: 24.0.0 - msw: - specifier: 2.6.4 - version: 2.6.4(@types/node@20.17.24)(typescript@5.6.3) - tsup: - specifier: ^7.2.0 - version: 7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) - typescript: - specifier: 5.6.3 - version: 5.6.3 - vite-plugin-solid: - specifier: 2.7.2 - version: 2.7.2(solid-js@1.8.7) - vitest: - specifier: 2.1.4 - version: 2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@24.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3))(terser@5.31.3) - packages/svelte: dependencies: '@ai-sdk/provider-utils': @@ -2400,10 +2296,6 @@ packages: resolution: {integrity: sha512-wbfdZ9w5vk0C0oyHqAJbc62+vet5prjj01jjJ8sKn3j9h3MQQlflEdXYvuqRWjHnM12coDEqiC1IRCi0U/EKwQ==} engines: {node: '>=6.9.0'} - '@babel/helper-module-imports@7.18.6': - resolution: {integrity: sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==} - engines: {node: '>=6.9.0'} - '@babel/helper-module-imports@7.25.9': resolution: {integrity: sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==} engines: {node: '>=6.9.0'} @@ -2799,12 +2691,6 @@ packages: 
peerDependencies: postcss-selector-parser: ^7.0.0 - '@deno/shim-deno-test@0.5.0': - resolution: {integrity: sha512-4nMhecpGlPi0cSzT67L+Tm+GOJqvuk8gqHBziqcUQOarnuIax1z96/gJHCSIz2Z0zhxE6Rzwb3IZXPtFh51j+w==} - - '@deno/shim-deno@0.19.2': - resolution: {integrity: sha512-q3VTHl44ad8T2Tw2SpeAvghdGOjlnLPDNO2cpOxwMrBE/PVas6geWpbpIgrM+czOCH0yejp0yi8OaTuB+NU40Q==} - '@edge-runtime/primitives@6.0.0': resolution: {integrity: sha512-FqoxaBT+prPBHBwE1WXS1ocnu/VLTQyZ6NMUBAdbP7N2hsFTTxMC/jMu2D/8GAlMQfxeuppcPuCUk/HO3fpIvA==} engines: {node: '>=18'} @@ -2822,12 +2708,6 @@ packages: cpu: [ppc64] os: [aix] - '@esbuild/aix-ppc64@0.20.2': - resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==} - engines: {node: '>=12'} - cpu: [ppc64] - os: [aix] - '@esbuild/aix-ppc64@0.21.5': resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} engines: {node: '>=12'} @@ -2870,12 +2750,6 @@ packages: cpu: [arm64] os: [android] - '@esbuild/android-arm64@0.20.2': - resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==} - engines: {node: '>=12'} - cpu: [arm64] - os: [android] - '@esbuild/android-arm64@0.21.5': resolution: {integrity: sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==} engines: {node: '>=12'} @@ -2918,12 +2792,6 @@ packages: cpu: [arm] os: [android] - '@esbuild/android-arm@0.20.2': - resolution: {integrity: sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==} - engines: {node: '>=12'} - cpu: [arm] - os: [android] - '@esbuild/android-arm@0.21.5': resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==} engines: {node: '>=12'} @@ -2966,12 +2834,6 @@ packages: cpu: [x64] os: [android] - '@esbuild/android-x64@0.20.2': - resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==} - engines: {node: '>=12'} - cpu: [x64] - os: [android] - '@esbuild/android-x64@0.21.5': resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==} engines: {node: '>=12'} @@ -3014,12 +2876,6 @@ packages: cpu: [arm64] os: [darwin] - '@esbuild/darwin-arm64@0.20.2': - resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==} - engines: {node: '>=12'} - cpu: [arm64] - os: [darwin] - '@esbuild/darwin-arm64@0.21.5': resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==} engines: {node: '>=12'} @@ -3062,12 +2918,6 @@ packages: cpu: [x64] os: [darwin] - '@esbuild/darwin-x64@0.20.2': - resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==} - engines: {node: '>=12'} - cpu: [x64] - os: [darwin] - '@esbuild/darwin-x64@0.21.5': resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==} engines: {node: '>=12'} @@ -3110,12 +2960,6 @@ packages: cpu: [arm64] os: [freebsd] - '@esbuild/freebsd-arm64@0.20.2': - resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==} - engines: {node: '>=12'} - cpu: [arm64] - os: [freebsd] - '@esbuild/freebsd-arm64@0.21.5': resolution: {integrity: 
sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==} engines: {node: '>=12'} @@ -3158,12 +3002,6 @@ packages: cpu: [x64] os: [freebsd] - '@esbuild/freebsd-x64@0.20.2': - resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==} - engines: {node: '>=12'} - cpu: [x64] - os: [freebsd] - '@esbuild/freebsd-x64@0.21.5': resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==} engines: {node: '>=12'} @@ -3206,12 +3044,6 @@ packages: cpu: [arm64] os: [linux] - '@esbuild/linux-arm64@0.20.2': - resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==} - engines: {node: '>=12'} - cpu: [arm64] - os: [linux] - '@esbuild/linux-arm64@0.21.5': resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==} engines: {node: '>=12'} @@ -3254,12 +3086,6 @@ packages: cpu: [arm] os: [linux] - '@esbuild/linux-arm@0.20.2': - resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==} - engines: {node: '>=12'} - cpu: [arm] - os: [linux] - '@esbuild/linux-arm@0.21.5': resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==} engines: {node: '>=12'} @@ -3302,12 +3128,6 @@ packages: cpu: [ia32] os: [linux] - '@esbuild/linux-ia32@0.20.2': - resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==} - engines: {node: '>=12'} - cpu: [ia32] - os: [linux] - '@esbuild/linux-ia32@0.21.5': resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==} engines: {node: '>=12'} @@ -3350,12 +3170,6 @@ packages: cpu: [loong64] os: [linux] - '@esbuild/linux-loong64@0.20.2': - resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==} - engines: {node: '>=12'} - cpu: [loong64] - os: [linux] - '@esbuild/linux-loong64@0.21.5': resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==} engines: {node: '>=12'} @@ -3398,12 +3212,6 @@ packages: cpu: [mips64el] os: [linux] - '@esbuild/linux-mips64el@0.20.2': - resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==} - engines: {node: '>=12'} - cpu: [mips64el] - os: [linux] - '@esbuild/linux-mips64el@0.21.5': resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==} engines: {node: '>=12'} @@ -3446,12 +3254,6 @@ packages: cpu: [ppc64] os: [linux] - '@esbuild/linux-ppc64@0.20.2': - resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==} - engines: {node: '>=12'} - cpu: [ppc64] - os: [linux] - '@esbuild/linux-ppc64@0.21.5': resolution: {integrity: sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==} engines: {node: '>=12'} @@ -3494,12 +3296,6 @@ packages: cpu: [riscv64] os: [linux] - '@esbuild/linux-riscv64@0.20.2': - resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==} - engines: {node: '>=12'} - cpu: [riscv64] - os: [linux] - 
'@esbuild/linux-riscv64@0.21.5': resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==} engines: {node: '>=12'} @@ -3542,12 +3338,6 @@ packages: cpu: [s390x] os: [linux] - '@esbuild/linux-s390x@0.20.2': - resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==} - engines: {node: '>=12'} - cpu: [s390x] - os: [linux] - '@esbuild/linux-s390x@0.21.5': resolution: {integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==} engines: {node: '>=12'} @@ -3590,12 +3380,6 @@ packages: cpu: [x64] os: [linux] - '@esbuild/linux-x64@0.20.2': - resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==} - engines: {node: '>=12'} - cpu: [x64] - os: [linux] - '@esbuild/linux-x64@0.21.5': resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==} engines: {node: '>=12'} @@ -3644,12 +3428,6 @@ packages: cpu: [x64] os: [netbsd] - '@esbuild/netbsd-x64@0.20.2': - resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==} - engines: {node: '>=12'} - cpu: [x64] - os: [netbsd] - '@esbuild/netbsd-x64@0.21.5': resolution: {integrity: sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==} engines: {node: '>=12'} @@ -3716,12 +3494,6 @@ packages: cpu: [x64] os: [openbsd] - '@esbuild/openbsd-x64@0.20.2': - resolution: {integrity: sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==} - engines: {node: '>=12'} - cpu: [x64] - os: [openbsd] - '@esbuild/openbsd-x64@0.21.5': resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==} engines: {node: '>=12'} @@ -3764,12 +3536,6 @@ packages: cpu: [x64] os: [sunos] - '@esbuild/sunos-x64@0.20.2': - resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==} - engines: {node: '>=12'} - cpu: [x64] - os: [sunos] - '@esbuild/sunos-x64@0.21.5': resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} engines: {node: '>=12'} @@ -3812,12 +3578,6 @@ packages: cpu: [arm64] os: [win32] - '@esbuild/win32-arm64@0.20.2': - resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==} - engines: {node: '>=12'} - cpu: [arm64] - os: [win32] - '@esbuild/win32-arm64@0.21.5': resolution: {integrity: sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==} engines: {node: '>=12'} @@ -3860,12 +3620,6 @@ packages: cpu: [ia32] os: [win32] - '@esbuild/win32-ia32@0.20.2': - resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==} - engines: {node: '>=12'} - cpu: [ia32] - os: [win32] - '@esbuild/win32-ia32@0.21.5': resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==} engines: {node: '>=12'} @@ -3908,12 +3662,6 @@ packages: cpu: [x64] os: [win32] - '@esbuild/win32-x64@0.20.2': - resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==} - engines: {node: '>=12'} - cpu: [x64] - os: [win32] - 
'@esbuild/win32-x64@0.21.5': resolution: {integrity: sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==} engines: {node: '>=12'} @@ -6143,12 +5891,6 @@ packages: cpu: [x64] os: [linux] - '@parcel/watcher-wasm@2.3.0': - resolution: {integrity: sha512-ejBAX8H0ZGsD8lSICDNyMbSEtPMWgDL0WFCt/0z7hyf5v8Imz4rAM8xY379mBsECkq/Wdqa5WEDLqtjZ+6NxfA==} - engines: {node: '>= 10.0.0'} - bundledDependencies: - - napi-wasm - '@parcel/watcher-wasm@2.4.1': resolution: {integrity: sha512-/ZR0RxqxU/xxDGzbzosMjh4W6NdYFMqq2nvo2b8SLi7rsl/4jkL8S5stIikorNkdR50oVDvqb/3JT05WM+CRRA==} engines: {node: '>= 10.0.0'} @@ -7116,39 +6858,6 @@ packages: resolution: {integrity: sha512-b+zebfKCfRdgNJDknHCob3O7FpeYQN6ZG6YLExMcasDHsCXlsXCEuiPZeLnJLpwa5dvPetGlnGCiMHuLwGvFow==} engines: {node: '>=18.0.0'} - '@solid-primitives/trigger@1.1.0': - resolution: {integrity: sha512-00BbAiXV66WwjHuKZc3wr0+GLb9C24mMUmi3JdTpNFgHBbrQGrIHubmZDg36c5/7wH+E0GQtOOanwQS063PO+A==} - peerDependencies: - solid-js: ^1.6.12 - - '@solid-primitives/utils@6.2.3': - resolution: {integrity: sha512-CqAwKb2T5Vi72+rhebSsqNZ9o67buYRdEJrIFzRXz3U59QqezuuxPsyzTSVCacwS5Pf109VRsgCJQoxKRoECZQ==} - peerDependencies: - solid-js: ^1.6.12 - - '@solidjs/meta@0.29.4': - resolution: {integrity: sha512-zdIWBGpR9zGx1p1bzIPqF5Gs+Ks/BH8R6fWhmUa/dcK1L2rUC8BAcZJzNRYBQv74kScf1TSOs0EY//Vd/I0V8g==} - peerDependencies: - solid-js: '>=1.8.4' - - '@solidjs/router@0.15.1': - resolution: {integrity: sha512-lb5BRBqQqii/1dQCglx2K68xLkgu7QcrcajWKuuEx6FHTsK/hp5IgVhjy6RzPMLj+SFyrrRi/ldirCFNxtzh0Q==} - peerDependencies: - solid-js: ^1.8.6 - - '@solidjs/start@1.0.10': - resolution: {integrity: sha512-3yg4KraSxc4rXs9dy/3kkqjDhU0JCPsZFLmDl5n6hHRPwtLLac6WUhs2k5VxGzitHaaJM/ZQCfT7i544Mf+4tw==} - - '@solidjs/testing-library@0.8.10': - resolution: {integrity: sha512-qdeuIerwyq7oQTIrrKvV0aL9aFeuwTd86VYD3afdq5HYEwoox1OBTJy4y8A3TFZr8oAR0nujYgCzY/8wgHGfeQ==} - engines: {node: '>= 14'} - peerDependencies: - '@solidjs/router': '>=0.9.0' - solid-js: '>=1.0.0' - peerDependenciesMeta: - '@solidjs/router': - optional: true - '@sveltejs/acorn-typescript@1.0.5': resolution: {integrity: sha512-IwQk4yfwLdibDlrXVE04jTZYlLnwsTT2PIOQQGNLWfjavGifnk1JD1LcZjZaBTRcxZu2FfPfNLOE04DSu9lqtQ==} peerDependencies: @@ -7295,9 +7004,6 @@ packages: '@types/body-parser@1.19.5': resolution: {integrity: sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==} - '@types/braces@3.0.4': - resolution: {integrity: sha512-0WR3b8eaISjEW7RpZnclONaLFDf7buaowRHdqLp4vLj54AsSAYWfh3DRbfiYJY9XDxMgx1B4sE1Afw2PGpuHOA==} - '@types/bunyan@1.8.9': resolution: {integrity: sha512-ZqS9JGpBxVOvsawzmVt30sP++gSQMTejCkIAQ3VdadOcRE8izTyW66hufvwLeH+YEGP6Js2AW7Gz+RMyvrEbmw==} @@ -7385,9 +7091,6 @@ packages: '@types/methods@1.1.4': resolution: {integrity: sha512-ymXWVrDiCxTBE3+RIrrP533E70eA+9qu7zdWoHuOmGujkYtzf4HQF96b8nwHLqhuf4ykX61IGRIB38CC6/sImQ==} - '@types/micromatch@4.0.8': - resolution: {integrity: sha512-hAe0Hc9yJWESaUVl5NM09E9IdAk/0k5njp0gV3yEI5SkmjWuFYh9IexqX3/eTC0y+J+RMx2WIBqkfuzvTRnVmg==} - '@types/mime@1.3.5': resolution: {integrity: sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==} @@ -7708,25 +7411,6 @@ packages: '@opentelemetry/sdk-metrics': ^1.19.0 '@opentelemetry/sdk-trace-base': ^1.19.0 - '@vinxi/listhen@1.5.6': - resolution: {integrity: sha512-WSN1z931BtasZJlgPp704zJFnQFRg7yzSjkm3MzAWQYe4uXFXlFr1hc5Ac2zae5/HDOz5x1/zDM5Cb54vTCnWw==} - hasBin: true - - '@vinxi/plugin-directives@0.4.3': - resolution: {integrity: 
sha512-Ey+TRIwyk8871PKhQel8NyZ9B6N0Tvhjo1QIttTyrV0d7BfUpri5GyGygmBY7fHClSE/vqaNCCZIKpTL3NJAEg==} - peerDependencies: - vinxi: ^0.4.3 - - '@vinxi/server-components@0.4.3': - resolution: {integrity: sha512-KVEnQtb+ZlXIEKaUw4r4WZl/rqFeZqSyIRklY1wFiPw7GCJUxbXzISpsJ+HwDhYi9k4n8uZJyQyLHGkoiEiolg==} - peerDependencies: - vinxi: ^0.4.3 - - '@vinxi/server-functions@0.4.3': - resolution: {integrity: sha512-kVYrOrCMHwGvHRwpaeW2/PE7URcGtz4Rk/hIHa2xjt5PGopzzB/Y5GC8YgZjtqSRqo0ElAKsEik7UE6CXH3HXA==} - peerDependencies: - vinxi: ^0.4.3 - '@vitejs/plugin-react@4.3.3': resolution: {integrity: sha512-NooDe9GpHGqNns1i8XDERg0Vsg5SSYRhRxxyTGogUdkdNt47jal+fbuYi+Yfq6pzRCKXyoPcWisfxE6RIM3GKA==} engines: {node: ^14.18.0 || >=16.0.0} @@ -8018,11 +7702,6 @@ packages: resolution: {integrity: sha512-M0EUka6rb+QC4l9Z3T0nJEzNOO7JcoJlYMrBlyBCiFSXRyxjLKayd4TbQs2FDRWQU1h9FR7QVNHt+PEaoNL5rQ==} engines: {node: '>=0.4.0'} - acorn-typescript@1.4.13: - resolution: {integrity: sha512-xsc9Xv0xlVfwp2o7sQ+GCQ1PgbkdcpWdTzrwXxO3xDMTAywVS3oXVOcOHuRjAPkS4P9b+yc/qNF15460v+jp4Q==} - peerDependencies: - acorn: '>=8.9.0' - acorn-walk@8.3.3: resolution: {integrity: sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==} engines: {node: '>=0.4.0'} @@ -8088,9 +7767,6 @@ packages: ajv@8.17.1: resolution: {integrity: sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==} - ansi-align@3.0.1: - resolution: {integrity: sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==} - ansi-colors@4.1.3: resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} engines: {node: '>=6'} @@ -8271,10 +7947,6 @@ packages: resolution: {integrity: sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==} engines: {node: '>=8'} - astring@1.8.6: - resolution: {integrity: sha512-ISvCdHdlTDlH5IpxQJIex7BWBywFWgjJSVdwst+/iQCoEYnyOaQ95+X1JGshuBjGp6nxKUy1jMgE3zPqN7fQdg==} - hasBin: true - async-retry@1.3.3: resolution: {integrity: sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw==} @@ -8357,11 +8029,6 @@ packages: resolution: {integrity: sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - babel-plugin-jsx-dom-expressions@0.37.8: - resolution: {integrity: sha512-nVHH6g7541aaAQJAsyWHvjH7GCXZ+8tuF3Qu4y9W9aKwonRbcJL+yyMatDJLvjC54iIuGowiiZM6Rm3AVJczGg==} - peerDependencies: - '@babel/core': ^7.20.12 - babel-preset-current-node-syntax@1.1.0: resolution: {integrity: sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==} peerDependencies: @@ -8373,11 +8040,6 @@ packages: peerDependencies: '@babel/core': ^7.0.0 - babel-preset-solid@1.8.4: - resolution: {integrity: sha512-TfI09EOFHsbhVqoM+svop3zY4zOUIBlZsGU16Rgd4NsYVXw6lv2VEn7dmlpczMMQy0IeO3PFiXlMQZWutB+uAQ==} - peerDependencies: - '@babel/core': ^7.0.0 - bail@2.0.2: resolution: {integrity: sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==} @@ -8440,10 +8102,6 @@ packages: bowser@2.11.0: resolution: {integrity: sha512-AlcaJBi/pqqJBIQ8U9Mcpc9i8Aqxn88Skv5d+xBX006BY5u8N3mGLHa5Lgppa7L/HfwgwLgZ6NYs+Ag6uUmJRA==} - boxen@7.1.1: - resolution: {integrity: sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==} - engines: {node: '>=14.16'} - 
brace-expansion@1.1.11: resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} @@ -8575,10 +8233,6 @@ packages: resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} engines: {node: '>=10'} - camelcase@7.0.1: - resolution: {integrity: sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==} - engines: {node: '>=14.16'} - caniuse-api@3.0.0: resolution: {integrity: sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==} @@ -8698,10 +8352,6 @@ packages: clear@0.1.0: resolution: {integrity: sha512-qMjRnoL+JDPJHeLePZJuao6+8orzHMGP04A8CdwCNsKhRbOnKRjefxONR7bwILT3MHecxKBjHkKL/tkZ8r4Uzw==} - cli-boxes@3.0.0: - resolution: {integrity: sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==} - engines: {node: '>=10'} - cli-cursor@3.1.0: resolution: {integrity: sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==} engines: {node: '>=8'} @@ -8979,14 +8629,6 @@ packages: resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} engines: {node: '>= 8'} - crossws@0.2.4: - resolution: {integrity: sha512-DAxroI2uSOgUKLz00NX6A8U/8EE3SZHmIND+10jkVSaypvyt57J5JEOxAQOL6lQxyzi/wZbTIwssU1uy69h5Vg==} - peerDependencies: - uWebSockets.js: '*' - peerDependenciesMeta: - uWebSockets.js: - optional: true - crossws@0.3.1: resolution: {integrity: sha512-HsZgeVYaG+b5zA+9PbIPGq4+J/CJynJuearykPsXx4V/eMhyQ5EDVg3Ak2FBZtVXCiOLu/U7IiwDHTr9MA+IKw==} @@ -9055,9 +8697,6 @@ packages: resolution: {integrity: sha512-9+vem03dMXG7gDmZ62uqmRiMRNtinIZ9ZyuF6BdxzfOD+FdN5hretzynkn0ReS2DO2GSw76RWHs0UmJPI2zUjw==} engines: {node: '>=18'} - csstype@3.1.2: - resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==} - csstype@3.1.3: resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} @@ -9084,9 +8723,6 @@ packages: resolution: {integrity: sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==} engines: {node: '>= 0.4'} - dax-sh@0.39.2: - resolution: {integrity: sha512-gpuGEkBQM+5y6p4cWaw9+ePy5TNon+fdwFVtTI8leU3UhwhsBfPewRxMXGuQNC+M2b/MDGMlfgpqynkcd0C3FQ==} - db0@0.2.1: resolution: {integrity: sha512-BWSFmLaCkfyqbSEZBQINMVNjCVfrogi7GQ2RSy1tmtfK9OXlsup6lUMwLsqSD7FbAjD04eWFdXowSHHUp6SE/Q==} peerDependencies: @@ -9439,9 +9075,6 @@ packages: error-stack-parser-es@0.1.5: resolution: {integrity: sha512-xHku1X40RO+fO8yJ8Wh2f2rZWVjqyhb1zgq1yZ8aZRQkv6OOKhKWRUaht3eSCUbAOBaKIgM+ykwFLE+QUxgGeg==} - error-stack-parser@2.1.4: - resolution: {integrity: sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ==} - errx@0.1.0: resolution: {integrity: sha512-fZmsRiDNv07K6s2KkKFTiD2aIvECa7++PKyD5NC32tpRw46qZA3sOz+aM+/V9V0GDHxVTKLziveV4JhzBHDp9Q==} @@ -9507,11 +9140,6 @@ packages: engines: {node: '>=12'} hasBin: true - esbuild@0.20.2: - resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==} - engines: {node: '>=12'} - hasBin: true - esbuild@0.21.5: resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} engines: {node: '>=12'} @@ -10033,15 +9661,6 @@ packages: resolution: 
{integrity: sha512-z8hKPUjZ33VLn4HVntifqmEhmolUMopysnMNzazoDqo1GLUkBsreLNsxETlKJMPotUWStQnen6SGvUNe1j4Hlg==} engines: {node: '>=0.4.0'} - follow-redirects@1.15.3: - resolution: {integrity: sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==} - engines: {node: '>=4.0'} - peerDependencies: - debug: '*' - peerDependenciesMeta: - debug: - optional: true - follow-redirects@1.15.9: resolution: {integrity: sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==} engines: {node: '>=4.0'} @@ -10344,9 +9963,6 @@ packages: resolution: {integrity: sha512-O1Ld7Dr+nqPnmGpdhzLmMTQ4vAsD+rHwMm1NLUmoUFFymBOMKxCCrtDxqdBRYXdeEPEi3SyoR4TizJLQrnKBNA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - h3@1.11.1: - resolution: {integrity: sha512-AbaH6IDnZN6nmbnJOH72y3c5Wwh9P97soSVdGSBbcDACRdkC0FEWf25pzx4f/NuOCK6quHmW18yF2Wx+G4Zi1A==} - h3@1.13.0: resolution: {integrity: sha512-vFEAu/yf8UMUcB4s43OaDaigcqpQd14yanmOsn+NcRX3/guSKncyE2rOYhq8RIchgJrPSs/QiIddnTTR1ddiAg==} @@ -10421,9 +10037,6 @@ packages: resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==} engines: {node: '>=18'} - html-entities@2.3.3: - resolution: {integrity: sha512-DV5Ln36z34NNTDgnz0EWGBLZENelNAtkiFA4kyNOG2tDI6Mz1uSWiq1wAKdyjnJwyDiDO7Fa2SO1CTxPXL8VxA==} - html-escaper@2.0.2: resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} @@ -10431,9 +10044,6 @@ packages: resolution: {integrity: sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==} engines: {node: '>=8'} - html-to-image@1.11.11: - resolution: {integrity: sha512-9gux8QhvjRO/erSnDPv28noDZcPZmYE7e1vFsBLKLlRlKDSqNJYebj6Qz1TGd5lsRV+X+xYyjCKjuZdABinWjA==} - html-url-attributes@3.0.1: resolution: {integrity: sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==} @@ -10461,10 +10071,6 @@ packages: resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} engines: {node: '>= 14'} - http-proxy@1.18.1: - resolution: {integrity: sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==} - engines: {node: '>=8.0.0'} - http-shutdown@1.2.2: resolution: {integrity: sha512-S9wWkJ/VSY9/k4qcjG318bqJNruzE4HySUhFYknwmu6LBP97KLLfwNf+n4V1BHurvFNkSKLFnK/RsuUnRTf9Vw==} engines: {iojs: '>= 1.0.0', node: '>= 0.12.0'} @@ -10883,10 +10489,6 @@ packages: isexe@2.0.0: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} - isexe@3.1.1: - resolution: {integrity: sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==} - engines: {node: '>=16'} - isobject@3.0.1: resolution: {integrity: sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==} engines: {node: '>=0.10.0'} @@ -11614,9 +11216,6 @@ packages: resolution: {integrity: sha512-ISQTe55T2ao7XtlAStud6qwYPZjE4GK1S/BeVPus4jrq6JuOnQ00YKQC581RWhR122W7msZV263KzVeLoqidyQ==} engines: {node: '>=12'} - magicast@0.2.11: - resolution: {integrity: sha512-6saXbRDA1HMkqbsvHOU6HBjCVgZT460qheRkLhJQHWAbhXoWESI3Kn/dGGXyKs15FFKR85jsUqFx2sMK0wy/5g==} - magicast@0.3.5: resolution: {integrity: sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==} @@ -11692,10 +11291,6 @@ packages: resolution: {integrity: 
sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==} engines: {node: '>= 4.0.0'} - merge-anything@5.1.7: - resolution: {integrity: sha512-eRtbOb1N5iyH0tkQDAoQ4Ipsp/5qSR79Dzrz8hEPxRX10RWWR/iQXdoKmBSRCThY1Fh5EhISDtpSc93fpxUniQ==} - engines: {node: '>=12.13'} - merge-descriptors@1.0.3: resolution: {integrity: sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==} @@ -13509,30 +13104,6 @@ packages: serialize-javascript@6.0.2: resolution: {integrity: sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==} - seroval-plugins@1.0.7: - resolution: {integrity: sha512-GO7TkWvodGp6buMEX9p7tNyIkbwlyuAWbI6G9Ec5bhcm7mQdu3JOK1IXbEUwb3FVzSc363GraG/wLW23NSavIw==} - engines: {node: '>=10'} - peerDependencies: - seroval: ^1.0 - - seroval-plugins@1.1.1: - resolution: {integrity: sha512-qNSy1+nUj7hsCOon7AO4wdAIo9P0jrzAMp18XhiOzA6/uO5TKtP7ScozVJ8T293oRIvi5wyCHSM4TrJo/c/GJA==} - engines: {node: '>=10'} - peerDependencies: - seroval: ^1.0 - - seroval@0.15.1: - resolution: {integrity: sha512-OPVtf0qmeC7RW+ScVX+7aOS+xoIM7pWcZ0jOWg2aTZigCydgRB04adfteBRbecZnnrO1WuGQ+C3tLeBBzX2zSQ==} - engines: {node: '>=10'} - - seroval@1.0.7: - resolution: {integrity: sha512-n6ZMQX5q0Vn19Zq7CIKNIo7E75gPkGCFUEqDpa8jgwpYr/vScjqnQ6H09t1uIiZ0ZSK0ypEGvrYK2bhBGWsGdw==} - engines: {node: '>=10'} - - seroval@1.1.1: - resolution: {integrity: sha512-rqEO6FZk8mv7Hyv4UCj3FD3b6Waqft605TLfsCe/BiaylRpyyMC0b+uA5TJKawX3KzMrdi3wsLbCaLplrQmBvQ==} - engines: {node: '>=10'} - serve-placeholder@2.0.2: resolution: {integrity: sha512-/TMG8SboeiQbZJWRlfTCqMs2DD3SZgWp0kDQePz9yUuCnDfDh/92gf7/PxGhzXTKBIPASIHxFcZndoNbp6QOLQ==} @@ -13587,14 +13158,6 @@ packages: shell-quote@1.8.1: resolution: {integrity: sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==} - shikiji-core@0.9.19: - resolution: {integrity: sha512-AFJu/vcNT21t0e6YrfadZ+9q86gvPum6iywRyt1OtIPjPFe25RQnYJyxHQPMLKCCWA992TPxmEmbNcOZCAJclw==} - deprecated: Deprecated, use @shikijs/core instead - - shikiji@0.9.19: - resolution: {integrity: sha512-Kw2NHWktdcdypCj1GkKpXH4o6Vxz8B8TykPlPuLHOGSV8VkhoCLcFOH4k19K4LXAQYRQmxg+0X/eM+m2sLhAkg==} - deprecated: Deprecated, use shiki instead - shimmer@1.2.1: resolution: {integrity: sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==} @@ -13651,33 +13214,11 @@ packages: smob@1.4.1: resolution: {integrity: sha512-9LK+E7Hv5R9u4g4C3p+jjLstaLe11MDsL21UpYaCNmapvMkYhqCV4A/f/3gyH8QjMyh6l68q9xC85vihY9ahMQ==} - solid-js@1.8.7: - resolution: {integrity: sha512-9dzrSVieh2zj3SnJ02II6xZkonR6c+j/91b7XZUNcC6xSaldlqjjGh98F1fk5cRJ8ZTkzqF5fPIWDxEOs6QZXA==} - - solid-js@1.9.3: - resolution: {integrity: sha512-5ba3taPoZGt9GY3YlsCB24kCg0Lv/rie/HTD4kG6h4daZZz7+yK02xn8Vx8dLYBc9i6Ps5JwAbEiqjmKaLB3Ag==} + sonic-boom@4.1.0: + resolution: {integrity: sha512-NGipjjRicyJJ03rPiZCJYjwlsuP2d1/5QUviozRXC7S3WdVWNK5e3Ojieb9CCyfhq2UC+3+SRd9nG3I2lPRvUw==} - solid-refresh@0.5.3: - resolution: {integrity: sha512-Otg5it5sjOdZbQZJnvo99TEBAr6J7PQ5AubZLNU6szZzg3RQQ5MX04oteBIIGDs0y2Qv8aXKm9e44V8z+UnFdw==} - peerDependencies: - solid-js: ^1.3 - - solid-refresh@0.6.3: - resolution: {integrity: sha512-F3aPsX6hVw9ttm5LYlth8Q15x6MlI/J3Dn+o3EQyRTtTxidepSTwAYdozt01/YA+7ObcciagGEyXIopGZzQtbA==} - peerDependencies: - solid-js: ^1.3 - - solid-use@0.8.0: - resolution: {integrity: sha512-YX+XmcKLvSx3bwMimMhFy40ZkDnShnUcEw6cW6fSscwKEgl1TG3GlgAvkBmQ3AeWjvQSd8+HGTr82ImsrjkkqA==} - engines: {node: '>=10'} - 
peerDependencies: - solid-js: ^1.7 - - sonic-boom@4.1.0: - resolution: {integrity: sha512-NGipjjRicyJJ03rPiZCJYjwlsuP2d1/5QUviozRXC7S3WdVWNK5e3Ojieb9CCyfhq2UC+3+SRd9nG3I2lPRvUw==} - - sonner@1.7.1: - resolution: {integrity: sha512-b6LHBfH32SoVasRFECrdY8p8s7hXPDn3OHUFbZZbiB1ctLS9Gdh6rpX2dVrpQA0kiL5jcRzDDldwwLkSKk3+QQ==} + sonner@1.7.1: + resolution: {integrity: sha512-b6LHBfH32SoVasRFECrdY8p8s7hXPDn3OHUFbZZbiB1ctLS9Gdh6rpX2dVrpQA0kiL5jcRzDDldwwLkSKk3+QQ==} peerDependencies: react: ^18.0.0 || ^19.0.0 || ^19.0.0-rc react-dom: ^18.0.0 || ^19.0.0 || ^19.0.0-rc @@ -13728,9 +13269,6 @@ packages: stackback@0.0.2: resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} - stackframe@1.3.4: - resolution: {integrity: sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw==} - stacktrace-parser@0.1.10: resolution: {integrity: sha512-KJP1OCML99+8fhOHxwwzyWrlUuVX5GQ0ZpJTd1DFXhdkrvg1szxfHhawXUZ3g9TkXORQd4/WG68jMlQZ2p8wlg==} engines: {node: '>=6'} @@ -14056,12 +13594,6 @@ packages: resolution: {integrity: sha512-25HcdYC79g0rPxk9o7RIp3i0/ebP+viR6vj2Fsxh1a9pE6o7PfXz4HlmdYLGsQsmBeQNK88BA2UJo4IzBRfzaA==} engines: {node: '>=12'} - terracotta@1.0.5: - resolution: {integrity: sha512-4jkpXGKemeWjsBGDoBK1tnovGfIEMM8+Fa99T0TD4VYUaZq6hXHEWMfHshxy1h+DzsanDAwSBIBM0NnOohzijw==} - engines: {node: '>=10'} - peerDependencies: - solid-js: ^1.8 - terser-webpack-plugin@5.3.10: resolution: {integrity: sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==} engines: {node: '>= 10.13.0'} @@ -14446,10 +13978,6 @@ packages: resolution: {integrity: sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==} engines: {node: '>=10'} - type-fest@2.19.0: - resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==} - engines: {node: '>=12.20'} - type-fest@4.26.1: resolution: {integrity: sha512-yOGpmOAL7CkKe/91I5O3gPICmJNLJ1G4zFYVAsRHg7M64biSnPtRj0WNQt++bRkjYOqjWXrhnUw1utzmVErAdg==} engines: {node: '>=16'} @@ -14529,9 +14057,6 @@ packages: undici-types@5.26.5: resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} - undici-types@5.28.4: - resolution: {integrity: sha512-3OeMF5Lyowe8VW0skf5qaIE7Or3yS9LS7fvMUI0gg4YxpIBVg0L8BxCmROw2CcYhSkpR68Epz7CGc8MPj94Uww==} - undici-types@6.19.8: resolution: {integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} @@ -14739,9 +14264,6 @@ packages: typescript: optional: true - validate-html-nesting@1.2.2: - resolution: {integrity: sha512-hGdgQozCsQJMyfK5urgFcWEqsSSrK63Awe0t/IMR0bZ0QMtnuaiHzThW81guu3qx9abLi99NEuiaN6P9gVYsNg==} - vary@1.1.2: resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} engines: {node: '>= 0.8'} @@ -14752,10 +14274,6 @@ packages: vfile@6.0.3: resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} - vinxi@0.4.3: - resolution: {integrity: sha512-RgJz7RWftML5h/qfPsp3QKVc2FSlvV4+HevpE0yEY2j+PS/I2ULjoSsZDXaR8Ks2WYuFFDzQr8yrox7v8aqkng==} - hasBin: true - vite-hot-client@0.2.3: resolution: {integrity: sha512-rOGAV7rUlUHX89fP2p2v0A2WWvV3QMX2UYq0fRqsWSvFvev4atHWqjwGoKaZT1VTKyLGk533ecu3eyd0o59CAg==} peerDependencies: @@ -14815,22 +14333,6 @@ packages: '@nuxt/kit': optional: true - vite-plugin-solid@2.10.2: 
- resolution: {integrity: sha512-AOEtwMe2baBSXMXdo+BUwECC8IFHcKS6WQV/1NEd+Q7vHPap5fmIhLcAzr+DUJ04/KHx/1UBU0l1/GWP+rMAPQ==} - peerDependencies: - '@testing-library/jest-dom': ^5.16.6 || ^5.17.0 || ^6.* - solid-js: ^1.7.2 - vite: ^3.0.0 || ^4.0.0 || ^5.0.0 - peerDependenciesMeta: - '@testing-library/jest-dom': - optional: true - - vite-plugin-solid@2.7.2: - resolution: {integrity: sha512-GV2SMLAibOoXe76i02AsjAg7sbm/0lngBlERvJKVN67HOrJsHcWgkt0R6sfGLDJuFkv2aBe14Zm4vJcNME+7zw==} - peerDependencies: - solid-js: ^1.7.2 - vite: ^3.0.0 || ^4.0.0 - vite-plugin-vue-inspector@5.1.3: resolution: {integrity: sha512-pMrseXIDP1Gb38mOevY+BvtNGNqiqmqa2pKB99lnLsADQww9w9xMbAfT4GB6RUoaOkSPrtlXqpq2Fq+Dj2AgFg==} peerDependencies: @@ -14907,14 +14409,6 @@ packages: yaml: optional: true - vitefu@0.2.5: - resolution: {integrity: sha512-SgHtMLoqaeeGnd2evZ849ZbACbnwQCIwRH57t18FxcXoZop0uQu0uzlIhJBlF/eWVzuce0sHeqPcDo+evVcg8Q==} - peerDependencies: - vite: ^3.0.0 || ^4.0.0 || ^5.0.0 - peerDependenciesMeta: - vite: - optional: true - vitefu@1.0.6: resolution: {integrity: sha512-+Rex1GlappUyNN6UfwbVZne/9cYC4+R2XDk9xkNXBKMw6HQagdX9PgZ8V2v1WUSK1wfBLp7qbI1+XSNIlB1xmA==} peerDependencies: @@ -15136,11 +14630,6 @@ packages: engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} hasBin: true - which@4.0.0: - resolution: {integrity: sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==} - engines: {node: ^16.13.0 || >=18.0.0} - hasBin: true - why-is-node-running@2.3.0: resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} engines: {node: '>=8'} @@ -15149,10 +14638,6 @@ packages: wide-align@1.1.5: resolution: {integrity: sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==} - widest-line@4.0.1: - resolution: {integrity: sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==} - engines: {node: '>=12'} - word-wrap@1.2.5: resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} engines: {node: '>=0.10.0'} @@ -15917,10 +15402,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@babel/helper-module-imports@7.18.6': - dependencies: - '@babel/types': 7.26.0 - '@babel/helper-module-imports@7.25.9': dependencies: '@babel/traverse': 7.25.9 @@ -16164,15 +15645,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@babel/plugin-transform-modules-commonjs@7.25.9(@babel/core@7.26.0)': - dependencies: - '@babel/core': 7.26.0 - '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.0) - '@babel/helper-plugin-utils': 7.25.9 - '@babel/helper-simple-access': 7.25.9 - transitivePeerDependencies: - - supports-color - '@babel/plugin-transform-nullish-coalescing-operator@7.25.9(@babel/core@7.25.2)': dependencies: '@babel/core': 7.25.2 @@ -16244,17 +15716,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@babel/preset-typescript@7.26.0(@babel/core@7.26.0)': - dependencies: - '@babel/core': 7.26.0 - '@babel/helper-plugin-utils': 7.25.9 - '@babel/helper-validator-option': 7.25.9 - '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.26.0) - '@babel/plugin-transform-modules-commonjs': 7.25.9(@babel/core@7.26.0) - '@babel/plugin-transform-typescript': 7.25.9(@babel/core@7.26.0) - transitivePeerDependencies: - - supports-color - '@babel/register@7.25.9(@babel/core@7.25.2)': dependencies: '@babel/core': 7.25.2 @@ -16495,13 +15956,6 @@ snapshots: dependencies: 
postcss-selector-parser: 7.0.0 - '@deno/shim-deno-test@0.5.0': {} - - '@deno/shim-deno@0.19.2': - dependencies: - '@deno/shim-deno-test': 0.5.0 - which: 4.0.0 - '@edge-runtime/primitives@6.0.0': {} '@edge-runtime/vm@5.0.0': @@ -16516,9 +15970,6 @@ snapshots: '@esbuild/aix-ppc64@0.19.12': optional: true - '@esbuild/aix-ppc64@0.20.2': - optional: true - '@esbuild/aix-ppc64@0.21.5': optional: true @@ -16540,9 +15991,6 @@ snapshots: '@esbuild/android-arm64@0.19.12': optional: true - '@esbuild/android-arm64@0.20.2': - optional: true - '@esbuild/android-arm64@0.21.5': optional: true @@ -16564,9 +16012,6 @@ snapshots: '@esbuild/android-arm@0.19.12': optional: true - '@esbuild/android-arm@0.20.2': - optional: true - '@esbuild/android-arm@0.21.5': optional: true @@ -16588,9 +16033,6 @@ snapshots: '@esbuild/android-x64@0.19.12': optional: true - '@esbuild/android-x64@0.20.2': - optional: true - '@esbuild/android-x64@0.21.5': optional: true @@ -16612,9 +16054,6 @@ snapshots: '@esbuild/darwin-arm64@0.19.12': optional: true - '@esbuild/darwin-arm64@0.20.2': - optional: true - '@esbuild/darwin-arm64@0.21.5': optional: true @@ -16636,9 +16075,6 @@ snapshots: '@esbuild/darwin-x64@0.19.12': optional: true - '@esbuild/darwin-x64@0.20.2': - optional: true - '@esbuild/darwin-x64@0.21.5': optional: true @@ -16660,9 +16096,6 @@ snapshots: '@esbuild/freebsd-arm64@0.19.12': optional: true - '@esbuild/freebsd-arm64@0.20.2': - optional: true - '@esbuild/freebsd-arm64@0.21.5': optional: true @@ -16684,9 +16117,6 @@ snapshots: '@esbuild/freebsd-x64@0.19.12': optional: true - '@esbuild/freebsd-x64@0.20.2': - optional: true - '@esbuild/freebsd-x64@0.21.5': optional: true @@ -16708,9 +16138,6 @@ snapshots: '@esbuild/linux-arm64@0.19.12': optional: true - '@esbuild/linux-arm64@0.20.2': - optional: true - '@esbuild/linux-arm64@0.21.5': optional: true @@ -16732,9 +16159,6 @@ snapshots: '@esbuild/linux-arm@0.19.12': optional: true - '@esbuild/linux-arm@0.20.2': - optional: true - '@esbuild/linux-arm@0.21.5': optional: true @@ -16756,9 +16180,6 @@ snapshots: '@esbuild/linux-ia32@0.19.12': optional: true - '@esbuild/linux-ia32@0.20.2': - optional: true - '@esbuild/linux-ia32@0.21.5': optional: true @@ -16780,9 +16201,6 @@ snapshots: '@esbuild/linux-loong64@0.19.12': optional: true - '@esbuild/linux-loong64@0.20.2': - optional: true - '@esbuild/linux-loong64@0.21.5': optional: true @@ -16804,9 +16222,6 @@ snapshots: '@esbuild/linux-mips64el@0.19.12': optional: true - '@esbuild/linux-mips64el@0.20.2': - optional: true - '@esbuild/linux-mips64el@0.21.5': optional: true @@ -16828,9 +16243,6 @@ snapshots: '@esbuild/linux-ppc64@0.19.12': optional: true - '@esbuild/linux-ppc64@0.20.2': - optional: true - '@esbuild/linux-ppc64@0.21.5': optional: true @@ -16852,9 +16264,6 @@ snapshots: '@esbuild/linux-riscv64@0.19.12': optional: true - '@esbuild/linux-riscv64@0.20.2': - optional: true - '@esbuild/linux-riscv64@0.21.5': optional: true @@ -16876,9 +16285,6 @@ snapshots: '@esbuild/linux-s390x@0.19.12': optional: true - '@esbuild/linux-s390x@0.20.2': - optional: true - '@esbuild/linux-s390x@0.21.5': optional: true @@ -16900,9 +16306,6 @@ snapshots: '@esbuild/linux-x64@0.19.12': optional: true - '@esbuild/linux-x64@0.20.2': - optional: true - '@esbuild/linux-x64@0.21.5': optional: true @@ -16927,9 +16330,6 @@ snapshots: '@esbuild/netbsd-x64@0.19.12': optional: true - '@esbuild/netbsd-x64@0.20.2': - optional: true - '@esbuild/netbsd-x64@0.21.5': optional: true @@ -16963,9 +16363,6 @@ snapshots: '@esbuild/openbsd-x64@0.19.12': optional: true 
- '@esbuild/openbsd-x64@0.20.2': - optional: true - '@esbuild/openbsd-x64@0.21.5': optional: true @@ -16987,9 +16384,6 @@ snapshots: '@esbuild/sunos-x64@0.19.12': optional: true - '@esbuild/sunos-x64@0.20.2': - optional: true - '@esbuild/sunos-x64@0.21.5': optional: true @@ -17011,9 +16405,6 @@ snapshots: '@esbuild/win32-arm64@0.19.12': optional: true - '@esbuild/win32-arm64@0.20.2': - optional: true - '@esbuild/win32-arm64@0.21.5': optional: true @@ -17035,9 +16426,6 @@ snapshots: '@esbuild/win32-ia32@0.19.12': optional: true - '@esbuild/win32-ia32@0.20.2': - optional: true - '@esbuild/win32-ia32@0.21.5': optional: true @@ -17059,9 +16447,6 @@ snapshots: '@esbuild/win32-x64@0.19.12': optional: true - '@esbuild/win32-x64@0.20.2': - optional: true - '@esbuild/win32-x64@0.21.5': optional: true @@ -19872,11 +19257,6 @@ snapshots: '@parcel/watcher-linux-x64-musl@2.4.1': optional: true - '@parcel/watcher-wasm@2.3.0': - dependencies: - is-glob: 4.0.3 - micromatch: 4.0.8 - '@parcel/watcher-wasm@2.4.1': dependencies: is-glob: 4.0.3 @@ -20944,58 +20324,6 @@ snapshots: '@smithy/util-buffer-from': 4.0.0 tslib: 2.8.1 - '@solid-primitives/trigger@1.1.0(solid-js@1.8.7)': - dependencies: - '@solid-primitives/utils': 6.2.3(solid-js@1.8.7) - solid-js: 1.8.7 - - '@solid-primitives/utils@6.2.3(solid-js@1.8.7)': - dependencies: - solid-js: 1.8.7 - - '@solidjs/meta@0.29.4(solid-js@1.9.3)': - dependencies: - solid-js: 1.9.3 - - '@solidjs/router@0.15.1(solid-js@1.8.7)': - dependencies: - solid-js: 1.8.7 - optional: true - - '@solidjs/router@0.15.1(solid-js@1.9.3)': - dependencies: - solid-js: 1.9.3 - - '@solidjs/start@1.0.10(@testing-library/jest-dom@6.6.3)(solid-js@1.9.3)(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3))(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))': - dependencies: - '@vinxi/plugin-directives': 0.4.3(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3)) - '@vinxi/server-components': 0.4.3(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3)) - '@vinxi/server-functions': 0.4.3(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3)) - defu: 6.1.4 - error-stack-parser: 2.1.4 - html-to-image: 1.11.11 - radix3: 1.1.2 - seroval: 1.0.7 - seroval-plugins: 1.0.7(seroval@1.0.7) - shikiji: 0.9.19 - source-map-js: 1.2.1 - terracotta: 1.0.5(solid-js@1.9.3) - tinyglobby: 0.2.10 - vite-plugin-solid: 2.10.2(@testing-library/jest-dom@6.6.3)(solid-js@1.9.3)(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) - transitivePeerDependencies: - - '@testing-library/jest-dom' - - solid-js - - supports-color - - vinxi - - vite - - '@solidjs/testing-library@0.8.10(@solidjs/router@0.15.1(solid-js@1.8.7))(solid-js@1.8.7)': - dependencies: - '@testing-library/dom': 10.4.0 - solid-js: 1.8.7 - optionalDependencies: - '@solidjs/router': 0.15.1(solid-js@1.8.7) - '@sveltejs/acorn-typescript@1.0.5(acorn@8.14.1)': dependencies: acorn: 8.14.1 @@ -21177,8 +20505,6 @@ snapshots: '@types/connect': 3.4.38 '@types/node': 20.17.24 - '@types/braces@3.0.4': {} - '@types/bunyan@1.8.9': dependencies: '@types/node': 20.17.24 @@ -21285,10 +20611,6 @@ snapshots: '@types/methods@1.1.4': {} - '@types/micromatch@4.0.8': - dependencies: - '@types/braces': 3.0.4 - '@types/mime@1.3.5': {} '@types/ms@0.7.34': {} @@ -21771,61 +21093,6 @@ snapshots: '@opentelemetry/sdk-metrics': 
1.29.0(@opentelemetry/api@1.9.0) '@opentelemetry/sdk-trace-base': 1.29.0(@opentelemetry/api@1.9.0) - '@vinxi/listhen@1.5.6': - dependencies: - '@parcel/watcher': 2.4.1 - '@parcel/watcher-wasm': 2.3.0 - citty: 0.1.6 - clipboardy: 4.0.0 - consola: 3.2.3 - defu: 6.1.4 - get-port-please: 3.1.2 - h3: 1.13.0 - http-shutdown: 1.2.2 - jiti: 1.21.6 - mlly: 1.7.2 - node-forge: 1.3.1 - pathe: 1.1.2 - std-env: 3.8.0 - ufo: 1.5.4 - untun: 0.1.3 - uqr: 0.1.2 - - '@vinxi/plugin-directives@0.4.3(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3))': - dependencies: - '@babel/parser': 7.26.2 - acorn: 8.14.1 - acorn-jsx: 5.3.2(acorn@8.14.1) - acorn-loose: 8.4.0 - acorn-typescript: 1.4.13(acorn@8.14.1) - astring: 1.8.6 - magicast: 0.2.11 - recast: 0.23.9 - tslib: 2.8.1 - vinxi: 0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3) - - '@vinxi/server-components@0.4.3(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3))': - dependencies: - '@vinxi/plugin-directives': 0.4.3(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3)) - acorn: 8.14.1 - acorn-loose: 8.4.0 - acorn-typescript: 1.4.13(acorn@8.14.1) - astring: 1.8.6 - magicast: 0.2.11 - recast: 0.23.9 - vinxi: 0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3) - - '@vinxi/server-functions@0.4.3(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3))': - dependencies: - '@vinxi/plugin-directives': 0.4.3(vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3)) - acorn: 8.14.1 - acorn-loose: 8.4.0 - acorn-typescript: 1.4.13(acorn@8.14.1) - astring: 1.8.6 - magicast: 0.2.11 - recast: 0.23.9 - vinxi: 0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3) - '@vitejs/plugin-react@4.3.3(vite@6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))': dependencies: '@babel/core': 7.25.2 @@ -22276,10 +21543,6 @@ snapshots: dependencies: acorn: 8.14.1 - acorn-typescript@1.4.13(acorn@8.14.1): - dependencies: - acorn: 8.14.1 - acorn-walk@8.3.3: dependencies: acorn: 8.12.1 @@ -22341,10 +21604,6 @@ snapshots: json-schema-traverse: 1.0.0 require-from-string: 2.0.2 - ansi-align@3.0.1: - dependencies: - string-width: 4.2.3 - ansi-colors@4.1.3: {} ansi-escapes@4.3.2: @@ -22553,8 +21812,6 @@ snapshots: astral-regex@2.0.0: {} - astring@1.8.6: {} - async-retry@1.3.3: dependencies: retry: 0.13.1 @@ -22664,15 +21921,6 @@ snapshots: '@types/babel__core': 7.20.5 '@types/babel__traverse': 7.20.6 - babel-plugin-jsx-dom-expressions@0.37.8(@babel/core@7.26.0): - dependencies: - '@babel/core': 7.26.0 - '@babel/helper-module-imports': 7.18.6 - '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.26.0) - '@babel/types': 7.26.0 - html-entities: 2.3.3 - validate-html-nesting: 1.2.2 - babel-preset-current-node-syntax@1.1.0(@babel/core@7.26.0): dependencies: '@babel/core': 7.26.0 @@ -22698,11 +21946,6 @@ snapshots: babel-plugin-jest-hoist: 29.6.3 babel-preset-current-node-syntax: 1.1.0(@babel/core@7.26.0) - babel-preset-solid@1.8.4(@babel/core@7.26.0): - dependencies: - '@babel/core': 7.26.0 - babel-plugin-jsx-dom-expressions: 0.37.8(@babel/core@7.26.0) - bail@2.0.2: {} balanced-match@1.0.2: {} @@ -22786,17 +22029,6 @@ snapshots: bowser@2.11.0: optional: true - boxen@7.1.1: - dependencies: - ansi-align: 
3.0.1 - camelcase: 7.0.1 - chalk: 5.3.0 - cli-boxes: 3.0.0 - string-width: 5.1.2 - type-fest: 2.19.0 - widest-line: 4.0.1 - wrap-ansi: 8.1.0 - brace-expansion@1.1.11: dependencies: balanced-match: 1.0.2 @@ -22943,8 +22175,6 @@ snapshots: camelcase@6.3.0: {} - camelcase@7.0.1: {} - caniuse-api@3.0.0: dependencies: browserslist: 4.24.0 @@ -23070,8 +22300,6 @@ snapshots: clear@0.1.0: {} - cli-boxes@3.0.0: {} - cli-cursor@3.1.0: dependencies: restore-cursor: 3.1.0 @@ -23324,8 +22552,6 @@ snapshots: shebang-command: 2.0.0 which: 2.0.2 - crossws@0.2.4: {} - crossws@0.3.1: dependencies: uncrypto: 0.1.3 @@ -23419,8 +22645,6 @@ snapshots: '@asamuzakjp/css-color': 2.8.3 rrweb-cssom: 0.8.0 - csstype@3.1.2: {} - csstype@3.1.3: {} cycled@1.2.0: {} @@ -23450,11 +22674,6 @@ snapshots: es-errors: 1.3.0 is-data-view: 1.0.1 - dax-sh@0.39.2: - dependencies: - '@deno/shim-deno': 0.19.2 - undici-types: 5.28.4 - db0@0.2.1: {} debug@2.6.9: @@ -23721,10 +22940,6 @@ snapshots: error-stack-parser-es@0.1.5: {} - error-stack-parser@2.1.4: - dependencies: - stackframe: 1.3.4 - errx@0.1.0: {} es-abstract@1.23.3: @@ -23899,32 +23114,6 @@ snapshots: '@esbuild/win32-ia32': 0.19.12 '@esbuild/win32-x64': 0.19.12 - esbuild@0.20.2: - optionalDependencies: - '@esbuild/aix-ppc64': 0.20.2 - '@esbuild/android-arm': 0.20.2 - '@esbuild/android-arm64': 0.20.2 - '@esbuild/android-x64': 0.20.2 - '@esbuild/darwin-arm64': 0.20.2 - '@esbuild/darwin-x64': 0.20.2 - '@esbuild/freebsd-arm64': 0.20.2 - '@esbuild/freebsd-x64': 0.20.2 - '@esbuild/linux-arm': 0.20.2 - '@esbuild/linux-arm64': 0.20.2 - '@esbuild/linux-ia32': 0.20.2 - '@esbuild/linux-loong64': 0.20.2 - '@esbuild/linux-mips64el': 0.20.2 - '@esbuild/linux-ppc64': 0.20.2 - '@esbuild/linux-riscv64': 0.20.2 - '@esbuild/linux-s390x': 0.20.2 - '@esbuild/linux-x64': 0.20.2 - '@esbuild/netbsd-x64': 0.20.2 - '@esbuild/openbsd-x64': 0.20.2 - '@esbuild/sunos-x64': 0.20.2 - '@esbuild/win32-arm64': 0.20.2 - '@esbuild/win32-ia32': 0.20.2 - '@esbuild/win32-x64': 0.20.2 - esbuild@0.21.5: optionalDependencies: '@esbuild/aix-ppc64': 0.21.5 @@ -24107,7 +23296,7 @@ snapshots: eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) eslint-plugin-jsx-a11y: 6.9.0(eslint@8.57.1) eslint-plugin-react: 7.35.0(eslint@8.57.1) eslint-plugin-react-hooks: 4.6.2(eslint@8.57.1) @@ -24182,8 +23371,8 @@ snapshots: debug: 4.4.0(supports-color@9.4.0) enhanced-resolve: 5.17.1 eslint: 8.57.1 - eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) + eslint-module-utils: 
2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) fast-glob: 3.3.2 get-tsconfig: 4.7.2 is-core-module: 2.13.1 @@ -24222,7 +23411,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1): + eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: @@ -24255,7 +23444,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1): + eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: @@ -24304,7 +23493,7 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-plugin-import@2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1): + eslint-plugin-import@2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): dependencies: array-includes: 3.1.8 array.prototype.findlastindex: 1.2.5 @@ -24314,7 +23503,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) hasown: 2.0.2 is-core-module: 2.15.0 is-glob: 4.0.3 @@ -25000,8 +24189,6 @@ snapshots: flow-parser@0.252.0: {} - follow-redirects@1.15.3: {} - follow-redirects@1.15.9: {} for-each@0.3.3: @@ -25386,21 +24573,6 @@ snapshots: dependencies: duplexer: 0.1.2 - h3@1.11.1: - 
dependencies: - cookie-es: 1.2.2 - crossws: 0.2.4 - defu: 6.1.4 - destr: 2.0.3 - iron-webcrypto: 1.2.1 - ohash: 1.1.4 - radix3: 1.1.2 - ufo: 1.5.4 - uncrypto: 0.1.3 - unenv: 1.10.0 - transitivePeerDependencies: - - uWebSockets.js - h3@1.13.0: dependencies: cookie-es: 1.2.2 @@ -25484,14 +24656,10 @@ snapshots: dependencies: whatwg-encoding: 3.1.1 - html-entities@2.3.3: {} - html-escaper@2.0.2: {} html-tags@3.3.1: {} - html-to-image@1.11.11: {} - html-url-attributes@3.0.1: {} http-assert@1.5.0: @@ -25536,14 +24704,6 @@ snapshots: transitivePeerDependencies: - supports-color - http-proxy@1.18.1: - dependencies: - eventemitter3: 4.0.7 - follow-redirects: 1.15.3 - requires-port: 1.0.0 - transitivePeerDependencies: - - debug - http-shutdown@1.2.2: {} https-proxy-agent@5.0.1: @@ -25955,8 +25115,6 @@ snapshots: isexe@2.0.0: {} - isexe@3.1.1: {} - isobject@3.0.1: {} istanbul-lib-coverage@3.2.2: {} @@ -26949,12 +26107,6 @@ snapshots: dependencies: '@jridgewell/sourcemap-codec': 1.5.0 - magicast@0.2.11: - dependencies: - '@babel/parser': 7.26.2 - '@babel/types': 7.26.0 - recast: 0.23.9 - magicast@0.3.5: dependencies: '@babel/parser': 7.26.2 @@ -27101,10 +26253,6 @@ snapshots: dependencies: fs-monkey: 1.0.6 - merge-anything@5.1.7: - dependencies: - is-what: 4.1.16 - merge-descriptors@1.0.3: {} merge-descriptors@2.0.0: {} @@ -28489,14 +27637,6 @@ snapshots: postcss: 8.4.49 ts-node: 10.9.2(@types/node@20.17.24)(typescript@5.6.3) - postcss-load-config@4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@22.7.4)(typescript@5.6.3)): - dependencies: - lilconfig: 3.1.3 - yaml: 2.7.0 - optionalDependencies: - postcss: 8.4.49 - ts-node: 10.9.2(@types/node@22.7.4)(typescript@5.6.3) - postcss-load-config@4.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)): dependencies: lilconfig: 3.1.3 @@ -29387,20 +28527,6 @@ snapshots: dependencies: randombytes: 2.1.0 - seroval-plugins@1.0.7(seroval@1.0.7): - dependencies: - seroval: 1.0.7 - - seroval-plugins@1.1.1(seroval@1.1.1): - dependencies: - seroval: 1.1.1 - - seroval@0.15.1: {} - - seroval@1.0.7: {} - - seroval@1.1.1: {} - serve-placeholder@2.0.2: dependencies: defu: 6.1.4 @@ -29491,12 +28617,6 @@ snapshots: shell-quote@1.8.1: {} - shikiji-core@0.9.19: {} - - shikiji@0.9.19: - dependencies: - shikiji-core: 0.9.19 - shimmer@1.2.1: {} side-channel@1.0.4: @@ -29560,39 +28680,6 @@ snapshots: smob@1.4.1: {} - solid-js@1.8.7: - dependencies: - csstype: 3.1.2 - seroval: 0.15.1 - - solid-js@1.9.3: - dependencies: - csstype: 3.1.3 - seroval: 1.1.1 - seroval-plugins: 1.1.1(seroval@1.1.1) - - solid-refresh@0.5.3(solid-js@1.8.7): - dependencies: - '@babel/generator': 7.26.2 - '@babel/helper-module-imports': 7.25.9 - '@babel/types': 7.26.0 - solid-js: 1.8.7 - transitivePeerDependencies: - - supports-color - - solid-refresh@0.6.3(solid-js@1.9.3): - dependencies: - '@babel/generator': 7.26.2 - '@babel/helper-module-imports': 7.25.9 - '@babel/types': 7.26.0 - solid-js: 1.9.3 - transitivePeerDependencies: - - supports-color - - solid-use@0.8.0(solid-js@1.9.3): - dependencies: - solid-js: 1.9.3 - sonic-boom@4.1.0: dependencies: atomic-sleep: 1.0.0 @@ -29641,8 +28728,6 @@ snapshots: stackback@0.0.2: {} - stackframe@1.3.4: {} - stacktrace-parser@0.1.10: dependencies: type-fest: 0.7.1 @@ -30054,33 +29139,6 @@ snapshots: transitivePeerDependencies: - ts-node - tailwindcss@3.4.15(ts-node@10.9.2(@types/node@22.7.4)(typescript@5.6.3)): - dependencies: - '@alloc/quick-lru': 5.2.0 - arg: 5.0.2 - chokidar: 3.6.0 - didyoumean: 1.2.2 - dlv: 1.1.3 - fast-glob: 3.3.2 - 
glob-parent: 6.0.2 - is-glob: 4.0.3 - jiti: 1.21.6 - lilconfig: 2.1.0 - micromatch: 4.0.8 - normalize-path: 3.0.0 - object-hash: 3.0.0 - picocolors: 1.1.1 - postcss: 8.4.49 - postcss-import: 15.1.0(postcss@8.4.49) - postcss-js: 4.0.1(postcss@8.4.49) - postcss-load-config: 4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@22.7.4)(typescript@5.6.3)) - postcss-nested: 6.2.0(postcss@8.4.49) - postcss-selector-parser: 6.1.2 - resolve: 1.22.8 - sucrase: 3.35.0 - transitivePeerDependencies: - - ts-node - tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.7.4)(typescript@5.6.3)): dependencies: '@alloc/quick-lru': 5.2.0 @@ -30151,11 +29209,6 @@ snapshots: transitivePeerDependencies: - debug - terracotta@1.0.5(solid-js@1.9.3): - dependencies: - solid-js: 1.9.3 - solid-use: 0.8.0(solid-js@1.9.3) - terser-webpack-plugin@5.3.10(esbuild@0.18.20)(webpack@5.96.1(esbuild@0.18.20)): dependencies: '@jridgewell/trace-mapping': 0.3.25 @@ -30600,8 +29653,6 @@ snapshots: type-fest@1.4.0: {} - type-fest@2.19.0: {} - type-fest@4.26.1: {} type-is@1.6.18: @@ -30693,8 +29744,6 @@ snapshots: undici-types@5.26.5: {} - undici-types@5.28.4: {} - undici-types@6.19.8: {} undici@5.28.4: @@ -30954,8 +30003,6 @@ snapshots: optionalDependencies: typescript: 5.6.3 - validate-html-nesting@1.2.2: {} - vary@1.1.2: {} vfile-message@4.0.2: @@ -30968,76 +30015,6 @@ snapshots: '@types/unist': 3.0.3 vfile-message: 4.0.2 - vinxi@0.4.3(@types/node@22.7.4)(@upstash/redis@1.34.3)(ioredis@5.4.1)(terser@5.31.3)(typescript@5.6.3): - dependencies: - '@babel/core': 7.26.0 - '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.26.0) - '@babel/plugin-syntax-typescript': 7.25.9(@babel/core@7.26.0) - '@types/micromatch': 4.0.8 - '@vinxi/listhen': 1.5.6 - boxen: 7.1.1 - chokidar: 3.6.0 - citty: 0.1.6 - consola: 3.2.3 - crossws: 0.2.4 - dax-sh: 0.39.2 - defu: 6.1.4 - es-module-lexer: 1.5.4 - esbuild: 0.20.2 - fast-glob: 3.3.2 - get-port-please: 3.1.2 - h3: 1.11.1 - hookable: 5.5.3 - http-proxy: 1.18.1 - micromatch: 4.0.8 - nitropack: 2.10.4(@upstash/redis@1.34.3)(typescript@5.6.3) - node-fetch-native: 1.6.4 - path-to-regexp: 6.3.0 - pathe: 1.1.2 - radix3: 1.1.2 - resolve: 1.22.8 - serve-placeholder: 2.0.2 - serve-static: 1.16.2 - ufo: 1.5.4 - unctx: 2.3.1 - unenv: 1.10.0 - unstorage: 1.13.1(@upstash/redis@1.34.3)(ioredis@5.4.1) - vite: 5.4.11(@types/node@22.7.4)(terser@5.31.3) - zod: 3.23.8 - transitivePeerDependencies: - - '@azure/app-configuration' - - '@azure/cosmos' - - '@azure/data-tables' - - '@azure/identity' - - '@azure/keyvault-secrets' - - '@azure/storage-blob' - - '@capacitor/preferences' - - '@electric-sql/pglite' - - '@libsql/client' - - '@netlify/blobs' - - '@planetscale/database' - - '@types/node' - - '@upstash/redis' - - '@vercel/kv' - - better-sqlite3 - - debug - - drizzle-orm - - encoding - - idb-keyval - - ioredis - - less - - lightningcss - - mysql2 - - sass - - sass-embedded - - stylus - - sugarss - - supports-color - - terser - - typescript - - uWebSockets.js - - xml2js - vite-hot-client@0.2.3(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)): dependencies: vite: 5.4.11(@types/node@20.17.24)(terser@5.31.3) @@ -31137,34 +30114,6 @@ snapshots: - rollup - supports-color - vite-plugin-solid@2.10.2(@testing-library/jest-dom@6.6.3)(solid-js@1.9.3)(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)): - dependencies: - '@babel/core': 7.26.0 - '@types/babel__core': 7.20.5 - babel-preset-solid: 1.8.4(@babel/core@7.26.0) - merge-anything: 5.1.7 - solid-js: 1.9.3 - solid-refresh: 0.6.3(solid-js@1.9.3) - vite: 
6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0) - vitefu: 0.2.5(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) - optionalDependencies: - '@testing-library/jest-dom': 6.6.3 - transitivePeerDependencies: - - supports-color - - vite-plugin-solid@2.7.2(solid-js@1.8.7): - dependencies: - '@babel/core': 7.26.0 - '@babel/preset-typescript': 7.26.0(@babel/core@7.26.0) - '@types/babel__core': 7.20.5 - babel-preset-solid: 1.8.4(@babel/core@7.26.0) - merge-anything: 5.1.7 - solid-js: 1.8.7 - solid-refresh: 0.5.3(solid-js@1.8.7) - vitefu: 0.2.5(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) - transitivePeerDependencies: - - supports-color - vite-plugin-vue-inspector@5.1.3(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)): dependencies: '@babel/core': 7.26.0 @@ -31226,10 +30175,6 @@ snapshots: tsx: 4.19.2 yaml: 2.7.0 - vitefu@0.2.5(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)): - optionalDependencies: - vite: 6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0) - vitefu@1.0.6(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)): optionalDependencies: vite: 6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0) @@ -31612,10 +30557,6 @@ snapshots: dependencies: isexe: 2.0.0 - which@4.0.0: - dependencies: - isexe: 3.1.1 - why-is-node-running@2.3.0: dependencies: siginfo: 2.0.0 @@ -31625,10 +30566,6 @@ snapshots: dependencies: string-width: 4.2.3 - widest-line@4.0.1: - dependencies: - string-width: 5.1.2 - word-wrap@1.2.5: {} wrap-ansi@6.2.0: From 31b9047102efede13d1fedc3c0c0f66c500d116e Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Wed, 2 Apr 2025 17:31:13 +0200 Subject: [PATCH 0002/1307] chore (provider-utils): remove json and streaming test server (#5510) --- packages/provider-utils/src/test/index.ts | 2 - .../src/test/json-test-server.ts | 72 ---------------- .../src/test/streaming-test-server.ts | 83 ------------------- 3 files changed, 157 deletions(-) delete mode 100644 packages/provider-utils/src/test/json-test-server.ts delete mode 100644 packages/provider-utils/src/test/streaming-test-server.ts diff --git a/packages/provider-utils/src/test/index.ts b/packages/provider-utils/src/test/index.ts index e6e50179195b..4fbea1b76ffc 100644 --- a/packages/provider-utils/src/test/index.ts +++ b/packages/provider-utils/src/test/index.ts @@ -3,8 +3,6 @@ export * from './convert-array-to-readable-stream'; export * from './convert-async-iterable-to-array'; export * from './convert-readable-stream-to-array'; export * from './convert-response-stream-to-array'; -export * from './json-test-server'; export * from './mock-id'; -export * from './streaming-test-server'; export * from './test-server'; export * from './unified-test-server'; diff --git a/packages/provider-utils/src/test/json-test-server.ts b/packages/provider-utils/src/test/json-test-server.ts deleted file mode 100644 index 2916961f6821..000000000000 --- a/packages/provider-utils/src/test/json-test-server.ts +++ /dev/null @@ -1,72 +0,0 @@ -import { HttpResponse, http } from 'msw'; -import { SetupServer, setupServer } from 'msw/node'; - -/** - * @deprecated Use createTestServer instead - */ -export class JsonTestServer { - readonly server: SetupServer; - - responseHeaders: Record = {}; - responseBodyJson: any = {}; - - request: Request | undefined; - - /** - * @deprecated Use createTestServer instead - */ - constructor(url: string) { - const 
responseBodyJson = () => this.responseBodyJson;
-
-    this.server = setupServer(
-      http.post(url, ({ request }) => {
-        this.request = request;
-
-        return HttpResponse.json(responseBodyJson(), {
-          headers: {
-            'Content-Type': 'application/json',
-            ...this.responseHeaders,
-          },
-        });
-      }),
-    );
-  }
-
-  async getRequestBodyJson() {
-    expect(this.request).toBeDefined();
-    return JSON.parse(await this.request!.text());
-  }
-
-  async getRequestHeaders() {
-    expect(this.request).toBeDefined();
-    const requestHeaders = this.request!.headers;
-
-    // convert headers to object for easier comparison
-    const headersObject: Record<string, string> = {};
-    requestHeaders.forEach((value, key) => {
-      headersObject[key] = value;
-    });
-
-    return headersObject;
-  }
-
-  async getRequestUrlSearchParams() {
-    expect(this.request).toBeDefined();
-    return new URL(this.request!.url).searchParams;
-  }
-
-  async getRequestUrl() {
-    expect(this.request).toBeDefined();
-    return new URL(this.request!.url).toString();
-  }
-
-  setupTestEnvironment() {
-    beforeAll(() => this.server.listen());
-    beforeEach(() => {
-      this.responseBodyJson = {};
-      this.request = undefined;
-    });
-    afterEach(() => this.server.resetHandlers());
-    afterAll(() => this.server.close());
-  }
-}
diff --git a/packages/provider-utils/src/test/streaming-test-server.ts b/packages/provider-utils/src/test/streaming-test-server.ts
deleted file mode 100644
index 99e24ce79630..000000000000
--- a/packages/provider-utils/src/test/streaming-test-server.ts
+++ /dev/null
@@ -1,83 +0,0 @@
-import { HttpResponse, http } from 'msw';
-import { SetupServer, setupServer } from 'msw/node';
-
-/**
- * @deprecated Use createTestServer instead
- */
-export class StreamingTestServer {
-  readonly server: SetupServer;
-
-  responseHeaders: Record<string, string> = {};
-  responseChunks: any[] = [];
-
-  request: Request | undefined;
-
-  /**
-   * @deprecated Use createTestServer instead
-   */
-  constructor(url: string) {
-    const responseChunks = () => this.responseChunks;
-
-    this.server = setupServer(
-      http.post(url, ({ request }) => {
-        this.request = request;
-
-        const encoder = new TextEncoder();
-        const stream = new ReadableStream({
-          async start(controller) {
-            try {
-              for (const chunk of responseChunks()) {
-                controller.enqueue(encoder.encode(chunk));
-              }
-            } finally {
-              controller.close();
-            }
-          },
-        });
-
-        return new HttpResponse(stream, {
-          status: 200,
-          headers: {
-            'Content-Type': 'text/event-stream',
-            'Cache-Control': 'no-cache',
-            Connection: 'keep-alive',
-            ...this.responseHeaders,
-          },
-        });
-      }),
-    );
-  }
-
-  async getRequestBodyJson() {
-    expect(this.request).toBeDefined();
-    return JSON.parse(await this.request!.text());
-  }
-
-  async getRequestHeaders() {
-    expect(this.request).toBeDefined();
-    const requestHeaders = this.request!.headers;
-
-    // convert headers to object for easier comparison
-    const headersObject: Record<string, string> = {};
-    requestHeaders.forEach((value, key) => {
-      headersObject[key] = value;
-    });
-
-    return headersObject;
-  }
-
-  async getRequestUrlSearchParams() {
-    expect(this.request).toBeDefined();
-    return new URL(this.request!.url).searchParams;
-  }
-
-  setupTestEnvironment() {
-    beforeAll(() => this.server.listen());
-    beforeEach(() => {
-      this.responseChunks = [];
-      this.request = undefined;
-    });
-    afterEach(() => this.server.resetHandlers());
-    afterAll(() => this.server.close());
-  }
-}

From f8794558765be87afe84bdf027efb17bb9a6b504 Mon Sep 17 00:00:00 2001
From: Lars Grammel
Date: Wed, 2 Apr 2025 17:44:24 +0200
Subject: [PATCH 0003/1307] chore (ci): enable ci on v5 branch
(#5513) --- .github/workflows/ci.yml | 4 ++-- .github/workflows/quality.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7de754472594..163960165d9b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,9 +2,9 @@ name: CI on: push: - branches: [main] + branches: [main, v5] pull_request: - branches: [main] + branches: [main, v5] jobs: test: diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml index 42a185321759..6214494aba89 100644 --- a/.github/workflows/quality.yml +++ b/.github/workflows/quality.yml @@ -2,9 +2,9 @@ name: Quality on: push: - branches: [main] + branches: [main, v5] pull_request: - branches: [main] + branches: [main, v5] jobs: prettier: From c0ae429f1d9608ace90bc2d3871877e5b71b2064 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Wed, 2 Apr 2025 18:35:28 +0200 Subject: [PATCH 0004/1307] chore (ui): remove useAssistant (#5512) --- .../01-next/120-stream-assistant-response.mdx | 92 ----- ...1-stream-assistant-response-with-tools.mdx | 149 -------- .../20-rsc/120-stream-assistant-response.mdx | 218 ------------ ...1-stream-assistant-response-with-tools.mdx | 287 ---------------- .../01-navigating-the-library.mdx | 1 - content/docs/02-guides/20-sonnet-3-7.mdx | 2 +- content/docs/02-guides/21-llama-3_1.mdx | 2 +- content/docs/02-guides/22-gpt-4-5.mdx | 2 +- content/docs/02-guides/23-o1.mdx | 2 +- content/docs/02-guides/24-o3.mdx | 2 +- content/docs/02-guides/25-r1.mdx | 2 +- content/docs/04-ai-sdk-ui/01-overview.mdx | 4 +- .../04-ai-sdk-ui/10-openai-assistants.mdx | 212 ------------ .../docs/04-ai-sdk-ui/21-error-handling.mdx | 2 +- content/docs/04-ai-sdk-ui/index.mdx | 5 - .../02-ai-sdk-ui/20-use-assistant.mdx | 159 --------- .../02-ai-sdk-ui/21-assistant-response.mdx | 72 ---- .../docs/07-reference/02-ai-sdk-ui/index.mdx | 18 - .../app/api/assistant-tools/route.ts | 71 ---- .../app/api/assistant/route.ts | 35 -- .../index.tsx | 31 -- .../stream-assistant-response/index.tsx | 31 -- .../stream-assistant-switch-threads/index.tsx | 82 ----- .../app/api/assistant/assistant-setup.md | 63 ---- .../next-openai/app/api/assistant/route.ts | 126 ------- examples/next-openai/app/assistant/page.tsx | 87 ----- .../actions.tsx | 161 --------- .../ai.ts | 10 - .../function.ts | 45 --- .../layout.tsx | 6 - .../message.tsx | 9 - .../page.tsx | 65 ---- .../stream-assistant-responses/actions.tsx | 95 ------ .../app/stream-assistant-responses/ai.ts | 10 - .../stream-assistant-responses/functions.tsx | 43 --- .../app/stream-assistant-responses/layout.tsx | 6 - .../stream-assistant-responses/message.tsx | 9 - .../app/stream-assistant-responses/page.tsx | 64 ---- .../nuxt-openai/pages/assistant/index.vue | 83 ----- examples/nuxt-openai/server/api/assistant.ts | 130 ------- packages/ai/core/index.ts | 6 - packages/ai/react/index.ts | 6 - packages/ai/streams/assistant-response.ts | 152 --------- packages/ai/streams/index.ts | 1 - packages/react/README.md | 2 +- packages/react/src/index.ts | 1 - packages/react/src/use-assistant.ts | 285 ---------------- packages/react/src/use-assistant.ui.test.tsx | 320 ------------------ .../src/assistant-stream-parts.test.ts | 20 -- .../ui-utils/src/assistant-stream-parts.ts | 220 ------------ packages/ui-utils/src/index.ts | 9 - .../src/process-assistant-stream.test.ts | 305 ----------------- .../ui-utils/src/process-assistant-stream.ts | 108 ------ packages/ui-utils/src/types.ts | 26 -- packages/ui-utils/src/use-assistant-types.ts | 46 --- 
packages/vue/README.md | 1 - .../src/TestChatAssistantStreamComponent.vue | 25 -- ...TestChatAssistantThreadChangeComponent.vue | 31 -- packages/vue/src/index.ts | 1 - packages/vue/src/use-assistant.ts | 286 ---------------- packages/vue/src/use-assistant.ui.test.tsx | 243 ------------- 61 files changed, 9 insertions(+), 4578 deletions(-) delete mode 100644 content/cookbook/01-next/120-stream-assistant-response.mdx delete mode 100644 content/cookbook/01-next/121-stream-assistant-response-with-tools.mdx delete mode 100644 content/cookbook/20-rsc/120-stream-assistant-response.mdx delete mode 100644 content/cookbook/20-rsc/121-stream-assistant-response-with-tools.mdx delete mode 100644 content/docs/04-ai-sdk-ui/10-openai-assistants.mdx delete mode 100644 content/docs/07-reference/02-ai-sdk-ui/20-use-assistant.mdx delete mode 100644 content/docs/07-reference/02-ai-sdk-ui/21-assistant-response.mdx delete mode 100644 examples/next-openai-pages/app/api/assistant-tools/route.ts delete mode 100644 examples/next-openai-pages/app/api/assistant/route.ts delete mode 100644 examples/next-openai-pages/pages/assistants/stream-assistant-response-with-tools/index.tsx delete mode 100644 examples/next-openai-pages/pages/assistants/stream-assistant-response/index.tsx delete mode 100644 examples/next-openai-pages/pages/assistants/stream-assistant-switch-threads/index.tsx delete mode 100644 examples/next-openai/app/api/assistant/assistant-setup.md delete mode 100644 examples/next-openai/app/api/assistant/route.ts delete mode 100644 examples/next-openai/app/assistant/page.tsx delete mode 100644 examples/next-openai/app/stream-assistant-responses-with-tools/actions.tsx delete mode 100644 examples/next-openai/app/stream-assistant-responses-with-tools/ai.ts delete mode 100644 examples/next-openai/app/stream-assistant-responses-with-tools/function.ts delete mode 100644 examples/next-openai/app/stream-assistant-responses-with-tools/layout.tsx delete mode 100644 examples/next-openai/app/stream-assistant-responses-with-tools/message.tsx delete mode 100644 examples/next-openai/app/stream-assistant-responses-with-tools/page.tsx delete mode 100644 examples/next-openai/app/stream-assistant-responses/actions.tsx delete mode 100644 examples/next-openai/app/stream-assistant-responses/ai.ts delete mode 100644 examples/next-openai/app/stream-assistant-responses/functions.tsx delete mode 100644 examples/next-openai/app/stream-assistant-responses/layout.tsx delete mode 100644 examples/next-openai/app/stream-assistant-responses/message.tsx delete mode 100644 examples/next-openai/app/stream-assistant-responses/page.tsx delete mode 100644 examples/nuxt-openai/pages/assistant/index.vue delete mode 100644 examples/nuxt-openai/server/api/assistant.ts delete mode 100644 packages/ai/streams/assistant-response.ts delete mode 100644 packages/react/src/use-assistant.ts delete mode 100644 packages/react/src/use-assistant.ui.test.tsx delete mode 100644 packages/ui-utils/src/assistant-stream-parts.test.ts delete mode 100644 packages/ui-utils/src/assistant-stream-parts.ts delete mode 100644 packages/ui-utils/src/process-assistant-stream.test.ts delete mode 100644 packages/ui-utils/src/process-assistant-stream.ts delete mode 100644 packages/ui-utils/src/use-assistant-types.ts delete mode 100644 packages/vue/src/TestChatAssistantStreamComponent.vue delete mode 100644 packages/vue/src/TestChatAssistantThreadChangeComponent.vue delete mode 100644 packages/vue/src/use-assistant.ts delete mode 100644 packages/vue/src/use-assistant.ui.test.tsx diff 
--git a/content/cookbook/01-next/120-stream-assistant-response.mdx b/content/cookbook/01-next/120-stream-assistant-response.mdx
deleted file mode 100644
index 9dc2613d8e3e..000000000000
--- a/content/cookbook/01-next/120-stream-assistant-response.mdx
+++ /dev/null
@@ -1,92 +0,0 @@
----
-title: Stream Assistant Response
-description: Learn how to stream OpenAI Assistant's response using the AI SDK and Next.js
-tags: ['next', 'streaming', 'assistant']
----
-
-# Stream Assistant Response
-
-## Client
-
-Let's create a simple chat interface that allows users to send messages to the assistant and receive responses. You will integrate the `useAssistant` hook from `@ai-sdk/react` to stream the messages and status.
-
-```tsx filename='app/page.tsx'
-'use client';
-
-import { Message, useAssistant } from '@ai-sdk/react';
-
-export default function Page() {
-  const { status, messages, input, submitMessage, handleInputChange } =
-    useAssistant({ api: '/api/assistant' });
-
-  return (
-    <div>
-      <div>status: {status}</div>
-
-      <div>
-        {messages.map((message: Message) => (
-          <div key={message.id}>
-            <strong>{`${message.role}: `}</strong>
-            <div>{message.content}</div>
-          </div>
-        ))}
-      </div>
-
-      <form onSubmit={submitMessage}>
-        <input value={input} onChange={handleInputChange} />
-      </form>
-    </div>
- ); -} -``` - -## Server - -Next, you will create an API route for `api/assistant` to handle the assistant's messages and responses. You will use the `AssistantResponse` function from `ai` to stream the assistant's responses back to the `useAssistant` hook on the client. - -```tsx filename='app/api/assistant/route.ts' -import OpenAI from 'openai'; -import { AssistantResponse } from 'ai'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -export async function POST(req: Request) { - const input: { - threadId: string | null; - message: string; - } = await req.json(); - - const threadId = input.threadId ?? (await openai.beta.threads.create({})).id; - - const createdMessage = await openai.beta.threads.messages.create(threadId, { - role: 'user', - content: input.message, - }); - - return AssistantResponse( - { threadId, messageId: createdMessage.id }, - async ({ forwardStream }) => { - const runStream = openai.beta.threads.runs.stream(threadId, { - assistant_id: - process.env.ASSISTANT_ID ?? - (() => { - throw new Error('ASSISTANT_ID environment is not set'); - })(), - }); - - await forwardStream(runStream); - }, - ); -} -``` - ---- - - diff --git a/content/cookbook/01-next/121-stream-assistant-response-with-tools.mdx b/content/cookbook/01-next/121-stream-assistant-response-with-tools.mdx deleted file mode 100644 index a0134cc9195a..000000000000 --- a/content/cookbook/01-next/121-stream-assistant-response-with-tools.mdx +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: Stream Assistant Response with Tools -description: Learn how to stream OpenAI Assistant's response using the AI SDK and Next.js -tags: ['next', 'streaming', 'assistant'] ---- - -# Stream Assistant Response with Tools - -Let's create a simple chat interface that allows users to send messages to the assistant and receive responses and give it the ability to use tools. You will integrate the `useAssistant` hook from `@ai-sdk/react` to stream the messages and status. - -You will need to provide the list of tools on the OpenAI [Assistant Dashboard](https://platform.openai.com/assistants). You can use the following schema to create a tool to convert celsius to fahrenheit. - -```json -{ - "name": "celsiusToFahrenheit", - "description": "convert celsius to fahrenheit.", - "parameters": { - "type": "object", - "properties": { - "value": { - "type": "number", - "description": "the value in celsius." - } - }, - "required": ["value"] - } -} -``` - -## Client - -Let's create a simple chat interface that allows users to send messages to the assistant and receive responses. You will integrate the `useAssistant` hook from `@ai-sdk/react` to stream the messages and status. - -```tsx filename='app/page.tsx' -'use client'; - -import { Message, useAssistant } from '@ai-sdk/react'; - -export default function Page() { - const { status, messages, input, submitMessage, handleInputChange } = - useAssistant({ api: '/api/assistant' }); - - return ( -
-    <div>
-      <div>status: {status}</div>
-
-      <div>
-        {messages.map((message: Message) => (
-          <div key={message.id}>
-            <div>{`${message.role}: `}</div>
-            <div>{message.content}</div>
-          </div>
-        ))}
-      </div>
-
-      <form onSubmit={submitMessage}>
-        <input value={input} onChange={handleInputChange} />
-      </form>
-    </div>
- ); -} -``` - -## Server - -Next, you will create an API route for `api/assistant` to handle the assistant's messages and responses. You will use the `AssistantResponse` function from `ai` to stream the assistant's responses back to the `useAssistant` hook on the client. - -```tsx filename='app/api/assistant/route.ts' -import { AssistantResponse } from 'ai'; -import OpenAI from 'openai'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -export async function POST(req: Request) { - const input: { - threadId: string | null; - message: string; - } = await req.json(); - - const threadId = input.threadId ?? (await openai.beta.threads.create({})).id; - - const createdMessage = await openai.beta.threads.messages.create(threadId, { - role: 'user', - content: input.message, - }); - - return AssistantResponse( - { threadId, messageId: createdMessage.id }, - async ({ forwardStream }) => { - const runStream = openai.beta.threads.runs.stream(threadId, { - assistant_id: - process.env.ASSISTANT_ID ?? - (() => { - throw new Error('ASSISTANT_ID is not set'); - })(), - }); - - let runResult = await forwardStream(runStream); - - while ( - runResult?.status === 'requires_action' && - runResult.required_action?.type === 'submit_tool_outputs' - ) { - const tool_outputs = - runResult.required_action.submit_tool_outputs.tool_calls.map( - (toolCall: any) => { - const parameters = JSON.parse(toolCall.function.arguments); - - switch (toolCall.function.name) { - case 'celsiusToFahrenheit': - const celsius = parseFloat(parameters.value); - const fahrenheit = celsius * (9 / 5) + 32; - - return { - tool_call_id: toolCall.id, - output: `${celsius}°C is ${fahrenheit.toFixed(2)}°F`, - }; - - default: - throw new Error( - `Unknown tool call function: ${toolCall.function.name}`, - ); - } - }, - ); - - runResult = await forwardStream( - openai.beta.threads.runs.submitToolOutputsStream( - threadId, - runResult.id, - { tool_outputs }, - ), - ); - } - }, - ); -} -``` - ---- - - diff --git a/content/cookbook/20-rsc/120-stream-assistant-response.mdx b/content/cookbook/20-rsc/120-stream-assistant-response.mdx deleted file mode 100644 index b686df19c591..000000000000 --- a/content/cookbook/20-rsc/120-stream-assistant-response.mdx +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Stream Assistant Response -description: Learn how to generate text using the AI SDK and React Server Components. -tags: ['rsc', 'streaming', 'assistant'] ---- - -# Stream Assistant Responses - -In this example, you'll learn how to stream responses from OpenAI's [Assistant API](https://platform.openai.com/docs/assistants/overview) using `ai/rsc`. - -## Client - -In your client component, you will create a simple chat interface that allows users to send messages to the assistant and receive responses. The assistant's responses will be streamed in two parts: the status of the current run and the text content of the messages. 
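Before the full page component, a minimal editorial sketch of that two-part contract may help (this block is not part of the original patch; the `startRun` action name and the sample events are hypothetical). The idea: the server action returns one streamable for the run status and one for the message text, and the client renders both as they fill in.

```tsx
// Editorial sketch — not from the original patch. Uses the same `ai/rsc`
// streamable helpers as the surrounding examples; `startRun` is hypothetical.
'use server';

import { createStreamableUI, createStreamableValue } from 'ai/rsc';

export async function startRun() {
  const statusUIStream = createStreamableUI('thread.init'); // run status part
  const textStream = createStreamableValue(''); // message text part

  (async () => {
    // As run events arrive, push them into the two streams independently.
    statusUIStream.update('thread.run.created');
    textStream.append('Hello');
    textStream.append(', world!');

    // Close both streams when the run completes.
    statusUIStream.done();
    textStream.done();
  })();

  // The client receives both handles immediately; their contents stream in.
  return { status: statusUIStream.value, text: textStream.value };
}
```

The `submitMessage` action shown below follows this same pattern, driving both streams from real Assistant API run events.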
- -```tsx filename='app/page.tsx' -'use client'; - -import { useState } from 'react'; -import { ClientMessage } from './actions'; -import { useActions } from 'ai/rsc'; - -export default function Home() { - const [input, setInput] = useState(''); - const [messages, setMessages] = useState([]); - const { submitMessage } = useActions(); - - const handleSubmission = async () => { - setMessages(currentMessages => [ - ...currentMessages, - { - id: '123', - status: 'user.message.created', - text: input, - gui: null, - }, - ]); - - const response = await submitMessage(input); - setMessages(currentMessages => [...currentMessages, response]); - setInput(''); - }; - - return ( -
-    <div>
-      <div>
-        <input
-          value={input}
-          onChange={event => setInput(event.target.value)}
-          placeholder="Ask a question"
-          onKeyDown={event => {
-            if (event.key === 'Enter') {
-              handleSubmission();
-            }
-          }}
-        />
-      </div>
-
-      <div>
-        {messages.map(message => (
-          <div key={message.id}>
-            <div>
-              <div>{message.status}</div>
-            </div>
-            <div>{message.text}</div>
-          </div>
-        ))}
-      </div>
-    </div>
- ); -} -``` - -```tsx filename='app/message.tsx' -'use client'; - -import { StreamableValue, useStreamableValue } from 'ai/rsc'; - -export function Message({ textStream }: { textStream: StreamableValue }) { - const [text] = useStreamableValue(textStream); - - return
<div>{text}</div>
; -} -``` - -## Server - -In your server action, you will create a function called `submitMessage` that adds the user's message to the thread. The function will create a new thread if one does not exist and add the user's message to the thread. If a thread already exists, the function will add the user's message to the existing thread. The function will then create a run and stream the assistant's response to the client. Furthermore, the run queue is used to manage multiple runs in the same thread during the lifetime of the server action. - -```tsx filename='app/actions.tsx' -'use server'; - -import { generateId } from 'ai'; -import { createStreamableUI, createStreamableValue } from 'ai/rsc'; -import { OpenAI } from 'openai'; -import { ReactNode } from 'react'; -import { Message } from './message'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY, -}); - -export interface ClientMessage { - id: string; - status: ReactNode; - text: ReactNode; -} - -const ASSISTANT_ID = 'asst_xxxx'; -let THREAD_ID = ''; -let RUN_ID = ''; - -export async function submitMessage(question: string): Promise { - const statusUIStream = createStreamableUI('thread.init'); - - const textStream = createStreamableValue(''); - const textUIStream = createStreamableUI( - , - ); - - const runQueue = []; - - (async () => { - if (THREAD_ID) { - await openai.beta.threads.messages.create(THREAD_ID, { - role: 'user', - content: question, - }); - - const run = await openai.beta.threads.runs.create(THREAD_ID, { - assistant_id: ASSISTANT_ID, - stream: true, - }); - - runQueue.push({ id: generateId(), run }); - } else { - const run = await openai.beta.threads.createAndRun({ - assistant_id: ASSISTANT_ID, - stream: true, - thread: { - messages: [{ role: 'user', content: question }], - }, - }); - - runQueue.push({ id: generateId(), run }); - } - - while (runQueue.length > 0) { - const latestRun = runQueue.shift(); - - if (latestRun) { - for await (const delta of latestRun.run) { - const { data, event } = delta; - - statusUIStream.update(event); - - if (event === 'thread.created') { - THREAD_ID = data.id; - } else if (event === 'thread.run.created') { - RUN_ID = data.id; - } else if (event === 'thread.message.delta') { - data.delta.content?.map(part => { - if (part.type === 'text') { - if (part.text) { - textStream.append(part.text.value as string); - } - } - }); - } else if (event === 'thread.run.failed') { - console.error(data); - } - } - } - } - - statusUIStream.done(); - textStream.done(); - })(); - - return { - id: generateId(), - status: statusUIStream.value, - text: textUIStream.value, - }; -} -``` - -```tsx filename="app/ai.ts" -import { createAI } from 'ai/rsc'; -import { submitMessage } from './actions'; - -export const AI = createAI({ - actions: { - submitMessage, - }, - initialAIState: [], - initialUIState: [], -}); -``` - -And finally, make sure to update your layout component to wrap the children with the `AI` component. 
- -```tsx filename="app/layout.tsx" -import { ReactNode } from 'react'; -import { AI } from './ai'; - -export default function Layout({ children }: { children: ReactNode }) { - return {children}; -} -``` diff --git a/content/cookbook/20-rsc/121-stream-assistant-response-with-tools.mdx b/content/cookbook/20-rsc/121-stream-assistant-response-with-tools.mdx deleted file mode 100644 index cd24a11d7b5d..000000000000 --- a/content/cookbook/20-rsc/121-stream-assistant-response-with-tools.mdx +++ /dev/null @@ -1,287 +0,0 @@ ---- -title: Stream Assistant Response with Tools -description: Learn how to generate text using the AI SDK and React Server Components. -tags: ['rsc', 'streaming', 'assistant'] ---- - -# Stream Assistant Responses - -In this example, you'll learn how to stream responses along with tool calls from OpenAI's [Assistant API](https://platform.openai.com/docs/assistants/overview) using `ai/rsc`. - -## Client - -In your client component, you will create a simple chat interface that allows users to send messages to the assistant and receive responses. The assistant's responses will be streamed in two parts: the status of the current run and the text content of the messages. - -```tsx filename='app/page.tsx' -'use client'; - -import { useState } from 'react'; -import { ClientMessage, submitMessage } from './actions'; -import { useActions } from 'ai/rsc'; - -export default function Home() { - const [input, setInput] = useState(''); - const [messages, setMessages] = useState([]); - const { submitMessage } = useActions(); - - const handleSubmission = async () => { - setMessages(currentMessages => [ - ...currentMessages, - { - id: '123', - status: 'user.message.created', - text: input, - gui: null, - }, - ]); - - const response = await submitMessage(input); - setMessages(currentMessages => [...currentMessages, response]); - setInput(''); - }; - - return ( -
-    <div>
-      <div>
-        <input
-          value={input}
-          onChange={event => setInput(event.target.value)}
-          placeholder="Ask a question"
-          onKeyDown={event => {
-            if (event.key === 'Enter') {
-              handleSubmission();
-            }
-          }}
-        />
-      </div>
-
-      <div>
-        {messages.map(message => (
-          <div key={message.id}>
-            <div>
-              <div>{message.status}</div>
-            </div>
-            <div>{message.gui}</div>
-            <div>{message.text}</div>
-          </div>
-        ))}
-      </div>
-    </div>
- ); -} -``` - -```tsx filename='app/message.tsx' -'use client'; - -import { StreamableValue, useStreamableValue } from 'ai/rsc'; - -export function Message({ textStream }: { textStream: StreamableValue }) { - const [text] = useStreamableValue(textStream); - - return
<div>{text}</div>
; -} -``` - -## Server - -In your server action, you will create a function called `submitMessage` that adds the user's message to the thread. The function will create a new thread if one does not exist and add the user's message to the thread. If a thread already exists, the function will add the user's message to the existing thread. The function will then create a run and stream the assistant's response to the client. Furthermore, the run queue is used to manage multiple runs in the same thread during the lifetime of the server action. - -In case the assistant requires a tool call, the server action will handle the tool call and return the output to the assistant. In this example, the assistant requires a tool call to search for emails. The server action will search for emails based on the `query` and `has_attachments` parameters and return the output to the both the assistant and the client. - -```tsx filename='app/actions.tsx' -'use server'; - -import { generateId } from 'ai'; -import { createStreamableUI, createStreamableValue } from 'ai/rsc'; -import { OpenAI } from 'openai'; -import { ReactNode } from 'react'; -import { searchEmails } from './function'; -import { Message } from './message'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY, -}); - -export interface ClientMessage { - id: string; - status: ReactNode; - text: ReactNode; - gui: ReactNode; -} - -const ASSISTANT_ID = 'asst_xxxx'; -let THREAD_ID = ''; -let RUN_ID = ''; - -export async function submitMessage(question: string): Promise { - const status = createStreamableUI('thread.init'); - const textStream = createStreamableValue(''); - const textUIStream = createStreamableUI( - , - ); - const gui = createStreamableUI(); - - const runQueue = []; - - (async () => { - if (THREAD_ID) { - await openai.beta.threads.messages.create(THREAD_ID, { - role: 'user', - content: question, - }); - - const run = await openai.beta.threads.runs.create(THREAD_ID, { - assistant_id: ASSISTANT_ID, - stream: true, - }); - - runQueue.push({ id: generateId(), run }); - } else { - const run = await openai.beta.threads.createAndRun({ - assistant_id: ASSISTANT_ID, - stream: true, - thread: { - messages: [{ role: 'user', content: question }], - }, - }); - - runQueue.push({ id: generateId(), run }); - } - - while (runQueue.length > 0) { - const latestRun = runQueue.shift(); - - if (latestRun) { - for await (const delta of latestRun.run) { - const { data, event } = delta; - - status.update(event); - - if (event === 'thread.created') { - THREAD_ID = data.id; - } else if (event === 'thread.run.created') { - RUN_ID = data.id; - } else if (event === 'thread.message.delta') { - data.delta.content?.map((part: any) => { - if (part.type === 'text') { - if (part.text) { - textStream.append(part.text.value); - } - } - }); - } else if (event === 'thread.run.requires_action') { - if (data.required_action) { - if (data.required_action.type === 'submit_tool_outputs') { - const { tool_calls } = data.required_action.submit_tool_outputs; - const tool_outputs = []; - - for (const tool_call of tool_calls) { - const { id: toolCallId, function: fn } = tool_call; - const { name, arguments: args } = fn; - - if (name === 'search_emails') { - const { query, has_attachments } = JSON.parse(args); - - gui.append( -
-                      <div>
-                        <div>
-                          Searching for emails: {query}, has_attachments:
-                          {has_attachments ? 'true' : 'false'}
-                        </div>
-                      </div>,
-                    );
-
-                    await new Promise(resolve => setTimeout(resolve, 2000));
-
-                    const fakeEmails = searchEmails({ query, has_attachments });
-
-                    gui.append(
-                      <div>
-                        {fakeEmails.map(email => (
-                          <div key={email.id}>
-                            <div>
-                              <div>{email.subject}</div>
-                            </div>
-                            <div>{email.date}</div>
-                          </div>
-                        ))}
-                      </div>
, - ); - - tool_outputs.push({ - tool_call_id: toolCallId, - output: JSON.stringify(fakeEmails), - }); - } - } - - const nextRun: any = - await openai.beta.threads.runs.submitToolOutputs( - THREAD_ID, - RUN_ID, - { - tool_outputs, - stream: true, - }, - ); - - runQueue.push({ id: generateId(), run: nextRun }); - } - } - } else if (event === 'thread.run.failed') { - console.log(data); - } - } - } - } - - status.done(); - textUIStream.done(); - gui.done(); - })(); - - return { - id: generateId(), - status: status.value, - text: textUIStream.value, - gui: gui.value, - }; -} -``` - -```typescript filename='app/ai.ts' -import { createAI } from 'ai/rsc'; -import { submitMessage } from './actions'; - -export const AI = createAI({ - actions: { - submitMessage, - }, - initialAIState: [], - initialUIState: [], -}); -``` - -And finally, make sure to update your layout component to wrap the children with the `AI` component. - -```tsx filename="app/layout.tsx" -import { ReactNode } from 'react'; -import { AI } from './ai'; - -export default function Layout({ children }: { children: ReactNode }) { - return {children}; -} -``` diff --git a/content/docs/02-getting-started/01-navigating-the-library.mdx b/content/docs/02-getting-started/01-navigating-the-library.mdx index c0d28d67d9a8..8f55507dbb2a 100644 --- a/content/docs/02-getting-started/01-navigating-the-library.mdx +++ b/content/docs/02-getting-started/01-navigating-the-library.mdx @@ -56,7 +56,6 @@ AI SDK UI supports the following frameworks: [React](https://react.dev/), [Sve | [useChat](/docs/reference/ai-sdk-ui/use-chat) tool calling | | | | | [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | | | | [useObject](/docs/reference/ai-sdk-ui/use-object) | | | | -| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | | [Contributions](https://github.com/vercel/ai/blob/main/CONTRIBUTING.md) are diff --git a/content/docs/02-guides/20-sonnet-3-7.mdx b/content/docs/02-guides/20-sonnet-3-7.mdx index 5e6b4032c920..04f41c28beae 100644 --- a/content/docs/02-guides/20-sonnet-3-7.mdx +++ b/content/docs/02-guides/20-sonnet-3-7.mdx @@ -73,7 +73,7 @@ AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another po AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently. -With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app. +With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app. 
 Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and Claude 3.7 Sonnet:

diff --git a/content/docs/02-guides/21-llama-3_1.mdx b/content/docs/02-guides/21-llama-3_1.mdx
index e56bffd47b5c..95d59a2f8fd4 100644
--- a/content/docs/02-guides/21-llama-3_1.mdx
+++ b/content/docs/02-guides/21-llama-3_1.mdx
@@ -190,7 +190,7 @@ AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another po
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
 
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
 
 Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and Llama 3.1 (via [DeepInfra](https://deepinfra.com)):

diff --git a/content/docs/02-guides/22-gpt-4-5.mdx b/content/docs/02-guides/22-gpt-4-5.mdx
index 76958f923047..7736132b48bf 100644
--- a/content/docs/02-guides/22-gpt-4-5.mdx
+++ b/content/docs/02-guides/22-gpt-4-5.mdx
@@ -107,7 +107,7 @@ AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another po
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
 
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
 
 Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and OpenAI GPT-4.5:

diff --git a/content/docs/02-guides/23-o1.mdx b/content/docs/02-guides/23-o1.mdx
index d98e569d8ae2..f93f38a7f153 100644
--- a/content/docs/02-guides/23-o1.mdx
+++ b/content/docs/02-guides/23-o1.mdx
@@ -178,7 +178,7 @@ AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another po
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
 
 Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and OpenAI o1:

diff --git a/content/docs/02-guides/24-o3.mdx b/content/docs/02-guides/24-o3.mdx
index 12762bc2bd08..443c2953db89 100644
--- a/content/docs/02-guides/24-o3.mdx
+++ b/content/docs/02-guides/24-o3.mdx
@@ -147,7 +147,7 @@ AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another po
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
 
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
 
 Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and OpenAI o3-mini:

diff --git a/content/docs/02-guides/25-r1.mdx b/content/docs/02-guides/25-r1.mdx
index a4ca99eff4ff..b461ab594b0e 100644
--- a/content/docs/02-guides/25-r1.mdx
+++ b/content/docs/02-guides/25-r1.mdx
@@ -129,7 +129,7 @@ AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another po
 
 AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
 
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and DeepSeek R1: diff --git a/content/docs/04-ai-sdk-ui/01-overview.mdx b/content/docs/04-ai-sdk-ui/01-overview.mdx index ed42daa2daaf..790b23633041 100644 --- a/content/docs/04-ai-sdk-ui/01-overview.mdx +++ b/content/docs/04-ai-sdk-ui/01-overview.mdx @@ -7,12 +7,11 @@ description: An overview of AI SDK UI. AI SDK UI is designed to help you build interactive chat, completion, and assistant applications with ease. It is a **framework-agnostic toolkit**, streamlining the integration of advanced AI functionalities into your applications. -AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently. With four main hooks — **`useChat`**, **`useCompletion`**, **`useObject`**, and **`useAssistant`** — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app. +AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently. With four main hooks — **`useChat`**, **`useCompletion`**, and **`useObject`** — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app. - **[`useChat`](/docs/ai-sdk-ui/chatbot)** offers real-time streaming of chat messages, abstracting state management for inputs, messages, loading, and errors, allowing for seamless integration into any UI design. - **[`useCompletion`](/docs/ai-sdk-ui/completion)** enables you to handle text completions in your applications, managing the prompt input and automatically updating the UI as new completions are streamed. - **[`useObject`](/docs/ai-sdk-ui/object-generation)** is a hook that allows you to consume streamed JSON objects, providing a simple way to handle and display structured data in your application. -- **[`useAssistant`](/docs/ai-sdk-ui/openai-assistants)** is designed to facilitate interaction with OpenAI-compatible assistant APIs, managing UI state and updating it automatically as responses are streamed. These hooks are designed to reduce the complexity and time required to implement AI interactions, letting you focus on creating exceptional user experiences. @@ -26,7 +25,6 @@ Here is a comparison of the supported functions across these frameworks: | [useChat](/docs/reference/ai-sdk-ui/use-chat) | | Chat | | | [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | Completion | | | [useObject](/docs/reference/ai-sdk-ui/use-object) | | StructuredObject | | -| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | | [Contributions](https://github.com/vercel/ai/blob/main/CONTRIBUTING.md) are diff --git a/content/docs/04-ai-sdk-ui/10-openai-assistants.mdx b/content/docs/04-ai-sdk-ui/10-openai-assistants.mdx deleted file mode 100644 index 9f0d7d3f116e..000000000000 --- a/content/docs/04-ai-sdk-ui/10-openai-assistants.mdx +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: OpenAI Assistants -description: Learn how to use the useAssistant hook. ---- - -# OpenAI Assistants - -The `useAssistant` hook allows you to handle the client state when interacting with an OpenAI compatible assistant API. 
-This hook is useful when you want to integrate assistant capabilities into your application, -with the UI updated automatically as the assistant is streaming its execution. - -The `useAssistant` hook is supported in `@ai-sdk/react`, `ai/svelte`, and `ai/vue`. - -## Example - -```tsx filename='app/page.tsx' -'use client'; - -import { Message, useAssistant } from '@ai-sdk/react'; - -export default function Chat() { - const { status, messages, input, submitMessage, handleInputChange } = - useAssistant({ api: '/api/assistant' }); - - return ( -
-      {messages.map((m: Message) => (
-        <div key={m.id}>
-          <strong>{`${m.role}: `}</strong>
-          {m.role !== 'data' && m.content}
-          {m.role === 'data' && (
-            <>
-              {(m.data as any).description}
-              <br />
-              <pre>
-                {JSON.stringify(m.data, null, 2)}
-              </pre>
-            </>
-          )}
-        </div>
-      ))}
-
-      {status === 'in_progress' && <div />}
-
-      <form onSubmit={submitMessage}>
-        <input
-          disabled={status !== 'awaiting_message'}
-          value={input}
-          onChange={handleInputChange}
-        />
-      </form>
-    </div>
- ); -} -``` - -```tsx filename='app/api/assistant/route.ts' -import { AssistantResponse } from 'ai'; -import OpenAI from 'openai'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -// Allow streaming responses up to 30 seconds -export const maxDuration = 30; - -export async function POST(req: Request) { - // Parse the request body - const input: { - threadId: string | null; - message: string; - } = await req.json(); - - // Create a thread if needed - const threadId = input.threadId ?? (await openai.beta.threads.create({})).id; - - // Add a message to the thread - const createdMessage = await openai.beta.threads.messages.create(threadId, { - role: 'user', - content: input.message, - }); - - return AssistantResponse( - { threadId, messageId: createdMessage.id }, - async ({ forwardStream, sendDataMessage }) => { - // Run the assistant on the thread - const runStream = openai.beta.threads.runs.stream(threadId, { - assistant_id: - process.env.ASSISTANT_ID ?? - (() => { - throw new Error('ASSISTANT_ID is not set'); - })(), - }); - - // forward run status would stream message deltas - let runResult = await forwardStream(runStream); - - // status can be: queued, in_progress, requires_action, cancelling, cancelled, failed, completed, or expired - while ( - runResult?.status === 'requires_action' && - runResult.required_action?.type === 'submit_tool_outputs' - ) { - const tool_outputs = - runResult.required_action.submit_tool_outputs.tool_calls.map( - (toolCall: any) => { - const parameters = JSON.parse(toolCall.function.arguments); - - switch (toolCall.function.name) { - // configure your tool calls here - - default: - throw new Error( - `Unknown tool call function: ${toolCall.function.name}`, - ); - } - }, - ); - - runResult = await forwardStream( - openai.beta.threads.runs.submitToolOutputsStream( - threadId, - runResult.id, - { tool_outputs }, - ), - ); - } - }, - ); -} -``` - -## Customized UI - -`useAssistant` also provides ways to manage the chat message and input states via code and show loading and error states. - -### Loading and error states - -To show a loading spinner while the assistant is running the thread, you can use the `status` state returned by the `useAssistant` hook: - -```tsx -const { status, ... } = useAssistant() - -return( - <> - {status === "in_progress" ? : null} - -) -``` - -Similarly, the `error` state reflects the error object thrown during the fetch request. It can be used to display an error message, or show a toast notification: - -```tsx -const { error, ... } = useAssistant() - -useEffect(() => { - if (error) { - toast.error(error.message) - } -}, [error]) - -// Or display the error message in the UI: -return ( - <> - {error ?
<div>{error.message}</div>
: null} - -) -``` - -### Controlled input - -In the initial example, we have `handleSubmit` and `handleInputChange` callbacks that manage the input changes and form submissions. These are handy for common use cases, but you can also use uncontrolled APIs for more advanced scenarios such as form validation or customized components. - -The following example demonstrates how to use more granular APIs like `append` with your custom input and submit button components: - -```tsx -const { append } = useAssistant(); - -return ( - <> - { - // Send a new message to the AI provider - append({ - role: 'user', - content: input, - }); - }} - /> - -); -``` - -## Configure Request Options - -By default, the `useAssistant` hook sends a HTTP POST request to the `/api/assistant` endpoint with the prompt as part of the request body. You can customize the request by passing additional options to the `useAssistant` hook: - -```tsx -const { messages, input, handleInputChange, handleSubmit } = useAssistant({ - api: '/api/custom-completion', - headers: { - Authorization: 'your_token', - }, - body: { - user_id: '123', - }, - credentials: 'same-origin', -}); -``` - -In this example, the `useAssistant` hook sends a POST request to the `/api/custom-completion` endpoint with the specified headers, additional body fields, and credentials for that fetch request. On your server side, you can handle the request with these additional information. diff --git a/content/docs/04-ai-sdk-ui/21-error-handling.mdx b/content/docs/04-ai-sdk-ui/21-error-handling.mdx index a4177f7be49e..bb7ae344a805 100644 --- a/content/docs/04-ai-sdk-ui/21-error-handling.mdx +++ b/content/docs/04-ai-sdk-ui/21-error-handling.mdx @@ -101,7 +101,7 @@ export default function Chat() { ### Error Handling Callback -Errors can be processed by passing an [`onError`](/docs/reference/ai-sdk-ui/use-chat#on-error) callback function as an option to the [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion) or [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) hooks. +Errors can be processed by passing an [`onError`](/docs/reference/ai-sdk-ui/use-chat#on-error) callback function as an option to the [`useChat`](/docs/reference/ai-sdk-ui/use-chat) or [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion) hooks. The callback function receives an error object as an argument. ```tsx file="app/page.tsx" highlight="8-11" diff --git a/content/docs/04-ai-sdk-ui/index.mdx b/content/docs/04-ai-sdk-ui/index.mdx index c506e0bffeb7..bda55e4e6c10 100644 --- a/content/docs/04-ai-sdk-ui/index.mdx +++ b/content/docs/04-ai-sdk-ui/index.mdx @@ -38,11 +38,6 @@ description: Learn about the AI SDK UI. description: 'Learn how to integrate an interface for object generation.', href: '/docs/ai-sdk-ui/object-generation' }, - { - title: 'OpenAI Assistants', - description: 'Learn how to integrate an interface for OpenAI Assistants.', - href: '/docs/ai-sdk-ui/openai-assistants' - }, { title: 'Streaming Data', description: 'Learn how to stream data.', diff --git a/content/docs/07-reference/02-ai-sdk-ui/20-use-assistant.mdx b/content/docs/07-reference/02-ai-sdk-ui/20-use-assistant.mdx deleted file mode 100644 index 12b9755e6633..000000000000 --- a/content/docs/07-reference/02-ai-sdk-ui/20-use-assistant.mdx +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: useAssistant -description: API reference for the useAssistant hook. 
---- - -# `useAssistant()` - -Allows you to handle the client state when interacting with an OpenAI compatible assistant API. -This hook is useful when you want to integrate assistant capabilities into your application, -with the UI updated automatically as the assistant is streaming its execution. - -This works in conjunction with [`AssistantResponse`](./assistant-response) in the backend. - -## Import - - - - - - - - - - -## API Signature - -### Parameters - - | Headers', - isOptional: true, - description: 'Headers to be passed to the API endpoint.', - }, - { - name: 'body', - type: 'any', - isOptional: true, - description: 'Additional body to be passed to the API endpoint.', - }, - { - name: 'onError', - type: '(error: Error) => void', - isOptional: true, - description: - 'Callback that will be called when the assistant encounters an error', - }, - { - name: 'fetch', - type: 'FetchFunction', - isOptional: true, - description: - 'Optional. A custom fetch function to be used for the API call. Defaults to the global fetch function.', - }, - ]} -/> - -### Returns - ->', - description: 'Function to update the `messages` array.', - }, - { - name: 'threadId', - type: 'string | undefined', - description: 'The current thread ID.', - }, - { - name: 'setThreadId', - type: '(threadId: string | undefined) => void', - description: - "Set the current thread ID. Specifying a thread ID will switch to that thread, if it exists. If set to 'undefined', a new thread will be created. For both cases, `threadId` will be updated with the new value and `messages` will be cleared.", - }, - { - name: 'input', - type: 'string', - description: 'The current value of the input field.', - }, - { - name: 'setInput', - type: 'React.Dispatch>', - description: 'Function to update the `input` value.', - }, - { - name: 'handleInputChange', - type: '(event: any) => void', - description: - "Handler for the `onChange` event of the input field to control the input's value.", - }, - { - name: 'submitMessage', - type: '(event?: { preventDefault?: () => void }) => void', - description: - 'Form submission handler that automatically resets the input field and appends a user message.', - }, - { - name: 'status', - type: "'awaiting_message' | 'in_progress'", - description: - 'The current status of the assistant. This can be used to show a loading indicator.', - }, - { - name: 'append', - type: '(message: Message | CreateMessage, chatRequestOptions: { options: { headers, body } }) => Promise', - description: - "Function to append a user message to the current thread. This triggers the API call to fetch the assistant's response.", - }, - { - name: 'stop', - type: '() => void', - description: - 'Function to abort the current request from streaming the assistant response. Note that the run will still be in progress.', - }, - { - name: 'error', - type: 'undefined | Error', - description: - 'The error thrown during the assistant message processing, if any.', - }, - ]} -/> diff --git a/content/docs/07-reference/02-ai-sdk-ui/21-assistant-response.mdx b/content/docs/07-reference/02-ai-sdk-ui/21-assistant-response.mdx deleted file mode 100644 index ef177e9475eb..000000000000 --- a/content/docs/07-reference/02-ai-sdk-ui/21-assistant-response.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: AssistantResponse -description: API reference for the AssistantResponse streaming helper. 
---- - -# `AssistantResponse` - -The AssistantResponse class is designed to facilitate streaming assistant responses to the [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) hook. -It receives an assistant thread and a current message, and can send messages and data messages to the client. - -## Import - - - -## API Signature - -### Parameters - - Run | undefined', - description: - 'Forwards the assistant response stream to the client. Returns the Run object after it completes, or when it requires an action.', - }, - { - name: 'sendDataMessage', - type: '(message: DataMessage) => void', - description: - 'Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread.', - }, - ], - }, - ], - }, - ]} -/> diff --git a/content/docs/07-reference/02-ai-sdk-ui/index.mdx b/content/docs/07-reference/02-ai-sdk-ui/index.mdx index b28a073d885e..ce87f31cdc82 100644 --- a/content/docs/07-reference/02-ai-sdk-ui/index.mdx +++ b/content/docs/07-reference/02-ai-sdk-ui/index.mdx @@ -30,11 +30,6 @@ AI SDK UI contains the following hooks: description: 'Use a hook for consuming a streamed JSON objects.', href: '/docs/reference/ai-sdk-ui/use-object', }, - { - title: 'useAssistant', - description: 'Use a hook to interact with OpenAI assistants.', - href: '/docs/reference/ai-sdk-ui/use-assistant', - }, { title: 'convertToCoreMessages', description: @@ -79,18 +74,6 @@ AI SDK UI contains the following hooks: ]} /> -It also contains the following helper functions: - - - ## UI Framework Support AI SDK UI supports the following frameworks: [React](https://react.dev/), [Svelte](https://svelte.dev/), and [Vue.js](https://vuejs.org/). @@ -101,7 +84,6 @@ Here is a comparison of the supported functions across these frameworks: | [useChat](/docs/reference/ai-sdk-ui/use-chat) | | Chat | | | [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | Completion | | | [useObject](/docs/reference/ai-sdk-ui/use-object) | | StructuredObject | | -| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | | [Contributions](https://github.com/vercel/ai/blob/main/CONTRIBUTING.md) are diff --git a/examples/next-openai-pages/app/api/assistant-tools/route.ts b/examples/next-openai-pages/app/api/assistant-tools/route.ts deleted file mode 100644 index 170c97602d3f..000000000000 --- a/examples/next-openai-pages/app/api/assistant-tools/route.ts +++ /dev/null @@ -1,71 +0,0 @@ -import { AssistantResponse } from 'ai'; -import OpenAI from 'openai'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -export async function POST(req: Request) { - const input: { - threadId: string | null; - message: string; - } = await req.json(); - - const threadId = input.threadId ?? (await openai.beta.threads.create({})).id; - - const createdMessage = await openai.beta.threads.messages.create(threadId, { - role: 'user', - content: input.message, - }); - - return AssistantResponse( - { threadId, messageId: createdMessage.id }, - async ({ forwardStream }) => { - const runStream = openai.beta.threads.runs.stream(threadId, { - assistant_id: - process.env.ASSISTANT_ID ?? 
- (() => { - throw new Error('ASSISTANT_ID is not set'); - })(), - }); - - let runResult = await forwardStream(runStream); - - while ( - runResult?.status === 'requires_action' && - runResult.required_action?.type === 'submit_tool_outputs' - ) { - const tool_outputs = - runResult.required_action.submit_tool_outputs.tool_calls.map( - (toolCall: any) => { - const parameters = JSON.parse(toolCall.function.arguments); - - switch (toolCall.function.name) { - case 'celsiusToFahrenheit': - const celsius = parseFloat(parameters.value); - const fahrenheit = celsius * (9 / 5) + 32; - - return { - tool_call_id: toolCall.id, - output: `${celsius}°C is ${fahrenheit.toFixed(2)}°F`, - }; - - default: - throw new Error( - `Unknown tool call function: ${toolCall.function.name}`, - ); - } - }, - ); - - runResult = await forwardStream( - openai.beta.threads.runs.submitToolOutputsStream( - threadId, - runResult.id, - { tool_outputs }, - ), - ); - } - }, - ); -} diff --git a/examples/next-openai-pages/app/api/assistant/route.ts b/examples/next-openai-pages/app/api/assistant/route.ts deleted file mode 100644 index 747c8ac5ece5..000000000000 --- a/examples/next-openai-pages/app/api/assistant/route.ts +++ /dev/null @@ -1,35 +0,0 @@ -import { AssistantResponse } from 'ai'; -import OpenAI from 'openai'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -export async function POST(req: Request) { - const input: { - threadId: string | null; - message: string; - } = await req.json(); - - const threadId = input.threadId ?? (await openai.beta.threads.create({})).id; - - const createdMessage = await openai.beta.threads.messages.create(threadId, { - role: 'user', - content: input.message, - }); - - return AssistantResponse( - { threadId, messageId: createdMessage.id }, - async ({ forwardStream }) => { - const runStream = openai.beta.threads.runs.stream(threadId, { - assistant_id: - process.env.ASSISTANT_ID ?? - (() => { - throw new Error('ASSISTANT_ID environment is not set'); - })(), - }); - - await forwardStream(runStream); - }, - ); -} diff --git a/examples/next-openai-pages/pages/assistants/stream-assistant-response-with-tools/index.tsx b/examples/next-openai-pages/pages/assistants/stream-assistant-response-with-tools/index.tsx deleted file mode 100644 index fe9122884f8e..000000000000 --- a/examples/next-openai-pages/pages/assistants/stream-assistant-response-with-tools/index.tsx +++ /dev/null @@ -1,31 +0,0 @@ -import { Message, useAssistant } from '@ai-sdk/react'; - -export default function Page() { - const { status, messages, input, submitMessage, handleInputChange } = - useAssistant({ api: '/api/assistant-tools' }); - - return ( -
-    <div>
-      <div>status: {status}</div>
-
-      <div>
-        {messages.map((message: Message) => (
-          <div key={message.id}>
-            <div>{`${message.role}: `}</div>
-            <div>{message.content}</div>
-          </div>
-        ))}
-      </div>
-
-      <form onSubmit={submitMessage}>
-        <input
-          value={input}
-          onChange={handleInputChange}
-          disabled={status !== 'awaiting_message'}
-        />
-      </form>
-    </div>
- ); -} diff --git a/examples/next-openai-pages/pages/assistants/stream-assistant-response/index.tsx b/examples/next-openai-pages/pages/assistants/stream-assistant-response/index.tsx deleted file mode 100644 index 9718ded27dca..000000000000 --- a/examples/next-openai-pages/pages/assistants/stream-assistant-response/index.tsx +++ /dev/null @@ -1,31 +0,0 @@ -import { Message, useAssistant } from '@ai-sdk/react'; - -export default function Page() { - const { status, messages, input, submitMessage, handleInputChange } = - useAssistant({ api: '/api/assistant' }); - - return ( -
-    <div>
-      <div>status: {status}</div>
-
-      <div>
-        {messages.map((message: Message) => (
-          <div key={message.id}>
-            <div>{`${message.role}: `}</div>
-            <div>{message.content}</div>
-          </div>
-        ))}
-      </div>
-
-      <form onSubmit={submitMessage}>
-        <input
-          value={input}
-          onChange={handleInputChange}
-          disabled={status !== 'awaiting_message'}
-        />
-      </form>
-    </div>
- ); -} diff --git a/examples/next-openai-pages/pages/assistants/stream-assistant-switch-threads/index.tsx b/examples/next-openai-pages/pages/assistants/stream-assistant-switch-threads/index.tsx deleted file mode 100644 index c25e2889bc0c..000000000000 --- a/examples/next-openai-pages/pages/assistants/stream-assistant-switch-threads/index.tsx +++ /dev/null @@ -1,82 +0,0 @@ -import { Message, useAssistant } from '@ai-sdk/react'; -import { useEffect, useState } from 'react'; - -export default function Page() { - const { - status, - messages, - input, - submitMessage, - handleInputChange, - threadId, - setThreadId, - } = useAssistant({ api: '/api/assistant' }); - - const [threads, setThreads] = useState([ - 'thread_wFjFAc6llmI2DaVvaRs6en0z', - 'thread_o1KXo6qCtb12A5GaVCx1X5YL', - 'thread_jrANWD0rR4QWoIV5Lxq6YFrD', - ]); - - useEffect(() => { - if (threadId !== undefined) { - if (!threads.includes(threadId)) { - setThreads([...threads, threadId]); - } - } - }, [threadId, threads]); - - return ( -
-    <div>
-      <div>
-        <div
-          onClick={() => {
-            setThreadId(undefined);
-          }}
-        >
-          new thread
-        </div>
-
-        {threads.map((thread, index) => (
-          <div
-            key={thread}
-            onClick={() => {
-              setThreadId(thread);
-            }}
-          >
-            thread {index + 1}
-          </div>
-        ))}
-      </div>
-
-      <div>
-        <div>status: {status}</div>
-
-        <div>
-          {messages.map((message: Message) => (
-            <div key={message.id}>
-              <div>{`${message.role}: `}</div>
-              <div>{message.content}</div>
-            </div>
-          ))}
-        </div>
-
-        <form onSubmit={submitMessage}>
-          <input
-            value={input}
-            onChange={handleInputChange}
-            disabled={status !== 'awaiting_message'}
-          />
-        </form>
-      </div>
-    </div>
- ); -} diff --git a/examples/next-openai/app/api/assistant/assistant-setup.md b/examples/next-openai/app/api/assistant/assistant-setup.md deleted file mode 100644 index df6f6e8404c3..000000000000 --- a/examples/next-openai/app/api/assistant/assistant-setup.md +++ /dev/null @@ -1,63 +0,0 @@ -# Home Automation Assistant Example - -## Setup - -### Create OpenAI Assistant - -[OpenAI Assistant Website](https://platform.openai.com/assistants) - -Create a new assistant. Enable Code interpreter. Add the following functions and instructions to the assistant. - -Then add the assistant id to the `.env.local` file as `ASSISTANT_ID=your-assistant-id`. - -### Instructions - -``` -You are an assistant with access to a home automation system. You can get and set the temperature in the bedroom, home office, living room, kitchen and bathroom. - -The system uses temperature in Celsius. If the user requests Fahrenheit, you should convert the temperature to Fahrenheit. -``` - -### getRoomTemperature function - -```json -{ - "name": "getRoomTemperature", - "description": "Get the temperature in a room", - "parameters": { - "type": "object", - "properties": { - "room": { - "type": "string", - "enum": ["bedroom", "home office", "living room", "kitchen", "bathroom"] - } - }, - "required": ["room"] - } -} -``` - -### setRoomTemperature function - -```json -{ - "name": "setRoomTemperature", - "description": "Set the temperature in a room", - "parameters": { - "type": "object", - "properties": { - "room": { - "type": "string", - "enum": ["bedroom", "home office", "living room", "kitchen", "bathroom"] - }, - "temperature": { "type": "number" } - }, - "required": ["room", "temperature"] - } -} -``` - -## Run - -1. Run `pnpm run dev` in `examples/next-openai` -2. Go to http://localhost:3000/assistant diff --git a/examples/next-openai/app/api/assistant/route.ts b/examples/next-openai/app/api/assistant/route.ts deleted file mode 100644 index a6c73221a67d..000000000000 --- a/examples/next-openai/app/api/assistant/route.ts +++ /dev/null @@ -1,126 +0,0 @@ -import { AssistantResponse } from 'ai'; -import OpenAI from 'openai'; - -// Create an OpenAI API client (that's edge friendly!) -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -// Allow streaming responses up to 30 seconds -export const maxDuration = 30; - -const homeTemperatures = { - bedroom: 20, - 'home office': 21, - 'living room': 21, - kitchen: 22, - bathroom: 23, -}; - -export async function POST(req: Request) { - // Parse the request body - const input: { - threadId: string | null; - message: string; - } = await req.json(); - - // Create a thread if needed - const threadId = input.threadId ?? (await openai.beta.threads.create({})).id; - - // Add a message to the thread - const createdMessage = await openai.beta.threads.messages.create( - threadId, - { - role: 'user', - content: input.message, - }, - { signal: req.signal }, - ); - - return AssistantResponse( - { threadId, messageId: createdMessage.id }, - async ({ forwardStream, sendDataMessage }) => { - // Run the assistant on the thread - const runStream = openai.beta.threads.runs.stream( - threadId, - { - assistant_id: - process.env.ASSISTANT_ID ?? 
- (() => { - throw new Error('ASSISTANT_ID is not set'); - })(), - }, - { signal: req.signal }, - ); - - // forward run status would stream message deltas - let runResult = await forwardStream(runStream); - - // status can be: queued, in_progress, requires_action, cancelling, cancelled, failed, completed, or expired - while ( - runResult?.status === 'requires_action' && - runResult.required_action?.type === 'submit_tool_outputs' - ) { - const tool_outputs = - runResult.required_action.submit_tool_outputs.tool_calls.map( - (toolCall: any) => { - const parameters = JSON.parse(toolCall.function.arguments); - - switch (toolCall.function.name) { - case 'getRoomTemperature': { - const temperature = - homeTemperatures[ - parameters.room as keyof typeof homeTemperatures - ]; - - return { - tool_call_id: toolCall.id, - output: temperature.toString(), - }; - } - - case 'setRoomTemperature': { - const oldTemperature = - homeTemperatures[ - parameters.room as keyof typeof homeTemperatures - ]; - - homeTemperatures[ - parameters.room as keyof typeof homeTemperatures - ] = parameters.temperature; - - sendDataMessage({ - role: 'data', - data: { - oldTemperature, - newTemperature: parameters.temperature, - description: `Temperature in ${parameters.room} changed from ${oldTemperature} to ${parameters.temperature}`, - }, - }); - - return { - tool_call_id: toolCall.id, - output: `temperature set successfully`, - }; - } - - default: - throw new Error( - `Unknown tool call function: ${toolCall.function.name}`, - ); - } - }, - ); - - runResult = await forwardStream( - openai.beta.threads.runs.submitToolOutputsStream( - threadId, - runResult.id, - { tool_outputs }, - { signal: req.signal }, - ), - ); - } - }, - ); -} diff --git a/examples/next-openai/app/assistant/page.tsx b/examples/next-openai/app/assistant/page.tsx deleted file mode 100644 index cea489dc7017..000000000000 --- a/examples/next-openai/app/assistant/page.tsx +++ /dev/null @@ -1,87 +0,0 @@ -'use client'; - -import { Message, useAssistant as useAssistant } from '@ai-sdk/react'; -import { useEffect, useRef } from 'react'; - -const roleToColorMap: Record = { - system: 'red', - user: 'black', - assistant: 'green', - data: 'orange', -}; - -export default function Chat() { - const { - status, - messages, - input, - submitMessage, - handleInputChange, - error, - stop, - } = useAssistant({ api: '/api/assistant' }); - - // When status changes to accepting messages, focus the input: - const inputRef = useRef(null); - useEffect(() => { - if (status === 'awaiting_message') { - inputRef.current?.focus(); - } - }, [status]); - - return ( -
-    <div>
-      {error != null && (
-        <div>
-          <span>Error: {(error as any).toString()}</span>
-        </div>
-      )}
-
-      {messages.map((m: Message) => (
-        <div key={m.id} style={{ color: roleToColorMap[m.role] }}>
-          <strong>{`${m.role}: `}</strong>
-          {m.role !== 'data' && m.content}
-          {m.role === 'data' && (
-            <>
-              {(m.data as any).description}
-              <br />
-              <pre>
-                {JSON.stringify(m.data, null, 2)}
-              </pre>
-            </>
-          )}
-          <br />
-          <br />
-        </div>
-      ))}
-
-      {status === 'in_progress' && <div />}
-
-      <form onSubmit={submitMessage}>
-        <input
-          ref={inputRef}
-          disabled={status !== 'awaiting_message'}
-          value={input}
-          onChange={handleInputChange}
-        />
-      </form>
-    </div>
- ); -} diff --git a/examples/next-openai/app/stream-assistant-responses-with-tools/actions.tsx b/examples/next-openai/app/stream-assistant-responses-with-tools/actions.tsx deleted file mode 100644 index ba5457a7b1ae..000000000000 --- a/examples/next-openai/app/stream-assistant-responses-with-tools/actions.tsx +++ /dev/null @@ -1,161 +0,0 @@ -'use server'; - -import { generateId } from 'ai'; -import { createStreamableUI, createStreamableValue } from 'ai/rsc'; -import { OpenAI } from 'openai'; -import { ReactNode } from 'react'; -import { searchEmails } from './function'; -import { Message } from './message'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -export interface ClientMessage { - id: string; - status: ReactNode; - text: ReactNode; - gui: ReactNode; -} - -const ASSISTANT_ID = 'asst_xxxx'; -let THREAD_ID = ''; -let RUN_ID = ''; - -export async function submitMessage(question: string): Promise { - const status = createStreamableUI('thread.init'); - const textStream = createStreamableValue(''); - const textUIStream = createStreamableUI( - , - ); - const gui = createStreamableUI(); - - const runQueue = []; - - (async () => { - if (THREAD_ID) { - await openai.beta.threads.messages.create(THREAD_ID, { - role: 'user', - content: question, - }); - - const run = await openai.beta.threads.runs.create(THREAD_ID, { - assistant_id: ASSISTANT_ID, - stream: true, - }); - - runQueue.push({ id: generateId(), run }); - } else { - const run = await openai.beta.threads.createAndRun({ - assistant_id: ASSISTANT_ID, - stream: true, - thread: { - messages: [{ role: 'user', content: question }], - }, - }); - - runQueue.push({ id: generateId(), run }); - } - - while (runQueue.length > 0) { - const latestRun = runQueue.shift(); - - if (latestRun) { - for await (const delta of latestRun.run) { - const { data, event } = delta; - - status.update(event); - - if (event === 'thread.created') { - THREAD_ID = data.id; - } else if (event === 'thread.run.created') { - RUN_ID = data.id; - } else if (event === 'thread.message.delta') { - data.delta.content?.map((part: any) => { - if (part.type === 'text') { - if (part.text) { - textStream.append(part.text.value); - } - } - }); - } else if (event === 'thread.run.requires_action') { - if (data.required_action) { - if (data.required_action.type === 'submit_tool_outputs') { - const { tool_calls } = data.required_action.submit_tool_outputs; - const tool_outputs = []; - - for (const tool_call of tool_calls) { - const { id: toolCallId, function: fn } = tool_call; - const { name, arguments: args } = fn; - - if (name === 'search_emails') { - const { query, has_attachments } = JSON.parse(args); - - gui.append( -
-                      <div>
-                        <div>
-                          Searching for emails: {query}, has_attachments:
-                          {has_attachments ? 'true' : 'false'}
-                        </div>
-                      </div>,
-                    );
-
-                    await new Promise(resolve => setTimeout(resolve, 2000));
-
-                    const fakeEmails = searchEmails({ query, has_attachments });
-
-                    gui.append(
-                      <div>
-                        {fakeEmails.map(email => (
-                          <div key={email.id}>
-                            <div>
-                              <div>{email.subject}</div>
-                            </div>
-                            <div>{email.date}</div>
-                          </div>
-                        ))}
-                      </div>
, - ); - - tool_outputs.push({ - tool_call_id: toolCallId, - output: JSON.stringify(fakeEmails), - }); - } - } - - const nextRun: any = - await openai.beta.threads.runs.submitToolOutputs( - THREAD_ID, - RUN_ID, - { - tool_outputs, - stream: true, - }, - ); - - runQueue.push({ id: generateId(), run: nextRun }); - } - } - } else if (event === 'thread.run.failed') { - console.log(data); - } - } - } - } - - status.done(); - textUIStream.done(); - gui.done(); - })(); - - return { - id: generateId(), - status: status.value, - text: textUIStream.value, - gui: gui.value, - }; -} diff --git a/examples/next-openai/app/stream-assistant-responses-with-tools/ai.ts b/examples/next-openai/app/stream-assistant-responses-with-tools/ai.ts deleted file mode 100644 index 8e11e17b8066..000000000000 --- a/examples/next-openai/app/stream-assistant-responses-with-tools/ai.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { createAI } from 'ai/rsc'; -import { submitMessage } from './actions'; - -export const AI = createAI({ - actions: { - submitMessage, - }, - initialAIState: [], - initialUIState: [], -}); diff --git a/examples/next-openai/app/stream-assistant-responses-with-tools/function.ts b/examples/next-openai/app/stream-assistant-responses-with-tools/function.ts deleted file mode 100644 index af1e662ccd1f..000000000000 --- a/examples/next-openai/app/stream-assistant-responses-with-tools/function.ts +++ /dev/null @@ -1,45 +0,0 @@ -export const searchEmails = ({ - query, - has_attachments, -}: { - query: string; - has_attachments: boolean; -}) => { - return [ - { - id: '1', - subject: 'Q1 Investor Update', - date: 'Apr 1, 2023', - }, - { - id: '2', - subject: 'Q2 Investor Update', - date: 'Jul 1, 2023', - }, - { - id: '3', - subject: 'Q3 Investor Update', - date: 'Oct 1, 2023', - }, - ]; -}; - -export const openEmail = ({ id }: { id: string }) => { - return { - body: ` - Subject: Investor Update - - Hi Team, - - Here is the investor update for Q${id} 2023. - - We have seen a ${ - id === '1' ? 23 : id === '2' ? 34 : 42 - }% increase in revenue compared to last quarter. This is due to the successful launch of our new product line. We are also expanding our team to keep up with the demand. - - Best, - CEO - - `, - }; -}; diff --git a/examples/next-openai/app/stream-assistant-responses-with-tools/layout.tsx b/examples/next-openai/app/stream-assistant-responses-with-tools/layout.tsx deleted file mode 100644 index cc2afee1a9ab..000000000000 --- a/examples/next-openai/app/stream-assistant-responses-with-tools/layout.tsx +++ /dev/null @@ -1,6 +0,0 @@ -import { ReactNode } from 'react'; -import { AI } from './ai'; - -export default function Layout({ children }: { children: ReactNode }) { - return {children}; -} diff --git a/examples/next-openai/app/stream-assistant-responses-with-tools/message.tsx b/examples/next-openai/app/stream-assistant-responses-with-tools/message.tsx deleted file mode 100644 index 1e8679731a89..000000000000 --- a/examples/next-openai/app/stream-assistant-responses-with-tools/message.tsx +++ /dev/null @@ -1,9 +0,0 @@ -'use client'; - -import { StreamableValue, useStreamableValue } from 'ai/rsc'; - -export function Message({ textStream }: { textStream: StreamableValue }) { - const [text] = useStreamableValue(textStream); - - return
<div>{text}</div>
; -} diff --git a/examples/next-openai/app/stream-assistant-responses-with-tools/page.tsx b/examples/next-openai/app/stream-assistant-responses-with-tools/page.tsx deleted file mode 100644 index c3afe120e606..000000000000 --- a/examples/next-openai/app/stream-assistant-responses-with-tools/page.tsx +++ /dev/null @@ -1,65 +0,0 @@ -'use client'; - -import { useState } from 'react'; -import { ClientMessage, submitMessage } from './actions'; -import { useActions } from 'ai/rsc'; - -export default function Home() { - const [input, setInput] = useState(''); - const [messages, setMessages] = useState([]); - const { submitMessage } = useActions(); - - const handleSubmission = async () => { - setMessages(currentMessages => [ - ...currentMessages, - { - id: '123', - status: 'user.message.created', - text: input, - gui: null, - }, - ]); - - const response = await submitMessage(input); - setMessages(currentMessages => [...currentMessages, response]); - setInput(''); - }; - - return ( -
-      <input
-        value={input}
-        onChange={event => setInput(event.target.value)}
-        placeholder="Ask a question"
-        onKeyDown={event => {
-          if (event.key === 'Enter') {
-            handleSubmission();
-          }
-        }}
-      />
-
-      <div>
-        {messages.map(message => (
-          <div key={message.id}>
-            <div>{message.status}</div>
-            <div>{message.gui}</div>
-            <div>{message.text}</div>
-          </div>
-        ))}
-      </div>
-    </div>
- ); -} diff --git a/examples/next-openai/app/stream-assistant-responses/actions.tsx b/examples/next-openai/app/stream-assistant-responses/actions.tsx deleted file mode 100644 index c49aa9c255a7..000000000000 --- a/examples/next-openai/app/stream-assistant-responses/actions.tsx +++ /dev/null @@ -1,95 +0,0 @@ -'use server'; - -import { generateId } from 'ai'; -import { createStreamableUI, createStreamableValue } from 'ai/rsc'; -import { OpenAI } from 'openai'; -import { ReactNode } from 'react'; -import { Message } from './message'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -export interface ClientMessage { - id: string; - status: ReactNode; - text: ReactNode; -} - -const ASSISTANT_ID = 'asst_xxxx'; -let THREAD_ID = ''; -let RUN_ID = ''; - -export async function submitMessage(question: string): Promise { - const statusUIStream = createStreamableUI('thread.init'); - - const textStream = createStreamableValue(''); - const textUIStream = createStreamableUI( - , - ); - - const runQueue = []; - - (async () => { - if (THREAD_ID) { - await openai.beta.threads.messages.create(THREAD_ID, { - role: 'user', - content: question, - }); - - const run = await openai.beta.threads.runs.create(THREAD_ID, { - assistant_id: ASSISTANT_ID, - stream: true, - }); - - runQueue.push({ id: generateId(), run }); - } else { - const run = await openai.beta.threads.createAndRun({ - assistant_id: ASSISTANT_ID, - stream: true, - thread: { - messages: [{ role: 'user', content: question }], - }, - }); - - runQueue.push({ id: generateId(), run }); - } - - while (runQueue.length > 0) { - const latestRun = runQueue.shift(); - - if (latestRun) { - for await (const delta of latestRun.run) { - const { data, event } = delta; - - statusUIStream.update(event); - - if (event === 'thread.created') { - THREAD_ID = data.id; - } else if (event === 'thread.run.created') { - RUN_ID = data.id; - } else if (event === 'thread.message.delta') { - data.delta.content?.map(part => { - if (part.type === 'text') { - if (part.text) { - textStream.append(part.text.value as string); - } - } - }); - } else if (event === 'thread.run.failed') { - console.error(data); - } - } - } - } - - statusUIStream.done(); - textStream.done(); - })(); - - return { - id: generateId(), - status: statusUIStream.value, - text: textUIStream.value, - }; -} diff --git a/examples/next-openai/app/stream-assistant-responses/ai.ts b/examples/next-openai/app/stream-assistant-responses/ai.ts deleted file mode 100644 index 8e11e17b8066..000000000000 --- a/examples/next-openai/app/stream-assistant-responses/ai.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { createAI } from 'ai/rsc'; -import { submitMessage } from './actions'; - -export const AI = createAI({ - actions: { - submitMessage, - }, - initialAIState: [], - initialUIState: [], -}); diff --git a/examples/next-openai/app/stream-assistant-responses/functions.tsx b/examples/next-openai/app/stream-assistant-responses/functions.tsx deleted file mode 100644 index bb778a8ecf67..000000000000 --- a/examples/next-openai/app/stream-assistant-responses/functions.tsx +++ /dev/null @@ -1,43 +0,0 @@ -export const searchEmails = ({ - query, - has_attachments, -}: { - query: string; - has_attachments: boolean; -}) => { - return [ - { - id: '123', - subject: 'Q1 Investor Update', - date: 'Apr 1, 2023', - }, - { - id: '234', - subject: 'Q2 Investor Update', - date: 'Jul 1, 2023', - }, - { - id: '345', - subject: 'Q3 Investor Update', - date: 'Oct 1, 2023', - }, - ]; -}; - -export const openEmail = ({ id }: { 
id: string }) => { - return { - body: ` - Subject: Investor Update - - Hi Team, - - Here is the investor update for Q1 2023. - - We have seen a 20% increase in revenue compared to last quarter. This is due to the successful launch of our new product line. We are also expanding our team to keep up with the demand. - - Best, - CEO - - `, - }; -}; diff --git a/examples/next-openai/app/stream-assistant-responses/layout.tsx b/examples/next-openai/app/stream-assistant-responses/layout.tsx deleted file mode 100644 index cc2afee1a9ab..000000000000 --- a/examples/next-openai/app/stream-assistant-responses/layout.tsx +++ /dev/null @@ -1,6 +0,0 @@ -import { ReactNode } from 'react'; -import { AI } from './ai'; - -export default function Layout({ children }: { children: ReactNode }) { - return {children}; -} diff --git a/examples/next-openai/app/stream-assistant-responses/message.tsx b/examples/next-openai/app/stream-assistant-responses/message.tsx deleted file mode 100644 index 1e8679731a89..000000000000 --- a/examples/next-openai/app/stream-assistant-responses/message.tsx +++ /dev/null @@ -1,9 +0,0 @@ -'use client'; - -import { StreamableValue, useStreamableValue } from 'ai/rsc'; - -export function Message({ textStream }: { textStream: StreamableValue }) { - const [text] = useStreamableValue(textStream); - - return
<div>{text}</div>
; -} diff --git a/examples/next-openai/app/stream-assistant-responses/page.tsx b/examples/next-openai/app/stream-assistant-responses/page.tsx deleted file mode 100644 index 8bbcf04bf0e9..000000000000 --- a/examples/next-openai/app/stream-assistant-responses/page.tsx +++ /dev/null @@ -1,64 +0,0 @@ -'use client'; - -import { useState } from 'react'; -import { ClientMessage } from './actions'; -import { useActions } from 'ai/rsc'; - -export default function Home() { - const [input, setInput] = useState(''); - const [messages, setMessages] = useState([]); - const { submitMessage } = useActions(); - - const handleSubmission = async () => { - setMessages(currentMessages => [ - ...currentMessages, - { - id: '123', - status: 'user.message.created', - text: input, - gui: null, - }, - ]); - - const response = await submitMessage(input); - setMessages(currentMessages => [...currentMessages, response]); - setInput(''); - }; - - return ( -
-      <input
-        value={input}
-        onChange={event => setInput(event.target.value)}
-        placeholder="Ask a question"
-        onKeyDown={event => {
-          if (event.key === 'Enter') {
-            handleSubmission();
-          }
-        }}
-      />
-
-      <div>
-        {messages.map(message => (
-          <div key={message.id}>
-            <div>{message.status}</div>
-            <div>{message.text}</div>
-          </div>
-        ))}
-      </div>
-    </div>
- ); -} diff --git a/examples/nuxt-openai/pages/assistant/index.vue b/examples/nuxt-openai/pages/assistant/index.vue deleted file mode 100644 index c793f02fd00e..000000000000 --- a/examples/nuxt-openai/pages/assistant/index.vue +++ /dev/null @@ -1,83 +0,0 @@ - - - diff --git a/examples/nuxt-openai/server/api/assistant.ts b/examples/nuxt-openai/server/api/assistant.ts deleted file mode 100644 index 8c7cdea9a786..000000000000 --- a/examples/nuxt-openai/server/api/assistant.ts +++ /dev/null @@ -1,130 +0,0 @@ -import { AssistantResponse } from 'ai'; -import OpenAI from 'openai'; - -type AssistantRequest = { - threadId: string | null; - message: string; -}; - -// Allow streaming responses up to 30 seconds -export const maxDuration = 30; - -export default defineLazyEventHandler(async () => { - // Validate the OpenAI API key and Assistant ID are set - const apiKey = useRuntimeConfig().openaiApiKey; - if (!apiKey) - throw new Error('Missing OpenAI API key, `NUXT_OPEN_API_KEY` not set'); - - const assistantId = useRuntimeConfig().assistantId; - if (!assistantId) - throw new Error('Missing Assistant ID, `NUXT_ASSISTANT_ID` not set'); - - // Create an OpenAI API client (that's edge friendly!) - const openai = new OpenAI({ apiKey }); - - const homeTemperatures = { - bedroom: 20, - 'home office': 21, - 'living room': 21, - kitchen: 22, - bathroom: 23, - }; - - return defineEventHandler(async (event: any) => { - const { threadId: userThreadId, message }: AssistantRequest = - await readBody(event); - - // Extract the signal from the H3 request if available - const signal = event?.web?.request?.signal; - - // Create a thread if needed - const threadId = userThreadId ?? (await openai.beta.threads.create({})).id; - - // Add a message to the thread - const createdMessage = await openai.beta.threads.messages.create( - threadId, - { - role: 'user', - content: message, - }, - { signal }, - ); - - return AssistantResponse( - { threadId, messageId: createdMessage.id }, - async ({ forwardStream, sendDataMessage }) => { - // Run the assistant on the thread - const runStream = openai.beta.threads.runs.stream( - threadId, - { assistant_id: assistantId }, - { signal }, - ); - - // forward run status would stream message deltas - let runResult = await forwardStream(runStream); - - // status can be: queued, in_progress, requires_action, cancelling, cancelled, failed, completed, or expired - while ( - runResult?.status === 'requires_action' && - runResult?.required_action?.type === 'submit_tool_outputs' - ) { - // Process the required action to submit tool outputs - const tool_outputs = - runResult.required_action.submit_tool_outputs.tool_calls.map( - (toolCall: any) => { - const parameters = JSON.parse(toolCall.function.arguments); - - switch (toolCall.function.name) { - case 'getRoomTemperature': { - const room: keyof typeof homeTemperatures = parameters.room; - const temperature = homeTemperatures[room]; - - return { - tool_call_id: toolCall.id, - output: temperature.toString(), - }; - } - - case 'setRoomTemperature': { - const room: keyof typeof homeTemperatures = parameters.room; - const oldTemperature = homeTemperatures[room]; - - homeTemperatures[room] = parameters.temperature; - - sendDataMessage({ - role: 'data', - data: { - oldTemperature, - newTemperature: parameters.temperature, - description: `Temperature in the ${room} changed from ${oldTemperature} to ${parameters.temperature}`, - }, - }); - - return { - tool_call_id: toolCall.id, - output: 'Temperature set successfully', - }; - } - default: { - throw new 
Error( - `Unknown tool call function: ${toolCall.function.name}`, - ); - } - } - }, - ); - - // Submit the tool outputs - runResult = await forwardStream( - openai.beta.threads.runs.submitToolOutputsStream( - threadId, - runResult.id, - { tool_outputs }, - { signal }, - ), - ); - } - }, - ); - }); -}); diff --git a/packages/ai/core/index.ts b/packages/ai/core/index.ts index d23acab74658..2a55e5c1b3e7 100644 --- a/packages/ai/core/index.ts +++ b/packages/ai/core/index.ts @@ -2,23 +2,18 @@ export { createIdGenerator, generateId } from '@ai-sdk/provider-utils'; export type { IDGenerator } from '@ai-sdk/provider-utils'; export { - formatAssistantStreamPart, formatDataStreamPart, jsonSchema, - parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema, } from '@ai-sdk/ui-utils'; export type { - AssistantMessage, - AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, - DataMessage, DataStreamPart, DeepPartial, IdGenerator, @@ -28,7 +23,6 @@ export type { RequestOptions, Schema, ToolInvocation, - UseAssistantOptions, } from '@ai-sdk/ui-utils'; // directory exports: diff --git a/packages/ai/react/index.ts b/packages/ai/react/index.ts index aa335240a2b2..b3f79af12e0d 100644 --- a/packages/ai/react/index.ts +++ b/packages/ai/react/index.ts @@ -1,7 +1,6 @@ import { useChat as useChatReact, useCompletion as useCompletionReact, - useAssistant as useAssistantReact, experimental_useObject as experimental_useObjectReact, } from '@ai-sdk/react'; @@ -15,11 +14,6 @@ export const useChat = useChatReact; */ export const useCompletion = useCompletionReact; -/** - * @deprecated Use `@ai-sdk/react` instead. - */ -export const useAssistant = useAssistantReact; - /** * @deprecated Use `@ai-sdk/react` instead. */ diff --git a/packages/ai/streams/assistant-response.ts b/packages/ai/streams/assistant-response.ts deleted file mode 100644 index 1a22b8d98c4c..000000000000 --- a/packages/ai/streams/assistant-response.ts +++ /dev/null @@ -1,152 +0,0 @@ -import { - AssistantMessage, - DataMessage, - formatAssistantStreamPart, -} from '@ai-sdk/ui-utils'; - -/** -You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response. - */ -type AssistantResponseSettings = { - /** -The thread ID that the response is associated with. - */ - threadId: string; - - /** -The ID of the latest message that the response is associated with. - */ - messageId: string; -}; - -/** -The process parameter is a callback in which you can run the assistant on threads, and send messages and data messages to the client. - */ -type AssistantResponseCallback = (options: { - /** -Forwards an assistant message (non-streaming) to the client. - */ - sendMessage: (message: AssistantMessage) => void; - - /** -Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread. - */ - sendDataMessage: (message: DataMessage) => void; - - /** -Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action. - */ - forwardStream: (stream: any) => Promise; -}) => Promise; - -/** -The `AssistantResponse` allows you to send a stream of assistant update to `useAssistant`. -It is designed to facilitate streaming assistant responses to the `useAssistant` hook. -It receives an assistant thread and a current message, and can send messages and data messages to the client. 
- */ -export function AssistantResponse( - { threadId, messageId }: AssistantResponseSettings, - process: AssistantResponseCallback, -): Response { - const stream = new ReadableStream({ - async start(controller) { - const textEncoder = new TextEncoder(); - - const sendMessage = (message: AssistantMessage) => { - controller.enqueue( - textEncoder.encode( - formatAssistantStreamPart('assistant_message', message), - ), - ); - }; - - const sendDataMessage = (message: DataMessage) => { - controller.enqueue( - textEncoder.encode( - formatAssistantStreamPart('data_message', message), - ), - ); - }; - - const sendError = (errorMessage: string) => { - controller.enqueue( - textEncoder.encode(formatAssistantStreamPart('error', errorMessage)), - ); - }; - - const forwardStream = async (stream: any) => { - let result: any | undefined = undefined; - - for await (const value of stream) { - switch (value.event) { - case 'thread.message.created': { - controller.enqueue( - textEncoder.encode( - formatAssistantStreamPart('assistant_message', { - id: value.data.id, - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - ), - ); - break; - } - - case 'thread.message.delta': { - const content = value.data.delta.content?.[0]; - - if (content?.type === 'text' && content.text?.value != null) { - controller.enqueue( - textEncoder.encode( - formatAssistantStreamPart('text', content.text.value), - ), - ); - } - - break; - } - - case 'thread.run.completed': - case 'thread.run.requires_action': { - result = value.data; - break; - } - } - } - - return result; - }; - - // send the threadId and messageId as the first message: - controller.enqueue( - textEncoder.encode( - formatAssistantStreamPart('assistant_control_data', { - threadId, - messageId, - }), - ), - ); - - try { - await process({ - sendMessage, - sendDataMessage, - forwardStream, - }); - } catch (error) { - sendError((error as any).message ?? 
`${error}`); - } finally { - controller.close(); - } - }, - pull(controller) {}, - cancel() {}, - }); - - return new Response(stream, { - status: 200, - headers: { - 'Content-Type': 'text/plain; charset=utf-8', - }, - }); -} diff --git a/packages/ai/streams/index.ts b/packages/ai/streams/index.ts index 91eb3b5233bf..cc86718a4ff5 100644 --- a/packages/ai/streams/index.ts +++ b/packages/ai/streams/index.ts @@ -1,7 +1,6 @@ export * from '../core/index'; export * from '../errors/index'; -export * from './assistant-response'; export * as LangChainAdapter from './langchain-adapter'; export * as LlamaIndexAdapter from './llamaindex-adapter'; export * from './stream-data'; diff --git a/packages/react/README.md b/packages/react/README.md index 760c22b707e4..00fcb6f210de 100644 --- a/packages/react/README.md +++ b/packages/react/README.md @@ -4,4 +4,4 @@ - [`useChat`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-chat) hook - [`useCompletion`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-completion) hook -- [`useAssistant`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-assistant) hook +- [`useObject`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-object) hook diff --git a/packages/react/src/index.ts b/packages/react/src/index.ts index 4882a35c91d1..680df238066a 100644 --- a/packages/react/src/index.ts +++ b/packages/react/src/index.ts @@ -1,4 +1,3 @@ -export * from './use-assistant'; export * from './use-chat'; export * from './use-completion'; export * from './use-object'; diff --git a/packages/react/src/use-assistant.ts b/packages/react/src/use-assistant.ts deleted file mode 100644 index 0436adedc69f..000000000000 --- a/packages/react/src/use-assistant.ts +++ /dev/null @@ -1,285 +0,0 @@ -import { isAbortError } from '@ai-sdk/provider-utils'; -import { - AssistantStatus, - CreateMessage, - Message, - UseAssistantOptions, - generateId, - processAssistantStream, -} from '@ai-sdk/ui-utils'; -import { useCallback, useRef, useState } from 'react'; - -// use function to allow for mocking in tests: -const getOriginalFetch = () => fetch; - -export type UseAssistantHelpers = { - /** - * The current array of chat messages. - */ - messages: Message[]; - - /** - * Update the message store with a new array of messages. - */ - setMessages: React.Dispatch>; - - /** - * The current thread ID. - */ - threadId: string | undefined; - - /** - * Set the current thread ID. Specifying a thread ID will switch to that thread, if it exists. If set to 'undefined', a new thread will be created. For both cases, `threadId` will be updated with the new value and `messages` will be cleared. - */ - setThreadId: (threadId: string | undefined) => void; - - /** - * The current value of the input field. - */ - input: string; - - /** - * Append a user message to the chat list. This triggers the API call to fetch - * the assistant's response. - * @param message The message to append - * @param requestOptions Additional options to pass to the API call - */ - append: ( - message: Message | CreateMessage, - requestOptions?: { - data?: Record; - }, - ) => Promise; - - /** -Abort the current request immediately, keep the generated tokens if any. - */ - stop: () => void; - - /** - * setState-powered method to update the input value. - */ - setInput: React.Dispatch>; - - /** - * Handler for the `onChange` event of the input field to control the input's value. 
- */ - handleInputChange: ( - event: - | React.ChangeEvent - | React.ChangeEvent, - ) => void; - - /** - * Form submission handler that automatically resets the input field and appends a user message. - */ - submitMessage: ( - event?: React.FormEvent, - requestOptions?: { - data?: Record; - }, - ) => Promise; - - /** - * The current status of the assistant. This can be used to show a loading indicator. - */ - status: AssistantStatus; - - /** - * The error thrown during the assistant message processing, if any. - */ - error: undefined | Error; -}; - -export function useAssistant({ - api, - threadId: threadIdParam, - credentials, - headers, - body, - onError, - fetch, -}: UseAssistantOptions): UseAssistantHelpers { - const [messages, setMessages] = useState([]); - const [input, setInput] = useState(''); - const [currentThreadId, setCurrentThreadId] = useState( - undefined, - ); - const [status, setStatus] = useState('awaiting_message'); - const [error, setError] = useState(undefined); - - const handleInputChange = ( - event: - | React.ChangeEvent - | React.ChangeEvent, - ) => { - setInput(event.target.value); - }; - - // Abort controller to cancel the current API call. - const abortControllerRef = useRef(null); - - const stop = useCallback(() => { - if (abortControllerRef.current) { - abortControllerRef.current.abort(); - abortControllerRef.current = null; - } - }, []); - - const append = async ( - message: Message | CreateMessage, - requestOptions?: { - data?: Record; - }, - ) => { - setStatus('in_progress'); - - setMessages(messages => [ - ...messages, - { - ...message, - id: message.id ?? generateId(), - }, - ]); - - setInput(''); - - const abortController = new AbortController(); - - try { - abortControllerRef.current = abortController; - - const actualFetch = fetch ?? getOriginalFetch(); - const response = await actualFetch(api, { - method: 'POST', - credentials, - signal: abortController.signal, - headers: { 'Content-Type': 'application/json', ...headers }, - body: JSON.stringify({ - ...body, - // always use user-provided threadId when available: - threadId: threadIdParam ?? currentThreadId ?? null, - message: message.content, - - // optional request data: - data: requestOptions?.data, - }), - }); - - if (!response.ok) { - throw new Error( - (await response.text()) ?? 'Failed to fetch the assistant response.', - ); - } - - if (response.body == null) { - throw new Error('The response body is empty.'); - } - - await processAssistantStream({ - stream: response.body, - onAssistantMessagePart(value) { - setMessages(messages => [ - ...messages, - { - id: value.id, - role: value.role, - content: value.content[0].text.value, - parts: [], - }, - ]); - }, - onTextPart(value) { - // text delta - add to last message: - setMessages(messages => { - const lastMessage = messages[messages.length - 1]; - return [ - ...messages.slice(0, messages.length - 1), - { - id: lastMessage.id, - role: lastMessage.role, - content: lastMessage.content + value, - parts: lastMessage.parts, - }, - ]; - }); - }, - onAssistantControlDataPart(value) { - setCurrentThreadId(value.threadId); - - // set id of last message: - setMessages(messages => { - const lastMessage = messages[messages.length - 1]; - lastMessage.id = value.messageId; - return [...messages.slice(0, messages.length - 1), lastMessage]; - }); - }, - onDataMessagePart(value) { - setMessages(messages => [ - ...messages, - { - id: value.id ?? 
generateId(), - role: 'data', - content: '', - data: value.data, - parts: [], - }, - ]); - }, - onErrorPart(value) { - setError(new Error(value)); - }, - }); - } catch (error) { - // Ignore abort errors as they are expected when the user cancels the request: - if (isAbortError(error) && abortController.signal.aborted) { - abortControllerRef.current = null; - return; - } - - if (onError && error instanceof Error) { - onError(error); - } - - setError(error as Error); - } finally { - abortControllerRef.current = null; - setStatus('awaiting_message'); - } - }; - - const submitMessage = async ( - event?: React.FormEvent, - requestOptions?: { - data?: Record; - }, - ) => { - event?.preventDefault?.(); - - if (input === '') { - return; - } - - append({ role: 'user', content: input, parts: [] }, requestOptions); - }; - - const setThreadId = (threadId: string | undefined) => { - setCurrentThreadId(threadId); - setMessages([]); - }; - - return { - append, - messages, - setMessages, - threadId: currentThreadId, - setThreadId, - input, - setInput, - handleInputChange, - submitMessage, - status, - error, - stop, - }; -} diff --git a/packages/react/src/use-assistant.ui.test.tsx b/packages/react/src/use-assistant.ui.test.tsx deleted file mode 100644 index e35f6593066b..000000000000 --- a/packages/react/src/use-assistant.ui.test.tsx +++ /dev/null @@ -1,320 +0,0 @@ -import { formatAssistantStreamPart } from '@ai-sdk/ui-utils'; -import { - mockFetchDataStream, - mockFetchDataStreamWithGenerator, - mockFetchError, -} from '@ai-sdk/ui-utils/test'; -import '@testing-library/jest-dom/vitest'; -import { findByText, screen } from '@testing-library/react'; -import userEvent from '@testing-library/user-event'; -import { setupTestComponent } from './setup-test-component'; -import { useAssistant } from './use-assistant'; - -describe('stream data stream', () => { - setupTestComponent(() => { - const { status, messages, error, append } = useAssistant({ - api: '/api/assistant', - }); - - return ( -
-        <div data-testid="status">{status}</div>
-        {error && <div data-testid="error">{error.toString()}</div>}
-        {messages.map((m, idx) => (
-          <div data-testid={`message-${idx}`} key={idx}>
-            {m.role === 'user' ? 'User: ' : 'AI: '}
-            {m.content}
-          </div>
-        ))}
-        <button
-          data-testid="do-append"
-          onClick={() => {
-            append({ role: 'user', content: 'hi' });
-          }}
-        />
-      </div>
- ); - }); - - it('should show streamed response', async () => { - const { requestBody } = mockFetchDataStream({ - url: 'https://example.com/api/assistant', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't0', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - // check that correct information was sent to the server: - expect(await requestBody).toStrictEqual( - JSON.stringify({ - threadId: null, - message: 'hi', - }), - ); - }); - - it('should show error response', async () => { - mockFetchError({ statusCode: 500, errorMessage: 'Internal Error' }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('error'); - expect(screen.getByTestId('error')).toHaveTextContent( - 'Error: Internal Error', - ); - }); - - describe('loading state', () => { - it('should show loading state', async () => { - let finishGeneration: ((value?: unknown) => void) | undefined; - const finishGenerationPromise = new Promise(resolve => { - finishGeneration = resolve; - }); - - mockFetchDataStreamWithGenerator({ - url: 'https://example.com/api/chat', - chunkGenerator: (async function* generate() { - const encoder = new TextEncoder(); - - yield encoder.encode( - formatAssistantStreamPart('assistant_control_data', { - threadId: 't0', - messageId: 'm1', - }), - ); - - yield encoder.encode( - formatAssistantStreamPart('assistant_message', { - id: 'm1', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - ); - - yield encoder.encode('0:"Hello"\n'); - - await finishGenerationPromise; - })(), - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('status'); - expect(screen.getByTestId('status')).toHaveTextContent('in_progress'); - - finishGeneration?.(); - - await findByText(await screen.findByTestId('status'), 'awaiting_message'); - expect(screen.getByTestId('status')).toHaveTextContent( - 'awaiting_message', - ); - }); - }); -}); - -describe('thread management', () => { - setupTestComponent(() => { - const { status, messages, error, append, setThreadId, threadId } = - useAssistant({ - api: '/api/assistant', - }); - - return ( -
-        <div data-testid="status">{status}</div>
-        <div data-testid="thread-id">{threadId || 'undefined'}</div>
-        {error && <div data-testid="error">{error.toString()}</div>}
-        {messages.map((m, idx) => (
-          <div data-testid={`message-${idx}`} key={idx}>
-            {m.role === 'user' ? 'User: ' : 'AI: '}
-            {m.content}
-          </div>
-        ))}
-        <button
-          data-testid="do-append"
-          onClick={() => {
-            append({ role: 'user', content: 'hi' });
-          }}
-        />
-        <button
-          data-testid="do-new-thread"
-          onClick={() => {
-            setThreadId(undefined);
-          }}
-        />
-        <button
-          data-testid="do-thread-3"
-          onClick={() => {
-            setThreadId('t3');
-          }}
-        />
-      </div>
- ); - }); - - it('create new thread', async () => { - await screen.findByTestId('thread-id'); - expect(screen.getByTestId('thread-id')).toHaveTextContent('undefined'); - }); - - it('should show streamed response', async () => { - const { requestBody } = mockFetchDataStream({ - url: 'https://example.com/api/assistant', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't0', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(screen.getByTestId('thread-id')).toHaveTextContent('t0'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - // check that correct information was sent to the server: - expect(await requestBody).toStrictEqual( - JSON.stringify({ - threadId: null, - message: 'hi', - }), - ); - }); - - it('should switch to new thread on setting undefined threadId', async () => { - await userEvent.click(screen.getByTestId('do-new-thread')); - - expect(screen.queryByTestId('message-0')).toBeNull(); - expect(screen.queryByTestId('message-1')).toBeNull(); - - const { requestBody } = mockFetchDataStream({ - url: 'https://example.com/api/assistant', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't1', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(screen.getByTestId('thread-id')).toHaveTextContent('t1'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - // check that correct information was sent to the server: - expect(await requestBody).toStrictEqual( - JSON.stringify({ - threadId: null, - message: 'hi', - }), - ); - }); - - it('should switch to thread on setting previously created threadId', async () => { - await userEvent.click(screen.getByTestId('do-thread-3')); - - expect(screen.queryByTestId('message-0')).toBeNull(); - expect(screen.queryByTestId('message-1')).toBeNull(); - - const { requestBody } = mockFetchDataStream({ - url: 'https://example.com/api/assistant', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't3', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }); - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(screen.getByTestId('thread-id')).toHaveTextContent('t3'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - // 
check that correct information was sent to the server: - expect(await requestBody).toStrictEqual( - JSON.stringify({ - threadId: 't3', - message: 'hi', - }), - ); - }); -}); diff --git a/packages/ui-utils/src/assistant-stream-parts.test.ts b/packages/ui-utils/src/assistant-stream-parts.test.ts deleted file mode 100644 index 11e5559622e0..000000000000 --- a/packages/ui-utils/src/assistant-stream-parts.test.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { - formatAssistantStreamPart, - parseAssistantStreamPart, -} from './assistant-stream-parts'; - -describe('text stream part', () => { - it('should format a text stream part', () => { - expect(formatAssistantStreamPart('text', 'value\nvalue')).toEqual( - '0:"value\\nvalue"\n', - ); - }); - - it('should parse a text line', () => { - const input = '0:"Hello, world!"'; - expect(parseAssistantStreamPart(input)).toEqual({ - type: 'text', - value: 'Hello, world!', - }); - }); -}); diff --git a/packages/ui-utils/src/assistant-stream-parts.ts b/packages/ui-utils/src/assistant-stream-parts.ts deleted file mode 100644 index 959378b94f3e..000000000000 --- a/packages/ui-utils/src/assistant-stream-parts.ts +++ /dev/null @@ -1,220 +0,0 @@ -import { AssistantMessage, DataMessage, JSONValue } from './types'; - -export type AssistantStreamString = - `${(typeof StreamStringPrefixes)[keyof typeof StreamStringPrefixes]}:${string}\n`; - -export interface AssistantStreamPart< - CODE extends string, - NAME extends string, - TYPE, -> { - code: CODE; - name: NAME; - parse: (value: JSONValue) => { type: NAME; value: TYPE }; -} - -const textStreamPart: AssistantStreamPart<'0', 'text', string> = { - code: '0', - name: 'text', - parse: (value: JSONValue) => { - if (typeof value !== 'string') { - throw new Error('"text" parts expect a string value.'); - } - return { type: 'text', value }; - }, -}; - -const errorStreamPart: AssistantStreamPart<'3', 'error', string> = { - code: '3', - name: 'error', - parse: (value: JSONValue) => { - if (typeof value !== 'string') { - throw new Error('"error" parts expect a string value.'); - } - return { type: 'error', value }; - }, -}; - -const assistantMessageStreamPart: AssistantStreamPart< - '4', - 'assistant_message', - AssistantMessage -> = { - code: '4', - name: 'assistant_message', - parse: (value: JSONValue) => { - if ( - value == null || - typeof value !== 'object' || - !('id' in value) || - !('role' in value) || - !('content' in value) || - typeof value.id !== 'string' || - typeof value.role !== 'string' || - value.role !== 'assistant' || - !Array.isArray(value.content) || - !value.content.every( - item => - item != null && - typeof item === 'object' && - 'type' in item && - item.type === 'text' && - 'text' in item && - item.text != null && - typeof item.text === 'object' && - 'value' in item.text && - typeof item.text.value === 'string', - ) - ) { - throw new Error( - '"assistant_message" parts expect an object with an "id", "role", and "content" property.', - ); - } - - return { - type: 'assistant_message', - value: value as AssistantMessage, - }; - }, -}; - -const assistantControlDataStreamPart: AssistantStreamPart< - '5', - 'assistant_control_data', - { - threadId: string; - messageId: string; - } -> = { - code: '5', - name: 'assistant_control_data', - parse: (value: JSONValue) => { - if ( - value == null || - typeof value !== 'object' || - !('threadId' in value) || - !('messageId' in value) || - typeof value.threadId !== 'string' || - typeof value.messageId !== 'string' - ) { - throw new Error( - '"assistant_control_data" parts 
expect an object with a "threadId" and "messageId" property.', - ); - } - - return { - type: 'assistant_control_data', - value: { - threadId: value.threadId, - messageId: value.messageId, - }, - }; - }, -}; - -const dataMessageStreamPart: AssistantStreamPart< - '6', - 'data_message', - DataMessage -> = { - code: '6', - name: 'data_message', - parse: (value: JSONValue) => { - if ( - value == null || - typeof value !== 'object' || - !('role' in value) || - !('data' in value) || - typeof value.role !== 'string' || - value.role !== 'data' - ) { - throw new Error( - '"data_message" parts expect an object with a "role" and "data" property.', - ); - } - - return { - type: 'data_message', - value: value as DataMessage, - }; - }, -}; - -const assistantStreamParts = [ - textStreamPart, - errorStreamPart, - assistantMessageStreamPart, - assistantControlDataStreamPart, - dataMessageStreamPart, -] as const; - -type AssistantStreamParts = - | typeof textStreamPart - | typeof errorStreamPart - | typeof assistantMessageStreamPart - | typeof assistantControlDataStreamPart - | typeof dataMessageStreamPart; - -type AssistantStreamPartValueType = { - [P in AssistantStreamParts as P['name']]: ReturnType['value']; -}; - -export type AssistantStreamPartType = - | ReturnType - | ReturnType - | ReturnType - | ReturnType - | ReturnType; - -export const assistantStreamPartsByCode = { - [textStreamPart.code]: textStreamPart, - [errorStreamPart.code]: errorStreamPart, - [assistantMessageStreamPart.code]: assistantMessageStreamPart, - [assistantControlDataStreamPart.code]: assistantControlDataStreamPart, - [dataMessageStreamPart.code]: dataMessageStreamPart, -} as const; - -export const StreamStringPrefixes = { - [textStreamPart.name]: textStreamPart.code, - [errorStreamPart.name]: errorStreamPart.code, - [assistantMessageStreamPart.name]: assistantMessageStreamPart.code, - [assistantControlDataStreamPart.name]: assistantControlDataStreamPart.code, - [dataMessageStreamPart.name]: dataMessageStreamPart.code, -} as const; - -export const validCodes = assistantStreamParts.map(part => part.code); - -export const parseAssistantStreamPart = ( - line: string, -): AssistantStreamPartType => { - const firstSeparatorIndex = line.indexOf(':'); - - if (firstSeparatorIndex === -1) { - throw new Error('Failed to parse stream string. No separator found.'); - } - - const prefix = line.slice(0, firstSeparatorIndex); - - if (!validCodes.includes(prefix as keyof typeof assistantStreamPartsByCode)) { - throw new Error(`Failed to parse stream string. 
Invalid code ${prefix}.`); - } - - const code = prefix as keyof typeof assistantStreamPartsByCode; - - const textValue = line.slice(firstSeparatorIndex + 1); - const jsonValue: JSONValue = JSON.parse(textValue); - - return assistantStreamPartsByCode[code].parse(jsonValue); -}; - -export function formatAssistantStreamPart< - T extends keyof AssistantStreamPartValueType, ->(type: T, value: AssistantStreamPartValueType[T]): AssistantStreamString { - const streamPart = assistantStreamParts.find(part => part.name === type); - - if (!streamPart) { - throw new Error(`Invalid stream part type: ${type}`); - } - - return `${streamPart.code}:${JSON.stringify(value)}\n`; -} diff --git a/packages/ui-utils/src/index.ts b/packages/ui-utils/src/index.ts index 12ba2071ccb6..308df0be4d3e 100644 --- a/packages/ui-utils/src/index.ts +++ b/packages/ui-utils/src/index.ts @@ -5,14 +5,6 @@ export { generateId } from '@ai-sdk/provider-utils'; // Export stream data utilities for custom stream implementations, // both on the client and server side. // NOTE: this is experimental / internal and may change without notice -export { - formatAssistantStreamPart, - parseAssistantStreamPart, -} from './assistant-stream-parts'; -export type { - AssistantStreamPart, - AssistantStreamString, -} from './assistant-stream-parts'; export { callChatApi } from './call-chat-api'; export { callCompletionApi } from './call-completion-api'; export { formatDataStreamPart, parseDataStreamPart } from './data-stream-parts'; @@ -25,7 +17,6 @@ export { getMessageParts } from './get-message-parts'; export { isDeepEqualData } from './is-deep-equal-data'; export { parsePartialJson } from './parse-partial-json'; export { prepareAttachmentsForRequest } from './prepare-attachments-for-request'; -export { processAssistantStream } from './process-assistant-stream'; export { processDataStream } from './process-data-stream'; export { processTextStream } from './process-text-stream'; export { asSchema, jsonSchema } from './schema'; diff --git a/packages/ui-utils/src/process-assistant-stream.test.ts b/packages/ui-utils/src/process-assistant-stream.test.ts deleted file mode 100644 index 91ff5ec6efe7..000000000000 --- a/packages/ui-utils/src/process-assistant-stream.test.ts +++ /dev/null @@ -1,305 +0,0 @@ -import { describe, expect, it, vi } from 'vitest'; -import { AssistantStreamPartType } from './assistant-stream-parts'; -import { processAssistantStream } from './process-assistant-stream'; - -function createReadableStream( - chunks: Uint8Array[], -): ReadableStream { - return new ReadableStream({ - start(controller) { - chunks.forEach(chunk => controller.enqueue(chunk)); - controller.close(); - }, - }); -} - -function encodeText(text: string): Uint8Array { - return new TextEncoder().encode(text); -} - -describe('processAssistantStream', () => { - // Basic Functionality Tests - it('should process a simple text stream part', async () => { - const chunks = [encodeText('0:"Hello"\n')]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onTextPart: value => { - receivedParts.push({ type: 'text', value }); - }, - }); - - expect(receivedParts).toHaveLength(1); - expect(receivedParts[0]).toEqual({ - type: 'text', - value: 'Hello', - }); - }); - - it('should handle multiple stream parts in sequence', async () => { - const chunks = [encodeText('0:"Hello"\n3:"error"\n')]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = 
[]; - - await processAssistantStream({ - stream, - onTextPart: value => { - receivedParts.push({ type: 'text', value }); - }, - onErrorPart: value => { - receivedParts.push({ type: 'error', value }); - }, - }); - - expect(receivedParts).toHaveLength(2); - expect(receivedParts[0]).toEqual({ type: 'text', value: 'Hello' }); - expect(receivedParts[1]).toEqual({ type: 'error', value: 'error' }); - }); - - it('should handle assistant message parts', async () => { - const assistantMessage = { - id: 'msg_123', - role: 'assistant', - content: [{ type: 'text', text: { value: 'Hello' } }], - }; - const chunks = [encodeText(`4:${JSON.stringify(assistantMessage)}\n`)]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onAssistantMessagePart: value => { - receivedParts.push({ type: 'assistant_message', value }); - }, - }); - - expect(receivedParts).toHaveLength(1); - expect(receivedParts[0]).toEqual({ - type: 'assistant_message', - value: assistantMessage, - }); - }); - - // Edge Environment Specific Tests - it('should handle chunks that split JSON values', async () => { - const chunks = [encodeText('0:"Hel'), encodeText('lo"\n')]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onTextPart: value => { - receivedParts.push({ type: 'text', value }); - }, - }); - - expect(receivedParts).toHaveLength(1); - expect(receivedParts[0]).toEqual({ type: 'text', value: 'Hello' }); - }); - - it('should handle chunks that split at newlines', async () => { - const chunks = [encodeText('0:"Hello"\n'), encodeText('0:"World"\n')]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onTextPart: value => { - receivedParts.push({ type: 'text', value }); - }, - }); - - expect(receivedParts).toHaveLength(2); - expect(receivedParts[0]).toEqual({ type: 'text', value: 'Hello' }); - expect(receivedParts[1]).toEqual({ type: 'text', value: 'World' }); - }); - - it('should handle chunks that split unicode characters', async () => { - const emoji = '👋'; - const encoded = encodeText(`0:"Hello ${emoji}"\n`); - const splitPoint = encoded.length - 3; // Split in the middle of emoji bytes - - const chunks = [encoded.slice(0, splitPoint), encoded.slice(splitPoint)]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onTextPart: value => { - receivedParts.push({ type: 'text', value }); - }, - }); - - expect(receivedParts).toHaveLength(1); - expect(receivedParts[0]).toEqual({ type: 'text', value: `Hello ${emoji}` }); - }); - - // Error Cases - it('should throw on malformed JSON', async () => { - const chunks = [encodeText('0:{malformed]]\n')]; - const stream = createReadableStream(chunks); - - await expect( - processAssistantStream({ - stream, - onTextPart: async () => {}, - }), - ).rejects.toThrow(); - }); - - it('should throw on invalid stream part codes', async () => { - const chunks = [encodeText('x:"invalid"\n')]; - const stream = createReadableStream(chunks); - - await expect( - processAssistantStream({ - stream, - onTextPart: async () => {}, - }), - ).rejects.toThrow('Invalid code'); - }); - - // Edge Cases - it('should handle control data parts', async () => { - const controlData = { - threadId: 'thread_123', - messageId: 'msg_123', - }; - const 
chunks = [encodeText(`5:${JSON.stringify(controlData)}\n`)]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onAssistantControlDataPart: value => { - receivedParts.push({ type: 'assistant_control_data', value }); - }, - }); - - expect(receivedParts).toHaveLength(1); - expect(receivedParts[0]).toEqual({ - type: 'assistant_control_data', - value: controlData, - }); - }); - - it('should handle data message parts', async () => { - const dataMessage = { - role: 'data', - data: { some: 'data' }, - }; - const chunks = [encodeText(`6:${JSON.stringify(dataMessage)}\n`)]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onDataMessagePart: value => { - receivedParts.push({ type: 'data_message', value }); - }, - }); - - expect(receivedParts).toHaveLength(1); - expect(receivedParts[0]).toEqual({ - type: 'data_message', - value: dataMessage, - }); - }); - - it('should handle empty chunks', async () => { - const chunks = [ - new Uint8Array(0), - encodeText('0:"Hello"\n'), - new Uint8Array(0), - encodeText('0:"World"\n'), - ]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onTextPart: value => { - receivedParts.push({ type: 'text', value }); - }, - }); - - expect(receivedParts).toHaveLength(2); - expect(receivedParts[0]).toEqual({ type: 'text', value: 'Hello' }); - expect(receivedParts[1]).toEqual({ type: 'text', value: 'World' }); - }); - - it('should handle very large messages', async () => { - const largeString = 'x'.repeat(1024 * 1024); // 1MB string - const chunks = [encodeText(`0:"${largeString}"\n`)]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onTextPart: value => { - receivedParts.push({ type: 'text', value }); - }, - }); - - expect(receivedParts).toHaveLength(1); - expect(receivedParts[0]).toEqual({ type: 'text', value: largeString }); - }); - - it('should handle multiple newlines', async () => { - const chunks = [encodeText('0:"Hello"\n\n0:"World"\n')]; - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onTextPart: value => { - receivedParts.push({ type: 'text', value }); - }, - }); - - expect(receivedParts).toHaveLength(2); - expect(receivedParts[0]).toEqual({ type: 'text', value: 'Hello' }); - expect(receivedParts[1]).toEqual({ type: 'text', value: 'World' }); - }); - - // Cleanup and Resource Management - it('should properly release reader resources', async () => { - const mockRelease = vi.fn(); - const stream = new ReadableStream({ - start(controller) { - controller.enqueue(encodeText('0:"Hello"\n')); - controller.close(); - }, - cancel: mockRelease, - }); - - await processAssistantStream({ - stream, - onTextPart: async () => {}, - }); - - expect(mockRelease).not.toHaveBeenCalled(); - }); - - // Concurrency Tests - it('should handle rapid stream processing', async () => { - const parts = Array.from({ length: 100 }, (_, i) => `0:"Message ${i}"\n`); - const chunks = parts.map(encodeText); - const stream = createReadableStream(chunks); - const receivedParts: AssistantStreamPartType[] = []; - - await processAssistantStream({ - stream, - onTextPart: value => { - receivedParts.push({ type: 'text', 
value }); - }, - }); - - expect(receivedParts).toHaveLength(100); - receivedParts.forEach((part, i) => { - expect(part).toEqual({ type: 'text', value: `Message ${i}` }); - }); - }); -}); diff --git a/packages/ui-utils/src/process-assistant-stream.ts b/packages/ui-utils/src/process-assistant-stream.ts deleted file mode 100644 index 98cb779a5f48..000000000000 --- a/packages/ui-utils/src/process-assistant-stream.ts +++ /dev/null @@ -1,108 +0,0 @@ -import { - AssistantStreamPartType, - parseAssistantStreamPart, -} from './assistant-stream-parts'; - -const NEWLINE = '\n'.charCodeAt(0); - -// concatenates all the chunks into a single Uint8Array -function concatChunks(chunks: Uint8Array[], totalLength: number) { - const concatenatedChunks = new Uint8Array(totalLength); - - let offset = 0; - for (const chunk of chunks) { - concatenatedChunks.set(chunk, offset); - offset += chunk.length; - } - chunks.length = 0; - - return concatenatedChunks; -} - -export async function processAssistantStream({ - stream, - onTextPart, - onErrorPart, - onAssistantMessagePart, - onAssistantControlDataPart, - onDataMessagePart, -}: { - stream: ReadableStream; - onTextPart?: ( - streamPart: (AssistantStreamPartType & { type: 'text' })['value'], - ) => Promise | void; - onErrorPart?: ( - streamPart: (AssistantStreamPartType & { type: 'error' })['value'], - ) => Promise | void; - onAssistantMessagePart?: ( - streamPart: (AssistantStreamPartType & { - type: 'assistant_message'; - })['value'], - ) => Promise | void; - onAssistantControlDataPart?: ( - streamPart: (AssistantStreamPartType & { - type: 'assistant_control_data'; - })['value'], - ) => Promise | void; - onDataMessagePart?: ( - streamPart: (AssistantStreamPartType & { type: 'data_message' })['value'], - ) => Promise | void; -}): Promise { - // implementation note: this slightly more complex algorithm is required - // to pass the tests in the edge environment. 
- - const reader = stream.getReader(); - const decoder = new TextDecoder(); - const chunks: Uint8Array[] = []; - let totalLength = 0; - - while (true) { - const { value } = await reader.read(); - - if (value) { - chunks.push(value); - totalLength += value.length; - if (value[value.length - 1] !== NEWLINE) { - // if the last character is not a newline, we have not read the whole JSON value - continue; - } - } - - if (chunks.length === 0) { - break; // we have reached the end of the stream - } - - const concatenatedChunks = concatChunks(chunks, totalLength); - totalLength = 0; - - const streamParts = decoder - .decode(concatenatedChunks, { stream: true }) - .split('\n') - .filter(line => line !== '') - .map(parseAssistantStreamPart); - - for (const { type, value } of streamParts) { - switch (type) { - case 'text': - await onTextPart?.(value); - break; - case 'error': - await onErrorPart?.(value); - break; - case 'assistant_message': - await onAssistantMessagePart?.(value); - break; - case 'assistant_control_data': - await onAssistantControlDataPart?.(value); - break; - case 'data_message': - await onDataMessagePart?.(value); - break; - default: { - const exhaustiveCheck: never = type; - throw new Error(`Unknown stream part type: ${exhaustiveCheck}`); - } - } - } - } -} diff --git a/packages/ui-utils/src/types.ts b/packages/ui-utils/src/types.ts index 4ed0ca967e0e..2c08bc41a0bb 100644 --- a/packages/ui-utils/src/types.ts +++ b/packages/ui-utils/src/types.ts @@ -5,8 +5,6 @@ import { import { FetchFunction, ToolCall, ToolResult } from '@ai-sdk/provider-utils'; import { LanguageModelUsage } from './duplicated/usage'; -export * from './use-assistant-types'; - export type IdGenerator = () => string; /** @@ -477,27 +475,3 @@ export type JSONValue = | boolean | { [value: string]: JSONValue } | Array; - -export type AssistantMessage = { - id: string; - role: 'assistant'; - content: Array<{ - type: 'text'; - text: { - value: string; - }; - }>; -}; - -/* - * A data message is an application-specific message from the assistant - * that should be shown in order with the other messages. - * - * It can trigger other operations on the frontend, such as annotating - * a map. - */ -export type DataMessage = { - id?: string; // optional id, implement if needed (e.g. for persistence) - role: 'data'; - data: JSONValue; // application-specific data -}; diff --git a/packages/ui-utils/src/use-assistant-types.ts b/packages/ui-utils/src/use-assistant-types.ts deleted file mode 100644 index 1688b9a216dd..000000000000 --- a/packages/ui-utils/src/use-assistant-types.ts +++ /dev/null @@ -1,46 +0,0 @@ -import { FetchFunction } from '@ai-sdk/provider-utils'; - -// Define a type for the assistant status -export type AssistantStatus = 'in_progress' | 'awaiting_message'; - -export type UseAssistantOptions = { - /** - * The API endpoint that accepts a `{ threadId: string | null; message: string; }` object and returns an `AssistantResponse` stream. - * The threadId refers to an existing thread with messages (or is `null` to create a new thread). - * The message is the next message that should be appended to the thread and sent to the assistant. - */ - api: string; - - /** - * An optional string that represents the ID of an existing thread. - * If not provided, a new thread will be created. - */ - threadId?: string; - - /** - * An optional literal that sets the mode of credentials to be used on the request. - * Defaults to "same-origin". 
- */ - credentials?: RequestCredentials; - - /** - * An optional object of headers to be passed to the API endpoint. - */ - headers?: Record | Headers; - - /** - * An optional, additional body object to be passed to the API endpoint. - */ - body?: object; - - /** - * An optional callback that will be called when the assistant encounters an error. - */ - onError?: (error: Error) => void; - - /** -Custom fetch implementation. You can use it as a middleware to intercept requests, -or to provide a custom fetch implementation for e.g. testing. - */ - fetch?: FetchFunction; -}; diff --git a/packages/vue/README.md b/packages/vue/README.md index b247be9b2aea..73375c0fc9f6 100644 --- a/packages/vue/README.md +++ b/packages/vue/README.md @@ -4,4 +4,3 @@ - [`useChat`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-chat) composable - [`useCompletion`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-completion) composable -- [`useAssistant`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-assistant) composable diff --git a/packages/vue/src/TestChatAssistantStreamComponent.vue b/packages/vue/src/TestChatAssistantStreamComponent.vue deleted file mode 100644 index 5c05b1b72a69..000000000000 --- a/packages/vue/src/TestChatAssistantStreamComponent.vue +++ /dev/null @@ -1,25 +0,0 @@ - - - diff --git a/packages/vue/src/TestChatAssistantThreadChangeComponent.vue b/packages/vue/src/TestChatAssistantThreadChangeComponent.vue deleted file mode 100644 index df55314fb9c6..000000000000 --- a/packages/vue/src/TestChatAssistantThreadChangeComponent.vue +++ /dev/null @@ -1,31 +0,0 @@ - - - diff --git a/packages/vue/src/index.ts b/packages/vue/src/index.ts index 6a1f7b97f61c..2e5c55377ea3 100644 --- a/packages/vue/src/index.ts +++ b/packages/vue/src/index.ts @@ -1,3 +1,2 @@ export * from './use-chat'; export * from './use-completion'; -export * from './use-assistant'; diff --git a/packages/vue/src/use-assistant.ts b/packages/vue/src/use-assistant.ts deleted file mode 100644 index d1b513c1a641..000000000000 --- a/packages/vue/src/use-assistant.ts +++ /dev/null @@ -1,286 +0,0 @@ -/** - * A vue.js composable function to interact with the assistant API. - */ - -import { isAbortError } from '@ai-sdk/provider-utils'; -import type { - AssistantStatus, - CreateMessage, - Message, - UseAssistantOptions, -} from '@ai-sdk/ui-utils'; -import { generateId, processAssistantStream } from '@ai-sdk/ui-utils'; -import type { ComputedRef, Ref } from 'vue'; -import { computed, readonly, ref } from 'vue'; - -export type UseAssistantHelpers = { - /** - * The current array of chat messages. - */ - messages: Ref; - - /** - * Update the message store with a new array of messages. - */ - setMessages: (messagesProcessor: (messages: Message[]) => Message[]) => void; - - /** - * The current thread ID. - */ - threadId: Ref; - - /** - * Set the current thread ID. Specifying a thread ID will switch to that thread, if it exists. If set to 'undefined', a new thread will be created. For both cases, `threadId` will be updated with the new value and `messages` will be cleared. - */ - setThreadId: (threadId: string | undefined) => void; - /** - * The current value of the input field. - */ - input: Ref; - - /** - * Append a user message to the chat list. This triggers the API call to fetch - * the assistant's response. 
-   * @param message The message to append
-   * @param requestOptions Additional options to pass to the API call
-   */
-  append: (
-    message: Message | CreateMessage,
-    requestOptions?: {
-      data?: Record<string, string>;
-    },
-  ) => Promise<void>;
-
-  /**
-   * Abort the current request immediately, keep the generated tokens if any.
-   */
-  stop: ComputedRef<() => void>;
-
-  /**
-   * Handler for the `onChange` event of the input field to control the input's value.
-   */
-  handleInputChange: (e: Event & { target: HTMLInputElement }) => void;
-
-  /**
-   * Handler for the `onSubmit` event of the form to append a user message and reset the input.
-   */
-  handleSubmit: (e: Event & { target: HTMLFormElement }) => void;
-
-  /**
-   * Whether the assistant is currently sending a message.
-   */
-  isSending: ComputedRef<boolean>;
-
-  /**
-   * The current status of the assistant.
-   */
-  status: Ref<AssistantStatus>;
-
-  /**
-   * The current error, if any.
-   */
-  error: Ref<undefined | Error>;
-};
-
-export function useAssistant({
-  api,
-  threadId: threadIdParam,
-  credentials,
-  headers,
-  body,
-  onError,
-}: UseAssistantOptions): UseAssistantHelpers {
-  const messages: Ref<Message[]> = ref([]);
-  const input: Ref<string> = ref('');
-  const currentThreadId: Ref<string | undefined> = ref(undefined);
-  const status: Ref<AssistantStatus> = ref('awaiting_message');
-  const error: Ref<undefined | Error> = ref(undefined);
-
-  const setMessages = (messageFactory: (messages: Message[]) => Message[]) => {
-    messages.value = messageFactory(messages.value);
-  };
-
-  const setCurrentThreadId = (newThreadId: string | undefined) => {
-    currentThreadId.value = newThreadId;
-    messages.value = [];
-  };
-
-  const handleInputChange = (event: Event & { target: HTMLInputElement }) => {
-    input.value = event?.target?.value;
-  };
-
-  const isSending = computed(() => status.value === 'in_progress');
-
-  // Abort controller to cancel the current API call when required
-  const abortController = ref<AbortController | null>(null);
-
-  // memoized function to stop the current request when required
-  const stop = computed(() => {
-    return () => {
-      if (abortController.value) {
-        abortController.value.abort();
-        abortController.value = null;
-      }
-    };
-  });
-
-  const append = async (
-    message: Message | CreateMessage,
-    requestOptions?: {
-      data?: Record<string, string>;
-    },
-  ) => {
-    status.value = 'in_progress';
-
-    // Append the new message to the current list of messages
-    const newMessage: Message = {
-      ...message,
-      id: message.id ?? generateId(),
-    };
-
-    // Update the messages list with the new message
-    setMessages(messages => [...messages, newMessage]);
-
-    input.value = '';
-
-    const controller = new AbortController();
-
-    try {
-      // Assign the new controller to the abortController ref
-      abortController.value = controller;
-
-      const response = await fetch(api, {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json',
-          ...headers,
-        },
-        body: JSON.stringify({
-          ...body,
-          // Message Content
-          message: message.content,
-
-          // Always Use User Provided Thread ID When Available
-          threadId: threadIdParam ?? currentThreadId.value ?? null,
-
-          // Optional Request Data
-          ...(requestOptions?.data && { data: requestOptions?.data }),
-        }),
-        signal: controller.signal,
-        credentials,
-      });
-
-      if (!response.ok) {
-        throw new Error(
-          response.statusText ??
-            'An error occurred while sending the message',
-        );
-      }
-
-      if (!response.body) {
-        throw new Error('The response body is empty');
-      }
-
-      await processAssistantStream({
-        stream: response.body,
-        onAssistantMessagePart(value) {
-          messages.value = [
-            ...messages.value,
-            {
-              id: value.id,
-              content: value.content[0].text.value,
-              role: value.role,
-              parts: [],
-            },
-          ];
-        },
-        onTextPart(value) {
-          setMessages(messages => {
-            const lastMessage = messages[messages.length - 1];
-            lastMessage.content += value;
-
-            return [...messages.slice(0, -1), lastMessage];
-          });
-        },
-        onAssistantControlDataPart(value) {
-          if (value.threadId) {
-            currentThreadId.value = value.threadId;
-          }
-
-          setMessages(messages => {
-            const lastMessage = messages[messages.length - 1];
-            lastMessage.id = value.messageId;
-
-            return [...messages.slice(0, -1), lastMessage];
-          });
-        },
-        onDataMessagePart(value) {
-          setMessages(messages => [
-            ...messages,
-            {
-              id: value.id ?? generateId(),
-              role: 'data',
-              content: '',
-              data: value.data,
-              parts: [],
-            },
-          ]);
-        },
-        onErrorPart(value) {
-          error.value = new Error(value);
-        },
-      });
-    } catch (err) {
-      // If the error is an AbortError and the signal is aborted, reset the abortController and do nothing.
-      if (isAbortError(err) && abortController.value?.signal.aborted) {
-        abortController.value = null;
-        return;
-      }
-
-      // If an error handler is provided, call it with the error
-      if (onError && err instanceof Error) {
-        onError(err);
-      }
-
-      error.value = err as Error;
-    } finally {
-      // Reset the status to 'awaiting_message' after the request is complete
-      abortController.value = null;
-      status.value = 'awaiting_message';
-    }
-  };
-
-  const submitMessage = async (
-    event: Event & { target: HTMLFormElement },
-    requestOptions?: {
-      data?: Record<string, string>;
-    },
-  ) => {
-    event?.preventDefault?.();
-
-    if (!input.value) return;
-
-    append(
-      {
-        role: 'user',
-        content: input.value,
-        parts: [],
-      },
-      requestOptions,
-    );
-  };
-
-  return {
-    append,
-    messages,
-    setMessages,
-    threadId: readonly(currentThreadId),
-    setThreadId: setCurrentThreadId,
-    input,
-    handleInputChange,
-    handleSubmit: submitMessage,
-    isSending,
-    status,
-    error,
-    stop,
-  };
-}
diff --git a/packages/vue/src/use-assistant.ui.test.tsx b/packages/vue/src/use-assistant.ui.test.tsx
deleted file mode 100644
index 0f06227ce41f..000000000000
--- a/packages/vue/src/use-assistant.ui.test.tsx
+++ /dev/null
@@ -1,243 +0,0 @@
-import { formatAssistantStreamPart } from '@ai-sdk/ui-utils';
-import {
-  createTestServer,
-  TestResponseController,
-} from '@ai-sdk/provider-utils/test';
-import '@testing-library/jest-dom/vitest';
-import userEvent from '@testing-library/user-event';
-import { cleanup, findByText, render, screen } from '@testing-library/vue';
-import TestChatAssistantStreamComponent from './TestChatAssistantStreamComponent.vue';
-import TestChatAssistantThreadChangeComponent from './TestChatAssistantThreadChangeComponent.vue';
-import { setupTestComponent } from './setup-test-component';
-
-const server = createTestServer({
-  '/api/assistant': {},
-});
-
-describe('stream data stream', () => {
-  setupTestComponent(TestChatAssistantStreamComponent);
-
-  it('should show streamed response', async () => {
-    server.urls['/api/assistant'].response = {
-      type: 'stream-chunks',
-      chunks: [
-        // Format the stream part
-        formatAssistantStreamPart('assistant_control_data', {
-          threadId: 't0',
-          messageId: 'm0',
-        }),
-        formatAssistantStreamPart('assistant_message', {
-          id: 'm0',
-          role: 'assistant',
-          content: [{
type: 'text', text: { value: '' } }], - }), - // Text parts - '0:"Hello"\n', - '0:", world"\n', - '0:"."\n', - ], - }; - - // Click the button - await userEvent.click(screen.getByTestId('do-append')); - - // Find the message-0 element - await screen.findByTestId('message-0'); - // Expect the message-0 element to have the text content 'User: hi' - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - // Find the message-1 element - await screen.findByTestId('message-1'); - // Expect the message-1 element to have the text content 'AI: Hello, world.' - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - expect(await server.calls[0].requestBody).toStrictEqual({ - message: 'hi', - threadId: null, - }); - }); - - describe('loading state', () => { - it('should show loading state', async () => { - const controller = new TestResponseController(); - server.urls['/api/assistant'].response = { - type: 'controlled-stream', - controller, - }; - - await userEvent.click(screen.getByTestId('do-append')); - - // Find the loading element and expect it to be in progress - await screen.findByTestId('status'); - expect(screen.getByTestId('status')).toHaveTextContent('in_progress'); - - controller.write( - formatAssistantStreamPart('assistant_control_data', { - threadId: 't0', - messageId: 'm1', - }), - ); - - controller.write( - formatAssistantStreamPart('assistant_message', { - id: 'm1', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - ); - - controller.write('0:"Hello"\n'); - controller.close(); - - await findByText(await screen.findByTestId('status'), 'awaiting_message'); - expect(screen.getByTestId('status')).toHaveTextContent( - 'awaiting_message', - ); - }); - }); -}); - -describe('Thread management', () => { - beforeEach(() => { - render(TestChatAssistantThreadChangeComponent); - }); - - afterEach(() => { - vi.restoreAllMocks(); - cleanup(); - }); - - it('create new thread', async () => { - await screen.findByTestId('thread-id'); - expect(screen.getByTestId('thread-id')).toHaveTextContent('undefined'); - }); - - it('should show streamed response', async () => { - server.urls['/api/assistant'].response = { - type: 'stream-chunks', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't0', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }; - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(screen.getByTestId('thread-id')).toHaveTextContent('t0'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - expect(await server.calls[0].requestBody).toStrictEqual({ - message: 'hi', - threadId: null, - }); - }); - - it('should switch to new thread on setting undefined threadId', async () => { - await userEvent.click(screen.getByTestId('do-new-thread')); - - expect(screen.queryByTestId('message-0')).toBeNull(); - expect(screen.queryByTestId('message-1')).toBeNull(); - - server.urls['/api/assistant'].response = { - type: 'stream-chunks', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't1', - messageId: 'm0', - }), - 
formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }; - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(screen.getByTestId('thread-id')).toHaveTextContent('t1'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - // check that correct information was sent to the server: - expect(await server.calls[0].requestBody).toStrictEqual({ - message: 'hi', - threadId: null, - }); - }); - - it('should switch to thread on setting previously created threadId', async () => { - await userEvent.click(screen.getByTestId('do-thread-3')); - - expect(screen.queryByTestId('message-0')).toBeNull(); - expect(screen.queryByTestId('message-1')).toBeNull(); - - server.urls['/api/assistant'].response = { - type: 'stream-chunks', - chunks: [ - formatAssistantStreamPart('assistant_control_data', { - threadId: 't3', - messageId: 'm0', - }), - formatAssistantStreamPart('assistant_message', { - id: 'm0', - role: 'assistant', - content: [{ type: 'text', text: { value: '' } }], - }), - // text parts: - '0:"Hello"\n', - '0:","\n', - '0:" world"\n', - '0:"."\n', - ], - }; - - await userEvent.click(screen.getByTestId('do-append')); - - await screen.findByTestId('message-0'); - expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); - - expect(screen.getByTestId('thread-id')).toHaveTextContent('t3'); - - await screen.findByTestId('message-1'); - expect(screen.getByTestId('message-1')).toHaveTextContent( - 'AI: Hello, world.', - ); - - expect(await server.calls[0].requestBody).toStrictEqual({ - message: 'hi', - threadId: 't3', - }); - }); -}); From 75f11ccf24f62de10930ff3f4aa605ea3bd52070 Mon Sep 17 00:00:00 2001 From: Dylan Mozlowski Date: Wed, 2 Apr 2025 23:10:16 -0700 Subject: [PATCH 0005/1307] docs: fix typo (#5521) --- content/cookbook/05-node/51-call-tools-in-parallel.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/cookbook/05-node/51-call-tools-in-parallel.mdx b/content/cookbook/05-node/51-call-tools-in-parallel.mdx index 960884645d7c..e1b9cb8585ac 100644 --- a/content/cookbook/05-node/51-call-tools-in-parallel.mdx +++ b/content/cookbook/05-node/51-call-tools-in-parallel.mdx @@ -1,5 +1,5 @@ --- -title: Call Tools in Parallels +title: Call Tools in Parallel description: Learn how to call tools in parallel using the AI SDK and Node tags: ['node', 'tool use'] --- From 5066589affffff8b7ef7ed94a173d772705383c6 Mon Sep 17 00:00:00 2001 From: Amagi Date: Thu, 3 Apr 2025 14:10:31 +0800 Subject: [PATCH 0006/1307] docs: fix highlights in node.js guide (#5519) --- content/docs/02-getting-started/06-nodejs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/content/docs/02-getting-started/06-nodejs.mdx b/content/docs/02-getting-started/06-nodejs.mdx index a6f0b945768c..f5ec7edce2de 100644 --- a/content/docs/02-getting-started/06-nodejs.mdx +++ b/content/docs/02-getting-started/06-nodejs.mdx @@ -149,7 +149,7 @@ Let's enhance your chatbot by adding a simple weather tool. 
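Taken together, the pieces these hunks renumber — the `tool` helper, a zod parameter schema, and the `maxSteps` option — combine roughly as follows. This is a condensed sketch of the guide's pattern, not its exact listing: the model name, the prompt, and the random temperature are illustrative placeholders, and it assumes `OPENAI_API_KEY` is set in the environment:

```ts
import { openai } from '@ai-sdk/openai';
import { type CoreMessage, streamText, tool } from 'ai';
import { z } from 'zod';

async function main() {
  const messages: CoreMessage[] = [
    { role: 'user', content: 'What is the weather in Tokyo?' },
  ];

  const result = streamText({
    model: openai('gpt-4o'),
    messages,
    tools: {
      weather: tool({
        description: 'Get the weather in a location (celsius)',
        parameters: z.object({
          location: z.string().describe('The location to get the weather for'),
        }),
        // Stubbed out: a real implementation would call a weather API.
        execute: async ({ location }) => ({
          location,
          temperature: Math.round(Math.random() * 30),
        }),
      }),
    },
    // Let the model feed tool results back into a follow-up generation step.
    maxSteps: 5,
  });

  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
}

main().catch(console.error);
```

Without `maxSteps`, the stream would end after the tool result; with it, the model can turn the tool output into a text answer, which is what the renumbered highlights below point at.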
 Modify your `index.ts` file to include the new weather tool:
 
-```ts filename="index.ts" highlight="2,4,25-36"
+```ts filename="index.ts" highlight="2,4,25-38"
 import { openai } from '@ai-sdk/openai';
 import { CoreMessage, streamText, tool } from 'ai';
 import dotenv from 'dotenv';
@@ -289,7 +289,7 @@ To solve this, you can enable multi-step tool calls using `maxSteps`. This featu
 
 Modify your `index.ts` file to include the `maxSteps` option:
 
-```ts filename="index.ts" highlight="37-40"
+```ts filename="index.ts" highlight="39-42"
 import { openai } from '@ai-sdk/openai';
 import { CoreMessage, streamText, tool } from 'ai';
 import dotenv from 'dotenv';
@@ -362,7 +362,7 @@ By setting `maxSteps` to 5, you're allowing the model to use up to 5 "steps" for
 
 Update your `index.ts` file to add a new tool to convert the temperature from Celsius to Fahrenheit:
 
-```ts filename="index.ts" highlight="36-45"
+```ts filename="index.ts" highlight="38-49"
 import { openai } from '@ai-sdk/openai';
 import { CoreMessage, streamText, tool } from 'ai';
 import dotenv from 'dotenv';

From 054f69d5f3fbb7ae4cb84396c023726c769eb914 Mon Sep 17 00:00:00 2001
From: Philip Kiely - Baseten <98474633+philipkiely-baseten@users.noreply.github.com>
Date: Thu, 3 Apr 2025 00:10:57 -0600
Subject: [PATCH 0007/1307] docs: update baseten community provider docs
 (#5517)

---
 .../40-baseten.mdx | 75 ++++++------------
 examples/ai-core/src/stream-text/baseten.ts   | 23 +-----
 2 files changed, 24 insertions(+), 74 deletions(-)

diff --git a/content/providers/02-openai-compatible-providers/40-baseten.mdx b/content/providers/02-openai-compatible-providers/40-baseten.mdx
index 9d0b22e1030b..54ea3d5a12f6 100644
--- a/content/providers/02-openai-compatible-providers/40-baseten.mdx
+++ b/content/providers/02-openai-compatible-providers/40-baseten.mdx
@@ -32,38 +32,27 @@ To use Baseten, you can create a custom provider instance with the `createOpenAI
 
 ```ts
 import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
 
-const BASETEN_MODEL_ID = '';
-const BASETEN_DEPLOYMENT_ID = null;
-
-// see https://docs.baseten.co/api-reference/openai for more information
-const basetenExtraPayload = {
-  model_id: BASETEN_MODEL_ID,
-  deployment_id: BASETEN_DEPLOYMENT_ID,
-};
+const BASETEN_MODEL_ID = ''; // e.g. 5q3z8xcw
+const BASETEN_MODEL_URL = `https://model-${BASETEN_MODEL_ID}.api.baseten.co/environments/production/sync/v1`;
 
 const baseten = createOpenAICompatible({
   name: 'baseten',
-  apiKey: process.env.BASETEN_API_KEY,
-  baseURL: 'https://bridge.baseten.co/v1/direct',
-  fetch: async (url, request) => {
-    const bodyWithBasetenPayload = JSON.stringify({
-      ...JSON.parse(request.body),
-      baseten: basetenExtraPayload,
-    });
-    return await fetch(url, { ...request, body: bodyWithBasetenPayload });
+  baseURL: BASETEN_MODEL_URL,
+  headers: {
+    Authorization: `Bearer ${process.env.BASETEN_API_KEY ?? ''}`,
   },
 });
 ```
 
-Be sure to have your `BASETEN_API_KEY` set in your environment and the model `deployment_id` ready. The `deployment_id` will be given after you have deployed the model on Baseten.
+Be sure to have your `BASETEN_API_KEY` set in your environment and the model `<model_id>` ready. The `<model_id>` will be given after you have deployed the model on Baseten.
 
 ## Language Models
 
-You can create [Baseten models](https://baseten.co/models) using a provider instance.
-The first argument is the served model name, e.g. `ultravox`.
+You can create [Baseten models](https://www.baseten.co/library/) using a provider instance.
+The first argument is the served model name, e.g. `llama`.
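For context on what the new configuration points at: the base URL above is an OpenAI-compatible endpoint, so the provider instance is in effect a thin wrapper around a plain chat-completions request. A rough hand-rolled equivalent — assuming the endpoint accepts the standard `/chat/completions` request shape, and using an illustrative model ID — looks like this:

```ts
const BASETEN_MODEL_ID = '5q3z8xcw'; // illustrative placeholder
const url = `https://model-${BASETEN_MODEL_ID}.api.baseten.co/environments/production/sync/v1/chat/completions`;

const response = await fetch(url, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${process.env.BASETEN_API_KEY ?? ''}`,
  },
  body: JSON.stringify({
    model: 'llama', // the served model name, as in the examples that follow
    messages: [{ role: 'user', content: 'Hello!' }],
  }),
});

const data = await response.json();
console.log(data.choices?.[0]?.message?.content);
```

In practice you would not issue this request yourself; you would call the `baseten(...)` instance with `generateText` or `streamText`, as the examples below show.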
 ```ts
-const model = baseten('ultravox');
+const model = baseten('llama');
 ```
 
 ### Example
 
 You can use Baseten language models to generate text with the `generateText` function:
 
 ```ts
 import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
 import { generateText } from 'ai';
 
-const BASETEN_MODEL_ID = '';
-const BASETEN_DEPLOYMENT_ID = null;
-
-// see https://docs.baseten.co/api-reference/openai for more information
-const basetenExtraPayload = {
-  model_id: BASETEN_MODEL_ID,
-  deployment_id: BASETEN_DEPLOYMENT_ID,
-};
+const BASETEN_MODEL_ID = ''; // e.g. 5q3z8xcw
+const BASETEN_MODEL_URL = `https://model-${BASETEN_MODEL_ID}.api.baseten.co/environments/production/sync/v1`;
 
 const baseten = createOpenAICompatible({
   name: 'baseten',
-  apiKey: process.env.BASETEN_API_KEY,
-  baseURL: 'https://bridge.baseten.co/v1/direct',
-  fetch: async (url, request) => {
-    const bodyWithBasetenPayload = JSON.stringify({
-      ...JSON.parse(request.body),
-      baseten: basetenExtraPayload,
-    });
-    return await fetch(url, { ...request, body: bodyWithBasetenPayload });
+  baseURL: BASETEN_MODEL_URL,
+  headers: {
+    Authorization: `Bearer ${process.env.BASETEN_API_KEY ?? ''}`,
   },
 });
 
 const { text } = await generateText({
-  model: baseten('ultravox'),
+  model: baseten('llama'),
   prompt: 'Tell me about yourself in one sentence',
 });
 
 console.log(text);
 ```
 
 Baseten language models are also able to generate text in a streaming fashion with the `streamText` function:
 
 ```ts
 import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
 import { streamText } from 'ai';
 
-const BASETEN_MODEL_ID = '';
-const BASETEN_DEPLOYMENT_ID = null;
-
-// see https://docs.baseten.co/api-reference/openai for more information
-const basetenExtraPayload = {
-  model_id: BASETEN_MODEL_ID,
-  deployment_id: BASETEN_DEPLOYMENT_ID,
-};
+const BASETEN_MODEL_ID = ''; // e.g. 5q3z8xcw
+const BASETEN_MODEL_URL = `https://model-${BASETEN_MODEL_ID}.api.baseten.co/environments/production/sync/v1`;
 
 const baseten = createOpenAICompatible({
   name: 'baseten',
-  apiKey: process.env.BASETEN_API_KEY,
-  baseURL: 'https://bridge.baseten.co/v1/direct',
-  fetch: async (url, request) => {
-    const bodyWithBasetenPayload = JSON.stringify({
-      ...JSON.parse(request.body),
-      baseten: basetenExtraPayload,
-    });
-    return await fetch(url, { ...request, body: bodyWithBasetenPayload });
+  baseURL: BASETEN_MODEL_URL,
+  headers: {
+    Authorization: `Bearer ${process.env.BASETEN_API_KEY ?? ''}`,
   },
 });
 
 const result = streamText({
-  model: baseten('ultravox'),
+  model: baseten('llama'),
   prompt: 'Tell me about yourself in one sentence',
 });
diff --git a/examples/ai-core/src/stream-text/baseten.ts b/examples/ai-core/src/stream-text/baseten.ts
index b1d6995f4eb9..82d33a5b3406 100644
--- a/examples/ai-core/src/stream-text/baseten.ts
+++ b/examples/ai-core/src/stream-text/baseten.ts
@@ -1,32 +1,15 @@
 import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
 import { streamText } from 'ai';
-import 'dotenv/config';
 
-const BASETEN_MODEL_ID = '';
-const BASETEN_DEPLOYMENT_ID = null;
-
-// see https://docs.baseten.co/api-reference/openai for more information
-const basetenExtraPayload = {
-  model_id: BASETEN_MODEL_ID,
-  deployment_id: BASETEN_DEPLOYMENT_ID,
-};
+const BASETEN_MODEL_ID = ''; // e.g. 5q3z8xcw
+const BASETEN_MODEL_URL = `https://model-${BASETEN_MODEL_ID}.api.baseten.co/environments/production/sync/v1`;
 
 const baseten = createOpenAICompatible({
   name: 'baseten',
+  baseURL: BASETEN_MODEL_URL,
   headers: {
     Authorization: `Bearer ${process.env.BASETEN_API_KEY ??
''}`, }, - baseURL: 'https://bridge.baseten.co/v1/direct', - fetch: async (url, request) => { - if (!request || !request.body) { - throw new Error('Request body is undefined'); - } - const bodyWithBasetenPayload = JSON.stringify({ - ...JSON.parse(String(request.body)), - baseten: basetenExtraPayload, - }); - return await fetch(url, { ...request, body: bodyWithBasetenPayload }); - }, }); async function main() { From 9477ebb92c62752b19a9ff6127fd8b8f87799f43 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Thu, 3 Apr 2025 08:46:25 +0200 Subject: [PATCH 0008/1307] chore: add changeset (#5522) --- .changeset/green-deers-scream.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .changeset/green-deers-scream.md diff --git a/.changeset/green-deers-scream.md b/.changeset/green-deers-scream.md new file mode 100644 index 000000000000..e629b089f6ac --- /dev/null +++ b/.changeset/green-deers-scream.md @@ -0,0 +1,8 @@ +--- +'@ai-sdk/ui-utils': major +'@ai-sdk/react': major +'@ai-sdk/vue': major +'ai': major +--- + +chore (ui): remove useAssistant hook (**breaking change**) From 91715e5c2d70db3eb43156116bb9f64a507c44bd Mon Sep 17 00:00:00 2001 From: Walter Korman Date: Thu, 3 Apr 2025 00:37:14 -0700 Subject: [PATCH 0009/1307] fix (provider/google-vertex): fix anthropic support for image urls in messages (#5523) --- .changeset/silent-nails-taste.md | 6 ++++ .../google-vertex-anthropic-image-url.ts | 29 +++++++++++++++++++ .../google-vertex-anthropic-image.ts | 2 +- .../src/anthropic-messages-language-model.ts | 7 +++-- packages/anthropic/src/anthropic-provider.ts | 1 + .../google-vertex-anthropic-provider.ts | 1 + 6 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 .changeset/silent-nails-taste.md create mode 100644 examples/ai-core/src/stream-text/google-vertex-anthropic-image-url.ts diff --git a/.changeset/silent-nails-taste.md b/.changeset/silent-nails-taste.md new file mode 100644 index 000000000000..6f0965942318 --- /dev/null +++ b/.changeset/silent-nails-taste.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/google-vertex': patch +'@ai-sdk/anthropic': patch +--- + +fix (provider/google-vertex): fix anthropic support for image urls in messages diff --git a/examples/ai-core/src/stream-text/google-vertex-anthropic-image-url.ts b/examples/ai-core/src/stream-text/google-vertex-anthropic-image-url.ts new file mode 100644 index 000000000000..f35c2fa4c2d4 --- /dev/null +++ b/examples/ai-core/src/stream-text/google-vertex-anthropic-image-url.ts @@ -0,0 +1,29 @@ +import 'dotenv/config'; +import { vertexAnthropic } from '@ai-sdk/google-vertex/anthropic'; +import { streamText } from 'ai'; +import fs from 'node:fs'; + +async function main() { + const result = streamText({ + model: vertexAnthropic('claude-3-7-sonnet@20250219'), + messages: [ + { + role: 'user', + content: [ + { type: 'text', text: 'Describe the image in detail.' 
},
+          {
+            type: 'image',
+            image:
+              'https://github.com/vercel/ai/blob/main/examples/ai-core/data/comic-cat.png?raw=true',
+          },
+        ],
+      },
+    ],
+  });
+
+  for await (const textPart of result.textStream) {
+    process.stdout.write(textPart);
+  }
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/stream-text/google-vertex-anthropic-image.ts b/examples/ai-core/src/stream-text/google-vertex-anthropic-image.ts
index c6ec43c63211..0e3d8933773f 100644
--- a/examples/ai-core/src/stream-text/google-vertex-anthropic-image.ts
+++ b/examples/ai-core/src/stream-text/google-vertex-anthropic-image.ts
@@ -5,7 +5,7 @@ import fs from 'node:fs';
 
 async function main() {
   const result = streamText({
-    model: vertexAnthropic('claude-3-5-sonnet-v2@20241022'),
+    model: vertexAnthropic('claude-3-7-sonnet@20250219'),
     messages: [
       {
         role: 'user',
diff --git a/packages/anthropic/src/anthropic-messages-language-model.ts b/packages/anthropic/src/anthropic-messages-language-model.ts
index ddc063a660dc..167a5a9b46af 100644
--- a/packages/anthropic/src/anthropic-messages-language-model.ts
+++ b/packages/anthropic/src/anthropic-messages-language-model.ts
@@ -1,5 +1,4 @@
 import {
-  InvalidArgumentError,
   LanguageModelV1,
   LanguageModelV1CallWarning,
   LanguageModelV1FinishReason,
@@ -33,6 +32,7 @@ type AnthropicMessagesConfig = {
   provider: string;
   baseURL: string;
   headers: Resolvable<Record<string, string | undefined>>;
+  supportsImageUrls: boolean;
   fetch?: FetchFunction;
   buildRequestUrl?: (baseURL: string, isStreaming: boolean) => string;
   transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
};
@@ -41,7 +41,6 @@ type AnthropicMessagesConfig = {
 export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
   readonly specificationVersion = 'v1';
   readonly defaultObjectGenerationMode = 'tool';
-  readonly supportsImageUrls = true;
 
   readonly modelId: AnthropicMessagesModelId;
   readonly settings: AnthropicMessagesSettings;
@@ -62,6 +61,10 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
     return this.config.provider;
   }
 
+  get supportsImageUrls(): boolean {
+    return this.config.supportsImageUrls;
+  }
+
   private async getArgs({
     mode,
     prompt,
diff --git a/packages/anthropic/src/anthropic-provider.ts b/packages/anthropic/src/anthropic-provider.ts
index 82440c29e347..7a598e834f22 100644
--- a/packages/anthropic/src/anthropic-provider.ts
+++ b/packages/anthropic/src/anthropic-provider.ts
@@ -109,6 +109,7 @@ export function createAnthropic(
     baseURL,
     headers: getHeaders,
     fetch: options.fetch,
+    supportsImageUrls: true,
   });
 
   const provider = function (
diff --git a/packages/google-vertex/src/anthropic/google-vertex-anthropic-provider.ts b/packages/google-vertex/src/anthropic/google-vertex-anthropic-provider.ts
index 1c14853eddc0..e80447d79852 100644
--- a/packages/google-vertex/src/anthropic/google-vertex-anthropic-provider.ts
+++ b/packages/google-vertex/src/anthropic/google-vertex-anthropic-provider.ts
@@ -100,6 +100,7 @@ export function createVertexAnthropic(
     baseURL,
     headers: options.headers ?? {},
     fetch: options.fetch,
+    supportsImageUrls: false,
     buildRequestUrl: (baseURL, isStreaming) =>
       `${baseURL}/${modelId}:${
        isStreaming ?
'streamRawPredict' : 'rawPredict' From 4ecbb8487d74a3e6b86b7682ce607ba90d6213b0 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Thu, 3 Apr 2025 09:38:48 +0200 Subject: [PATCH 0010/1307] chore (ci): enable v5 canary releases (#5524) --- .changeset/pre.json | 57 +++++++++++++++++++++++++++++++++++ .github/workflows/release.yml | 2 +- 2 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 .changeset/pre.json diff --git a/.changeset/pre.json b/.changeset/pre.json new file mode 100644 index 000000000000..cc72d3a7017e --- /dev/null +++ b/.changeset/pre.json @@ -0,0 +1,57 @@ +{ + "mode": "pre", + "tag": "canary", + "initialVersions": { + "ai-core-examples": "0.0.0", + "ai-sdk-express-example": "0.0.0", + "ai-sdk-fastify-example": "0.0.0", + "ai-sdk-hono-example": "0.0.0", + "ai-sdk-mcp-example": "0.0.0", + "ai-sdk-nest-example": "0.0.0", + "next-fastapi": "0.0.0", + "next-google-vertex": "0.0.0", + "next-langchain": "0.0.0", + "next-openai": "0.0.0", + "next-openai-kasada-bot-protection": "0.0.0", + "next-openai-pages": "0.0.0", + "next-openai-telemetry": "0.0.0", + "next-openai-telemetry-sentry": "0.0.0", + "next-openai-rate-limits": "0.0.0", + "ai-sdk-http-server-example": "0.0.0", + "nuxt-openai": "0.0.0", + "sveltekit-openai": "0.0.0", + "ai": "4.2.10", + "ai-core-e2e-next-server": "0.0.0", + "@ai-sdk/amazon-bedrock": "2.2.4", + "@ai-sdk/anthropic": "1.2.4", + "@ai-sdk/azure": "1.3.6", + "@ai-sdk/cerebras": "0.2.5", + "@ai-sdk/codemod": "1.2.0", + "@ai-sdk/cohere": "1.2.4", + "@ai-sdk/deepinfra": "0.2.5", + "@ai-sdk/deepseek": "0.2.5", + "@ai-sdk/fal": "0.1.4", + "@ai-sdk/fireworks": "0.2.5", + "@ai-sdk/google": "1.2.5", + "@ai-sdk/google-vertex": "2.2.7", + "@ai-sdk/groq": "1.2.3", + "@ai-sdk/luma": "0.1.3", + "@ai-sdk/mistral": "1.2.3", + "@ai-sdk/openai": "1.3.6", + "@ai-sdk/openai-compatible": "0.2.5", + "@ai-sdk/perplexity": "1.1.3", + "@ai-sdk/provider": "1.1.0", + "@ai-sdk/provider-utils": "2.2.3", + "@ai-sdk/react": "1.2.5", + "@ai-sdk/replicate": "0.2.3", + "@ai-sdk/svelte": "2.1.5", + "@ai-sdk/togetherai": "0.2.5", + "@ai-sdk/ui-utils": "1.2.4", + "@ai-sdk/valibot": "0.1.10", + "@ai-sdk/vue": "1.2.4", + "@ai-sdk/xai": "1.2.6", + "eslint-config-vercel-ai": "0.0.0", + "@vercel/ai-tsconfig": "0.0.0" + }, + "changesets": [] +} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 12eec2f442e2..03393e6fc2bd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,6 +4,7 @@ on: push: branches: - main + - v5 paths: - '.changeset/**' - '.github/workflows/release.yml' @@ -39,7 +40,6 @@ jobs: id: changesets uses: changesets/action@v1 with: - # This expects you to have a script called release which does a build for your packages and calls changeset publish version: pnpm ci:version publish: pnpm ci:release env: From 1641b0af9246abd78808577bf40d3258ccfa906f Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Thu, 3 Apr 2025 12:34:22 +0200 Subject: [PATCH 0011/1307] fix (ci): add package versions for pre-release builds (#5530) --- tools/analyze-downloads/package.json | 3 ++- tools/generate-llms-txt/package.json | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/analyze-downloads/package.json b/tools/analyze-downloads/package.json index da664ec1a401..7070b3b38d7e 100644 --- a/tools/analyze-downloads/package.json +++ b/tools/analyze-downloads/package.json @@ -1,6 +1,7 @@ { - "private": true, "name": "analyze-downloads", + "version": "0.0.0", + "private": "true", "scripts": { "analyze-versions": "tsx 
src/analyze-versions.ts", "analyze-providers": "tsx src/analyze-providers.ts", diff --git a/tools/generate-llms-txt/package.json b/tools/generate-llms-txt/package.json index 721fdf39493b..a30a5d708c72 100644 --- a/tools/generate-llms-txt/package.json +++ b/tools/generate-llms-txt/package.json @@ -1,6 +1,7 @@ { - "private": true, "name": "generate-llms-txt", + "version": "0.0.0", + "private": "true", "scripts": { "generate-llms-txt": "tsx src/generate-llms-txt.ts" }, From 802670525258386ce08f6c5f3be5bd03cdf6c1c4 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Thu, 3 Apr 2025 14:03:54 +0200 Subject: [PATCH 0012/1307] fix (core): send buffered text in smooth stream when stream parts change (#5531) Co-authored-by: Carl Brugger --- .changeset/smooth-mirrors-kneel.md | 5 + .../core/generate-text/smooth-stream.test.ts | 160 ++++++++++++++++++ .../ai/core/generate-text/smooth-stream.ts | 8 +- 3 files changed, 167 insertions(+), 6 deletions(-) create mode 100644 .changeset/smooth-mirrors-kneel.md diff --git a/.changeset/smooth-mirrors-kneel.md b/.changeset/smooth-mirrors-kneel.md new file mode 100644 index 000000000000..c90af6f5bd58 --- /dev/null +++ b/.changeset/smooth-mirrors-kneel.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (core): send buffered text in smooth stream when stream parts change diff --git a/packages/ai/core/generate-text/smooth-stream.test.ts b/packages/ai/core/generate-text/smooth-stream.test.ts index 6b01f7565ab8..7d5bbd5db00d 100644 --- a/packages/ai/core/generate-text/smooth-stream.test.ts +++ b/packages/ai/core/generate-text/smooth-stream.test.ts @@ -173,6 +173,166 @@ describe('smoothStream', () => { }, ]); }); + + it('should send remaining text buffer before tool call starts', async () => { + const stream = convertArrayToReadableStream([ + { type: 'text-delta', textDelta: 'I will check the' }, + { type: 'text-delta', textDelta: ' weather in Lon' }, + { type: 'text-delta', textDelta: 'don.' }, + { type: 'tool-call', name: 'weather', args: { city: 'London' } }, + { type: 'step-finish' }, + { type: 'finish' }, + ]).pipeThrough( + smoothStream({ + delayInMs: 10, + _internal: { delay }, + })({ tools: {} }), + ); + + await consumeStream(stream); + + expect(events).toMatchInlineSnapshot(` + [ + "delay 10", + { + "textDelta": "I ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "will ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "check ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "the ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "weather ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "in ", + "type": "text-delta", + }, + { + "textDelta": "London.", + "type": "text-delta", + }, + { + "args": { + "city": "London", + }, + "name": "weather", + "type": "tool-call", + }, + { + "type": "step-finish", + }, + { + "type": "finish", + }, + ] + `); + }); + + it('should send remaining text buffer before tool call starts and tool call streaming is enabled', async () => { + const stream = convertArrayToReadableStream([ + { type: 'text-delta', textDelta: 'I will check the' }, + { type: 'text-delta', textDelta: ' weather in Lon' }, + { type: 'text-delta', textDelta: 'don.' 
}, + { + type: 'tool-call-streaming-start', + name: 'weather', + args: { city: 'London' }, + }, + { type: 'tool-call-delta', name: 'weather', args: { city: 'London' } }, + { type: 'tool-call', name: 'weather', args: { city: 'London' } }, + { type: 'step-finish' }, + { type: 'finish' }, + ]).pipeThrough( + smoothStream({ + delayInMs: 10, + _internal: { delay }, + })({ tools: {} }), + ); + + await consumeStream(stream); + + expect(events).toMatchInlineSnapshot(` + [ + "delay 10", + { + "textDelta": "I ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "will ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "check ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "the ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "weather ", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "in ", + "type": "text-delta", + }, + { + "textDelta": "London.", + "type": "text-delta", + }, + { + "args": { + "city": "London", + }, + "name": "weather", + "type": "tool-call-streaming-start", + }, + { + "args": { + "city": "London", + }, + "name": "weather", + "type": "tool-call-delta", + }, + { + "args": { + "city": "London", + }, + "name": "weather", + "type": "tool-call", + }, + { + "type": "step-finish", + }, + { + "type": "finish", + }, + ] + `); + }); }); describe('line chunking', () => { diff --git a/packages/ai/core/generate-text/smooth-stream.ts b/packages/ai/core/generate-text/smooth-stream.ts index 2f0ef085aee4..3cca81c15ee9 100644 --- a/packages/ai/core/generate-text/smooth-stream.ts +++ b/packages/ai/core/generate-text/smooth-stream.ts @@ -44,9 +44,10 @@ export function smoothStream({ return () => { let buffer = ''; + return new TransformStream, TextStreamPart>({ async transform(chunk, controller) { - if (chunk.type === 'step-finish') { + if (chunk.type !== 'text-delta') { if (buffer.length > 0) { controller.enqueue({ type: 'text-delta', textDelta: buffer }); buffer = ''; @@ -56,11 +57,6 @@ export function smoothStream({ return; } - if (chunk.type !== 'text-delta') { - controller.enqueue(chunk); - return; - } - buffer += chunk.textDelta; let match; From d5f588f0cb29228e2a6805d3092f65a50dee95e3 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Thu, 3 Apr 2025 14:17:24 +0200 Subject: [PATCH 0013/1307] chore: AI SDK 5 canary start (#5534) --- .changeset/cuddly-icons-kick.md | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 .changeset/cuddly-icons-kick.md diff --git a/.changeset/cuddly-icons-kick.md b/.changeset/cuddly-icons-kick.md new file mode 100644 index 000000000000..f912f644f070 --- /dev/null +++ b/.changeset/cuddly-icons-kick.md @@ -0,0 +1,33 @@ +--- +'@ai-sdk/provider-utils': major +'@ai-sdk/google-vertex': major +'@ai-sdk/anthropic': major +'@ai-sdk/ui-utils': major +'@ai-sdk/react': major +'@ai-sdk/vue': major +'ai': major +'@ai-sdk/amazon-bedrock': major +'@ai-sdk/azure': major +'@ai-sdk/cerebras': major +'@ai-sdk/codemod': major +'@ai-sdk/cohere': major +'@ai-sdk/deepinfra': major +'@ai-sdk/deepseek': major +'@ai-sdk/fal': major +'@ai-sdk/fireworks': major +'@ai-sdk/google': major +'@ai-sdk/groq': major +'@ai-sdk/luma': major +'@ai-sdk/mistral': major +'@ai-sdk/openai': major +'@ai-sdk/openai-compatible': major +'@ai-sdk/perplexity': major +'@ai-sdk/provider': major +'@ai-sdk/replicate': major +'@ai-sdk/svelte': major +'@ai-sdk/togetherai': major +'@ai-sdk/valibot': major +'@ai-sdk/xai': major +--- + +AI SDK 5 From 0751f8a469b0f10219c969771262e1b78dac45d1 Mon Sep 17 00:00:00 2001 
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 14:51:58 +0200 Subject: [PATCH 0014/1307] Version Packages (canary) (#5533) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .changeset/pre.json | 11 +- examples/ai-core/package.json | 46 +-- examples/express/package.json | 4 +- examples/fastify/package.json | 4 +- examples/hono/package.json | 4 +- examples/mcp/package.json | 4 +- examples/nest/package.json | 4 +- examples/next-fastapi/package.json | 6 +- examples/next-google-vertex/package.json | 4 +- examples/next-langchain/package.json | 4 +- .../package.json | 6 +- examples/next-openai-pages/package.json | 6 +- .../next-openai-telemetry-sentry/package.json | 6 +- examples/next-openai-telemetry/package.json | 6 +- .../package.json | 6 +- examples/next-openai/package.json | 20 +- examples/node-http-server/package.json | 4 +- examples/nuxt-openai/package.json | 6 +- examples/sveltekit-openai/package.json | 10 +- packages/ai/CHANGELOG.md | 17 ++ packages/ai/package.json | 10 +- .../ai/tests/e2e/next-server/CHANGELOG.md | 9 + packages/amazon-bedrock/CHANGELOG.md | 12 + packages/amazon-bedrock/package.json | 6 +- packages/anthropic/CHANGELOG.md | 13 + packages/anthropic/package.json | 6 +- packages/azure/CHANGELOG.md | 13 + packages/azure/package.json | 8 +- packages/cerebras/CHANGELOG.md | 13 + packages/cerebras/package.json | 8 +- packages/codemod/CHANGELOG.md | 6 + packages/codemod/package.json | 2 +- packages/cohere/CHANGELOG.md | 12 + packages/cohere/package.json | 6 +- packages/deepinfra/CHANGELOG.md | 13 + packages/deepinfra/package.json | 8 +- packages/deepseek/CHANGELOG.md | 13 + packages/deepseek/package.json | 8 +- packages/fal/CHANGELOG.md | 12 + packages/fal/package.json | 6 +- packages/fireworks/CHANGELOG.md | 13 + packages/fireworks/package.json | 8 +- packages/google-vertex/CHANGELOG.md | 16 + packages/google-vertex/package.json | 10 +- packages/google/CHANGELOG.md | 12 + packages/google/package.json | 6 +- packages/groq/CHANGELOG.md | 12 + packages/groq/package.json | 6 +- packages/luma/CHANGELOG.md | 12 + packages/luma/package.json | 6 +- packages/mistral/CHANGELOG.md | 12 + packages/mistral/package.json | 6 +- packages/openai-compatible/CHANGELOG.md | 12 + packages/openai-compatible/package.json | 6 +- packages/openai/CHANGELOG.md | 12 + packages/openai/package.json | 6 +- packages/perplexity/CHANGELOG.md | 12 + packages/perplexity/package.json | 6 +- packages/provider-utils/CHANGELOG.md | 11 + packages/provider-utils/package.json | 4 +- packages/provider/CHANGELOG.md | 6 + packages/provider/package.json | 2 +- packages/react/CHANGELOG.md | 14 + packages/react/package.json | 6 +- packages/replicate/CHANGELOG.md | 12 + packages/replicate/package.json | 6 +- packages/svelte/CHANGELOG.md | 13 + packages/svelte/package.json | 6 +- packages/togetherai/CHANGELOG.md | 13 + packages/togetherai/package.json | 8 +- packages/ui-utils/CHANGELOG.md | 13 + packages/ui-utils/package.json | 6 +- packages/valibot/CHANGELOG.md | 13 + packages/valibot/package.json | 4 +- packages/vue/CHANGELOG.md | 14 + packages/vue/package.json | 6 +- packages/xai/CHANGELOG.md | 13 + packages/xai/package.json | 8 +- pnpm-lock.yaml | 276 +++++++++--------- 79 files changed, 682 insertions(+), 307 deletions(-) diff --git a/.changeset/pre.json b/.changeset/pre.json index cc72d3a7017e..3a069d09c502 100644 --- a/.changeset/pre.json +++ b/.changeset/pre.json @@ -51,7 +51,14 @@ "@ai-sdk/vue": "1.2.4", 
"@ai-sdk/xai": "1.2.6", "eslint-config-vercel-ai": "0.0.0", - "@vercel/ai-tsconfig": "0.0.0" + "@vercel/ai-tsconfig": "0.0.0", + "analyze-downloads": "0.0.0", + "generate-llms-txt": "0.0.0" }, - "changesets": [] + "changesets": [ + "cuddly-icons-kick", + "green-deers-scream", + "silent-nails-taste", + "smooth-mirrors-kneel" + ] } diff --git a/examples/ai-core/package.json b/examples/ai-core/package.json index 261e7ea931d7..ff6b006ee995 100644 --- a/examples/ai-core/package.json +++ b/examples/ai-core/package.json @@ -3,33 +3,33 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/amazon-bedrock": "2.2.4", - "@ai-sdk/anthropic": "1.2.4", - "@ai-sdk/azure": "1.3.6", - "@ai-sdk/cerebras": "0.2.5", - "@ai-sdk/cohere": "1.2.4", - "@ai-sdk/deepinfra": "0.2.5", - "@ai-sdk/deepseek": "0.2.5", - "@ai-sdk/fal": "0.1.4", - "@ai-sdk/fireworks": "0.2.5", - "@ai-sdk/google": "1.2.5", - "@ai-sdk/google-vertex": "2.2.7", - "@ai-sdk/groq": "1.2.3", - "@ai-sdk/luma": "0.1.3", - "@ai-sdk/mistral": "1.2.3", - "@ai-sdk/openai": "1.3.6", - "@ai-sdk/openai-compatible": "0.2.5", - "@ai-sdk/perplexity": "1.1.3", - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/replicate": "0.2.3", - "@ai-sdk/togetherai": "0.2.5", - "@ai-sdk/xai": "1.2.6", - "@ai-sdk/valibot": "0.1.10", + "@ai-sdk/amazon-bedrock": "3.0.0-canary.0", + "@ai-sdk/anthropic": "2.0.0-canary.0", + "@ai-sdk/azure": "2.0.0-canary.0", + "@ai-sdk/cerebras": "1.0.0-canary.0", + "@ai-sdk/cohere": "2.0.0-canary.0", + "@ai-sdk/deepinfra": "1.0.0-canary.0", + "@ai-sdk/deepseek": "1.0.0-canary.0", + "@ai-sdk/fal": "1.0.0-canary.0", + "@ai-sdk/fireworks": "1.0.0-canary.0", + "@ai-sdk/google": "2.0.0-canary.0", + "@ai-sdk/google-vertex": "3.0.0-canary.0", + "@ai-sdk/groq": "2.0.0-canary.0", + "@ai-sdk/luma": "1.0.0-canary.0", + "@ai-sdk/mistral": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/perplexity": "2.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/replicate": "1.0.0-canary.0", + "@ai-sdk/togetherai": "1.0.0-canary.0", + "@ai-sdk/xai": "2.0.0-canary.0", + "@ai-sdk/valibot": "1.0.0-canary.0", "@google/generative-ai": "0.21.0", "@opentelemetry/auto-instrumentations-node": "0.54.0", "@opentelemetry/sdk-node": "0.54.2", "@opentelemetry/sdk-trace-node": "1.28.0", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "dotenv": "16.4.5", "image-type": "^5.2.0", "mathjs": "14.0.0", diff --git a/examples/express/package.json b/examples/express/package.json index b4a92051eb38..b90610da1b18 100644 --- a/examples/express/package.json +++ b/examples/express/package.json @@ -7,8 +7,8 @@ "type-check": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "1.3.6", - "ai": "4.2.10", + "@ai-sdk/openai": "2.0.0-canary.0", + "ai": "5.0.0-canary.0", "dotenv": "16.4.5", "express": "5.0.1" }, diff --git a/examples/fastify/package.json b/examples/fastify/package.json index d22d2ec2b114..8e137218dde0 100644 --- a/examples/fastify/package.json +++ b/examples/fastify/package.json @@ -3,8 +3,8 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "1.3.6", - "ai": "4.2.10", + "@ai-sdk/openai": "2.0.0-canary.0", + "ai": "5.0.0-canary.0", "dotenv": "16.4.5", "fastify": "5.1.0" }, diff --git a/examples/hono/package.json b/examples/hono/package.json index bd21f6a1e2ab..2dc55889e0fd 100644 --- a/examples/hono/package.json +++ b/examples/hono/package.json @@ -3,9 +3,9 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "1.3.6", + "@ai-sdk/openai": 
"2.0.0-canary.0", "@hono/node-server": "1.13.7", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "dotenv": "16.4.5", "hono": "4.6.9" }, diff --git a/examples/mcp/package.json b/examples/mcp/package.json index 2fe5216c4508..c73449d76dc4 100644 --- a/examples/mcp/package.json +++ b/examples/mcp/package.json @@ -12,9 +12,9 @@ "type-check": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "1.3.6", + "@ai-sdk/openai": "2.0.0-canary.0", "@modelcontextprotocol/sdk": "^1.7.0", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "dotenv": "16.4.5", "express": "5.0.1", "zod": "3.23.8" diff --git a/examples/nest/package.json b/examples/nest/package.json index 83527c26dc83..6c4f85ca4f35 100644 --- a/examples/nest/package.json +++ b/examples/nest/package.json @@ -15,11 +15,11 @@ "lint": "eslint \"{src,apps,libs,test}/**/*.ts\" --fix" }, "dependencies": { - "@ai-sdk/openai": "1.3.6", + "@ai-sdk/openai": "2.0.0-canary.0", "@nestjs/common": "^10.4.15", "@nestjs/core": "^10.0.0", "@nestjs/platform-express": "^10.4.9", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "reflect-metadata": "^0.2.0", "rxjs": "^7.8.1" }, diff --git a/examples/next-fastapi/package.json b/examples/next-fastapi/package.json index 23b28546a504..a6f061afb721 100644 --- a/examples/next-fastapi/package.json +++ b/examples/next-fastapi/package.json @@ -11,9 +11,9 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/ui-utils": "1.2.4", - "@ai-sdk/react": "1.2.5", - "ai": "4.2.10", + "@ai-sdk/ui-utils": "2.0.0-canary.0", + "@ai-sdk/react": "2.0.0-canary.0", + "ai": "5.0.0-canary.0", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-google-vertex/package.json b/examples/next-google-vertex/package.json index 6e3a9e99252a..37a9b405d834 100644 --- a/examples/next-google-vertex/package.json +++ b/examples/next-google-vertex/package.json @@ -9,8 +9,8 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/google-vertex": "2.2.7", - "ai": "4.2.10", + "@ai-sdk/google-vertex": "3.0.0-canary.0", + "ai": "5.0.0-canary.0", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-langchain/package.json b/examples/next-langchain/package.json index 88976a1fda17..0b0cf59ee4f0 100644 --- a/examples/next-langchain/package.json +++ b/examples/next-langchain/package.json @@ -9,10 +9,10 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/react": "1.2.5", + "@ai-sdk/react": "2.0.0-canary.0", "@langchain/openai": "0.0.28", "@langchain/core": "0.1.63", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "langchain": "0.1.36", "next": "latest", "react": "^18", diff --git a/examples/next-openai-kasada-bot-protection/package.json b/examples/next-openai-kasada-bot-protection/package.json index 7b69e7a3bc2e..e5258f289576 100644 --- a/examples/next-openai-kasada-bot-protection/package.json +++ b/examples/next-openai-kasada-bot-protection/package.json @@ -9,10 +9,10 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "1.3.6", - "@ai-sdk/react": "1.2.5", + "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/react": "2.0.0-canary.0", "@vercel/functions": "latest", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai-pages/package.json b/examples/next-openai-pages/package.json index 4ca73016cdfa..1eae5e1f38ec 100644 --- a/examples/next-openai-pages/package.json +++ b/examples/next-openai-pages/package.json @@ -9,9 +9,9 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "1.3.6", - "@ai-sdk/react": "1.2.5", - "ai": "4.2.10", + 
"@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/react": "2.0.0-canary.0", + "ai": "5.0.0-canary.0", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry-sentry/package.json b/examples/next-openai-telemetry-sentry/package.json index b2bc9a0cbbe9..cf2a6a800201 100644 --- a/examples/next-openai-telemetry-sentry/package.json +++ b/examples/next-openai-telemetry-sentry/package.json @@ -9,15 +9,15 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "1.3.6", - "@ai-sdk/react": "1.2.5", + "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/react": "2.0.0-canary.0", "@opentelemetry/api-logs": "0.55.0", "@opentelemetry/instrumentation": "0.52.1", "@opentelemetry/sdk-logs": "0.55.0", "@sentry/nextjs": "^8.42.0", "@sentry/opentelemetry": "8.22.0", "@vercel/otel": "1.10.0", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry/package.json b/examples/next-openai-telemetry/package.json index bf409af4266f..5e1e221350c0 100644 --- a/examples/next-openai-telemetry/package.json +++ b/examples/next-openai-telemetry/package.json @@ -9,13 +9,13 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "1.3.6", - "@ai-sdk/react": "1.2.5", + "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/react": "2.0.0-canary.0", "@opentelemetry/api-logs": "0.55.0", "@opentelemetry/sdk-logs": "0.55.0", "@opentelemetry/instrumentation": "0.52.1", "@vercel/otel": "1.10.0", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-upstash-rate-limits/package.json b/examples/next-openai-upstash-rate-limits/package.json index dfd8213067a5..50cbf8010b11 100644 --- a/examples/next-openai-upstash-rate-limits/package.json +++ b/examples/next-openai-upstash-rate-limits/package.json @@ -9,11 +9,11 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "1.3.6", - "@ai-sdk/react": "1.2.5", + "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/react": "2.0.0-canary.0", "@upstash/ratelimit": "^0.4.3", "@vercel/kv": "^0.2.2", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json index 03979804c95e..30ba065323dc 100644 --- a/examples/next-openai/package.json +++ b/examples/next-openai/package.json @@ -9,17 +9,17 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/anthropic": "1.2.4", - "@ai-sdk/deepseek": "0.2.5", - "@ai-sdk/fireworks": "0.2.5", - "@ai-sdk/openai": "1.3.6", - "@ai-sdk/google": "1.2.5", - "@ai-sdk/google-vertex": "2.2.7", - "@ai-sdk/perplexity": "1.1.3", - "@ai-sdk/ui-utils": "1.2.4", - "@ai-sdk/react": "1.2.5", + "@ai-sdk/anthropic": "2.0.0-canary.0", + "@ai-sdk/deepseek": "1.0.0-canary.0", + "@ai-sdk/fireworks": "1.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/google": "2.0.0-canary.0", + "@ai-sdk/google-vertex": "3.0.0-canary.0", + "@ai-sdk/perplexity": "2.0.0-canary.0", + "@ai-sdk/ui-utils": "2.0.0-canary.0", + "@ai-sdk/react": "2.0.0-canary.0", "@vercel/blob": "^0.26.0", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/node-http-server/package.json b/examples/node-http-server/package.json index 09ff6fecb6aa..a1e9df859fe4 100644 --- a/examples/node-http-server/package.json +++ b/examples/node-http-server/package.json @@ -3,8 +3,8 @@ "version": "0.0.0", "private": true, "dependencies": { - 
"@ai-sdk/openai": "1.3.6", - "ai": "4.2.10", + "@ai-sdk/openai": "2.0.0-canary.0", + "ai": "5.0.0-canary.0", "dotenv": "16.4.5", "zod": "3.23.8", "zod-to-json-schema": "3.23.5" diff --git a/examples/nuxt-openai/package.json b/examples/nuxt-openai/package.json index 429177683a28..03b2908d6455 100644 --- a/examples/nuxt-openai/package.json +++ b/examples/nuxt-openai/package.json @@ -9,9 +9,9 @@ "postinstall": "nuxt prepare" }, "dependencies": { - "@ai-sdk/vue": "1.2.4", - "@ai-sdk/openai": "1.3.6", - "ai": "4.2.10", + "@ai-sdk/vue": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.0", + "ai": "5.0.0-canary.0", "zod": "3.23.8" }, "devDependencies": { diff --git a/examples/sveltekit-openai/package.json b/examples/sveltekit-openai/package.json index 71a50d46cd59..b478d62aba0c 100644 --- a/examples/sveltekit-openai/package.json +++ b/examples/sveltekit-openai/package.json @@ -16,16 +16,16 @@ }, "type": "module", "devDependencies": { - "@ai-sdk/openai": "1.3.6", - "@ai-sdk/provider-utils": "2.2.3", - "@ai-sdk/svelte": "2.1.5", - "@ai-sdk/ui-utils": "1.2.4", + "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0", + "@ai-sdk/svelte": "3.0.0-canary.0", + "@ai-sdk/ui-utils": "2.0.0-canary.0", "@eslint/compat": "^1.2.5", "@eslint/js": "^9.18.0", "@sveltejs/adapter-vercel": "^5.5.2", "@sveltejs/kit": "^2.16.0", "@sveltejs/vite-plugin-svelte": "^5.0.0", - "ai": "4.2.10", + "ai": "5.0.0-canary.0", "autoprefixer": "^10.4.20", "bits-ui": "^1.3.9", "clsx": "^2.1.1", diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index b5e5cdf62b93..57feb8b448a1 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -1,5 +1,22 @@ # ai +## 5.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 +- 9477ebb: chore (ui): remove useAssistant hook (**breaking change**) + +### Patch Changes + +- 8026705: fix (core): send buffered text in smooth stream when stream parts change +- Updated dependencies [d5f588f] +- Updated dependencies [9477ebb] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/ui-utils@2.0.0-canary.0 + - @ai-sdk/react@2.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 4.2.10 ### Patch Changes diff --git a/packages/ai/package.json b/packages/ai/package.json index 6bebdcff74fd..dd4e07a4d058 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -1,6 +1,6 @@ { "name": "ai", - "version": "4.2.10", + "version": "5.0.0-canary.0", "description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript", "license": "Apache-2.0", "sideEffects": false, @@ -66,10 +66,10 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3", - "@ai-sdk/react": "1.2.5", - "@ai-sdk/ui-utils": "1.2.4", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0", + "@ai-sdk/react": "2.0.0-canary.0", + "@ai-sdk/ui-utils": "2.0.0-canary.0", "@opentelemetry/api": "1.9.0", "jsondiffpatch": "0.6.0" }, diff --git a/packages/ai/tests/e2e/next-server/CHANGELOG.md b/packages/ai/tests/e2e/next-server/CHANGELOG.md index d89e10b106c9..0033698732f5 100644 --- a/packages/ai/tests/e2e/next-server/CHANGELOG.md +++ b/packages/ai/tests/e2e/next-server/CHANGELOG.md @@ -1,5 +1,14 @@ # ai-core-e2e-next-server +## 0.0.1-canary.0 + +### Patch Changes + +- Updated dependencies [d5f588f] +- Updated dependencies [9477ebb] +- Updated dependencies [8026705] + - ai@5.0.0-canary.0 + ## 0.0.1 ### Patch Changes diff --git a/packages/amazon-bedrock/CHANGELOG.md b/packages/amazon-bedrock/CHANGELOG.md index 
277c8e2d10bc..8813ddc85b83 100644 --- a/packages/amazon-bedrock/CHANGELOG.md +++ b/packages/amazon-bedrock/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/amazon-bedrock +## 3.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 2.2.4 ### Patch Changes diff --git a/packages/amazon-bedrock/package.json b/packages/amazon-bedrock/package.json index be1ea856e756..cbed052d4462 100644 --- a/packages/amazon-bedrock/package.json +++ b/packages/amazon-bedrock/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/amazon-bedrock", - "version": "2.2.4", + "version": "3.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0", "@smithy/eventstream-codec": "^4.0.1", "@smithy/util-utf8": "^4.0.0", "aws4fetch": "^1.0.20" diff --git a/packages/anthropic/CHANGELOG.md b/packages/anthropic/CHANGELOG.md index 689856753b14..ebcb9c88bff8 100644 --- a/packages/anthropic/CHANGELOG.md +++ b/packages/anthropic/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/anthropic +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- 91715e5: fix (provider/google-vertex): fix anthropic support for image urls in messages +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.2.4 ### Patch Changes diff --git a/packages/anthropic/package.json b/packages/anthropic/package.json index 1e2f2d5ad6da..abd8b4ecbc17 100644 --- a/packages/anthropic/package.json +++ b/packages/anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/anthropic", - "version": "1.2.4", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,8 +37,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/azure/CHANGELOG.md b/packages/azure/CHANGELOG.md index c2a5f87fff91..db0b22deb542 100644 --- a/packages/azure/CHANGELOG.md +++ b/packages/azure/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/azure +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/openai@2.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.3.6 ### Patch Changes diff --git a/packages/azure/package.json b/packages/azure/package.json index ff2d92b8130f..9b1a23387535 100644 --- a/packages/azure/package.json +++ b/packages/azure/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/azure", - "version": "1.3.6", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,9 +31,9 @@ } }, "dependencies": { - "@ai-sdk/openai": "1.3.6", - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/cerebras/CHANGELOG.md b/packages/cerebras/CHANGELOG.md index 45fe1286eeb7..07ea5234e859 100644 --- a/packages/cerebras/CHANGELOG.md +++ 
b/packages/cerebras/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/cerebras +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/openai-compatible@1.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 0.2.5 ### Patch Changes diff --git a/packages/cerebras/package.json b/packages/cerebras/package.json index 870a0dbcea4b..3ab7366ca219 100644 --- a/packages/cerebras/package.json +++ b/packages/cerebras/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/cerebras", - "version": "0.2.5", + "version": "1.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "0.2.5", - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/codemod/CHANGELOG.md b/packages/codemod/CHANGELOG.md index 1ba04b8b1325..dfa4b4633f27 100644 --- a/packages/codemod/CHANGELOG.md +++ b/packages/codemod/CHANGELOG.md @@ -1,5 +1,11 @@ # @ai-sdk/codemod +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + ## 1.2.0 ### Minor Changes diff --git a/packages/codemod/package.json b/packages/codemod/package.json index 1156e2d0b649..4be1da38d4a4 100644 --- a/packages/codemod/package.json +++ b/packages/codemod/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/codemod", - "version": "1.2.0", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "files": [ diff --git a/packages/cohere/CHANGELOG.md b/packages/cohere/CHANGELOG.md index 1270637b8e16..8585748aaed8 100644 --- a/packages/cohere/CHANGELOG.md +++ b/packages/cohere/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/cohere +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.2.4 ### Patch Changes diff --git a/packages/cohere/package.json b/packages/cohere/package.json index 504eafbf1ca2..947929c5dfca 100644 --- a/packages/cohere/package.json +++ b/packages/cohere/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/cohere", - "version": "1.2.4", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/deepinfra/CHANGELOG.md b/packages/deepinfra/CHANGELOG.md index 913480777e74..fb4d7a390f6e 100644 --- a/packages/deepinfra/CHANGELOG.md +++ b/packages/deepinfra/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/deepinfra +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/openai-compatible@1.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 0.2.5 ### Patch Changes diff --git a/packages/deepinfra/package.json b/packages/deepinfra/package.json index 9fbb3b5d730b..cf25cf3e27c6 100644 --- a/packages/deepinfra/package.json +++ b/packages/deepinfra/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/deepinfra", - "version": "0.2.5", + "version": "1.0.0-canary.0", "license": 
"Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "0.2.5", - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/deepseek/CHANGELOG.md b/packages/deepseek/CHANGELOG.md index e1fde2d52f9f..b06004773799 100644 --- a/packages/deepseek/CHANGELOG.md +++ b/packages/deepseek/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/deepseek +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/openai-compatible@1.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 0.2.5 ### Patch Changes diff --git a/packages/deepseek/package.json b/packages/deepseek/package.json index e07cfc9ccb50..74dc804436a2 100644 --- a/packages/deepseek/package.json +++ b/packages/deepseek/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/deepseek", - "version": "0.2.5", + "version": "1.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "0.2.5", - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/fal/CHANGELOG.md b/packages/fal/CHANGELOG.md index caf5b088c60b..5683f4faab4a 100644 --- a/packages/fal/CHANGELOG.md +++ b/packages/fal/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/fal +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 0.1.4 ### Patch Changes diff --git a/packages/fal/package.json b/packages/fal/package.json index 092606a4d979..66b0f77da1c7 100644 --- a/packages/fal/package.json +++ b/packages/fal/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/fal", - "version": "0.1.4", + "version": "1.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/fireworks/CHANGELOG.md b/packages/fireworks/CHANGELOG.md index 2126488c81f1..15cf7cdffa18 100644 --- a/packages/fireworks/CHANGELOG.md +++ b/packages/fireworks/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/fireworks +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/openai-compatible@1.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 0.2.5 ### Patch Changes diff --git a/packages/fireworks/package.json b/packages/fireworks/package.json index a21951f0aa80..b7626e554d3a 100644 --- a/packages/fireworks/package.json +++ b/packages/fireworks/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/fireworks", - "version": "0.2.5", + "version": "1.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": 
"0.2.5", - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/google-vertex/CHANGELOG.md b/packages/google-vertex/CHANGELOG.md index ca17793d1136..075e29612fc0 100644 --- a/packages/google-vertex/CHANGELOG.md +++ b/packages/google-vertex/CHANGELOG.md @@ -1,5 +1,21 @@ # @ai-sdk/google-vertex +## 3.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- 91715e5: fix (provider/google-vertex): fix anthropic support for image urls in messages +- Updated dependencies [d5f588f] +- Updated dependencies [91715e5] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/anthropic@2.0.0-canary.0 + - @ai-sdk/google@2.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 2.2.7 ### Patch Changes diff --git a/packages/google-vertex/package.json b/packages/google-vertex/package.json index 4d96920378f6..bf84d39f2782 100644 --- a/packages/google-vertex/package.json +++ b/packages/google-vertex/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google-vertex", - "version": "2.2.7", + "version": "3.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -49,10 +49,10 @@ } }, "dependencies": { - "@ai-sdk/anthropic": "1.2.4", - "@ai-sdk/google": "1.2.5", - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3", + "@ai-sdk/anthropic": "2.0.0-canary.0", + "@ai-sdk/google": "2.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0", "google-auth-library": "^9.15.0" }, "devDependencies": { diff --git a/packages/google/CHANGELOG.md b/packages/google/CHANGELOG.md index 44da41030ded..059921036f0a 100644 --- a/packages/google/CHANGELOG.md +++ b/packages/google/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/google +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.2.5 ### Patch Changes diff --git a/packages/google/package.json b/packages/google/package.json index 8ae46dd09ff0..b77ba4b124a9 100644 --- a/packages/google/package.json +++ b/packages/google/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google", - "version": "1.2.5", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,8 +38,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/groq/CHANGELOG.md b/packages/groq/CHANGELOG.md index 104016e417dc..4274cb7d8cf1 100644 --- a/packages/groq/CHANGELOG.md +++ b/packages/groq/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/groq +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.2.3 ### Patch Changes diff --git a/packages/groq/package.json b/packages/groq/package.json index c7875a77e49d..312ae518efd7 100644 --- a/packages/groq/package.json +++ b/packages/groq/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/groq", - "version": "1.2.3", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 
@@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/luma/CHANGELOG.md b/packages/luma/CHANGELOG.md index f9efba8cf43a..e35bcbe886da 100644 --- a/packages/luma/CHANGELOG.md +++ b/packages/luma/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/luma +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 0.1.3 ### Patch Changes diff --git a/packages/luma/package.json b/packages/luma/package.json index 71644bd83c33..f0fefe0e3ecd 100644 --- a/packages/luma/package.json +++ b/packages/luma/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/luma", - "version": "0.1.3", + "version": "1.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/mistral/CHANGELOG.md b/packages/mistral/CHANGELOG.md index ab6df6bd1df0..946965a5237a 100644 --- a/packages/mistral/CHANGELOG.md +++ b/packages/mistral/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/mistral +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.2.3 ### Patch Changes diff --git a/packages/mistral/package.json b/packages/mistral/package.json index 8186d77e0c6a..28390f995dff 100644 --- a/packages/mistral/package.json +++ b/packages/mistral/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/mistral", - "version": "1.2.3", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/openai-compatible/CHANGELOG.md b/packages/openai-compatible/CHANGELOG.md index 864481d86a8b..ae846be5a219 100644 --- a/packages/openai-compatible/CHANGELOG.md +++ b/packages/openai-compatible/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/openai-compatible +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 0.2.5 ### Patch Changes diff --git a/packages/openai-compatible/package.json b/packages/openai-compatible/package.json index e6c197f2747a..09e4a0410967 100644 --- a/packages/openai-compatible/package.json +++ b/packages/openai-compatible/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai-compatible", - "version": "0.2.5", + "version": "1.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,8 +38,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/openai/CHANGELOG.md b/packages/openai/CHANGELOG.md index 
ec4bb3a9da6a..3d949a7c9b92 100644 --- a/packages/openai/CHANGELOG.md +++ b/packages/openai/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/openai +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.3.6 ### Patch Changes diff --git a/packages/openai/package.json b/packages/openai/package.json index 66d1a887adf6..ed76d103b09c 100644 --- a/packages/openai/package.json +++ b/packages/openai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai", - "version": "1.3.6", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,8 +38,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/perplexity/CHANGELOG.md b/packages/perplexity/CHANGELOG.md index efc8102c433c..766ae3773e4d 100644 --- a/packages/perplexity/CHANGELOG.md +++ b/packages/perplexity/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/perplexity +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.1.3 ### Patch Changes diff --git a/packages/perplexity/package.json b/packages/perplexity/package.json index b41c5c4ceedd..1ebbf3c199a8 100644 --- a/packages/perplexity/package.json +++ b/packages/perplexity/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/perplexity", - "version": "1.1.3", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/provider-utils/CHANGELOG.md b/packages/provider-utils/CHANGELOG.md index f1db57e93808..a11b1c1788fd 100644 --- a/packages/provider-utils/CHANGELOG.md +++ b/packages/provider-utils/CHANGELOG.md @@ -1,5 +1,16 @@ # @ai-sdk/provider-utils +## 3.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider@2.0.0-canary.0 + ## 2.2.3 ### Patch Changes diff --git a/packages/provider-utils/package.json b/packages/provider-utils/package.json index e42f89effcc0..d760ec3592e1 100644 --- a/packages/provider-utils/package.json +++ b/packages/provider-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/provider-utils", - "version": "2.2.3", + "version": "3.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,7 +37,7 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", + "@ai-sdk/provider": "2.0.0-canary.0", "nanoid": "^3.3.8", "secure-json-parse": "^2.7.0" }, diff --git a/packages/provider/CHANGELOG.md b/packages/provider/CHANGELOG.md index f57f90edb9bc..d5305a386dd8 100644 --- a/packages/provider/CHANGELOG.md +++ b/packages/provider/CHANGELOG.md @@ -1,5 +1,11 @@ # @ai-sdk/provider +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + ## 1.1.0 ### Minor Changes diff --git a/packages/provider/package.json b/packages/provider/package.json index 4f340332f119..b7c60385b76b 100644 --- a/packages/provider/package.json +++ 
b/packages/provider/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/provider", - "version": "1.1.0", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", diff --git a/packages/react/CHANGELOG.md b/packages/react/CHANGELOG.md index 1d02fe297ba0..34425d33b860 100644 --- a/packages/react/CHANGELOG.md +++ b/packages/react/CHANGELOG.md @@ -1,5 +1,19 @@ # @ai-sdk/react +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 +- 9477ebb: chore (ui): remove useAssistant hook (**breaking change**) + +### Patch Changes + +- Updated dependencies [d5f588f] +- Updated dependencies [9477ebb] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/ui-utils@2.0.0-canary.0 + ## 1.2.5 ### Patch Changes diff --git a/packages/react/package.json b/packages/react/package.json index 778f112abe46..aeff131688fa 100644 --- a/packages/react/package.json +++ b/packages/react/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/react", - "version": "1.2.5", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "2.2.3", - "@ai-sdk/ui-utils": "1.2.4", + "@ai-sdk/provider-utils": "3.0.0-canary.0", + "@ai-sdk/ui-utils": "2.0.0-canary.0", "swr": "^2.2.5", "throttleit": "2.1.0" }, diff --git a/packages/replicate/CHANGELOG.md b/packages/replicate/CHANGELOG.md index e7d78cc88cf6..4576643441cd 100644 --- a/packages/replicate/CHANGELOG.md +++ b/packages/replicate/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/replicate +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 0.2.3 ### Patch Changes diff --git a/packages/replicate/package.json b/packages/replicate/package.json index d06c01f23ec4..2c02e51c7659 100644 --- a/packages/replicate/package.json +++ b/packages/replicate/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/replicate", - "version": "0.2.3", + "version": "1.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/svelte/CHANGELOG.md b/packages/svelte/CHANGELOG.md index a76f7634c401..2955dd330a02 100644 --- a/packages/svelte/CHANGELOG.md +++ b/packages/svelte/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/svelte +## 3.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] +- Updated dependencies [9477ebb] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/ui-utils@2.0.0-canary.0 + ## 2.1.5 ### Patch Changes diff --git a/packages/svelte/package.json b/packages/svelte/package.json index 5b9fb56c4176..a22860d8edf1 100644 --- a/packages/svelte/package.json +++ b/packages/svelte/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/svelte", - "version": "2.1.5", + "version": "3.0.0-canary.0", "license": "Apache-2.0", "scripts": { "build": "pnpm prepack", @@ -51,8 +51,8 @@ } }, "dependencies": { - "@ai-sdk/provider-utils": "2.2.3", - "@ai-sdk/ui-utils": "1.2.4" + "@ai-sdk/provider-utils": "3.0.0-canary.0", + "@ai-sdk/ui-utils": "2.0.0-canary.0" }, "devDependencies": { "@eslint/compat": "^1.2.5", diff --git a/packages/togetherai/CHANGELOG.md 
b/packages/togetherai/CHANGELOG.md index 7c36b548a285..9c3fe0531343 100644 --- a/packages/togetherai/CHANGELOG.md +++ b/packages/togetherai/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/togetherai +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/openai-compatible@1.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 0.2.5 ### Patch Changes diff --git a/packages/togetherai/package.json b/packages/togetherai/package.json index 592157dd4737..b10ff4a117c7 100644 --- a/packages/togetherai/package.json +++ b/packages/togetherai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/togetherai", - "version": "0.2.5", + "version": "1.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "0.2.5", - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/ui-utils/CHANGELOG.md b/packages/ui-utils/CHANGELOG.md index f721b1262a9b..b9a9b8bc3ba9 100644 --- a/packages/ui-utils/CHANGELOG.md +++ b/packages/ui-utils/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/ui-utils +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 +- 9477ebb: chore (ui): remove useAssistant hook (**breaking change**) + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.2.4 ### Patch Changes diff --git a/packages/ui-utils/package.json b/packages/ui-utils/package.json index 615899297fdd..032a245ed1e7 100644 --- a/packages/ui-utils/package.json +++ b/packages/ui-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/ui-utils", - "version": "1.2.4", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,8 +37,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0", "zod-to-json-schema": "^3.24.1" }, "devDependencies": { diff --git a/packages/valibot/CHANGELOG.md b/packages/valibot/CHANGELOG.md index d0b6a4c6e3f6..bba2e8bae06f 100644 --- a/packages/valibot/CHANGELOG.md +++ b/packages/valibot/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/valibot +## 1.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] +- Updated dependencies [9477ebb] +- Updated dependencies [8026705] + - ai@5.0.0-canary.0 + ## 0.1.10 ### Patch Changes diff --git a/packages/valibot/package.json b/packages/valibot/package.json index f92be48a0f3e..d9c964928f15 100644 --- a/packages/valibot/package.json +++ b/packages/valibot/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/valibot", - "version": "0.1.10", + "version": "1.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -27,7 +27,7 @@ } }, "dependencies": { - "ai": "4.2.10" + "ai": "5.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/vue/CHANGELOG.md b/packages/vue/CHANGELOG.md index fed108ae7668..8039ef331024 100644 --- a/packages/vue/CHANGELOG.md +++ b/packages/vue/CHANGELOG.md @@ -1,5 +1,19 @@ # @ai-sdk/vue +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 +- 9477ebb: chore (ui): 
remove useAssistant hook (**breaking change**) + +### Patch Changes + +- Updated dependencies [d5f588f] +- Updated dependencies [9477ebb] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/ui-utils@2.0.0-canary.0 + ## 1.2.4 ### Patch Changes diff --git a/packages/vue/package.json b/packages/vue/package.json index 358e390ac03e..2891e060ed66 100644 --- a/packages/vue/package.json +++ b/packages/vue/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/vue", - "version": "1.2.4", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "2.2.3", - "@ai-sdk/ui-utils": "1.2.4", + "@ai-sdk/provider-utils": "3.0.0-canary.0", + "@ai-sdk/ui-utils": "2.0.0-canary.0", "swrv": "^1.0.4" }, "devDependencies": { diff --git a/packages/xai/CHANGELOG.md b/packages/xai/CHANGELOG.md index fc2fc64a3d27..50074b1f2b6f 100644 --- a/packages/xai/CHANGELOG.md +++ b/packages/xai/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/xai +## 2.0.0-canary.0 + +### Major Changes + +- d5f588f: AI SDK 5 + +### Patch Changes + +- Updated dependencies [d5f588f] + - @ai-sdk/provider-utils@3.0.0-canary.0 + - @ai-sdk/openai-compatible@1.0.0-canary.0 + - @ai-sdk/provider@2.0.0-canary.0 + ## 1.2.6 ### Patch Changes diff --git a/packages/xai/package.json b/packages/xai/package.json index 7e339c17c07d..c38ebffe4bf0 100644 --- a/packages/xai/package.json +++ b/packages/xai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/xai", - "version": "1.2.6", + "version": "2.0.0-canary.0", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "0.2.5", - "@ai-sdk/provider": "1.1.0", - "@ai-sdk/provider-utils": "2.2.3" + "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.0" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f142f577594a..53b2743fdc2e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -60,70 +60,70 @@ importers: examples/ai-core: dependencies: '@ai-sdk/amazon-bedrock': - specifier: 2.2.4 + specifier: 3.0.0-canary.0 version: link:../../packages/amazon-bedrock '@ai-sdk/anthropic': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../../packages/anthropic '@ai-sdk/azure': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/azure '@ai-sdk/cerebras': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../../packages/cerebras '@ai-sdk/cohere': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../../packages/cohere '@ai-sdk/deepinfra': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../../packages/deepinfra '@ai-sdk/deepseek': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../../packages/deepseek '@ai-sdk/fal': - specifier: 0.1.4 + specifier: 1.0.0-canary.0 version: link:../../packages/fal '@ai-sdk/fireworks': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 2.2.7 + specifier: 3.0.0-canary.0 version: link:../../packages/google-vertex '@ai-sdk/groq': - specifier: 1.2.3 + specifier: 2.0.0-canary.0 version: link:../../packages/groq '@ai-sdk/luma': - specifier: 0.1.3 + specifier: 1.0.0-canary.0 version: link:../../packages/luma '@ai-sdk/mistral': - specifier: 
1.2.3 + specifier: 2.0.0-canary.0 version: link:../../packages/mistral '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@ai-sdk/openai-compatible': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../../packages/openai-compatible '@ai-sdk/perplexity': - specifier: 1.1.3 + specifier: 2.0.0-canary.0 version: link:../../packages/perplexity '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../../packages/provider '@ai-sdk/replicate': - specifier: 0.2.3 + specifier: 1.0.0-canary.0 version: link:../../packages/replicate '@ai-sdk/togetherai': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../../packages/togetherai '@ai-sdk/valibot': - specifier: 0.1.10 + specifier: 1.0.0-canary.0 version: link:../../packages/valibot '@ai-sdk/xai': - specifier: 1.2.6 + specifier: 2.0.0-canary.0 version: link:../../packages/xai '@google/generative-ai': specifier: 0.21.0 @@ -138,7 +138,7 @@ importers: specifier: 1.28.0 version: 1.28.0(@opentelemetry/api@1.9.0) ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -178,10 +178,10 @@ importers: examples/express: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -206,10 +206,10 @@ importers: examples/fastify: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -231,13 +231,13 @@ importers: examples/hono: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@hono/node-server': specifier: 1.13.7 version: 1.13.7(hono@4.6.9) ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -259,13 +259,13 @@ importers: examples/mcp: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@modelcontextprotocol/sdk': specifier: ^1.7.0 version: 1.7.0 ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -293,7 +293,7 @@ importers: examples/nest: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@nestjs/common': specifier: ^10.4.15 @@ -305,7 +305,7 @@ importers: specifier: ^10.4.9 version: 10.4.9(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2) ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai reflect-metadata: specifier: ^0.2.0 @@ -381,13 +381,13 @@ importers: examples/next-fastapi: dependencies: '@ai-sdk/react': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../../packages/react '@ai-sdk/ui-utils': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../../packages/ui-utils ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -436,10 +436,10 @@ importers: examples/next-google-vertex: dependencies: '@ai-sdk/google-vertex': - specifier: 2.2.7 + specifier: 3.0.0-canary.0 version: link:../../packages/google-vertex ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -479,7 
+479,7 @@ importers: examples/next-langchain: dependencies: '@ai-sdk/react': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../../packages/react '@langchain/core': specifier: 0.1.63 @@ -488,7 +488,7 @@ importers: specifier: 0.0.28 version: 0.0.28 ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai langchain: specifier: 0.1.36 @@ -534,37 +534,37 @@ importers: examples/next-openai: dependencies: '@ai-sdk/anthropic': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../../packages/anthropic '@ai-sdk/deepseek': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../../packages/deepseek '@ai-sdk/fireworks': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 2.2.7 + specifier: 3.0.0-canary.0 version: link:../../packages/google-vertex '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@ai-sdk/perplexity': - specifier: 1.1.3 + specifier: 2.0.0-canary.0 version: link:../../packages/perplexity '@ai-sdk/react': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../../packages/react '@ai-sdk/ui-utils': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../../packages/ui-utils '@vercel/blob': specifier: ^0.26.0 version: 0.26.0 ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai next: specifier: latest @@ -616,16 +616,16 @@ importers: examples/next-openai-kasada-bot-protection: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@ai-sdk/react': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../../packages/react '@vercel/functions': specifier: latest version: 2.0.0(@aws-sdk/credential-provider-web-identity@3.662.0(@aws-sdk/client-sts@3.662.0)) ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai next: specifier: latest @@ -671,13 +671,13 @@ importers: examples/next-openai-pages: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@ai-sdk/react': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../../packages/react ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai next: specifier: latest @@ -726,10 +726,10 @@ importers: examples/next-openai-telemetry: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@ai-sdk/react': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../../packages/react '@opentelemetry/api-logs': specifier: 0.55.0 @@ -744,7 +744,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.29.0(@opentelemetry/api@1.9.0)) ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai next: specifier: latest @@ -793,10 +793,10 @@ importers: examples/next-openai-telemetry-sentry: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@ai-sdk/react': - specifier: 1.2.5 + 
specifier: 2.0.0-canary.0 version: link:../../packages/react '@opentelemetry/api-logs': specifier: 0.55.0 @@ -817,7 +817,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.28.0(@opentelemetry/api@1.9.0)) ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai next: specifier: latest @@ -866,10 +866,10 @@ importers: examples/next-openai-upstash-rate-limits: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@ai-sdk/react': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../../packages/react '@upstash/ratelimit': specifier: ^0.4.3 @@ -878,7 +878,7 @@ importers: specifier: ^0.2.2 version: 0.2.4 ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai next: specifier: latest @@ -924,10 +924,10 @@ importers: examples/node-http-server: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -952,13 +952,13 @@ importers: examples/nuxt-openai: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@ai-sdk/vue': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../../packages/vue ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai zod: specifier: 3.23.8 @@ -1007,16 +1007,16 @@ importers: examples/sveltekit-openai: devDependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../../packages/openai '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../../packages/provider-utils '@ai-sdk/svelte': - specifier: 2.1.5 + specifier: 3.0.0-canary.0 version: link:../../packages/svelte '@ai-sdk/ui-utils': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../../packages/ui-utils '@eslint/compat': specifier: ^1.2.5 @@ -1034,7 +1034,7 @@ importers: specifier: ^5.0.0 version: 5.0.3(svelte@5.22.4)(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) ai: - specifier: 4.2.10 + specifier: 5.0.0-canary.0 version: link:../../packages/ai autoprefixer: specifier: ^10.4.20 @@ -1091,16 +1091,16 @@ importers: packages/ai: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils '@ai-sdk/react': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../react '@ai-sdk/ui-utils': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../ui-utils '@opentelemetry/api': specifier: 1.9.0 @@ -1170,10 +1170,10 @@ importers: packages/amazon-bedrock: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils '@smithy/eventstream-codec': specifier: ^4.0.1 @@ -1204,10 +1204,10 @@ importers: packages/anthropic: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 
2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1229,13 +1229,13 @@ importers: packages/azure: dependencies: '@ai-sdk/openai': - specifier: 1.3.6 + specifier: 2.0.0-canary.0 version: link:../openai '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1257,13 +1257,13 @@ importers: packages/cerebras: dependencies: '@ai-sdk/openai-compatible': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1334,10 +1334,10 @@ importers: packages/cohere: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1359,13 +1359,13 @@ importers: packages/deepinfra: dependencies: '@ai-sdk/openai-compatible': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1387,13 +1387,13 @@ importers: packages/deepseek: dependencies: '@ai-sdk/openai-compatible': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1415,10 +1415,10 @@ importers: packages/fal: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1440,13 +1440,13 @@ importers: packages/fireworks: dependencies: '@ai-sdk/openai-compatible': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1468,10 +1468,10 @@ importers: packages/google: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1493,16 +1493,16 @@ importers: packages/google-vertex: dependencies: '@ai-sdk/anthropic': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../anthropic '@ai-sdk/google': - specifier: 1.2.5 + specifier: 2.0.0-canary.0 version: link:../google '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils 
google-auth-library: specifier: ^9.15.0 @@ -1527,10 +1527,10 @@ importers: packages/groq: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1552,10 +1552,10 @@ importers: packages/luma: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1577,10 +1577,10 @@ importers: packages/mistral: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1602,10 +1602,10 @@ importers: packages/openai: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1627,10 +1627,10 @@ importers: packages/openai-compatible: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1652,10 +1652,10 @@ importers: packages/perplexity: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1699,7 +1699,7 @@ importers: packages/provider-utils: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider nanoid: specifier: ^3.3.8 @@ -1730,10 +1730,10 @@ importers: packages/react: dependencies: '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../ui-utils react: specifier: ^18 || ^19 || ^19.0.0-rc @@ -1794,10 +1794,10 @@ importers: packages/replicate: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1819,10 +1819,10 @@ importers: packages/svelte: dependencies: '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 1.2.4 + specifier: 2.0.0-canary.0 version: link:../ui-utils devDependencies: '@eslint/compat': @@ -1883,13 +1883,13 @@ importers: packages/togetherai: dependencies: '@ai-sdk/openai-compatible': - specifier: 0.2.5 + specifier: 1.0.0-canary.0 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 3.0.0-canary.0 version: link:../provider-utils devDependencies: '@types/node': @@ -1911,10 +1911,10 @@ importers: packages/ui-utils: dependencies: '@ai-sdk/provider': - specifier: 1.1.0 + specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 2.2.3 + specifier: 
3.0.0-canary.0
         version: link:../provider-utils
       zod-to-json-schema:
         specifier: ^3.24.1
@@ -1948,7 +1948,7 @@ importers:
         specifier: ^1.0.0-rc.0 || ^1.0.0
         version: 1.0.0-rc.0(valibot@1.0.0-rc.0(typescript@5.6.3))
       ai:
-        specifier: 4.2.10
+        specifier: 5.0.0-canary.0
         version: link:../ai
     devDependencies:
       '@types/node':
@@ -1970,10 +1970,10 @@ importers:
   packages/vue:
     dependencies:
       '@ai-sdk/provider-utils':
-        specifier: 2.2.3
+        specifier: 3.0.0-canary.0
         version: link:../provider-utils
       '@ai-sdk/ui-utils':
-        specifier: 1.2.4
+        specifier: 2.0.0-canary.0
         version: link:../ui-utils
       swrv:
         specifier: ^1.0.4
@@ -2025,13 +2025,13 @@ importers:
   packages/xai:
     dependencies:
       '@ai-sdk/openai-compatible':
-        specifier: 0.2.5
+        specifier: 1.0.0-canary.0
         version: link:../openai-compatible
       '@ai-sdk/provider':
-        specifier: 1.1.0
+        specifier: 2.0.0-canary.0
         version: link:../provider
       '@ai-sdk/provider-utils':
-        specifier: 2.2.3
+        specifier: 3.0.0-canary.0
         version: link:../provider-utils
    devDependencies:
      '@types/node':

From 060370c7854f169174913cb6b00dc3eab9e7ef57 Mon Sep 17 00:00:00 2001
From: Sam Denty
Date: Thu, 3 Apr 2025 13:45:09 +0100
Subject: [PATCH 0015/1307] chore (ui/react,svelte): update more tests to unified test server (#5509)

---
 .changeset/clean-numbers-cover.md             |   5 +
 .../src/test/unified-test-server.ts           |   4 +
 packages/react/src/use-object.ui.test.tsx     | 348 ++++++-------
 packages/svelte/src/completion.svelte.test.ts | 289 +++++------
 .../src/structured-object.svelte.test.ts      | 459 ++++++++----------
 5 files changed, 513 insertions(+), 592 deletions(-)
 create mode 100644 .changeset/clean-numbers-cover.md

diff --git a/.changeset/clean-numbers-cover.md b/.changeset/clean-numbers-cover.md
new file mode 100644
index 000000000000..590b5df0eec2
--- /dev/null
+++ b/.changeset/clean-numbers-cover.md
@@ -0,0 +1,5 @@
+---
+'@ai-sdk/provider-utils': patch
+---
+
+feat(provider-utils): add TestServerCall#requestCredentials
diff --git a/packages/provider-utils/src/test/unified-test-server.ts b/packages/provider-utils/src/test/unified-test-server.ts
index 47b2688de3a6..b03798379c75 100644
--- a/packages/provider-utils/src/test/unified-test-server.ts
+++ b/packages/provider-utils/src/test/unified-test-server.ts
@@ -62,6 +62,10 @@ class TestServerCall {
     return this.request!.text().then(JSON.parse);
   }
 
+  get requestCredentials() {
+    return this.request!.credentials;
+  }
+
   get requestHeaders() {
     const requestHeaders = this.request!.headers;
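The new requestCredentials getter simply exposes the credentials mode of the
fetch Request captured by the test server. A minimal usage sketch (the route
name and test body here are hypothetical; the real assertions appear in the
migrated tests below):

  const server = createTestServer({ '/api/example': {} });

  it('forwards fetch credentials', async () => {
    server.urls['/api/example'].response = {
      type: 'stream-chunks',
      chunks: ['{}'],
    };

    // run the code under test, e.g. a hook configured with
    // credentials: 'include', so that it requests /api/example, then:
    expect(server.calls[0].requestCredentials).toBe('include');
  });
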
diff --git a/packages/react/src/use-object.ui.test.tsx b/packages/react/src/use-object.ui.test.tsx
index 275716079072..e77402d9cc37 100644
--- a/packages/react/src/use-object.ui.test.tsx
+++ b/packages/react/src/use-object.ui.test.tsx
@@ -1,6 +1,6 @@
 import {
-  describeWithTestServer,
-  withTestServer,
+  createTestServer,
+  TestResponseController,
 } from '@ai-sdk/provider-utils/test';
 import '@testing-library/jest-dom/vitest';
 import { cleanup, render, screen, waitFor } from '@testing-library/react';
@@ -8,6 +8,10 @@ import userEvent from '@testing-library/user-event';
 import { z } from 'zod';
 import { experimental_useObject } from './use-object';
 
+const server = createTestServer({
+  '/api/use-object': {},
+});
+
 describe('text stream', () => {
   let onErrorResult: Error | undefined;
   let onFinishCalls: Array<{
@@ -69,208 +73,178 @@ describe('text stream', () => {
   beforeEach(() => {
     render();
   });
 
-  describeWithTestServer(
-    "when the API returns 'Hello, world!'",
-    {
-      url: '/api/use-object',
-      type: 'stream-values',
-      content: ['{ ', '"content": "Hello, ', 'world', '!"'],
-    },
-    ({ call }) => {
-      beforeEach(async () => {
-        await userEvent.click(screen.getByTestId('submit-button'));
-      });
-
-      it('should render stream', async () => {
-        await screen.findByTestId('object');
-        expect(screen.getByTestId('object')).toHaveTextContent(
-          JSON.stringify({ content: 'Hello, world!' }),
-        );
-      });
-
-      it("should send 'test' to the API", async () => {
-        expect(await call(0).getRequestBodyJson()).toBe('test-input');
-      });
-
-      it('should not have an error', async () => {
-        await screen.findByTestId('error');
-        expect(screen.getByTestId('error')).toBeEmptyDOMElement();
-        expect(onErrorResult).toBeUndefined();
-      });
-    },
-  );
+  describe("when the API returns 'Hello, world!'", () => {
+    beforeEach(async () => {
+      server.urls['/api/use-object'].response = {
+        type: 'stream-chunks',
+        chunks: ['{ ', '"content": "Hello, ', 'world', '!"'],
+      };
+      await userEvent.click(screen.getByTestId('submit-button'));
+    });
+
+    it('should render stream', async () => {
+      await screen.findByTestId('object');
+      expect(screen.getByTestId('object')).toHaveTextContent(
+        JSON.stringify({ content: 'Hello, world!' }),
+      );
+    });
+
+    it("should send 'test' to the API", async () => {
+      expect(await server.calls[0].requestBody).toBe('test-input');
+    });
+
+    it('should not have an error', async () => {
+      await screen.findByTestId('error');
+      expect(screen.getByTestId('error')).toBeEmptyDOMElement();
+      expect(onErrorResult).toBeUndefined();
+    });
+  });
 
-  describe('isLoading', async () => {
-    it(
-      'should be true while loading',
-      withTestServer(
-        { url: '/api/use-object', type: 'controlled-stream' },
-        async ({ streamController }) => {
-          streamController.enqueue('{"content": ');
-
-          await userEvent.click(screen.getByTestId('submit-button'));
-
-          // wait for element "loading" to have text content "true":
-          await waitFor(() => {
-            expect(screen.getByTestId('loading')).toHaveTextContent('true');
-          });
-
-          streamController.enqueue('"Hello, world!"}');
-          streamController.close();
-
-          // wait for element "loading" to have text content "false":
-          await waitFor(() => {
-            expect(screen.getByTestId('loading')).toHaveTextContent('false');
-          });
-        },
-      ),
-    );
+  describe('isLoading', () => {
+    it('should be true while loading', async () => {
+      const controller = new TestResponseController();
+      server.urls['/api/use-object'].response = {
+        type: 'controlled-stream',
+        controller,
+      };
+
+      controller.write('{"content": ');
+      await userEvent.click(screen.getByTestId('submit-button'));
+
+      // wait for element "loading" to have text content "true":
+      await waitFor(() => {
+        expect(screen.getByTestId('loading')).toHaveTextContent('true');
+      });
+
+      controller.write('"Hello, world!"}');
+      controller.close();
+
+      // wait for element "loading" to have text content "false":
+      await waitFor(() => {
+        expect(screen.getByTestId('loading')).toHaveTextContent('false');
+      });
+    });
   });
 
   describe('stop', async () => {
-    it(
-      'should abort the stream and not consume any more data',
-      withTestServer(
-        { url: '/api/use-object', type: 'controlled-stream' },
-        async ({ streamController }) => {
-          streamController.enqueue('{"content": "h');
-
-          userEvent.click(screen.getByTestId('submit-button'));
-
-          // wait for element "loading" and "object" to have text content:
-          await waitFor(() => {
-            expect(screen.getByTestId('loading')).toHaveTextContent('true');
-          });
-          await waitFor(() => {
-            expect(screen.getByTestId('object')).toHaveTextContent(
-              '{"content":"h"}',
-            );
-          });
-
-          // click stop button:
-          await userEvent.click(screen.getByTestId('stop-button'));
-
-          // wait for element "loading" to have text content "false":
-          await waitFor(() => {
-            expect(screen.getByTestId('loading')).toHaveTextContent('false');
-          });
-
-          // this should not be consumed any more:
-          streamController.enqueue('ello, world!"}');
-          streamController.close();
-
-          // should only show start of object:
-          expect(screen.getByTestId('object')).toHaveTextContent(
-            '{"content":"h"}',
-          );
-        },
-      ),
-    );
+    it('should abort the stream and not consume any more data', async () => {
+      const controller = new TestResponseController();
+      server.urls['/api/use-object'].response = {
+        type: 'controlled-stream',
+        controller,
+      };
+
+      controller.write('{"content": "h');
+      await userEvent.click(screen.getByTestId('submit-button'));
+
+      // wait for element "loading" and "object" to have text content:
+      await waitFor(() => {
+        expect(screen.getByTestId('loading')).toHaveTextContent('true');
+      });
+      await waitFor(() => {
+        expect(screen.getByTestId('object')).toHaveTextContent(
+          '{"content":"h"}',
+        );
+      });
+
+      // click stop button:
+      await userEvent.click(screen.getByTestId('stop-button'));
+
+      // wait for element "loading" to have text content "false":
+      await waitFor(() => {
+        expect(screen.getByTestId('loading')).toHaveTextContent('false');
+      });
+
+      // this should not be consumed any more:
+      controller.write('ello, world!"}');
+      controller.close();
+
+      // should only show start of object:
+      await waitFor(() => {
+        expect(screen.getByTestId('object')).toHaveTextContent(
+          '{"content":"h"}',
+        );
+      });
+    });
   });
 
   describe('when the API returns a 404', () => {
-    it(
-      'should render error',
-      withTestServer(
-        {
-          url: '/api/use-object',
-          type: 'error',
-          status: 404,
-          content: 'Not found',
-        },
-        async () => {
-          await userEvent.click(screen.getByTestId('submit-button'));
-
-          await screen.findByTestId('error');
-          expect(screen.getByTestId('error')).toHaveTextContent('Not found');
-          expect(onErrorResult).toBeInstanceOf(Error);
-          expect(screen.getByTestId('loading')).toHaveTextContent('false');
-        },
-      ),
-    );
+    it('should render error', async () => {
+      server.urls['/api/use-object'].response = {
+        type: 'error',
+        status: 404,
+        body: 'Not found',
+      };
+
+      await userEvent.click(screen.getByTestId('submit-button'));
+
+      await screen.findByTestId('error');
+      expect(screen.getByTestId('error')).toHaveTextContent('Not found');
+      expect(onErrorResult).toBeInstanceOf(Error);
+      expect(screen.getByTestId('loading')).toHaveTextContent('false');
+    });
   });
 
   describe('onFinish', () => {
-    it(
-      'should be called with an object when the stream finishes and the object matches the schema',
-      withTestServer(
-        {
-          url: '/api/use-object',
-          type: 'stream-values',
-          content: ['{ ', '"content": "Hello, ', 'world', '!"', '}'],
-        },
-        async () => {
-          await userEvent.click(screen.getByTestId('submit-button'));
-
-          expect(onFinishCalls).toStrictEqual([
-            { object: { content: 'Hello, world!' }, error: undefined },
-          ]);
-        },
-      ),
-    );
-
-    it(
-      'should be called with an error when the stream finishes and the object does not match the schema',
-      withTestServer(
-        {
-          url: '/api/use-object',
-          type: 'stream-values',
-          content: ['{ ', '"content-wrong": "Hello, ', 'world', '!"', '}'],
-        },
-        async () => {
-          await userEvent.click(screen.getByTestId('submit-button'));
-
-          expect(onFinishCalls).toStrictEqual([
-            { object: undefined, error: expect.any(Error) },
-          ]);
-        },
-      ),
-    );
+    it('should be called with an object when the stream finishes and the object matches the schema', async () => {
+      server.urls['/api/use-object'].response = {
+        type: 'stream-chunks',
+        chunks: ['{ ', '"content": "Hello, ', 'world', '!"', '}'],
+      };
+
+      await userEvent.click(screen.getByTestId('submit-button'));
+
+      expect(onFinishCalls).toStrictEqual([
+        { object: { content: 'Hello, world!' }, error: undefined },
+      ]);
+    });
+
+    it('should be called with an error when the stream finishes and the object does not match the schema', async () => {
+      server.urls['/api/use-object'].response = {
+        type: 'stream-chunks',
+        chunks: ['{ ', '"content-wrong": "Hello, ', 'world', '!"', '}'],
+      };
+
+      await userEvent.click(screen.getByTestId('submit-button'));
+
+      expect(onFinishCalls).toStrictEqual([
+        { object: undefined, error: expect.any(Error) },
+      ]);
+    });
   });
 
-  it(
-    'should send custom headers',
-    withTestServer(
-      {
-        url: '/api/use-object',
-        type: 'stream-values',
-        content: ['{ ', '"content": "Hello, ', 'world', '!"', '}'],
-      },
-      async ({ call }) => {
-        render(
-          ,
-        );
-
-        await userEvent.click(screen.getByTestId('submit-button'));
-
-        expect(call(0).getRequestHeaders()).toStrictEqual({
-          'content-type': 'application/json',
-          authorization: 'Bearer TEST_TOKEN',
-          'x-custom-header': 'CustomValue',
-        });
-      },
-    ),
-  );
-
-  it(
-    'should send custom credentials',
-    withTestServer(
-      {
-        url: '/api/use-object',
-        type: 'stream-values',
-        content: ['{ ', '"content": "Authenticated ', 'content', '!"', '}'],
-      },
-      async ({ call }) => {
-        render();
-        await userEvent.click(screen.getByTestId('submit-button'));
-        expect(call(0).getRequestCredentials()).toBe('include');
-      },
-    ),
-  );
+  it('should send custom headers', async () => {
+    server.urls['/api/use-object'].response = {
+      type: 'stream-chunks',
+      chunks: ['{ ', '"content": "Hello, ', 'world', '!"', '}'],
+    };
+
+    render(
+      ,
+    );
+
+    await userEvent.click(screen.getByTestId('submit-button'));
+
+    expect(server.calls[0].requestHeaders).toStrictEqual({
+      'content-type': 'application/json',
+      authorization: 'Bearer TEST_TOKEN',
+      'x-custom-header': 'CustomValue',
+    });
+  });
+
+  it('should send custom credentials', async () => {
+    server.urls['/api/use-object'].response = {
+      type: 'stream-chunks',
+      chunks: ['{ ', '"content": "Authenticated ', 'content', '!"', '}'],
+    };
+
+    render();
+    await userEvent.click(screen.getByTestId('submit-button'));
+    expect(server.calls[0].requestCredentials).toBe('include');
+  });
 });
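Both test-file migrations in this patch follow the same shape: the per-test
withTestServer wrapper becomes one module-level createTestServer whose per-URL
response is assigned inside each test, and streamController.enqueue / close
become write / close on a TestResponseController. A minimal sketch of the new
pattern (route name hypothetical, not part of the patch):

  const server = createTestServer({ '/api/example': {} });

  it('streams a controlled response', async () => {
    const controller = new TestResponseController();
    server.urls['/api/example'].response = {
      type: 'controlled-stream',
      controller,
    };

    // the code under test requests /api/example; the test drips chunks:
    controller.write('first chunk');
    controller.close();
  });
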
expect(completion.completion).toBe('Hello, world.'); - }, - ), - ); + it('should render a data stream', async () => { + server.urls['/api/completion'].response = { + type: 'stream-chunks', + chunks: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], + }; - it( - 'should render a text stream', - withTestServer( - { - type: 'stream-values', - url: '/api/completion', - content: ['Hello', ',', ' world', '.'], - }, - async () => { - const completion = new Completion({ streamProtocol: 'text' }); - await completion.complete('hi'); - expect(completion.completion).toBe('Hello, world.'); - }, - ), - ); + const completion = new Completion(); + await completion.complete('hi'); + expect(completion.completion).toBe('Hello, world.'); + }); - it( - 'should call `onFinish` callback', - withTestServer( - { - type: 'stream-values', - url: '/api/completion', - content: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], - }, - async () => { - const onFinish = vi.fn(); - const completion = new Completion({ onFinish }); - await completion.complete('hi'); - expect(onFinish).toHaveBeenCalledExactlyOnceWith('hi', 'Hello, world.'); - }, - ), - ); - - it( - 'should show loading state', - withTestServer( - { url: '/api/completion', type: 'controlled-stream' }, - async ({ streamController }) => { - const completion = new Completion(); - const completionOperation = completion.complete('hi'); - streamController.enqueue('0:"Hello"\n'); - await vi.waitFor(() => expect(completion.loading).toBe(true)); - streamController.close(); - await completionOperation; - expect(completion.loading).toBe(false); - }, - ), - ); + it('should render a text stream', async () => { + server.urls['/api/completion'].response = { + type: 'stream-chunks', + chunks: ['Hello', ',', ' world', '.'], + }; + + const completion = new Completion({ streamProtocol: 'text' }); + await completion.complete('hi'); + expect(completion.completion).toBe('Hello, world.'); + }); + + it('should call `onFinish` callback', async () => { + server.urls['/api/completion'].response = { + type: 'stream-chunks', + chunks: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], + }; + + const onFinish = vi.fn(); + const completion = new Completion({ onFinish }); + await completion.complete('hi'); + expect(onFinish).toHaveBeenCalledExactlyOnceWith('hi', 'Hello, world.'); + }); + + it('should show loading state', async () => { + const controller = new TestResponseController(); + server.urls['/api/completion'].response = { + type: 'controlled-stream', + controller, + }; + + const completion = new Completion(); + const completionOperation = completion.complete('hi'); + controller.write('0:"Hello"\n'); + await vi.waitFor(() => expect(completion.loading).toBe(true)); + controller.close(); + await completionOperation; + expect(completion.loading).toBe(false); + }); + + it('should reset loading state on error', async () => { + server.urls['/api/completion'].response = { + type: 'error', + status: 404, + body: 'Not found', + }; - it( - 'should reset loading state on error', - withTestServer( + const completion = new Completion(); + await completion.complete('hi'); + expect(completion.error).toBeInstanceOf(Error); + expect(completion.loading).toBe(false); + }); + + it('should reset error state on subsequent completion', async () => { + server.urls['/api/completion'].response = [ { type: 'error', - url: '/api/completion', status: 404, - content: 'Not found', - }, - async () => { - const completion = new Completion(); - await completion.complete('hi'); - 
expect(completion.error).toBeInstanceOf(Error); - expect(completion.loading).toBe(false); + body: 'Not found', }, - ), - ); - - it( - 'should reset error state on subsequent completion', - withTestServer( - [ - { - type: 'error', - url: '/api/completion', - status: 404, - content: 'Not found', - }, - { - type: 'stream-values', - url: '/api/completion', - content: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], - }, - ], - async () => { - const completion = new Completion(); - await completion.complete('hi'); - expect(completion.error).toBeInstanceOf(Error); - expect(completion.loading).toBe(false); - await completion.complete('hi'); - expect(completion.error).toBe(undefined); - expect(completion.completion).toBe('Hello, world.'); + { + type: 'stream-chunks', + chunks: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], }, - ), - ); + ]; + + const completion = new Completion(); + await completion.complete('hi'); + expect(completion.error).toBeInstanceOf(Error); + expect(completion.loading).toBe(false); + await completion.complete('hi'); + expect(completion.error).toBe(undefined); + expect(completion.completion).toBe('Hello, world.'); + }); }); describe('synchronization', () => { - it( - 'correctly synchronizes content between hook instances', - withTestServer( - { - type: 'stream-values', - url: '/api/completion', - content: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], - }, - async () => { - const { - component: { completion1, completion2 }, - } = render(CompletionSynchronization, { id: crypto.randomUUID() }); + it('correctly synchronizes content between hook instances', async () => { + server.urls['/api/completion'].response = { + type: 'stream-chunks', + chunks: ['0:"Hello"\n', '0:","\n', '0:" world"\n', '0:"."\n'], + }; - await completion1.complete('hi'); + const { + component: { completion1, completion2 }, + } = render(CompletionSynchronization, { id: crypto.randomUUID() }); - expect(completion1.completion).toBe('Hello, world.'); - expect(completion2.completion).toBe('Hello, world.'); - }, - ), - ); + await completion1.complete('hi'); - it( - 'correctly synchronizes loading and error state between hook instances', - withTestServer( - { - type: 'controlled-stream', - url: '/api/completion', - }, - async ({ streamController }) => { - const { - component: { completion1, completion2 }, - } = render(CompletionSynchronization, { id: crypto.randomUUID() }); - - const completionOperation = completion1.complete('hi'); - - await vi.waitFor(() => { - expect(completion1.loading).toBe(true); - expect(completion2.loading).toBe(true); - }); - - streamController.enqueue('0:"Hello"\n'); - await vi.waitFor(() => { - expect(completion1.completion).toBe('Hello'); - expect(completion2.completion).toBe('Hello'); - }); - - streamController.error(new Error('Failed to be cool enough')); - await completionOperation; - - expect(completion1.loading).toBe(false); - expect(completion2.loading).toBe(false); - expect(completion1.error).toBeInstanceOf(Error); - expect(completion1.error?.message).toBe('Failed to be cool enough'); - expect(completion2.error).toBeInstanceOf(Error); - expect(completion2.error?.message).toBe('Failed to be cool enough'); - }, - ), - ); + expect(completion1.completion).toBe('Hello, world.'); + expect(completion2.completion).toBe('Hello, world.'); + }); + + it('correctly synchronizes loading and error state between hook instances', async () => { + const controller = new TestResponseController(); + server.urls['/api/completion'].response = { + type: 'controlled-stream', + 
controller, + }; + + const { + component: { completion1, completion2 }, + } = render(CompletionSynchronization, { id: crypto.randomUUID() }); + + const completionOperation = completion1.complete('hi'); + + await vi.waitFor(() => { + expect(completion1.loading).toBe(true); + expect(completion2.loading).toBe(true); + }); + + controller.write('0:"Hello"\n'); + await vi.waitFor(() => { + expect(completion1.completion).toBe('Hello'); + expect(completion2.completion).toBe('Hello'); + }); + + controller.error(new Error('Failed to be cool enough')); + await completionOperation; + + expect(completion1.loading).toBe(false); + expect(completion2.loading).toBe(false); + expect(completion1.error).toBeInstanceOf(Error); + expect(completion1.error?.message).toBe('Failed to be cool enough'); + expect(completion2.error).toBeInstanceOf(Error); + expect(completion2.error?.message).toBe('Failed to be cool enough'); + }); }); diff --git a/packages/svelte/src/structured-object.svelte.test.ts b/packages/svelte/src/structured-object.svelte.test.ts index 7922ea055daf..35204383aa6e 100644 --- a/packages/svelte/src/structured-object.svelte.test.ts +++ b/packages/svelte/src/structured-object.svelte.test.ts @@ -1,12 +1,16 @@ import { - describeWithTestServer, - withTestServer, + createTestServer, + TestResponseController, } from '@ai-sdk/provider-utils/test'; import { render } from '@testing-library/svelte'; import { z } from 'zod'; import { StructuredObject } from './structured-object.svelte.js'; import StructuredObjectSynchronization from './tests/structured-object-synchronization.svelte'; +const server = createTestServer({ + '/api/object': {}, +}); + describe('text stream', () => { let structuredObject: StructuredObject<{ content: string }>; @@ -17,281 +21,242 @@ describe('text stream', () => { }); }); - describeWithTestServer( - 'when the API returns "Hello, world!"', - { - url: 'api/object', - type: 'stream-values', - content: ['{ ', '"content": "Hello, ', 'world', '!"', ' }'], - }, - ({ call }) => { - beforeEach(async () => { - await structuredObject.submit('test-input'); - }); + describe('when the API returns "Hello, world!"', () => { + beforeEach(async () => { + server.urls['/api/object'].response = { + type: 'stream-chunks', + chunks: ['{ ', '"content": "Hello, ', 'world', '!"', ' }'], + }; + await structuredObject.submit('test-input'); + }); - it('should render the stream', () => { - expect(structuredObject.object).toEqual({ content: 'Hello, world!' }); - }); + it('should render the stream', () => { + expect(structuredObject.object).toEqual({ content: 'Hello, world!' 
}); + }); - it('should send the correct input to the API', async () => { - expect(await call(0).getRequestBodyJson()).toBe('test-input'); - }); + it('should send the correct input to the API', async () => { + expect(await server.calls[0].requestBody).toBe('test-input'); + }); - it('should not have an error', () => { - expect(structuredObject.error).toBeUndefined(); - }); - }, - ); + it('should not have an error', () => { + expect(structuredObject.error).toBeUndefined(); + }); + }); describe('loading', () => { - it( - 'should be true while loading', - withTestServer( - { url: '/api/object', type: 'controlled-stream' }, - async ({ streamController }) => { - streamController.enqueue('{"content": '); - - const submitOperation = structuredObject.submit('test-input'); - - await vi.waitFor(() => { - expect(structuredObject.loading).toBe(true); - }); - - streamController.enqueue('"Hello, world!"}'); - streamController.close(); - await submitOperation; - - expect(structuredObject.loading).toBe(false); - }, - ), - ); + it('should be true while loading', async () => { + const controller = new TestResponseController(); + server.urls['/api/object'].response = { + type: 'controlled-stream', + controller, + }; + + controller.write('{"content": '); + const submitOperation = structuredObject.submit('test-input'); + + await vi.waitFor(() => { + expect(structuredObject.loading).toBe(true); + }); + + controller.write('"Hello, world!"}'); + controller.close(); + await submitOperation; + + expect(structuredObject.loading).toBe(false); + }); }); describe('stop', () => { - it( - 'should abort the stream and not consume any more data', - withTestServer( - { url: '/api/object', type: 'controlled-stream' }, - async ({ streamController }) => { - streamController.enqueue('{"content": "h'); - - const submitOperation = structuredObject.submit('test-input'); - - await vi.waitFor(() => { - expect(structuredObject.loading).toBe(true); - expect(structuredObject.object).toStrictEqual({ - content: 'h', - }); - }); - - structuredObject.stop(); - - await vi.waitFor(() => { - expect(structuredObject.loading).toBe(false); - }); - - streamController.enqueue('ello, world!"}'); - streamController.close(); - await submitOperation; - - expect(structuredObject.loading).toBe(false); - expect(structuredObject.object).toStrictEqual({ - content: 'h', - }); - }, - ), - ); + it('should abort the stream and not consume any more data', async () => { + const controller = new TestResponseController(); + server.urls['/api/object'].response = { + type: 'controlled-stream', + controller, + }; + + controller.write('{"content": "h'); + const submitOperation = structuredObject.submit('test-input'); + + await vi.waitFor(() => { + expect(structuredObject.loading).toBe(true); + expect(structuredObject.object).toStrictEqual({ + content: 'h', + }); + }); + + structuredObject.stop(); + + await vi.waitFor(() => { + expect(structuredObject.loading).toBe(false); + }); + + controller.write('ello, world!"}'); + controller.close(); + await submitOperation; + + expect(structuredObject.loading).toBe(false); + expect(structuredObject.object).toStrictEqual({ + content: 'h', + }); + }); }); describe('when the API returns a 404', () => { - it( - 'should produce the correct error state', - withTestServer( - { - url: '/api/object', - type: 'error', - status: 404, - content: 'Not found', - }, - async () => { - await structuredObject.submit('test-input'); - expect(structuredObject.error).toBeInstanceOf(Error); - expect(structuredObject.error?.message).toBe('Not found'); - 
expect(structuredObject.loading).toBe(false); - }, - ), - ); + it('should produce the correct error state', async () => { + server.urls['/api/object'].response = { + type: 'error', + status: 404, + body: 'Not found', + }; + + await structuredObject.submit('test-input'); + expect(structuredObject.error).toBeInstanceOf(Error); + expect(structuredObject.error?.message).toBe('Not found'); + expect(structuredObject.loading).toBe(false); + }); }); describe('onFinish', () => { - it( - 'should be called with an object when the stream finishes and the object matches the schema', - withTestServer( - { - url: '/api/object', - type: 'stream-values', - content: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], - }, - async () => { - const onFinish = vi.fn(); - const structuredObjectWithOnFinish = new StructuredObject({ - api: '/api/object', - schema: z.object({ content: z.string() }), - onFinish, - }); - await structuredObjectWithOnFinish.submit('test-input'); - - expect(onFinish).toHaveBeenCalledExactlyOnceWith({ - object: { content: 'Hello, world!' }, - error: undefined, - }); - }, - ), - ); - - it( - 'should be called with an error when the stream finishes and the object does not match the schema', - withTestServer( - { - url: '/api/object', - type: 'stream-values', - content: ['{ ', '"content-wrong": "Hello, ', 'world', '!"', '}'], - }, - async () => { - const onFinish = vi.fn(); - const structuredObjectWithOnFinish = new StructuredObject({ - api: '/api/object', - schema: z.object({ content: z.string() }), - onFinish, - }); - await structuredObjectWithOnFinish.submit('test-input'); - - expect(onFinish).toHaveBeenCalledExactlyOnceWith({ - object: undefined, - error: expect.any(Error), - }); - }, - ), - ); + it('should be called with an object when the stream finishes and the object matches the schema', async () => { + server.urls['/api/object'].response = { + type: 'stream-chunks', + chunks: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], + }; + + const onFinish = vi.fn(); + const structuredObjectWithOnFinish = new StructuredObject({ + api: '/api/object', + schema: z.object({ content: z.string() }), + onFinish, + }); + await structuredObjectWithOnFinish.submit('test-input'); + + expect(onFinish).toHaveBeenCalledExactlyOnceWith({ + object: { content: 'Hello, world!' 
}, + error: undefined, + }); + }); + + it('should be called with an error when the stream finishes and the object does not match the schema', async () => { + server.urls['/api/object'].response = { + type: 'stream-chunks', + chunks: ['{ ', '"content-wrong": "Hello, ', 'world', '!"', '}'], + }; + + const onFinish = vi.fn(); + const structuredObjectWithOnFinish = new StructuredObject({ + api: '/api/object', + schema: z.object({ content: z.string() }), + onFinish, + }); + await structuredObjectWithOnFinish.submit('test-input'); + + expect(onFinish).toHaveBeenCalledExactlyOnceWith({ + object: undefined, + error: expect.any(Error), + }); + }); }); - it( - 'should send custom headers', - withTestServer( - { - url: '/api/object', - type: 'stream-values', - content: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], + it('should send custom headers', async () => { + server.urls['/api/object'].response = { + type: 'stream-chunks', + chunks: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], + }; + + const structuredObjectWithCustomHeaders = new StructuredObject({ + api: '/api/object', + schema: z.object({ content: z.string() }), + headers: { + Authorization: 'Bearer TEST_TOKEN', + 'X-Custom-Header': 'CustomValue', }, - async ({ call }) => { - const structuredObjectWithCustomHeaders = new StructuredObject({ - api: '/api/object', - schema: z.object({ content: z.string() }), - headers: { - Authorization: 'Bearer TEST_TOKEN', - 'X-Custom-Header': 'CustomValue', - }, - }); + }); - await structuredObjectWithCustomHeaders.submit('test-input'); + await structuredObjectWithCustomHeaders.submit('test-input'); - expect(call(0).getRequestHeaders()).toStrictEqual({ - 'content-type': 'application/json', - authorization: 'Bearer TEST_TOKEN', - 'x-custom-header': 'CustomValue', - }); - }, - ), - ); - - it( - 'should send custom credentials', - withTestServer( - { - url: '/api/object', - type: 'stream-values', - content: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], - }, - async ({ call }) => { - const structuredObjectWithCustomCredentials = new StructuredObject({ - api: '/api/object', - schema: z.object({ content: z.string() }), - credentials: 'include', - }); + expect(server.calls[0].requestHeaders).toStrictEqual({ + 'content-type': 'application/json', + authorization: 'Bearer TEST_TOKEN', + 'x-custom-header': 'CustomValue', + }); + }); - await structuredObjectWithCustomCredentials.submit('test-input'); + it('should send custom credentials', async () => { + server.urls['/api/object'].response = { + type: 'stream-chunks', + chunks: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], + }; - expect(call(0).getRequestCredentials()).toBe('include'); - }, - ), - ); + const structuredObjectWithCustomCredentials = new StructuredObject({ + api: '/api/object', + schema: z.object({ content: z.string() }), + credentials: 'include', + }); + + await structuredObjectWithCustomCredentials.submit('test-input'); + + expect(server.calls[0].requestCredentials).toBe('include'); + }); }); describe('synchronization', () => { - it( - 'correctly synchronizes content between hook instances', - withTestServer( - { - type: 'stream-values', - url: '/api/object', - content: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], - }, - async () => { - const { - component: { object1, object2 }, - } = render(StructuredObjectSynchronization, { - id: crypto.randomUUID(), - api: '/api/object', - schema: z.object({ content: z.string() }), - }); + it('correctly synchronizes content between hook instances', async () => { + 
server.urls['/api/object'].response = { + type: 'stream-chunks', + chunks: ['{ ', '"content": "Hello, ', 'world', '!"', '}'], + }; + + const { + component: { object1, object2 }, + } = render(StructuredObjectSynchronization, { + id: crypto.randomUUID(), + api: '/api/object', + schema: z.object({ content: z.string() }), + }); - await object1.submit('hi'); + await object1.submit('hi'); - expect(object1.object).toStrictEqual({ content: 'Hello, world!' }); - expect(object2.object).toStrictEqual(object1.object); - }, - ), - ); + expect(object1.object).toStrictEqual({ content: 'Hello, world!' }); + expect(object2.object).toStrictEqual(object1.object); + }); - it( - 'correctly synchronizes loading and error state between hook instances', - withTestServer( - { - type: 'controlled-stream', - url: '/api/object', - }, - async ({ streamController }) => { - const { - component: { object1, object2 }, - } = render(StructuredObjectSynchronization, { - id: crypto.randomUUID(), - api: '/api/object', - schema: z.object({ content: z.string() }), - }); + it('correctly synchronizes loading and error state between hook instances', async () => { + const controller = new TestResponseController(); + server.urls['/api/object'].response = { + type: 'controlled-stream', + controller, + }; + + const { + component: { object1, object2 }, + } = render(StructuredObjectSynchronization, { + id: crypto.randomUUID(), + api: '/api/object', + schema: z.object({ content: z.string() }), + }); - const submitOperation = object1.submit('hi'); + const submitOperation = object1.submit('hi'); - await vi.waitFor(() => { - expect(object1.loading).toBe(true); - expect(object2.loading).toBe(true); - }); + await vi.waitFor(() => { + expect(object1.loading).toBe(true); + expect(object2.loading).toBe(true); + }); - streamController.enqueue('{ "content": "Hello"'); - await vi.waitFor(() => { - expect(object1.object).toStrictEqual({ content: 'Hello' }); - expect(object2.object).toStrictEqual(object1.object); - }); + controller.write('{ "content": "Hello"'); + await vi.waitFor(() => { + expect(object1.object).toStrictEqual({ content: 'Hello' }); + expect(object2.object).toStrictEqual(object1.object); + }); - streamController.error(new Error('Failed to be cool enough')); - await submitOperation; + controller.error(new Error('Failed to be cool enough')); + await submitOperation; - expect(object1.loading).toBe(false); - expect(object2.loading).toBe(false); - expect(object1.error).toBeInstanceOf(Error); - expect(object1.error?.message).toBe('Failed to be cool enough'); - expect(object2.error).toBeInstanceOf(Error); - expect(object2.error?.message).toBe('Failed to be cool enough'); - }, - ), - ); + expect(object1.loading).toBe(false); + expect(object2.loading).toBe(false); + expect(object1.error).toBeInstanceOf(Error); + expect(object1.error?.message).toBe('Failed to be cool enough'); + expect(object2.error).toBeInstanceOf(Error); + expect(object2.error?.message).toBe('Failed to be cool enough'); + }); }); From 63d791d2354550f10ee54f5a664f52dae4fb34b4 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Thu, 3 Apr 2025 16:38:18 +0200 Subject: [PATCH 0016/1307] chore (utils): remove unused test helpers (#5538) --- .changeset/seven-pens-itch.md | 6 + packages/provider-utils/src/test/index.ts | 1 - .../provider-utils/src/test/test-server.ts | 340 ++++++++---------- .../src/test/unified-test-server.ts | 249 ------------- packages/ui-utils/package.json | 6 - .../src/process-chat-response.test.ts | 12 +- .../src/test/create-data-protocol-stream.ts | 10 - 
packages/ui-utils/src/test/index.ts | 1 - packages/ui-utils/src/test/mock-fetch.ts | 135 ------- packages/ui-utils/tsup.config.ts | 8 - 10 files changed, 172 insertions(+), 596 deletions(-) create mode 100644 .changeset/seven-pens-itch.md delete mode 100644 packages/provider-utils/src/test/unified-test-server.ts delete mode 100644 packages/ui-utils/src/test/create-data-protocol-stream.ts delete mode 100644 packages/ui-utils/src/test/index.ts delete mode 100644 packages/ui-utils/src/test/mock-fetch.ts diff --git a/.changeset/seven-pens-itch.md b/.changeset/seven-pens-itch.md new file mode 100644 index 000000000000..d0c9f2a60649 --- /dev/null +++ b/.changeset/seven-pens-itch.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/provider-utils': patch +'@ai-sdk/ui-utils': patch +--- + +chore (utils): remove unused test helpers diff --git a/packages/provider-utils/src/test/index.ts b/packages/provider-utils/src/test/index.ts index 4fbea1b76ffc..97dabecd2a09 100644 --- a/packages/provider-utils/src/test/index.ts +++ b/packages/provider-utils/src/test/index.ts @@ -5,4 +5,3 @@ export * from './convert-readable-stream-to-array'; export * from './convert-response-stream-to-array'; export * from './mock-id'; export * from './test-server'; -export * from './unified-test-server'; diff --git a/packages/provider-utils/src/test/test-server.ts b/packages/provider-utils/src/test/test-server.ts index 51827c877ac7..b03798379c75 100644 --- a/packages/provider-utils/src/test/test-server.ts +++ b/packages/provider-utils/src/test/test-server.ts @@ -1,47 +1,72 @@ -import { HttpResponse, JsonBodyType, http } from 'msw'; +import { http, HttpResponse, JsonBodyType } from 'msw'; import { setupServer } from 'msw/node'; import { convertArrayToReadableStream } from './convert-array-to-readable-stream'; -export type TestServerJsonBodyType = JsonBodyType; - -export type TestServerResponse = { - url: string; - headers?: Record; -} & ( +export type UrlResponse = | { type: 'json-value'; - content: TestServerJsonBodyType; + headers?: Record; + body: JsonBodyType; } | { - type: 'stream-values'; - content: Array; + type: 'stream-chunks'; + headers?: Record; + chunks: Array; } | { - type: 'controlled-stream'; - id?: string; + type: 'binary'; + headers?: Record; + body: Buffer; + } + | { + type: 'empty'; + headers?: Record; + status?: number; } | { type: 'error'; - status: number; - content?: string; + headers?: Record; + status?: number; + body?: string; + } + | { + type: 'controlled-stream'; + headers?: Record; + controller: TestResponseController; } -); + | undefined; + +type UrlResponseParameter = + | UrlResponse + | UrlResponse[] + | ((options: { callNumber: number }) => UrlResponse); + +export type UrlHandler = { + response: UrlResponseParameter; +}; + +export type UrlHandlers< + URLS extends { + [url: string]: { + response?: UrlResponseParameter; + }; + }, +> = { + [url in keyof URLS]: UrlHandler; +}; class TestServerCall { constructor(private request: Request) {} - async getRequestBodyJson() { - expect(this.request).toBeDefined(); - return JSON.parse(await this.request!.text()); + get requestBody() { + return this.request!.text().then(JSON.parse); } - getRequestCredentials() { - expect(this.request).toBeDefined(); + get requestCredentials() { return this.request!.credentials; } - getRequestHeaders() { - expect(this.request).toBeDefined(); + get requestHeaders() { const requestHeaders = this.request!.headers; // convert headers to object for easier comparison @@ -53,82 +78,56 @@ class TestServerCall { return headersObject; } - 
getRequestUrlSearchParams() { - expect(this.request).toBeDefined(); + get requestUrlSearchParams() { return new URL(this.request!.url).searchParams; } -} -function createServer({ - responses, - pushCall, - pushController, -}: { - responses: Array | TestServerResponse; - pushCall: (call: TestServerCall) => void; - pushController: ( - id: string, - controller: () => ReadableStreamDefaultController, - ) => void; -}) { - // group responses by url - const responsesArray = Array.isArray(responses) ? responses : [responses]; - const responsesByUrl = responsesArray.reduce( - (responsesByUrl, response) => { - if (!responsesByUrl[response.url]) { - responsesByUrl[response.url] = []; - } - responsesByUrl[response.url].push(response); - return responsesByUrl; - }, - {} as Record>, - ); + get requestUrl() { + return this.request!.url; + } - // create stream/streamController pairs for controlled-stream responses - const streams = {} as Record>; - responsesArray - .filter( - ( - response, - ): response is TestServerResponse & { type: 'controlled-stream' } => - response.type === 'controlled-stream', - ) - .forEach(response => { - let streamController: ReadableStreamDefaultController; - - const stream = new ReadableStream({ - start(controller) { - streamController = controller; - }, - }); + get requestMethod() { + return this.request!.method; + } +} - pushController(response.id ?? '', () => streamController); - streams[response.id ?? ''] = stream; - }); +export function createTestServer< + URLS extends { + [url: string]: { + response?: UrlResponseParameter; + }; + }, +>( + routes: URLS, +): { + urls: UrlHandlers; + calls: TestServerCall[]; +} { + const originalRoutes = structuredClone(routes); // deep copy + + const mswServer = setupServer( + ...Object.entries(routes).map(([url, handler]) => { + return http.all(url, ({ request }) => { + const callNumber = calls.length; + + calls.push(new TestServerCall(request)); - // keep track of url invocation counts: - // TODO bug needs reset after each test - const urlInvocationCounts = Object.fromEntries( - Object.entries(responsesByUrl).map(([url]) => [url, 0]), - ); + const response = + typeof handler.response === 'function' + ? handler.response({ callNumber }) + : Array.isArray(handler.response) + ? handler.response[callNumber] + : handler.response; + + if (response === undefined) { + return HttpResponse.json({ error: 'Not Found' }, { status: 404 }); + } - return setupServer( - ...Object.entries(responsesByUrl).map(([url, responses]) => { - return http.post(url, ({ request }) => { - pushCall(new TestServerCall(request)); + const handlerType = response.type; - const invocationCount = urlInvocationCounts[url]++; - const response = - responses[ - // TODO bug needs to be >= - invocationCount > responses.length - ? responses.length - 1 - : invocationCount - ]; - - switch (response.type) { + switch (handlerType) { case 'json-value': - return HttpResponse.json(response.content, { + return HttpResponse.json(response.body, { status: 200, headers: { 'Content-Type': 'application/json', @@ -136,9 +135,9 @@ function createServer({ }, }); - case 'stream-values': + case 'stream-chunks': return new HttpResponse( - convertArrayToReadableStream(response.content).pipeThrough( + convertArrayToReadableStream(response.chunks).pipeThrough( new TextEncoderStream(), ), { @@ -154,7 +153,7 @@ function createServer({ case 'controlled-stream': { return new HttpResponse( - streams[response.id ?? 
''].pipeThrough(new TextEncoderStream()), + response.controller.stream.pipeThrough(new TextEncoderStream()), { status: 200, headers: { @@ -167,111 +166,84 @@ function createServer({ ); } + case 'binary': { + return HttpResponse.arrayBuffer(response.body, { + status: 200, + headers: response.headers, + }); + } + case 'error': - return HttpResponse.text(response.content ?? 'Error', { - status: response.status, - headers: { - ...response.headers, - }, + return HttpResponse.text(response.body ?? 'Error', { + status: response.status ?? 500, + headers: response.headers, + }); + + case 'empty': + return new HttpResponse(null, { + status: response.status ?? 200, }); + + default: { + const _exhaustiveCheck: never = handlerType; + throw new Error(`Unknown response type: ${_exhaustiveCheck}`); + } } }); }), ); -} -export function withTestServer( - responses: Array | TestServerResponse, - testFunction: (options: { - calls: () => Array; - call: (index: number) => TestServerCall; - getStreamController: ( - id: string, - ) => ReadableStreamDefaultController; - streamController: ReadableStreamDefaultController; - }) => Promise, -) { - return async () => { - const calls: Array = []; - const controllers: Record< - string, - () => ReadableStreamDefaultController - > = {}; - const server = createServer({ - responses, - pushCall: call => calls.push(call), - pushController: (id, controller) => { - controllers[id] = controller; - }, + let calls: TestServerCall[] = []; + + beforeAll(() => { + mswServer.listen(); + }); + + beforeEach(() => { + mswServer.resetHandlers(); + + // set the responses back to the original values + Object.entries(originalRoutes).forEach(([url, handler]) => { + routes[url].response = handler.response; }); - try { - server.listen(); - - await testFunction({ - calls: () => calls, - call: (index: number) => calls[index], - getStreamController: (id: string) => { - return controllers[id](); - }, - get streamController() { - return controllers[''](); - }, - }); - } finally { - server.close(); - } + calls = []; + }); + + afterAll(() => { + mswServer.close(); + }); + + return { + urls: routes as UrlHandlers, + get calls() { + return calls; + }, }; } -export function describeWithTestServer( - description: string, - responses: Array | TestServerResponse, - testFunction: (options: { - calls: () => Array; - call: (index: number) => TestServerCall; - getStreamController: ( - id: string, - ) => ReadableStreamDefaultController; - streamController: ReadableStreamDefaultController; - }) => void, -) { - describe(description, () => { - let calls: Array; - let controllers: Record< - string, - () => ReadableStreamDefaultController - >; - let server: ReturnType; - - beforeAll(() => { - server = createServer({ - responses, - pushCall: call => calls.push(call), - pushController: (id, controller) => { - controllers[id] = controller; - }, - }); - server.listen(); - }); +export class TestResponseController { + private readonly transformStream: TransformStream; + private readonly writer: WritableStreamDefaultWriter; - beforeEach(() => { - calls = []; - controllers = {}; - server.resetHandlers(); - }); + constructor() { + this.transformStream = new TransformStream(); + this.writer = this.transformStream.writable.getWriter(); + } - afterAll(() => { - server.close(); - }); + get stream(): ReadableStream { + return this.transformStream.readable; + } - testFunction({ - calls: () => calls, - call: (index: number) => calls[index], - getStreamController: (id: string) => controllers[id](), - get streamController() { - return 
controllers[''](); - }, - }); - }); + async write(chunk: string): Promise { + await this.writer.write(chunk); + } + + async error(error: Error): Promise { + await this.writer.abort(error); + } + + async close(): Promise { + await this.writer.close(); + } } diff --git a/packages/provider-utils/src/test/unified-test-server.ts b/packages/provider-utils/src/test/unified-test-server.ts deleted file mode 100644 index b03798379c75..000000000000 --- a/packages/provider-utils/src/test/unified-test-server.ts +++ /dev/null @@ -1,249 +0,0 @@ -import { http, HttpResponse, JsonBodyType } from 'msw'; -import { setupServer } from 'msw/node'; -import { convertArrayToReadableStream } from './convert-array-to-readable-stream'; - -export type UrlResponse = - | { - type: 'json-value'; - headers?: Record; - body: JsonBodyType; - } - | { - type: 'stream-chunks'; - headers?: Record; - chunks: Array; - } - | { - type: 'binary'; - headers?: Record; - body: Buffer; - } - | { - type: 'empty'; - headers?: Record; - status?: number; - } - | { - type: 'error'; - headers?: Record; - status?: number; - body?: string; - } - | { - type: 'controlled-stream'; - headers?: Record; - controller: TestResponseController; - } - | undefined; - -type UrlResponseParameter = - | UrlResponse - | UrlResponse[] - | ((options: { callNumber: number }) => UrlResponse); - -export type UrlHandler = { - response: UrlResponseParameter; -}; - -export type UrlHandlers< - URLS extends { - [url: string]: { - response?: UrlResponseParameter; - }; - }, -> = { - [url in keyof URLS]: UrlHandler; -}; - -class TestServerCall { - constructor(private request: Request) {} - - get requestBody() { - return this.request!.text().then(JSON.parse); - } - - get requestCredentials() { - return this.request!.credentials; - } - - get requestHeaders() { - const requestHeaders = this.request!.headers; - - // convert headers to object for easier comparison - const headersObject: Record = {}; - requestHeaders.forEach((value, key) => { - headersObject[key] = value; - }); - - return headersObject; - } - - get requestUrlSearchParams() { - return new URL(this.request!.url).searchParams; - } - - get requestUrl() { - return this.request!.url; - } - - get requestMethod() { - return this.request!.method; - } -} - -export function createTestServer< - URLS extends { - [url: string]: { - response?: UrlResponseParameter; - }; - }, ->( - routes: URLS, -): { - urls: UrlHandlers; - calls: TestServerCall[]; -} { - const originalRoutes = structuredClone(routes); // deep copy - - const mswServer = setupServer( - ...Object.entries(routes).map(([url, handler]) => { - return http.all(url, ({ request }) => { - const callNumber = calls.length; - - calls.push(new TestServerCall(request)); - - const response = - typeof handler.response === 'function' - ? handler.response({ callNumber }) - : Array.isArray(handler.response) - ? 
handler.response[callNumber] - : handler.response; - - if (response === undefined) { - return HttpResponse.json({ error: 'Not Found' }, { status: 404 }); - } - - const handlerType = response.type; - - switch (handlerType) { - case 'json-value': - return HttpResponse.json(response.body, { - status: 200, - headers: { - 'Content-Type': 'application/json', - ...response.headers, - }, - }); - - case 'stream-chunks': - return new HttpResponse( - convertArrayToReadableStream(response.chunks).pipeThrough( - new TextEncoderStream(), - ), - { - status: 200, - headers: { - 'Content-Type': 'text/event-stream', - 'Cache-Control': 'no-cache', - Connection: 'keep-alive', - ...response.headers, - }, - }, - ); - - case 'controlled-stream': { - return new HttpResponse( - response.controller.stream.pipeThrough(new TextEncoderStream()), - { - status: 200, - headers: { - 'Content-Type': 'text/event-stream', - 'Cache-Control': 'no-cache', - Connection: 'keep-alive', - ...response.headers, - }, - }, - ); - } - - case 'binary': { - return HttpResponse.arrayBuffer(response.body, { - status: 200, - headers: response.headers, - }); - } - - case 'error': - return HttpResponse.text(response.body ?? 'Error', { - status: response.status ?? 500, - headers: response.headers, - }); - - case 'empty': - return new HttpResponse(null, { - status: response.status ?? 200, - }); - - default: { - const _exhaustiveCheck: never = handlerType; - throw new Error(`Unknown response type: ${_exhaustiveCheck}`); - } - } - }); - }), - ); - - let calls: TestServerCall[] = []; - - beforeAll(() => { - mswServer.listen(); - }); - - beforeEach(() => { - mswServer.resetHandlers(); - - // set the responses back to the original values - Object.entries(originalRoutes).forEach(([url, handler]) => { - routes[url].response = handler.response; - }); - - calls = []; - }); - - afterAll(() => { - mswServer.close(); - }); - - return { - urls: routes as UrlHandlers, - get calls() { - return calls; - }, - }; -} - -export class TestResponseController { - private readonly transformStream: TransformStream; - private readonly writer: WritableStreamDefaultWriter; - - constructor() { - this.transformStream = new TransformStream(); - this.writer = this.transformStream.writable.getWriter(); - } - - get stream(): ReadableStream { - return this.transformStream.readable; - } - - async write(chunk: string): Promise { - await this.writer.write(chunk); - } - - async error(error: Error): Promise { - await this.writer.abort(error); - } - - async close(): Promise { - await this.writer.close(); - } -} diff --git a/packages/ui-utils/package.json b/packages/ui-utils/package.json index 032a245ed1e7..4579503ea454 100644 --- a/packages/ui-utils/package.json +++ b/packages/ui-utils/package.json @@ -28,12 +28,6 @@ "types": "./dist/index.d.ts", "import": "./dist/index.mjs", "require": "./dist/index.js" - }, - "./test": { - "types": "./test/dist/index.d.ts", - "import": "./test/dist/index.mjs", - "module": "./test/dist/index.mjs", - "require": "./test/dist/index.js" } }, "dependencies": { diff --git a/packages/ui-utils/src/process-chat-response.test.ts b/packages/ui-utils/src/process-chat-response.test.ts index a1ea3e93a7d0..1c191828d62f 100644 --- a/packages/ui-utils/src/process-chat-response.test.ts +++ b/packages/ui-utils/src/process-chat-response.test.ts @@ -1,11 +1,19 @@ import { LanguageModelV1FinishReason } from '@ai-sdk/provider'; +import { convertArrayToReadableStream } from '@ai-sdk/provider-utils/test'; import { describe, expect, it, vi } from 'vitest'; -import { 
formatDataStreamPart } from './data-stream-parts'; +import { DataStreamString, formatDataStreamPart } from './data-stream-parts'; import { LanguageModelUsage } from './duplicated/usage'; import { processChatResponse } from './process-chat-response'; -import { createDataProtocolStream } from './test/create-data-protocol-stream'; import { JSONValue, Message } from './types'; +function createDataProtocolStream( + dataPartTexts: DataStreamString[], +): ReadableStream { + return convertArrayToReadableStream(dataPartTexts).pipeThrough( + new TextEncoderStream(), + ); +} + let updateCalls: Array<{ message: Message; data: JSONValue[] | undefined; diff --git a/packages/ui-utils/src/test/create-data-protocol-stream.ts b/packages/ui-utils/src/test/create-data-protocol-stream.ts deleted file mode 100644 index f58aaec05906..000000000000 --- a/packages/ui-utils/src/test/create-data-protocol-stream.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { convertArrayToReadableStream } from '@ai-sdk/provider-utils/test'; -import { DataStreamString } from '../data-stream-parts'; - -export function createDataProtocolStream( - dataPartTexts: DataStreamString[], -): ReadableStream { - return convertArrayToReadableStream(dataPartTexts).pipeThrough( - new TextEncoderStream(), - ); -} diff --git a/packages/ui-utils/src/test/index.ts b/packages/ui-utils/src/test/index.ts deleted file mode 100644 index 1126b219392e..000000000000 --- a/packages/ui-utils/src/test/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './mock-fetch'; diff --git a/packages/ui-utils/src/test/mock-fetch.ts b/packages/ui-utils/src/test/mock-fetch.ts deleted file mode 100644 index b1c85de9b53a..000000000000 --- a/packages/ui-utils/src/test/mock-fetch.ts +++ /dev/null @@ -1,135 +0,0 @@ -import { fail } from 'node:assert'; -import { vi } from 'vitest'; - -export function mockFetchTextStream({ - url, - chunks, -}: { - url: string; - chunks: string[]; -}) { - vi.spyOn(global, 'fetch').mockImplementation(async () => { - function* generateChunks() { - for (const chunk of chunks) { - yield new TextEncoder().encode(chunk); - } - } - - const chunkGenerator = generateChunks(); - - return { - url, - ok: true, - status: 200, - bodyUsed: false, - headers: new Map() as any as Headers, - body: { - getReader() { - return { - read() { - return Promise.resolve(chunkGenerator.next()); - }, - releaseLock() {}, - cancel() {}, - }; - }, - }, - } as unknown as Response; - }); -} - -export function mockFetchDataStream({ - url, - chunks, - maxCalls, -}: { - url: string; - chunks: string[]; - maxCalls?: number; -}) { - async function* generateChunks() { - const encoder = new TextEncoder(); - for (const chunk of chunks) { - yield encoder.encode(chunk); - } - } - - return mockFetchDataStreamWithGenerator({ - url, - chunkGenerator: generateChunks(), - maxCalls, - }); -} - -export function mockFetchDataStreamWithGenerator({ - url, - chunkGenerator, - maxCalls, -}: { - url: string; - chunkGenerator: AsyncGenerator; - maxCalls?: number; -}) { - let requestBodyResolve: ((value?: unknown) => void) | undefined; - const requestBodyPromise = new Promise(resolve => { - requestBodyResolve = resolve; - }); - - let callCount = 0; - - vi.spyOn(global, 'fetch').mockImplementation(async (url, init) => { - if (maxCalls !== undefined && ++callCount >= maxCalls) { - throw new Error('Too many calls'); - } - - requestBodyResolve?.(init!.body as string); - - return { - url, - ok: true, - status: 200, - bodyUsed: false, - body: new ReadableStream({ - async start(controller) { - for await (const chunk of 
chunkGenerator) { - controller.enqueue(chunk); - } - controller.close(); - }, - }), - } as Response; - }); - - return { - requestBody: requestBodyPromise, - }; -} - -export function mockFetchError({ - statusCode, - errorMessage, -}: { - statusCode: number; - errorMessage: string; -}) { - vi.spyOn(global, 'fetch').mockImplementation(async () => { - return { - url: 'https://example.com/api/chat', - ok: false, - status: statusCode, - bodyUsed: false, - body: { - getReader() { - return { - read() { - return Promise.resolve(errorMessage); - }, - releaseLock() {}, - cancel() {}, - }; - }, - }, - text: () => Promise.resolve(errorMessage), - } as unknown as Response; - }); -} diff --git a/packages/ui-utils/tsup.config.ts b/packages/ui-utils/tsup.config.ts index ce3c735ccbb5..3f92041b987c 100644 --- a/packages/ui-utils/tsup.config.ts +++ b/packages/ui-utils/tsup.config.ts @@ -7,12 +7,4 @@ export default defineConfig([ dts: true, sourcemap: true, }, - { - entry: ['src/test/index.ts'], - outDir: 'test/dist', - format: ['cjs', 'esm'], - external: ['vitest'], - dts: true, - sourcemap: true, - }, ]); From 5405b94a2ffc27bea8b316f13598df96a8e07370 Mon Sep 17 00:00:00 2001 From: Robert Soriano Date: Thu, 3 Apr 2025 07:17:49 -0700 Subject: [PATCH 0017/1307] docs: Use new `create-nuxt` tool when starting Nuxt projects (#5175) --- content/docs/02-getting-started/05-nuxt.mdx | 2 +- examples/nuxt-openai/README.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/content/docs/02-getting-started/05-nuxt.mdx b/content/docs/02-getting-started/05-nuxt.mdx index 72ac0e1b7475..4d1818782cfd 100644 --- a/content/docs/02-getting-started/05-nuxt.mdx +++ b/content/docs/02-getting-started/05-nuxt.mdx @@ -24,7 +24,7 @@ If you haven't obtained your OpenAI API key, you can do so by [signing up](https Start by creating a new Nuxt application. This command will create a new directory named `my-ai-app` and set up a basic Nuxt application inside it. 
- + Navigate to the newly created directory: diff --git a/examples/nuxt-openai/README.md b/examples/nuxt-openai/README.md index cbadb669da44..bb847c0d912f 100644 --- a/examples/nuxt-openai/README.md +++ b/examples/nuxt-openai/README.md @@ -10,10 +10,10 @@ Deploy the example using [Vercel](https://vercel.com?utm_source=github&utm_mediu ## How to use -Execute `nuxi` to bootstrap the example: +Execute `create-nuxt` to bootstrap the example: ```bash -npx nuxi@latest init -t github:vercel/ai/examples/nuxt-openai nuxt-openai +npx create-nuxt -t github:vercel/ai/examples/nuxt-openai nuxt-openai ``` To run the example locally you need to: From e9688c01f557936960cf768562a7ad78d9ace03b Mon Sep 17 00:00:00 2001 From: colegottdank Date: Fri, 4 Apr 2025 00:01:59 -0700 Subject: [PATCH 0018/1307] feat (docs): Helicone observability (#5472) --- .../providers/05-observability/helicone.mdx | 223 ++++++++++++++++++ content/providers/05-observability/index.mdx | 1 + 2 files changed, 224 insertions(+) create mode 100644 content/providers/05-observability/helicone.mdx diff --git a/content/providers/05-observability/helicone.mdx b/content/providers/05-observability/helicone.mdx new file mode 100644 index 000000000000..cbf02779bfe0 --- /dev/null +++ b/content/providers/05-observability/helicone.mdx @@ -0,0 +1,223 @@ +--- +title: Helicone +description: Monitor and optimize your AI SDK applications with minimal configuration using Helicone +--- + +# Helicone Observability + +[Helicone](https://helicone.ai) is an open-source LLM observability platform that helps you monitor, analyze, and optimize your AI applications through a proxy-based approach, requiring minimal setup and zero additional dependencies. + +## Setup + +Setting up Helicone: + +1. Create a Helicone account at [helicone.ai](https://helicone.ai) +2. Set your API key as an environment variable: + ```bash filename=".env" + HELICONE_API_KEY=your-helicone-api-key + ``` +3. Update your model provider configuration to use Helicone's proxy: + + ```javascript + import { createOpenAI } from '@ai-sdk/openai'; + + const openai = createOpenAI({ + baseURL: 'https://oai.helicone.ai/v1', + headers: { + 'Helicone-Auth': `Bearer ${process.env.HELICONE_API_KEY}`, + }, + }); + + // Use normally with AI SDK + const response = await generateText({ + model: openai('gpt-4o-mini'), + prompt: 'Hello world', + }); + ``` + +That's it! Your requests are now being logged and monitored through Helicone. + +[→ Learn more about getting started with Helicone on AI SDK](https://docs.helicone.ai/getting-started/integration-method/vercelai) + +## Integration Approach + +While other observability solutions require OpenTelemetry instrumentation, Helicone uses a simple proxy approach: + + + + ```javascript + const openai = createOpenAI({ + baseURL: "https://oai.helicone.ai/v1", + headers: { "Helicone-Auth": `Bearer ${process.env.HELICONE_API_KEY}` }, + }); + ``` + + + + ```javascript + // Install multiple packages + // @vercel/otel, @opentelemetry/sdk-node, @opentelemetry/auto-instrumentations-node, etc. 
+ + // Create exporter + const exporter = new OtherProviderExporter({ + projectApiKey: process.env.API_KEY + }); + + // Setup SDK + const sdk = new NodeSDK({ + traceExporter: exporter, + instrumentations: [getNodeAutoInstrumentations()], + resource: new Resource({...}), + }); + + // Start SDK + sdk.start(); + + // Enable telemetry on each request + const response = await generateText({ + model: openai("gpt-4o-mini"), + prompt: "Hello world", + experimental_telemetry: { isEnabled: true } + }); + + // Shutdown SDK to flush traces + await sdk.shutdown(); + ``` + + + +**Characteristics of Helicone's Proxy Approach:** + +- No additional packages required +- Compatible with JavaScript environments +- Minimal code changes to existing implementations +- Supports features such as caching and rate limiting + +[→ Learn more about Helicone's proxy approach](https://docs.helicone.ai/references/proxy-vs-async) + +## Core Features + +### User Tracking + +Monitor how individual users interact with your AI application: + +```javascript +const response = await generateText({ + model: openai('gpt-4o-mini'), + prompt: 'Hello world', + headers: { + 'Helicone-User-Id': 'user@example.com', + }, +}); +``` + +[→ Learn more about User Metrics](https://docs.helicone.ai/features/advanced-usage/user-metrics) + +### Custom Properties + +Add structured metadata to filter and analyze requests: + +```javascript +const response = await generateText({ + model: openai('gpt-4o-mini'), + prompt: 'Translate this text to French', + headers: { + 'Helicone-Property-Feature': 'translation', + 'Helicone-Property-Source': 'mobile-app', + 'Helicone-Property-Language': 'French', + }, +}); +``` + +[→ Learn more about Custom Properties](https://docs.helicone.ai/features/advanced-usage/custom-properties) + +### Session Tracking + +Group related requests into coherent conversations: + +```javascript +const response = await generateText({ + model: openai('gpt-4o-mini'), + prompt: 'Tell me more about that', + headers: { + 'Helicone-Session-Id': 'convo-123', + 'Helicone-Session-Name': 'Travel Planning', + 'Helicone-Session-Path': '/chats/travel', + }, +}); +``` + +[→ Learn more about Sessions](https://docs.helicone.ai/features/sessions) + +## Advanced Configuration + +### Request Caching + +Reduce costs by caching identical requests: + +```javascript +const response = await generateText({ + model: openai('gpt-4o-mini'), + prompt: 'What is the capital of France?', + headers: { + 'Helicone-Cache-Enabled': 'true', + }, +}); +``` + +[→ Learn more about Caching](https://docs.helicone.ai/features/advanced-usage/caching) + +### Rate Limiting + +Control usage by adding a rate limit policy: + +```javascript +const response = await generateText({ + model: openai('gpt-4o-mini'), + prompt: 'Generate creative content', + headers: { + // Allow 10,000 requests per hour + 'Helicone-RateLimit-Policy': '10000;w=3600', + + // Optional: limit by user + 'Helicone-User-Id': 'user@example.com', + }, +}); +``` + +Format: `[quota];w=[time_window];u=[unit];s=[segment]` where: + +- `quota`: Maximum requests allowed in the time window +- `w`: Time window in seconds (minimum 60s) +- `u`: Optional unit - "request" (default) or "cents" +- `s`: Optional segment - "user", custom property, or global (default) + +[→ Learn more about Rate Limiting](https://docs.helicone.ai/features/advanced-usage/custom-rate-limits) + +### LLM Security + +Protect against prompt injection, jailbreaking, and other LLM-specific threats: + +```javascript +const response = await generateText({ + 
model: openai('gpt-4o-mini'), + prompt: userInput, + headers: { + // Basic protection (Prompt Guard model) + 'Helicone-LLM-Security-Enabled': 'true', + + // Optional: Advanced protection (Llama Guard model) + 'Helicone-LLM-Security-Advanced': 'true', + }, +}); +``` + +Protects against multiple attack vectors in 8 languages with minimal latency. Advanced mode adds protection across 14 threat categories. + +[→ Learn more about LLM Security](https://docs.helicone.ai/features/advanced-usage/llm-security) + +## Resources + +- [Helicone Documentation](https://docs.helicone.ai) +- [GitHub Repository](https://github.com/Helicone/helicone) +- [Discord Community](https://discord.com/invite/2TkeWdXNPQ) diff --git a/content/providers/05-observability/index.mdx b/content/providers/05-observability/index.mdx index fa54df2979d7..2f846d3a748d 100644 --- a/content/providers/05-observability/index.mdx +++ b/content/providers/05-observability/index.mdx @@ -8,6 +8,7 @@ description: AI SDK Integration for monitoring and tracing LLM applications Several LLM observability providers offer integrations with the AI SDK telemetry data: - [Braintrust](/providers/observability/braintrust) +- [Helicone](/providers/observability/helicone) - [Traceloop](/providers/observability/traceloop) - [Langfuse](/providers/observability/langfuse) - [LangSmith](/providers/observability/langsmith) From 0b3eb8988dcafc471ff7a86098453a7b10ca8d90 Mon Sep 17 00:00:00 2001 From: Nico Albanese <49612682+nicoalbanese@users.noreply.github.com> Date: Fri, 4 Apr 2025 10:08:00 +0100 Subject: [PATCH 0019/1307] docs: add local caching middleware recipe (#5540) --- .../05-node/80-local-caching-middleware.mdx | 242 ++++++++++++++++++ 1 file changed, 242 insertions(+) create mode 100644 content/cookbook/05-node/80-local-caching-middleware.mdx diff --git a/content/cookbook/05-node/80-local-caching-middleware.mdx b/content/cookbook/05-node/80-local-caching-middleware.mdx new file mode 100644 index 000000000000..e2a95a51287d --- /dev/null +++ b/content/cookbook/05-node/80-local-caching-middleware.mdx @@ -0,0 +1,242 @@ +--- +title: Local Caching Middleware +description: Learn how to create a caching middleware for local development. +tags: ['streaming', 'caching', 'middleware'] +--- + +# Local Caching Middleware + +When developing AI applications, you'll often find yourself repeatedly making the same API calls during development. This can lead to increased costs and slower development cycles. A caching middleware allows you to store responses locally and reuse them when the same inputs are provided. + +This approach is particularly useful in two scenarios: + +1. **Iterating on UI/UX** - When you're focused on styling and user experience, you don't want to regenerate AI responses for every code change. +2. **Working on evals** - When developing evals, you need to repeatedly test the same prompts, but don't need new generations each time. + +## Implementation + +In this implementation, you create a JSON file to store responses. When a request is made, you first check if you have already seen this exact request. If you have, you return the cached response immediately (as a one-off generation or chunks of tokens). If not, you trigger the generation, save the response, and return it. + + + Make sure to add the path of your local cache to your `.gitignore` so you do + not commit it. + + +### How it works + +For regular generations, you store and retrieve complete responses. 
Instead, the streaming implementation captures each token as it arrives, stores the full sequence, and on cache hits uses the SDK's `simulateReadableStream` utility to recreate the token-by-token streaming experience at a controlled speed (defaults to 10ms between chunks). + +This approach gives you the best of both worlds: + +- Instant responses for repeated queries +- Preserved streaming behavior for UI development + +The middleware handles all transformations needed to make cached responses indistinguishable from fresh ones, including normalizing tool calls and fixing timestamp formats. + +### Middleware + +```ts +import { + type LanguageModelV1, + type LanguageModelV1Middleware, + LanguageModelV1Prompt, + type LanguageModelV1StreamPart, + simulateReadableStream, + wrapLanguageModel, +} from 'ai'; +import 'dotenv/config'; +import fs from 'fs'; +import path from 'path'; + +const CACHE_FILE = path.join(process.cwd(), '.cache/ai-cache.json'); + +export const cached = (model: LanguageModelV1) => + wrapLanguageModel({ + middleware: cacheMiddleware, + model, + }); + +const ensureCacheFile = () => { + const cacheDir = path.dirname(CACHE_FILE); + if (!fs.existsSync(cacheDir)) { + fs.mkdirSync(cacheDir, { recursive: true }); + } + if (!fs.existsSync(CACHE_FILE)) { + fs.writeFileSync(CACHE_FILE, '{}'); + } +}; + +const getCachedResult = (key: string | object) => { + ensureCacheFile(); + const cacheKey = typeof key === 'object' ? JSON.stringify(key) : key; + try { + const cacheContent = fs.readFileSync(CACHE_FILE, 'utf-8'); + + const cache = JSON.parse(cacheContent); + + const result = cache[cacheKey]; + + return result ?? null; + } catch (error) { + console.error('Cache error:', error); + return null; + } +}; + +const updateCache = (key: string, value: any) => { + ensureCacheFile(); + try { + const cache = JSON.parse(fs.readFileSync(CACHE_FILE, 'utf-8')); + const updatedCache = { ...cache, [key]: value }; + fs.writeFileSync(CACHE_FILE, JSON.stringify(updatedCache, null, 2)); + console.log('Cache updated for key:', key); + } catch (error) { + console.error('Failed to update cache:', error); + } +}; +const cleanPrompt = (prompt: LanguageModelV1Prompt) => { + return prompt.map(m => { + if (m.role === 'assistant') { + return m.content.map(part => + part.type === 'tool-call' ? { ...part, toolCallId: 'cached' } : part, + ); + } + if (m.role === 'tool') { + return m.content.map(tc => ({ + ...tc, + toolCallId: 'cached', + result: {}, + })); + } + + return m; + }); +}; + +export const cacheMiddleware: LanguageModelV1Middleware = { + wrapGenerate: async ({ doGenerate, params }) => { + const cacheKey = JSON.stringify({ + ...cleanPrompt(params.prompt), + _function: 'generate', + }); + console.log('Cache Key:', cacheKey); + + const cached = getCachedResult(cacheKey) as Awaited< + ReturnType + > | null; + + if (cached && cached !== null) { + console.log('Cache Hit'); + return { + ...cached, + response: { + ...cached.response, + timestamp: cached?.response?.timestamp + ? 
new Date(cached?.response?.timestamp) + : undefined, + }, + }; + } + + console.log('Cache Miss'); + const result = await doGenerate(); + + updateCache(cacheKey, result); + + return result; + }, + wrapStream: async ({ doStream, params }) => { + const cacheKey = JSON.stringify({ + ...cleanPrompt(params.prompt), + _function: 'stream', + }); + console.log('Cache Key:', cacheKey); + + // Check if the result is in the cache + const cached = getCachedResult(cacheKey); + + // If cached, return a simulated ReadableStream that yields the cached result + if (cached && cached !== null) { + console.log('Cache Hit'); + // Format the timestamps in the cached response + const formattedChunks = (cached as LanguageModelV1StreamPart[]).map(p => { + if (p.type === 'response-metadata' && p.timestamp) { + return { ...p, timestamp: new Date(p.timestamp) }; + } else return p; + }); + return { + stream: simulateReadableStream({ + initialDelayInMs: 0, + chunkDelayInMs: 10, + chunks: formattedChunks, + }), + rawCall: { rawPrompt: null, rawSettings: {} }, + }; + } + + console.log('Cache Miss'); + // If not cached, proceed with streaming + const { stream, ...rest } = await doStream(); + + const fullResponse: LanguageModelV1StreamPart[] = []; + + const transformStream = new TransformStream< + LanguageModelV1StreamPart, + LanguageModelV1StreamPart + >({ + transform(chunk, controller) { + fullResponse.push(chunk); + controller.enqueue(chunk); + }, + flush() { + // Store the full response in the cache after streaming is complete + updateCache(cacheKey, fullResponse); + }, + }); + + return { + stream: stream.pipeThrough(transformStream), + ...rest, + }; + }, +}; +``` + +## Using the Middleware + +The middleware can be easily integrated into your existing AI SDK setup: + +```ts highlight="4,8" +import { openai } from '@ai-sdk/openai'; +import { streamText } from 'ai'; +import 'dotenv/config'; +import { cached } from '../middleware/your-cache-middleware'; + +async function main() { + const result = streamText({ + model: cached(openai('gpt-4o')), + maxTokens: 512, + temperature: 0.3, + maxRetries: 5, + prompt: 'Invent a new holiday and describe its traditions.', + }); + + for await (const textPart of result.textStream) { + process.stdout.write(textPart); + } + + console.log(); + console.log('Token usage:', await result.usage); + console.log('Finish reason:', await result.finishReason); +} + +main().catch(console.error); +``` + +## Considerations + +When using this caching middleware, keep these points in mind: + +1. **Development Only** - This approach is intended for local development, not production environments +2. **Cache Invalidation** - You'll need to clear the cache (delete the cache file) when you want fresh responses +3. **Multi-Step Flows** - When using `maxSteps`, be aware that caching occurs at the individual language model response level, not across the entire execution flow. This means that while the model's generation is cached, the tool call is not and will run on each generation. 
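+
+As the cache invalidation point above notes, fresh responses require deleting the cache file. A small helper script keeps that a one-step operation; this is a minimal sketch that assumes the same `CACHE_FILE` location as the middleware above:
+
+```ts
+import fs from 'fs';
+import path from 'path';
+
+// Assumes the cache location used by the middleware above.
+const CACHE_FILE = path.join(process.cwd(), '.cache/ai-cache.json');
+
+// Reset the cache so the next run calls the real model again.
+if (fs.existsSync(CACHE_FILE)) {
+  fs.writeFileSync(CACHE_FILE, '{}');
+  console.log('Local AI cache cleared.');
+}
+```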
From b30502c5a54c14564d58f5a3e6d85fe70208d9a9 Mon Sep 17 00:00:00 2001 From: Gen Tamura Date: Fri, 4 Apr 2025 19:06:13 +0900 Subject: [PATCH 0020/1307] fix (docs): highlight (#5545) --- content/docs/02-getting-started/02-nextjs-app-router.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/docs/02-getting-started/02-nextjs-app-router.mdx b/content/docs/02-getting-started/02-nextjs-app-router.mdx index 8a8bc840e606..1de3a89c7c82 100644 --- a/content/docs/02-getting-started/02-nextjs-app-router.mdx +++ b/content/docs/02-getting-started/02-nextjs-app-router.mdx @@ -244,7 +244,7 @@ Notice the blank response in the UI? This is because instead of generating a tex To display the tool invocations in your UI, update your `app/page.tsx` file: -```tsx filename="app/page.tsx" highlight="18-24" +```tsx filename="app/page.tsx" highlight="16-21" 'use client'; import { useChat } from '@ai-sdk/react'; From 33373fb2152de8e60e91c9b52e7771359fa1aa89 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Fri, 4 Apr 2025 15:01:18 +0200 Subject: [PATCH 0021/1307] chore: introduce LanguageModelV2 (#5547) --- .../01-next/122-caching-middleware.mdx | 4 +- .../05-node/80-local-caching-middleware.mdx | 4 +- content/docs/03-ai-sdk-core/40-middleware.mdx | 16 +- content/docs/06-advanced/04-caching.mdx | 4 +- .../01-ai-sdk-core/60-wrap-language-model.mdx | 2 +- .../65-language-model-v1-middleware.mdx | 6 +- .../src/e2e/google-vertex-anthropic.test.ts | 14 +- .../ai-core/src/e2e/google-vertex.test.ts | 8 +- examples/ai-core/src/e2e/google.test.ts | 6 +- examples/ai-core/src/e2e/openai.test.ts | 4 +- .../ai-core/src/generate-object/mock-error.ts | 4 +- .../generate-object/mock-repair-add-close.ts | 4 +- examples/ai-core/src/generate-object/mock.ts | 4 +- .../mock-tool-call-repair-reask.ts | 4 +- .../mock-tool-call-repair-structured-model.ts | 4 +- examples/ai-core/src/generate-text/mock.ts | 4 +- .../openai-log-metadata-middleware.ts | 4 +- .../middleware/add-to-last-user-message.ts | 6 +- .../middleware/get-last-user-message-text.ts | 4 +- .../src/middleware/your-cache-middleware.ts | 4 +- .../middleware/your-guardrail-middleware.ts | 4 +- .../src/middleware/your-log-middleware.ts | 8 +- .../src/middleware/your-rag-middleware.ts | 4 +- examples/ai-core/src/stream-object/mock.ts | 4 +- examples/ai-core/src/stream-text/mock.ts | 4 +- .../generate-object/generate-object.test.ts | 78 ++-- .../generate-object/stream-object.test.ts | 88 ++--- .../ai/core/generate-object/stream-object.ts | 14 +- .../core/generate-text/generate-text.test.ts | 72 ++-- packages/ai/core/generate-text/output.ts | 4 +- .../ai/core/generate-text/parse-tool-call.ts | 8 +- .../run-tools-transformation.test.ts | 10 +- .../generate-text/run-tools-transformation.ts | 8 +- .../ai/core/generate-text/stream-text.test.ts | 60 +-- packages/ai/core/generate-text/stream-text.ts | 6 +- .../ai/core/generate-text/tool-call-repair.ts | 6 +- .../default-settings-middleware.test.ts | 4 +- .../middleware/default-settings-middleware.ts | 12 +- .../extract-reasoning-middleware.test.ts | 20 +- .../extract-reasoning-middleware.ts | 10 +- packages/ai/core/middleware/index.ts | 5 +- ...are.ts => language-model-v2-middleware.ts} | 36 +- .../simulate-streaming-middleware.test.ts | 18 +- .../simulate-streaming-middleware.ts | 8 +- .../middleware/wrap-language-model.test.ts | 50 +-- .../ai/core/middleware/wrap-language-model.ts | 32 +- .../convert-to-language-model-prompt.ts | 26 +- .../prompt/prepare-tools-and-tool-choice.ts | 10 +- 
.../ai/core/registry/custom-provider.test.ts | 4 +- packages/ai/core/registry/custom-provider.ts | 4 +- .../core/registry/provider-registry.test.ts | 29 +- .../ai/core/registry/provider-registry.ts | 10 +- .../ai/core/test/mock-language-model-v1.ts | 32 +- packages/ai/core/types/index.ts | 8 +- packages/ai/core/types/language-model.ts | 56 +-- packages/ai/core/types/provider-metadata.ts | 6 +- packages/ai/rsc/stream-ui/stream-ui.tsx | 6 +- .../ai/rsc/stream-ui/stream-ui.ui.test.tsx | 12 +- packages/ai/test/index.ts | 2 +- .../src/bedrock-chat-language-model.test.ts | 4 +- .../src/bedrock-chat-language-model.ts | 32 +- .../src/bedrock-prepare-tools.ts | 10 +- .../amazon-bedrock/src/bedrock-provider.ts | 10 +- .../src/convert-to-bedrock-chat-messages.ts | 18 +- .../src/map-bedrock-finish-reason.ts | 4 +- .../anthropic-messages-language-model.test.ts | 4 +- .../src/anthropic-messages-language-model.ts | 34 +- .../anthropic/src/anthropic-prepare-tools.ts | 10 +- packages/anthropic/src/anthropic-provider.ts | 18 +- ...nvert-to-anthropic-messages-prompt.test.ts | 10 +- .../convert-to-anthropic-messages-prompt.ts | 22 +- .../src/map-anthropic-stop-reason.ts | 4 +- .../azure/src/azure-openai-provider.test.ts | 4 +- packages/azure/src/azure-openai-provider.ts | 14 +- packages/cerebras/src/cerebras-provider.ts | 16 +- .../src/cohere-chat-language-model.test.ts | 4 +- .../cohere/src/cohere-chat-language-model.ts | 22 +- packages/cohere/src/cohere-prepare-tools.ts | 10 +- packages/cohere/src/cohere-provider.ts | 15 +- .../src/convert-to-cohere-chat-prompt.ts | 4 +- .../cohere/src/map-cohere-finish-reason.ts | 4 +- .../deepinfra/src/deepinfra-provider.test.ts | 8 +- packages/deepinfra/src/deepinfra-provider.ts | 14 +- packages/deepseek/src/deepseek-provider.ts | 16 +- packages/fal/src/fal-provider.ts | 4 +- .../fireworks/src/fireworks-provider.test.ts | 8 +- packages/fireworks/src/fireworks-provider.ts | 14 +- .../google-vertex-anthropic-provider.ts | 14 +- .../src/google-vertex-provider.ts | 8 +- ...onvert-to-google-generative-ai-messages.ts | 4 +- ...oogle-generative-ai-language-model.test.ts | 4 +- .../google-generative-ai-language-model.ts | 34 +- packages/google/src/google-prepare-tools.ts | 10 +- packages/google/src/google-provider.ts | 33 +- .../map-google-generative-ai-finish-reason.ts | 4 +- .../groq/src/convert-to-groq-chat-messages.ts | 4 +- .../groq/src/groq-chat-language-model.test.ts | 4 +- packages/groq/src/groq-chat-language-model.ts | 30 +- packages/groq/src/groq-prepare-tools.ts | 10 +- packages/groq/src/groq-provider.ts | 14 +- packages/groq/src/map-groq-finish-reason.ts | 4 +- packages/luma/src/luma-provider.ts | 4 +- .../src/convert-to-mistral-chat-messages.ts | 4 +- .../mistral/src/map-mistral-finish-reason.ts | 4 +- .../src/mistral-chat-language-model.test.ts | 4 +- .../src/mistral-chat-language-model.ts | 26 +- packages/mistral/src/mistral-prepare-tools.ts | 10 +- packages/mistral/src/mistral-provider.ts | 17 +- ...vert-to-openai-compatible-chat-messages.ts | 8 +- ...-to-openai-compatible-completion-prompt.ts | 4 +- .../map-openai-compatible-finish-reason.ts | 4 +- ...nai-compatible-chat-language-model.test.ts | 4 +- .../openai-compatible-chat-language-model.ts | 38 +- ...mpatible-completion-language-model.test.ts | 4 +- ...ai-compatible-completion-language-model.ts | 26 +- .../openai-compatible-metadata-extractor.ts | 6 +- .../src/openai-compatible-prepare-tools.ts | 10 +- .../src/openai-compatible-provider.ts | 14 +- .../src/convert-to-openai-chat-messages.ts | 10 +- 
.../convert-to-openai-completion-prompt.ts | 4 +- .../openai/src/map-openai-chat-logprobs.ts | 4 +- .../src/map-openai-completion-logprobs.ts | 4 +- .../openai/src/map-openai-finish-reason.ts | 4 +- .../src/openai-chat-language-model.test.ts | 6 +- .../openai/src/openai-chat-language-model.ts | 38 +- .../openai-completion-language-model.test.ts | 4 +- .../src/openai-completion-language-model.ts | 30 +- packages/openai/src/openai-prepare-tools.ts | 10 +- packages/openai/src/openai-provider.ts | 18 +- .../convert-to-openai-responses-messages.ts | 10 +- .../map-openai-responses-finish-reason.ts | 4 +- .../openai-responses-language-model.test.ts | 8 +- .../openai-responses-language-model.ts | 26 +- .../openai-responses-prepare-tools.ts | 10 +- .../src/convert-to-perplexity-messages.ts | 4 +- .../src/map-perplexity-finish-reason.ts | 4 +- .../src/perplexity-language-model.test.ts | 4 +- .../src/perplexity-language-model.ts | 26 +- .../perplexity/src/perplexity-provider.ts | 13 +- packages/provider/src/language-model/index.ts | 1 + .../provider/src/language-model/v2/index.ts | 12 + .../v2/language-model-v2-call-options.ts | 90 +++++ .../v2/language-model-v2-call-settings.ts | 92 +++++ .../v2/language-model-v2-call-warning.ts | 23 ++ .../v2/language-model-v2-finish-reason.ts | 20 + .../language-model-v2-function-tool-call.ts | 11 + .../v2/language-model-v2-function-tool.ts | 31 ++ .../v2/language-model-v2-logprobs.ts | 11 + .../v2/language-model-v2-prompt.ts | 261 +++++++++++++ ...language-model-v2-provider-defined-tool.ts | 24 ++ .../v2/language-model-v2-provider-metadata.ts | 26 ++ .../v2/language-model-v2-source.ts | 31 ++ .../v2/language-model-v2-tool-choice.ts | 5 + .../language-model/v2/language-model-v2.ts | 363 ++++++++++++++++++ packages/provider/src/provider/index.ts | 1 + packages/provider/src/provider/v2/index.ts | 1 + .../provider/src/provider/v2/provider-v2.ts | 42 ++ packages/replicate/src/replicate-provider.ts | 4 +- .../src/togetherai-provider.test.ts | 8 +- .../togetherai/src/togetherai-provider.ts | 14 +- packages/ui-utils/src/data-stream-parts.ts | 20 +- .../src/process-chat-response.test.ts | 6 +- .../ui-utils/src/process-chat-response.ts | 6 +- packages/ui-utils/src/types.ts | 8 +- packages/xai/src/xai-provider.ts | 12 +- 165 files changed, 2027 insertions(+), 926 deletions(-) rename packages/ai/core/middleware/{language-model-v1-middleware.ts => language-model-v2-middleware.ts} (67%) create mode 100644 packages/provider/src/language-model/v2/index.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-call-options.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-call-settings.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-call-warning.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-finish-reason.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-function-tool-call.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-function-tool.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-logprobs.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-prompt.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-provider-defined-tool.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-provider-metadata.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2-source.ts create mode 
100644 packages/provider/src/language-model/v2/language-model-v2-tool-choice.ts create mode 100644 packages/provider/src/language-model/v2/language-model-v2.ts create mode 100644 packages/provider/src/provider/v2/index.ts create mode 100644 packages/provider/src/provider/v2/provider-v2.ts diff --git a/content/cookbook/01-next/122-caching-middleware.mdx b/content/cookbook/01-next/122-caching-middleware.mdx index 0208cfef7180..08ccf20540d4 100644 --- a/content/cookbook/01-next/122-caching-middleware.mdx +++ b/content/cookbook/01-next/122-caching-middleware.mdx @@ -66,7 +66,7 @@ You can control the initial delay and delay between chunks by adjusting the `ini import { Redis } from '@upstash/redis'; import { type LanguageModelV1, - type LanguageModelV1Middleware, + type LanguageModelV2Middleware, type LanguageModelV1StreamPart, simulateReadableStream, } from 'ai'; @@ -76,7 +76,7 @@ const redis = new Redis({ token: process.env.KV_TOKEN, }); -export const cacheMiddleware: LanguageModelV1Middleware = { +export const cacheMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate, params }) => { const cacheKey = JSON.stringify(params); diff --git a/content/cookbook/05-node/80-local-caching-middleware.mdx b/content/cookbook/05-node/80-local-caching-middleware.mdx index e2a95a51287d..98142062fa43 100644 --- a/content/cookbook/05-node/80-local-caching-middleware.mdx +++ b/content/cookbook/05-node/80-local-caching-middleware.mdx @@ -38,7 +38,7 @@ The middleware handles all transformations needed to make cached responses indis ```ts import { type LanguageModelV1, - type LanguageModelV1Middleware, + type LanguageModelV2Middleware, LanguageModelV1Prompt, type LanguageModelV1StreamPart, simulateReadableStream, @@ -113,7 +113,7 @@ const cleanPrompt = (prompt: LanguageModelV1Prompt) => { }); }; -export const cacheMiddleware: LanguageModelV1Middleware = { +export const cacheMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate, params }) => { const cacheKey = JSON.stringify({ ...cleanPrompt(params.prompt), diff --git a/content/docs/03-ai-sdk-core/40-middleware.mdx b/content/docs/03-ai-sdk-core/40-middleware.mdx index 6add69978363..58a6c05ee3bb 100644 --- a/content/docs/03-ai-sdk-core/40-middleware.mdx +++ b/content/docs/03-ai-sdk-core/40-middleware.mdx @@ -145,9 +145,9 @@ Here are some examples of how to implement language model middleware: This example shows how to log the parameters and generated text of a language model call. ```ts -import type { LanguageModelV1Middleware, LanguageModelV1StreamPart } from 'ai'; +import type { LanguageModelV2Middleware, LanguageModelV1StreamPart } from 'ai'; -export const yourLogMiddleware: LanguageModelV1Middleware = { +export const yourLogMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate, params }) => { console.log('doGenerate called'); console.log(`params: ${JSON.stringify(params, null, 2)}`); @@ -199,11 +199,11 @@ export const yourLogMiddleware: LanguageModelV1Middleware = { This example shows how to build a simple cache for the generated text of a language model call. ```ts -import type { LanguageModelV1Middleware } from 'ai'; +import type { LanguageModelV2Middleware } from 'ai'; const cache = new Map(); -export const yourCacheMiddleware: LanguageModelV1Middleware = { +export const yourCacheMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate, params }) => { const cacheKey = JSON.stringify(params); @@ -233,9 +233,9 @@ This example shows how to use RAG as middleware. 
```ts -import type { LanguageModelV1Middleware } from 'ai'; +import type { LanguageModelV2Middleware } from 'ai'; -export const yourRagMiddleware: LanguageModelV1Middleware = { +export const yourRagMiddleware: LanguageModelV2Middleware = { transformParams: async ({ params }) => { const lastUserMessageText = getLastUserMessageText({ prompt: params.prompt, @@ -262,9 +262,9 @@ Guard rails are a way to ensure that the generated text of a language model call is safe and appropriate. This example shows how to use guardrails as middleware. ```ts -import type { LanguageModelV1Middleware } from 'ai'; +import type { LanguageModelV2Middleware } from 'ai'; -export const yourGuardrailMiddleware: LanguageModelV1Middleware = { +export const yourGuardrailMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate }) => { const { text, ...rest } = await doGenerate(); diff --git a/content/docs/06-advanced/04-caching.mdx b/content/docs/06-advanced/04-caching.mdx index d24a58b622c5..089222749701 100644 --- a/content/docs/06-advanced/04-caching.mdx +++ b/content/docs/06-advanced/04-caching.mdx @@ -19,7 +19,7 @@ Let's see how you can use language model middleware to cache responses. import { Redis } from '@upstash/redis'; import { type LanguageModelV1, - type LanguageModelV1Middleware, + type LanguageModelV2Middleware, type LanguageModelV1StreamPart, simulateReadableStream, } from 'ai'; @@ -29,7 +29,7 @@ const redis = new Redis({ token: process.env.KV_TOKEN, }); -export const cacheMiddleware: LanguageModelV1Middleware = { +export const cacheMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate, params }) => { const cacheKey = JSON.stringify(params); diff --git a/content/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx b/content/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx index f31cfb48aa52..944148965bec 100644 --- a/content/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx @@ -35,7 +35,7 @@ const wrappedLanguageModel = wrapLanguageModel({ }, { name: 'middleware', - type: 'LanguageModelV1Middleware | LanguageModelV1Middleware[]', + type: 'LanguageModelV2Middleware | LanguageModelV2Middleware[]', description: 'The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.', }, diff --git a/content/docs/07-reference/01-ai-sdk-core/65-language-model-v1-middleware.mdx b/content/docs/07-reference/01-ai-sdk-core/65-language-model-v1-middleware.mdx index 0cfe6dd669a4..0287e6d8cb98 100644 --- a/content/docs/07-reference/01-ai-sdk-core/65-language-model-v1-middleware.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/65-language-model-v1-middleware.mdx @@ -1,9 +1,9 @@ --- -title: LanguageModelV1Middleware +title: LanguageModelV2Middleware description: Middleware for enhancing language model behavior (API Reference) --- -# `LanguageModelV1Middleware` +# `LanguageModelV2Middleware` Language model middleware is an experimental feature. 
@@ -18,7 +18,7 @@ See [Language Model Middleware](/docs/ai-sdk-core/middleware) for more informati ## Import diff --git a/examples/ai-core/src/e2e/google-vertex-anthropic.test.ts b/examples/ai-core/src/e2e/google-vertex-anthropic.test.ts index 8532d37c4307..3fe37311c6d6 100644 --- a/examples/ai-core/src/e2e/google-vertex-anthropic.test.ts +++ b/examples/ai-core/src/e2e/google-vertex-anthropic.test.ts @@ -9,7 +9,7 @@ import { createVertexAnthropic as createVertexAnthropicEdge, vertexAnthropic as vertexAnthropicEdge, } from '@ai-sdk/google-vertex/anthropic/edge'; -import { generateText, APICallError, LanguageModelV1 } from 'ai'; +import { generateText, APICallError, LanguageModelV2 } from 'ai'; import fs from 'fs'; import { createFeatureTestSuite, @@ -31,8 +31,8 @@ const RUNTIME_VARIANTS = { } as const; const createModelObject = ( - model: LanguageModelV1, -): { model: LanguageModelV1; modelId: string } => ({ + model: LanguageModelV2, +): { model: LanguageModelV2; modelId: string } => ({ model: model, modelId: model.modelId, }); @@ -42,8 +42,8 @@ const createLanguageModel = ( | typeof createVertexAnthropicNode | typeof createVertexAnthropicEdge, modelId: string, - additionalTests: ((model: LanguageModelV1) => void)[] = [], -): ModelWithCapabilities => { + additionalTests: ((model: LanguageModelV2) => void)[] = [], +): ModelWithCapabilities => { const model = createVertexAnthropic({ project: process.env.GOOGLE_VERTEX_PROJECT!, // Anthropic models are typically only available in us-east5 region. @@ -67,7 +67,7 @@ const createModelVariants = ( | typeof createVertexAnthropicNode | typeof createVertexAnthropicEdge, modelId: string, -): ModelWithCapabilities[] => [ +): ModelWithCapabilities[] => [ createLanguageModel(createVertexAnthropic, modelId, [toolTests]), ]; @@ -112,7 +112,7 @@ describe.each(Object.values(RUNTIME_VARIANTS))( }, ); -const toolTests = (model: LanguageModelV1) => { +const toolTests = (model: LanguageModelV2) => { it.skipIf(!['claude-3-5-sonnet-v2@20241022'].includes(model.modelId))( 'should execute computer tool commands', async () => { diff --git a/examples/ai-core/src/e2e/google-vertex.test.ts b/examples/ai-core/src/e2e/google-vertex.test.ts index c8d77975eaa6..505465624074 100644 --- a/examples/ai-core/src/e2e/google-vertex.test.ts +++ b/examples/ai-core/src/e2e/google-vertex.test.ts @@ -4,7 +4,7 @@ import { vertex as vertexEdge } from '@ai-sdk/google-vertex/edge'; import { vertex as vertexNode } from '@ai-sdk/google-vertex'; import { APICallError, - LanguageModelV1, + LanguageModelV2, experimental_generateImage as generateImage, } from 'ai'; import { @@ -31,7 +31,7 @@ const RUNTIME_VARIANTS = { const createBaseModel = ( vertex: typeof vertexNode | typeof vertexEdge, modelId: string, -): ModelWithCapabilities => +): ModelWithCapabilities => createLanguageModelWithCapabilities(vertex(modelId), [ ...defaultChatModelCapabilities, 'audioInput', @@ -40,7 +40,7 @@ const createBaseModel = ( const createSearchGroundedModel = ( vertex: typeof vertexNode | typeof vertexEdge, modelId: string, -): ModelWithCapabilities => ({ +): ModelWithCapabilities => ({ model: vertex(modelId, { useSearchGrounding: true, }), @@ -75,7 +75,7 @@ const createImageModel = ( const createModelVariants = ( vertex: typeof vertexNode | typeof vertexEdge, modelId: string, -): ModelWithCapabilities[] => [ +): ModelWithCapabilities[] => [ createBaseModel(vertex, modelId), createSearchGroundedModel(vertex, modelId), ]; diff --git a/examples/ai-core/src/e2e/google.test.ts b/examples/ai-core/src/e2e/google.test.ts 
index 7680c17c1ca6..6514840d5fa9 100644 --- a/examples/ai-core/src/e2e/google.test.ts +++ b/examples/ai-core/src/e2e/google.test.ts @@ -1,7 +1,7 @@ import 'dotenv/config'; import { expect } from 'vitest'; import { GoogleErrorData, google as provider } from '@ai-sdk/google'; -import { APICallError, LanguageModelV1 } from 'ai'; +import { APICallError, LanguageModelV2 } from 'ai'; import { ModelWithCapabilities, createEmbeddingModelWithCapabilities, @@ -12,12 +12,12 @@ import { const createChatModel = ( modelId: string, -): ModelWithCapabilities => +): ModelWithCapabilities => createLanguageModelWithCapabilities(provider.chat(modelId)); const createSearchGroundedModel = ( modelId: string, -): ModelWithCapabilities => { +): ModelWithCapabilities => { const model = provider.chat(modelId, { useSearchGrounding: true }); return { model, diff --git a/examples/ai-core/src/e2e/openai.test.ts b/examples/ai-core/src/e2e/openai.test.ts index 7dabec6edd9a..9e3a7241d33a 100644 --- a/examples/ai-core/src/e2e/openai.test.ts +++ b/examples/ai-core/src/e2e/openai.test.ts @@ -1,7 +1,7 @@ import 'dotenv/config'; import { expect } from 'vitest'; import { openai as provider } from '@ai-sdk/openai'; -import { APICallError, LanguageModelV1 } from 'ai'; +import { APICallError, LanguageModelV2 } from 'ai'; import { ModelWithCapabilities, createEmbeddingModelWithCapabilities, @@ -11,7 +11,7 @@ import { const createChatModel = ( modelId: string, -): ModelWithCapabilities => +): ModelWithCapabilities => createLanguageModelWithCapabilities(provider.chat(modelId)); createFeatureTestSuite({ diff --git a/examples/ai-core/src/generate-object/mock-error.ts b/examples/ai-core/src/generate-object/mock-error.ts index 3c074b12d3df..f76c2a21aa5d 100644 --- a/examples/ai-core/src/generate-object/mock-error.ts +++ b/examples/ai-core/src/generate-object/mock-error.ts @@ -1,12 +1,12 @@ import { generateObject, NoObjectGeneratedError } from 'ai'; -import { MockLanguageModelV1 } from 'ai/test'; +import { MockLanguageModelV2 } from 'ai/test'; import 'dotenv/config'; import { z } from 'zod'; async function main() { try { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ defaultObjectGenerationMode: 'json', doGenerate: async () => ({ rawCall: { rawPrompt: null, rawSettings: {} }, diff --git a/examples/ai-core/src/generate-object/mock-repair-add-close.ts b/examples/ai-core/src/generate-object/mock-repair-add-close.ts index a69203411984..109a549fb62c 100644 --- a/examples/ai-core/src/generate-object/mock-repair-add-close.ts +++ b/examples/ai-core/src/generate-object/mock-repair-add-close.ts @@ -1,11 +1,11 @@ import { generateObject, JSONParseError } from 'ai'; -import { MockLanguageModelV1 } from 'ai/test'; +import { MockLanguageModelV2 } from 'ai/test'; import 'dotenv/config'; import { z } from 'zod'; async function main() { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ rawCall: { rawPrompt: null, rawSettings: {} }, usage: { promptTokens: 10, completionTokens: 20 }, diff --git a/examples/ai-core/src/generate-object/mock.ts b/examples/ai-core/src/generate-object/mock.ts index 0893214d3377..e62a26a2b503 100644 --- a/examples/ai-core/src/generate-object/mock.ts +++ b/examples/ai-core/src/generate-object/mock.ts @@ -1,11 +1,11 @@ import { generateObject } from 'ai'; -import { MockLanguageModelV1 } from 'ai/test'; +import { MockLanguageModelV2 } from 'ai/test'; import 'dotenv/config'; import { z } from 'zod'; async 
function main() { const { object, usage } = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ defaultObjectGenerationMode: 'json', doGenerate: async () => ({ rawCall: { rawPrompt: null, rawSettings: {} }, diff --git a/examples/ai-core/src/generate-text/mock-tool-call-repair-reask.ts b/examples/ai-core/src/generate-text/mock-tool-call-repair-reask.ts index a573a9e4052c..080fa22bb44f 100644 --- a/examples/ai-core/src/generate-text/mock-tool-call-repair-reask.ts +++ b/examples/ai-core/src/generate-text/mock-tool-call-repair-reask.ts @@ -1,12 +1,12 @@ import { openai } from '@ai-sdk/openai'; import { generateText, tool } from 'ai'; -import { MockLanguageModelV1 } from 'ai/test'; +import { MockLanguageModelV2 } from 'ai/test'; import 'dotenv/config'; import { z } from 'zod'; async function main() { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ rawCall: { rawPrompt: null, rawSettings: {} }, usage: { promptTokens: 10, completionTokens: 20 }, diff --git a/examples/ai-core/src/generate-text/mock-tool-call-repair-structured-model.ts b/examples/ai-core/src/generate-text/mock-tool-call-repair-structured-model.ts index 656fa5d036b4..e421e46aa40d 100644 --- a/examples/ai-core/src/generate-text/mock-tool-call-repair-structured-model.ts +++ b/examples/ai-core/src/generate-text/mock-tool-call-repair-structured-model.ts @@ -1,12 +1,12 @@ import { openai } from '@ai-sdk/openai'; import { generateObject, generateText, NoSuchToolError, tool } from 'ai'; -import { MockLanguageModelV1 } from 'ai/test'; +import { MockLanguageModelV2 } from 'ai/test'; import 'dotenv/config'; import { z } from 'zod'; async function main() { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ rawCall: { rawPrompt: null, rawSettings: {} }, usage: { promptTokens: 10, completionTokens: 20 }, diff --git a/examples/ai-core/src/generate-text/mock.ts b/examples/ai-core/src/generate-text/mock.ts index 2811563d7664..9583ff69a4f3 100644 --- a/examples/ai-core/src/generate-text/mock.ts +++ b/examples/ai-core/src/generate-text/mock.ts @@ -1,10 +1,10 @@ import { generateText } from 'ai'; -import { MockLanguageModelV1 } from 'ai/test'; +import { MockLanguageModelV2 } from 'ai/test'; import 'dotenv/config'; async function main() { const { text, usage } = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ rawCall: { rawPrompt: null, rawSettings: {} }, finishReason: 'stop', diff --git a/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts b/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts index 76a3ffad61b2..e353193989e7 100644 --- a/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts +++ b/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts @@ -1,8 +1,8 @@ import { openai } from '@ai-sdk/openai'; -import { generateText, LanguageModelV1Middleware, wrapLanguageModel } from 'ai'; +import { generateText, LanguageModelV2Middleware, wrapLanguageModel } from 'ai'; import 'dotenv/config'; -const logProviderMetadataMiddleware: LanguageModelV1Middleware = { +const logProviderMetadataMiddleware: LanguageModelV2Middleware = { transformParams: async ({ params }) => { console.log( 'providerMetadata: ' + JSON.stringify(params.providerMetadata, null, 2), diff --git 
a/examples/ai-core/src/middleware/add-to-last-user-message.ts b/examples/ai-core/src/middleware/add-to-last-user-message.ts index d99030db33c3..24df9f1e498c 100644 --- a/examples/ai-core/src/middleware/add-to-last-user-message.ts +++ b/examples/ai-core/src/middleware/add-to-last-user-message.ts @@ -1,12 +1,12 @@ -import { LanguageModelV1CallOptions } from 'ai'; +import { LanguageModelV2CallOptions } from 'ai'; export function addToLastUserMessage({ text, params, }: { text: string; - params: LanguageModelV1CallOptions; -}): LanguageModelV1CallOptions { + params: LanguageModelV2CallOptions; +}): LanguageModelV2CallOptions { const { prompt, ...rest } = params; const lastMessage = prompt.at(-1); diff --git a/examples/ai-core/src/middleware/get-last-user-message-text.ts b/examples/ai-core/src/middleware/get-last-user-message-text.ts index 5c4a5e7fc596..1baaed60d8df 100644 --- a/examples/ai-core/src/middleware/get-last-user-message-text.ts +++ b/examples/ai-core/src/middleware/get-last-user-message-text.ts @@ -1,9 +1,9 @@ -import { LanguageModelV1Prompt } from 'ai'; +import { LanguageModelV2Prompt } from 'ai'; export function getLastUserMessageText({ prompt, }: { - prompt: LanguageModelV1Prompt; + prompt: LanguageModelV2Prompt; }): string | undefined { const lastMessage = prompt.at(-1); diff --git a/examples/ai-core/src/middleware/your-cache-middleware.ts b/examples/ai-core/src/middleware/your-cache-middleware.ts index 71d35c747069..17a43c9bce02 100644 --- a/examples/ai-core/src/middleware/your-cache-middleware.ts +++ b/examples/ai-core/src/middleware/your-cache-middleware.ts @@ -1,8 +1,8 @@ -import type { LanguageModelV1Middleware } from 'ai'; +import type { LanguageModelV2Middleware } from 'ai'; const cache = new Map(); -export const yourCacheMiddleware: LanguageModelV1Middleware = { +export const yourCacheMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate, params }) => { const cacheKey = JSON.stringify(params); diff --git a/examples/ai-core/src/middleware/your-guardrail-middleware.ts b/examples/ai-core/src/middleware/your-guardrail-middleware.ts index 229415807228..191a6929683a 100644 --- a/examples/ai-core/src/middleware/your-guardrail-middleware.ts +++ b/examples/ai-core/src/middleware/your-guardrail-middleware.ts @@ -1,6 +1,6 @@ -import type { LanguageModelV1Middleware } from 'ai'; +import type { LanguageModelV2Middleware } from 'ai'; -export const yourGuardrailMiddleware: LanguageModelV1Middleware = { +export const yourGuardrailMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate }) => { const { text, ...rest } = await doGenerate(); diff --git a/examples/ai-core/src/middleware/your-log-middleware.ts b/examples/ai-core/src/middleware/your-log-middleware.ts index f6d14529cb0c..2fe934ea3f67 100644 --- a/examples/ai-core/src/middleware/your-log-middleware.ts +++ b/examples/ai-core/src/middleware/your-log-middleware.ts @@ -1,6 +1,6 @@ -import type { LanguageModelV1Middleware, LanguageModelV1StreamPart } from 'ai'; +import type { LanguageModelV2Middleware, LanguageModelV2StreamPart } from 'ai'; -export const yourLogMiddleware: LanguageModelV1Middleware = { +export const yourLogMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate, params }) => { console.log('doGenerate called'); console.log(`params: ${JSON.stringify(params, null, 2)}`); @@ -22,8 +22,8 @@ export const yourLogMiddleware: LanguageModelV1Middleware = { let generatedText = ''; const transformStream = new TransformStream< - LanguageModelV1StreamPart, - 
LanguageModelV1StreamPart + LanguageModelV2StreamPart, + LanguageModelV2StreamPart >({ transform(chunk, controller) { if (chunk.type === 'text-delta') { diff --git a/examples/ai-core/src/middleware/your-rag-middleware.ts b/examples/ai-core/src/middleware/your-rag-middleware.ts index 91169f60f185..63be3068a5d7 100644 --- a/examples/ai-core/src/middleware/your-rag-middleware.ts +++ b/examples/ai-core/src/middleware/your-rag-middleware.ts @@ -1,8 +1,8 @@ import { addToLastUserMessage } from './add-to-last-user-message'; import { getLastUserMessageText } from './get-last-user-message-text'; -import type { LanguageModelV1Middleware } from 'ai'; +import type { LanguageModelV2Middleware } from 'ai'; -export const yourRagMiddleware: LanguageModelV1Middleware = { +export const yourRagMiddleware: LanguageModelV2Middleware = { transformParams: async ({ params }) => { const lastUserMessageText = getLastUserMessageText({ prompt: params.prompt, diff --git a/examples/ai-core/src/stream-object/mock.ts b/examples/ai-core/src/stream-object/mock.ts index fcc4d3e3780b..a0601be70587 100644 --- a/examples/ai-core/src/stream-object/mock.ts +++ b/examples/ai-core/src/stream-object/mock.ts @@ -1,11 +1,11 @@ import { streamObject } from 'ai'; -import { convertArrayToReadableStream, MockLanguageModelV1 } from 'ai/test'; +import { convertArrayToReadableStream, MockLanguageModelV2 } from 'ai/test'; import 'dotenv/config'; import { z } from 'zod'; async function main() { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ defaultObjectGenerationMode: 'json', doStream: async () => ({ stream: convertArrayToReadableStream([ diff --git a/examples/ai-core/src/stream-text/mock.ts b/examples/ai-core/src/stream-text/mock.ts index e808aca77b9e..571359812f92 100644 --- a/examples/ai-core/src/stream-text/mock.ts +++ b/examples/ai-core/src/stream-text/mock.ts @@ -1,10 +1,10 @@ import { streamText } from 'ai'; -import { convertArrayToReadableStream, MockLanguageModelV1 } from 'ai/test'; +import { convertArrayToReadableStream, MockLanguageModelV2 } from 'ai/test'; import 'dotenv/config'; async function main() { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { type: 'text-delta', textDelta: 'Hello' }, diff --git a/packages/ai/core/generate-object/generate-object.test.ts b/packages/ai/core/generate-object/generate-object.test.ts index 0686c05c4494..305485391df4 100644 --- a/packages/ai/core/generate-object/generate-object.test.ts +++ b/packages/ai/core/generate-object/generate-object.test.ts @@ -3,7 +3,7 @@ import { jsonSchema } from '@ai-sdk/ui-utils'; import assert, { fail } from 'node:assert'; import { z } from 'zod'; import { verifyNoObjectGeneratedError as originalVerifyNoObjectGeneratedError } from '../../errors/no-object-generated-error'; -import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../test/mock-language-model-v1'; import { MockTracer } from '../test/mock-tracer'; import { generateObject } from './generate-object'; import { JSONParseError, TypeValidationError } from '@ai-sdk/provider'; @@ -19,7 +19,7 @@ describe('output = "object"', () => { describe('result.object', () => { it('should generate object with json mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { 
assert.deepStrictEqual(mode, { type: 'object-json', @@ -65,7 +65,7 @@ describe('output = "object"', () => { it('should generate object with json mode when structured outputs are enabled', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ supportsStructuredOutputs: true, doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { @@ -105,7 +105,7 @@ describe('output = "object"', () => { it('should use name and description with json mode when structured outputs are enabled', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ supportsStructuredOutputs: true, doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { @@ -147,7 +147,7 @@ describe('output = "object"', () => { it('should generate object with tool mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-tool', @@ -196,7 +196,7 @@ describe('output = "object"', () => { it('should use name and description with tool mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-tool', @@ -248,7 +248,7 @@ describe('output = "object"', () => { describe('result.request', () => { it('should contain request information with json mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, text: `{ "content": "Hello, world!" }`, @@ -269,7 +269,7 @@ describe('output = "object"', () => { it('should contain request information with tool mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, toolCalls: [ @@ -299,7 +299,7 @@ describe('output = "object"', () => { describe('result.response', () => { it('should contain response information with json mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, text: `{ "content": "Hello, world!" 
}`, @@ -334,7 +334,7 @@ describe('output = "object"', () => { it('should contain response information with tool mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, toolCalls: [ @@ -378,7 +378,7 @@ describe('output = "object"', () => { describe('zod schema', () => { it('should generate object when using zod transform', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-json', @@ -425,7 +425,7 @@ describe('output = "object"', () => { it('should generate object with tool mode when using zod prePreprocess', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-json', @@ -477,7 +477,7 @@ describe('output = "object"', () => { describe('custom schema', () => { it('should generate object with json mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-json', @@ -528,7 +528,7 @@ describe('output = "object"', () => { describe('result.toJsonResponse', () => { it('should return JSON response', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: `{ "content": "Hello, world!" }`, @@ -559,7 +559,7 @@ describe('output = "object"', () => { describe('result.providerMetadata', () => { it('should contain provider metadata', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: `{ "content": "Hello, world!" 
}`, @@ -588,7 +588,7 @@ describe('output = "object"', () => { describe('options.headers', () => { it('should pass headers to model in json mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ headers }) => { expect(headers).toStrictEqual({ 'custom-request-header': 'request-header-value', @@ -611,7 +611,7 @@ describe('output = "object"', () => { it('should pass headers to model in tool mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ headers }) => { expect(headers).toStrictEqual({ 'custom-request-header': 'request-header-value', @@ -643,7 +643,7 @@ describe('output = "object"', () => { describe('options.repairText', () => { it('should be able to repair a JSONParseError', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => { return { ...dummyResponseValues, @@ -668,7 +668,7 @@ describe('output = "object"', () => { it('should be able to repair a TypeValidationError', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => { return { ...dummyResponseValues, @@ -695,7 +695,7 @@ describe('output = "object"', () => { it('should be able to handle repair that returns null', async () => { const result = generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => { return { ...dummyResponseValues, @@ -724,7 +724,7 @@ describe('output = "object"', () => { describe('options.providerOptions', () => { it('should pass provider options to model in json mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ providerMetadata }) => { expect(providerMetadata).toStrictEqual({ aProvider: { someKey: 'someValue' }, @@ -751,7 +751,7 @@ describe('output = "object"', () => { it('should pass provider options to model in tool mode', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ providerMetadata }) => { expect(providerMetadata).toStrictEqual({ aProvider: { someKey: 'someValue' }, @@ -807,7 +807,7 @@ describe('output = "object"', () => { it('should throw NoObjectGeneratedError when schema validation fails in tool model', async () => { try { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, toolCalls: [ @@ -836,7 +836,7 @@ describe('output = "object"', () => { it('should throw NoObjectGeneratedError when schema validation fails in json model', async () => { try { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: `{ "content": 123 }`, @@ -858,7 +858,7 @@ describe('output = "object"', () => { it('should throw NoObjectGeneratedError when parsing fails in tool model', async () => { try { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, toolCalls: [ @@ -887,7 +887,7 @@ describe('output = "object"', () => { it('should throw NoObjectGeneratedError when parsing fails in json model', async () => { try { await 
generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: '{ broken json', @@ -909,7 +909,7 @@ describe('output = "object"', () => { it('should throw NoObjectGeneratedError when parsing fails in json model also with repairResponse', async () => { try { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: '{ broken json', @@ -932,7 +932,7 @@ describe('output = "object"', () => { it('should throw NoObjectGeneratedError when the tool was not called in tool mode', async () => { try { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: undefined, @@ -954,7 +954,7 @@ describe('output = "object"', () => { it('should throw NoObjectGeneratedError when no text is available in json model', async () => { try { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: undefined, @@ -978,7 +978,7 @@ describe('output = "object"', () => { describe('output = "array"', () => { it('should generate an array with 3 elements', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-json', @@ -1048,7 +1048,7 @@ describe('output = "array"', () => { describe('output = "enum"', () => { it('should generate an enum value', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { expect(mode).toEqual({ type: 'object-json', @@ -1099,7 +1099,7 @@ describe('output = "enum"', () => { describe('output = "no-schema"', () => { it('should generate object', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-json', @@ -1140,7 +1140,7 @@ describe('telemetry', () => { it('should not record any telemetry data when not explicitly enabled', async () => { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, text: `{ "content": "Hello, world!" }`, @@ -1156,7 +1156,7 @@ describe('telemetry', () => { it('should record telemetry data when enabled with mode "json"', async () => { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, text: `{ "content": "Hello, world!" }`, @@ -1197,7 +1197,7 @@ describe('telemetry', () => { it('should record telemetry data when enabled with mode "tool"', async () => { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, toolCalls: [ @@ -1245,7 +1245,7 @@ describe('telemetry', () => { it('should not record telemetry inputs / outputs when disabled with mode "json"', async () => { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, text: `{ "content": "Hello, world!" 
}`, @@ -1272,7 +1272,7 @@ describe('telemetry', () => { it('should not record telemetry inputs / outputs when disabled with mode "tool"', async () => { await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, toolCalls: [ @@ -1308,7 +1308,7 @@ describe('telemetry', () => { describe('options.messages', () => { it('should detect and convert ui messages', async () => { const result = await generateObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt }) => { expect(prompt).toStrictEqual([ { @@ -1394,7 +1394,7 @@ describe('options.messages', () => { it('should support models that use "this" context in supportsUrl', async () => { let supportsUrlCalled = false; - class MockLanguageModelWithImageSupport extends MockLanguageModelV1 { + class MockLanguageModelWithImageSupport extends MockLanguageModelV2 { readonly supportsImageUrls = false; constructor() { diff --git a/packages/ai/core/generate-object/stream-object.test.ts b/packages/ai/core/generate-object/stream-object.test.ts index a645b1943159..68a43f17690e 100644 --- a/packages/ai/core/generate-object/stream-object.test.ts +++ b/packages/ai/core/generate-object/stream-object.test.ts @@ -10,7 +10,7 @@ import { NoObjectGeneratedError, verifyNoObjectGeneratedError, } from '../../errors/no-object-generated-error'; -import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../test/mock-language-model-v1'; import { createMockServerResponse } from '../test/mock-server-response'; import { MockTracer } from '../test/mock-tracer'; import { AsyncIterableStream } from '../util/async-iterable-stream'; @@ -22,7 +22,7 @@ describe('streamObject', () => { describe('result.objectStream', () => { it('should send object deltas with json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { expect(mode).toStrictEqual({ type: 'object-json', @@ -88,7 +88,7 @@ describe('streamObject', () => { it('should send object deltas with json mode when structured outputs are enabled', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ supportsStructuredOutputs: true, doStream: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { @@ -147,7 +147,7 @@ describe('streamObject', () => { it('should use name and description with json mode when structured outputs are enabled', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ supportsStructuredOutputs: true, doStream: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { @@ -209,7 +209,7 @@ describe('streamObject', () => { it('should send object deltas with tool mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-tool', @@ -306,7 +306,7 @@ describe('streamObject', () => { it('should use name and description with tool mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-tool', @@ -405,7 +405,7 @@ describe('streamObject', () => { it('should suppress error in partialObjectStream', async () => { const 
result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => { throw new Error('test error'); }, @@ -424,7 +424,7 @@ describe('streamObject', () => { const result: Array<{ error: unknown }> = []; const resultObject = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => { throw new Error('test error'); }, @@ -447,7 +447,7 @@ describe('streamObject', () => { describe('result.fullStream', () => { it('should send full stream data', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -486,7 +486,7 @@ describe('streamObject', () => { describe('result.textStream', () => { it('should send text stream', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { type: 'text-delta', textDelta: '{ ' }, @@ -519,7 +519,7 @@ describe('streamObject', () => { describe('result.toTextStreamResponse', () => { it('should create a Response with a text stream', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { type: 'text-delta', textDelta: '{ ' }, @@ -564,7 +564,7 @@ describe('streamObject', () => { const mockResponse = createMockServerResponse(); const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { type: 'text-delta', textDelta: '{ ' }, @@ -608,7 +608,7 @@ describe('streamObject', () => { describe('result.usage', () => { it('should resolve with token usage', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -643,7 +643,7 @@ describe('streamObject', () => { describe('result.providerMetadata', () => { it('should resolve with provider metadata', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -679,7 +679,7 @@ describe('streamObject', () => { describe('result.response', () => { it('should resolve with response information in json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -721,7 +721,7 @@ describe('streamObject', () => { it('should resolve with response information in tool mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -768,7 +768,7 @@ describe('streamObject', () => { describe('result.request', () => { it('should contain request information with json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -807,7 +807,7 @@ describe('streamObject', () => { it('should contain request information with tool mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => 
({ stream: convertArrayToReadableStream([ { @@ -851,7 +851,7 @@ describe('streamObject', () => { describe('result.object', () => { it('should resolve with typed object', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { type: 'text-delta', textDelta: '{ ' }, @@ -884,7 +884,7 @@ describe('streamObject', () => { it('should reject object promise when the streamed object does not match the schema', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { type: 'text-delta', textDelta: '{ ' }, @@ -915,7 +915,7 @@ describe('streamObject', () => { it('should not lead to unhandled promise rejections when the streamed object does not match the schema', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { type: 'text-delta', textDelta: '{ ' }, @@ -952,7 +952,7 @@ describe('streamObject', () => { >[0]; const { partialObjectStream } = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -997,7 +997,7 @@ describe('streamObject', () => { >[0]; const { partialObjectStream, object } = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -1042,7 +1042,7 @@ describe('streamObject', () => { describe('options.headers', () => { it('should pass headers to model in json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ headers }) => { expect(headers).toStrictEqual({ 'custom-request-header': 'request-header-value', @@ -1077,7 +1077,7 @@ describe('streamObject', () => { it('should pass headers to model in tool mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ headers }) => { expect(headers).toStrictEqual({ 'custom-request-header': 'request-header-value', @@ -1117,7 +1117,7 @@ describe('streamObject', () => { describe('options.providerOptions', () => { it('should pass provider options to model in json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ providerMetadata }) => { expect(providerMetadata).toStrictEqual({ aProvider: { someKey: 'someValue' }, @@ -1154,7 +1154,7 @@ describe('streamObject', () => { it('should pass provider options to model in tool mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ providerMetadata }) => { expect(providerMetadata).toStrictEqual({ aProvider: { someKey: 'someValue' }, @@ -1196,7 +1196,7 @@ describe('streamObject', () => { describe('custom schema', () => { it('should send object deltas with json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-json', @@ -1268,7 +1268,7 @@ describe('streamObject', () => { describe('error handling', () => { it('should throw NoObjectGeneratedError when schema validation 
fails in tool mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -1317,7 +1317,7 @@ describe('streamObject', () => { it('should throw NoObjectGeneratedError when schema validation fails in json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { type: 'text-delta', textDelta: '{ "content": 123 }' }, @@ -1360,7 +1360,7 @@ describe('streamObject', () => { it('should throw NoObjectGeneratedError when parsing fails in tool mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -1409,7 +1409,7 @@ describe('streamObject', () => { it('should throw NoObjectGeneratedError when parsing fails in json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { type: 'text-delta', textDelta: '{ broken json' }, @@ -1452,7 +1452,7 @@ describe('streamObject', () => { it('should throw NoObjectGeneratedError when no tool call is made in tool mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -1494,7 +1494,7 @@ describe('streamObject', () => { it('should throw NoObjectGeneratedError when no text is generated in json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -1550,7 +1550,7 @@ describe('streamObject', () => { beforeEach(async () => { result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-json', @@ -1705,7 +1705,7 @@ describe('streamObject', () => { beforeEach(async () => { result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-json', @@ -1817,7 +1817,7 @@ describe('streamObject', () => { describe('output = "no-schema"', () => { it('should send object deltas with json mode', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'object-json', @@ -1881,7 +1881,7 @@ describe('streamObject', () => { it('should not record any telemetry data when not explicitly enabled', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -1919,7 +1919,7 @@ describe('streamObject', () => { it('should record telemetry data when enabled with mode "json"', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -1977,7 +1977,7 @@ describe('streamObject', () => { it('should record telemetry data when enabled with mode "tool"', async () => { const result = streamObject({ - model: new 
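The error-handling tests here expect `NoObjectGeneratedError` whenever parsing fails, schema validation fails, or no tool call or text is produced. A hedged handling sketch: the `onError` callback mirrors the error-collection tests above, while inspecting the rejected `object` promise is an assumption about typical usage rather than something this patch shows.

```ts
import { NoObjectGeneratedError, streamObject } from 'ai';

const result = streamObject({
  model,
  schema,
  prompt: 'generate an object',
  // Streaming errors are routed to onError instead of crashing the server.
  onError({ error }) {
    console.error('stream error:', error);
  },
});

try {
  await result.object;
} catch (error) {
  if (NoObjectGeneratedError.isInstance(error)) {
    // Parsing failed, validation failed, or the model produced no object.
    console.error('no object generated');
  }
}
```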
MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -2071,7 +2071,7 @@ describe('streamObject', () => { it('should not record telemetry inputs / outputs when disabled with mode "json"', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -2115,7 +2115,7 @@ describe('streamObject', () => { it('should not record telemetry inputs / outputs when disabled with mode "tool"', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -2197,7 +2197,7 @@ describe('streamObject', () => { describe('options.messages', () => { it('should detect and convert ui messages', async () => { const result = streamObject({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt }) => { expect(prompt).toStrictEqual([ { @@ -2302,7 +2302,7 @@ describe('streamObject', () => { it('should support models that use "this" context in supportsUrl', async () => { let supportsUrlCalled = false; - class MockLanguageModelWithImageSupport extends MockLanguageModelV1 { + class MockLanguageModelWithImageSupport extends MockLanguageModelV2 { readonly supportsImageUrls = false; constructor() { diff --git a/packages/ai/core/generate-object/stream-object.ts b/packages/ai/core/generate-object/stream-object.ts index 0cc5607bcf59..8113adc3ce78 100644 --- a/packages/ai/core/generate-object/stream-object.ts +++ b/packages/ai/core/generate-object/stream-object.ts @@ -1,8 +1,8 @@ import { JSONValue, - LanguageModelV1CallOptions, - LanguageModelV1FinishReason, - LanguageModelV1StreamPart, + LanguageModelV2CallOptions, + LanguageModelV2FinishReason, + LanguageModelV2StreamPart, } from '@ai-sdk/provider'; import { createIdGenerator } from '@ai-sdk/provider-utils'; import { @@ -577,10 +577,10 @@ class DefaultStreamObjectResult mode = model.defaultObjectGenerationMode; } - let callOptions: LanguageModelV1CallOptions; + let callOptions: LanguageModelV2CallOptions; let transformer: Transformer< - LanguageModelV1StreamPart, - string | Omit + LanguageModelV2StreamPart, + string | Omit >; switch (mode) { @@ -746,7 +746,7 @@ class DefaultStreamObjectResult // store information for onFinish callback: let usage: LanguageModelUsage | undefined; - let finishReason: LanguageModelV1FinishReason | undefined; + let finishReason: LanguageModelV2FinishReason | undefined; let providerMetadata: ProviderMetadata | undefined; let object: RESULT | undefined; let error: unknown | undefined; diff --git a/packages/ai/core/generate-text/generate-text.test.ts b/packages/ai/core/generate-text/generate-text.test.ts index c946446270a9..26635d5a9884 100644 --- a/packages/ai/core/generate-text/generate-text.test.ts +++ b/packages/ai/core/generate-text/generate-text.test.ts @@ -1,11 +1,11 @@ -import { LanguageModelV1CallOptions } from '@ai-sdk/provider'; +import { LanguageModelV2CallOptions } from '@ai-sdk/provider'; import { mockId } from '@ai-sdk/provider-utils/test'; import { jsonSchema } from '@ai-sdk/ui-utils'; import assert from 'node:assert'; import { z } from 'zod'; import { Output } from '.'; import { ToolExecutionError } from '../../errors'; -import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../test/mock-language-model-v1'; import { MockTracer } 
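`stream-object.ts` now types its call options, finish reason, and stream parts with the V2 names; only the type names change in this patch, not the shapes. For reference, a minimal `LanguageModelV2CallOptions` value, mirroring the `params` literals used in the wrap-language-model tests later in this diff:

```ts
import type { LanguageModelV2CallOptions } from '@ai-sdk/provider';

const params: LanguageModelV2CallOptions = {
  inputFormat: 'messages',
  mode: { type: 'regular' },
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
};
```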
from '../test/mock-tracer'; import { tool } from '../tool/tool'; import { generateText } from './generate-text'; @@ -18,7 +18,7 @@ const dummyResponseValues = { usage: { promptTokens: 10, completionTokens: 20 }, }; -const modelWithSources = new MockLanguageModelV1({ +const modelWithSources = new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, sources: [ @@ -40,7 +40,7 @@ const modelWithSources = new MockLanguageModelV1({ }), }); -const modelWithFiles = new MockLanguageModelV1({ +const modelWithFiles = new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, files: [ @@ -56,7 +56,7 @@ const modelWithFiles = new MockLanguageModelV1({ }), }); -const modelWithReasoning = new MockLanguageModelV1({ +const modelWithReasoning = new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, reasoning: [ @@ -77,7 +77,7 @@ const modelWithReasoning = new MockLanguageModelV1({ describe('result.text', () => { it('should generate text', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { expect(mode).toStrictEqual({ type: 'regular', @@ -190,7 +190,7 @@ describe('result.steps', () => { describe('result.toolCalls', () => { it('should contain tool calls', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'regular', @@ -276,7 +276,7 @@ describe('result.toolCalls', () => { describe('result.toolResults', () => { it('should contain tool results', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { expect(mode).toStrictEqual({ type: 'regular', @@ -350,7 +350,7 @@ describe('result.toolResults', () => { describe('result.providerMetadata', () => { it('should contain provider metadata', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, providerMetadata: { @@ -376,7 +376,7 @@ describe('result.providerMetadata', () => { describe('result.response.messages', () => { it('should contain assistant response message when there are no tool calls', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, text: 'Hello, world!', @@ -391,7 +391,7 @@ describe('result.response.messages', () => { it('should contain assistant response message and tool message when there are tool calls with results', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, text: 'Hello, world!', @@ -446,7 +446,7 @@ describe('result.response.messages', () => { describe('result.request', () => { it('should contain request body', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: `Hello, world!`, @@ -467,7 +467,7 @@ describe('result.request', () => { describe('result.response', () => { it('should contain response body and headers', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async 
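The `generate-text` tests in this file cover tool calls and tool results. A caller-side sketch using the `tool` helper and `zod` schema imported by this test file; the mock-model wiring is elided, and the tool name and values are illustrative:

```ts
import { generateText, tool } from 'ai';
import { z } from 'zod';

const result = await generateText({
  model, // e.g. a MockLanguageModelV2 whose doGenerate returns a toolCalls array
  tools: {
    tool1: tool({
      parameters: z.object({ value: z.string() }),
      execute: async ({ value }) => `result for ${value}`,
    }),
  },
  prompt: 'call tool1',
});

console.log(result.toolCalls);   // parsed, typed calls for tool1
console.log(result.toolResults); // values returned by execute()
```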
({}) => ({ ...dummyResponseValues, text: `Hello, world!`, @@ -503,7 +503,7 @@ describe('options.maxSteps', () => { let responseCount = 0; result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { switch (responseCount++) { case 0: @@ -697,7 +697,7 @@ describe('options.maxSteps', () => { let responseCount = 0; result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { switch (responseCount++) { case 0: { @@ -981,7 +981,7 @@ describe('options.maxSteps', () => { describe('options.headers', () => { it('should pass headers to model', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ headers }) => { assert.deepStrictEqual(headers, { 'custom-request-header': 'request-header-value', @@ -1004,7 +1004,7 @@ describe('options.headers', () => { describe('options.providerOptions', () => { it('should pass provider options to model', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ providerMetadata }) => { expect(providerMetadata).toStrictEqual({ aProvider: { someKey: 'someValue' }, @@ -1029,7 +1029,7 @@ describe('options.abortSignal', () => { const toolExecuteMock = vi.fn().mockResolvedValue('tool result'); const generateTextPromise = generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, toolCalls: [ @@ -1077,7 +1077,7 @@ describe('telemetry', () => { it('should not record any telemetry data when not explicitly enabled', async () => { await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: `Hello, world!`, @@ -1092,7 +1092,7 @@ describe('telemetry', () => { it('should record telemetry data when enabled', async () => { await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, text: `Hello, world!`, @@ -1130,7 +1130,7 @@ describe('telemetry', () => { it('should record successful tool call', async () => { await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, toolCalls: [ @@ -1165,7 +1165,7 @@ describe('telemetry', () => { it('should not record telemetry inputs / outputs when disabled', async () => { await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({}) => ({ ...dummyResponseValues, toolCalls: [ @@ -1204,7 +1204,7 @@ describe('telemetry', () => { describe('tools with custom schema', () => { it('should contain tool calls', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt, mode }) => { assert.deepStrictEqual(mode, { type: 'regular', @@ -1302,7 +1302,7 @@ describe('tools with custom schema', () => { describe('options.messages', () => { it('should detect and convert ui messages', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async ({ prompt }) => { expect(prompt).toStrictEqual([ { @@ -1379,7 +1379,7 @@ describe('options.messages', () => { it('should support models that use "this" 
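The `options.maxSteps` tests drive multi-step generation, where tool results are fed back to the model until it stops calling tools. A usage sketch, with illustrative values:

```ts
const result = await generateText({
  model,
  tools: { tool1 },
  maxSteps: 3, // continue until no tool calls remain, at most 3 model round-trips
  prompt: 'use tool1, then answer',
});

console.log(result.steps.length);      // one entry per round-trip
console.log(result.response.messages); // assistant and tool messages for the whole run
```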
context in supportsUrl', async () => { let supportsUrlCalled = false; - class MockLanguageModelWithImageSupport extends MockLanguageModelV1 { + class MockLanguageModelWithImageSupport extends MockLanguageModelV2 { readonly supportsImageUrls = false; constructor() { @@ -1418,7 +1418,7 @@ describe('options.output', () => { describe('no output', () => { it('should throw error when accessing output', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, text: `Hello, world!`, @@ -1436,7 +1436,7 @@ describe('options.output', () => { describe('text output', () => { it('should forward text as output', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, text: `Hello, world!`, @@ -1450,10 +1450,10 @@ describe('options.output', () => { }); it('should set responseFormat to text and not change the prompt', async () => { - let callOptions: LanguageModelV1CallOptions; + let callOptions: LanguageModelV2CallOptions; await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async args => { callOptions = args; return { @@ -1486,7 +1486,7 @@ describe('options.output', () => { describe('without structured output model', () => { it('should parse the output', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ supportsStructuredOutputs: false, doGenerate: async () => ({ ...dummyResponseValues, @@ -1503,10 +1503,10 @@ describe('options.output', () => { }); it('should set responseFormat to json and inject schema and JSON instruction into the prompt', async () => { - let callOptions: LanguageModelV1CallOptions; + let callOptions: LanguageModelV2CallOptions; await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ supportsStructuredOutputs: false, doGenerate: async args => { callOptions = args; @@ -1548,7 +1548,7 @@ describe('options.output', () => { describe('with structured output model', () => { it('should parse the output', async () => { const result = await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ supportsStructuredOutputs: true, doGenerate: async () => ({ ...dummyResponseValues, @@ -1565,10 +1565,10 @@ describe('options.output', () => { }); it('should set responseFormat to json and send schema as part of the responseFormat', async () => { - let callOptions: LanguageModelV1CallOptions; + let callOptions: LanguageModelV2CallOptions; await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ supportsStructuredOutputs: true, doGenerate: async args => { callOptions = args; @@ -1615,7 +1615,7 @@ describe('tool execution errors', () => { it('should throw a ToolExecutionError when a tool execution throws an error', async () => { await expect(async () => { await generateText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doGenerate: async () => ({ ...dummyResponseValues, toolCalls: [ diff --git a/packages/ai/core/generate-text/output.ts b/packages/ai/core/generate-text/output.ts index 7d54fccbbb11..03fa79363d1a 100644 --- a/packages/ai/core/generate-text/output.ts +++ b/packages/ai/core/generate-text/output.ts @@ -10,7 +10,7 @@ import { NoObjectGeneratedError } from '../../errors'; import { injectJsonInstruction } from 
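The `options.output` tests distinguish models with and without native structured-output support. A hedged sketch of the experimental output API they exercise; `Output.object` and `experimental_output` follow the `Output` import at the top of this test file, but the exact option spelling should be checked against the SDK version in use:

```ts
import { generateText, Output } from 'ai';
import { z } from 'zod';

const result = await generateText({
  model,
  experimental_output: Output.object({
    schema: z.object({ content: z.string() }),
  }),
  prompt: 'answer as JSON',
});

// For models without structured-output support, the SDK sets responseFormat
// to json and injects the schema plus a JSON instruction into the prompt.
console.log(result.experimental_output);
```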
'../generate-object/inject-json-instruction'; import { LanguageModel, - LanguageModelV1CallOptions, + LanguageModelV2CallOptions, } from '../types/language-model'; import { LanguageModelResponseMetadata } from '../types/language-model-response-metadata'; import { LanguageModelUsage } from '../types/usage'; @@ -24,7 +24,7 @@ export interface Output { responseFormat: (options: { model: LanguageModel; - }) => LanguageModelV1CallOptions['responseFormat']; + }) => LanguageModelV2CallOptions['responseFormat']; parsePartial(options: { text: string }): { partial: PARTIAL } | undefined; diff --git a/packages/ai/core/generate-text/parse-tool-call.ts b/packages/ai/core/generate-text/parse-tool-call.ts index 51228ebf8779..177724f24db2 100644 --- a/packages/ai/core/generate-text/parse-tool-call.ts +++ b/packages/ai/core/generate-text/parse-tool-call.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1FunctionToolCall } from '@ai-sdk/provider'; +import { LanguageModelV2FunctionToolCall } from '@ai-sdk/provider'; import { safeParseJSON, safeValidateTypes } from '@ai-sdk/provider-utils'; import { Schema, asSchema } from '@ai-sdk/ui-utils'; import { InvalidToolArgumentsError } from '../../errors/invalid-tool-arguments-error'; @@ -17,7 +17,7 @@ export async function parseToolCall({ system, messages, }: { - toolCall: LanguageModelV1FunctionToolCall; + toolCall: LanguageModelV2FunctionToolCall; tools: TOOLS | undefined; repairToolCall: ToolCallRepairFunction | undefined; system: string | undefined; @@ -40,7 +40,7 @@ export async function parseToolCall({ throw error; } - let repairedToolCall: LanguageModelV1FunctionToolCall | null = null; + let repairedToolCall: LanguageModelV2FunctionToolCall | null = null; try { repairedToolCall = await repairToolCall({ @@ -72,7 +72,7 @@ async function doParseToolCall({ toolCall, tools, }: { - toolCall: LanguageModelV1FunctionToolCall; + toolCall: LanguageModelV2FunctionToolCall; tools: TOOLS; }): Promise> { const toolName = toolCall.toolName as keyof TOOLS & string; diff --git a/packages/ai/core/generate-text/run-tools-transformation.test.ts b/packages/ai/core/generate-text/run-tools-transformation.test.ts index f4340ebf6059..5f7b15315669 100644 --- a/packages/ai/core/generate-text/run-tools-transformation.test.ts +++ b/packages/ai/core/generate-text/run-tools-transformation.test.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1StreamPart } from '@ai-sdk/provider'; +import { LanguageModelV2StreamPart } from '@ai-sdk/provider'; import { delay } from '@ai-sdk/provider-utils'; import { convertArrayToReadableStream, @@ -10,7 +10,7 @@ import { MockTracer } from '../test/mock-tracer'; import { runToolsTransformation } from './run-tools-transformation'; it('should forward text deltas correctly', async () => { - const inputStream: ReadableStream = + const inputStream: ReadableStream = convertArrayToReadableStream([ { type: 'text-delta', textDelta: 'text' }, { @@ -48,7 +48,7 @@ it('should forward text deltas correctly', async () => { }); it('should handle immediate tool execution', async () => { - const inputStream: ReadableStream = + const inputStream: ReadableStream = convertArrayToReadableStream([ { type: 'tool-call', @@ -109,7 +109,7 @@ it('should handle immediate tool execution', async () => { }); it('should hold off on sending finish until the delayed tool result is received', async () => { - const inputStream: ReadableStream = + const inputStream: ReadableStream = convertArrayToReadableStream([ { type: 'tool-call', @@ -173,7 +173,7 @@ it('should hold off on sending finish until the 
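`parse-tool-call.ts` now repairs `LanguageModelV2FunctionToolCall` values when argument parsing or validation fails. A sketch of a repair function with the signature this file expects; the strategy shown (substituting known-good arguments) is purely illustrative, and returning `null` makes `parseToolCall` rethrow the original error:

```ts
import { NoSuchToolError } from 'ai';
import type { LanguageModelV2FunctionToolCall } from '@ai-sdk/provider';

async function repairToolCall({
  toolCall,
  error,
}: {
  toolCall: LanguageModelV2FunctionToolCall;
  error: unknown;
}): Promise<LanguageModelV2FunctionToolCall | null> {
  // An unknown tool name cannot be repaired here; give up and rethrow.
  if (NoSuchToolError.isInstance(error)) {
    return null;
  }

  // Illustrative repair: replace unparsable arguments with a valid payload.
  return { ...toolCall, args: JSON.stringify({ value: 'fixed' }) };
}
```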
delayed tool result is received' }); it('should try to repair tool call when the tool name is not found', async () => { - const inputStream: ReadableStream = + const inputStream: ReadableStream = convertArrayToReadableStream([ { type: 'tool-call', diff --git a/packages/ai/core/generate-text/run-tools-transformation.ts b/packages/ai/core/generate-text/run-tools-transformation.ts index 61fb9472d2cc..3b0a4291e3cb 100644 --- a/packages/ai/core/generate-text/run-tools-transformation.ts +++ b/packages/ai/core/generate-text/run-tools-transformation.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1StreamPart } from '@ai-sdk/provider'; +import { LanguageModelV2StreamPart } from '@ai-sdk/provider'; import { generateId } from '@ai-sdk/ui-utils'; import { Tracer } from '@opentelemetry/api'; import { ToolExecutionError } from '../../errors'; @@ -93,7 +93,7 @@ export function runToolsTransformation({ repairToolCall, }: { tools: TOOLS | undefined; - generatorStream: ReadableStream; + generatorStream: ReadableStream; toolCallStreaming: boolean; tracer: Tracer; telemetry: TelemetrySettings | undefined; @@ -141,11 +141,11 @@ export function runToolsTransformation({ // forward stream const forwardStream = new TransformStream< - LanguageModelV1StreamPart, + LanguageModelV2StreamPart, SingleRequestTextStreamPart >({ async transform( - chunk: LanguageModelV1StreamPart, + chunk: LanguageModelV2StreamPart, controller: TransformStreamDefaultController< SingleRequestTextStreamPart >, diff --git a/packages/ai/core/generate-text/stream-text.test.ts b/packages/ai/core/generate-text/stream-text.test.ts index bc31157575d3..00de777bf73c 100644 --- a/packages/ai/core/generate-text/stream-text.test.ts +++ b/packages/ai/core/generate-text/stream-text.test.ts @@ -1,8 +1,8 @@ import { - LanguageModelV1, - LanguageModelV1CallOptions, - LanguageModelV1CallWarning, - LanguageModelV1StreamPart, + LanguageModelV2, + LanguageModelV2CallOptions, + LanguageModelV2CallWarning, + LanguageModelV2StreamPart, } from '@ai-sdk/provider'; import { delay } from '@ai-sdk/provider-utils'; import { @@ -18,7 +18,7 @@ import { z } from 'zod'; import { ToolExecutionError } from '../../errors/tool-execution-error'; import { StreamData } from '../../streams/stream-data'; import { createDataStream } from '../data-stream/create-data-stream'; -import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../test/mock-language-model-v1'; import { createMockServerResponse } from '../test/mock-server-response'; import { MockTracer } from '../test/mock-tracer'; import { mockValues } from '../test/mock-values'; @@ -62,18 +62,18 @@ function createTestModel({ request = undefined, warnings, }: { - stream?: ReadableStream; + stream?: ReadableStream; rawResponse?: { headers: Record }; rawCall?: { rawPrompt: string; rawSettings: Record }; request?: { body: string }; - warnings?: LanguageModelV1CallWarning[]; -} = {}): LanguageModelV1 { - return new MockLanguageModelV1({ + warnings?: LanguageModelV2CallWarning[]; +} = {}): LanguageModelV2 { + return new MockLanguageModelV2({ doStream: async () => ({ stream, rawCall, rawResponse, request, warnings }), }); } -const modelWithSources = new MockLanguageModelV1({ +const modelWithSources = new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -108,7 +108,7 @@ const modelWithSources = new MockLanguageModelV1({ }), }); -const modelWithFiles = new MockLanguageModelV1({ +const modelWithFiles = new MockLanguageModelV2({ doStream: async () => ({ 
stream: convertArrayToReadableStream([ { @@ -133,7 +133,7 @@ const modelWithFiles = new MockLanguageModelV1({ }), }); -const modelWithReasoning = new MockLanguageModelV1({ +const modelWithReasoning = new MockLanguageModelV2({ doStream: async () => ({ stream: convertArrayToReadableStream([ { @@ -169,7 +169,7 @@ describe('streamText', () => { describe('result.textStream', () => { it('should send text deltas', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { expect(mode).toStrictEqual({ type: 'regular', @@ -249,7 +249,7 @@ describe('streamText', () => { it('should swallow error to prevent server crash', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => { throw new Error('test error'); }, @@ -266,7 +266,7 @@ describe('streamText', () => { describe('result.fullStream', () => { it('should send text deltas', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { expect(mode).toStrictEqual({ type: 'regular', @@ -348,7 +348,7 @@ describe('streamText', () => { it('should use fallback response metadata when response metadata is not provided', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { expect(mode).toStrictEqual({ type: 'regular', @@ -395,7 +395,7 @@ describe('streamText', () => { it('should send tool calls', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { expect(mode).toStrictEqual({ type: 'regular', @@ -467,7 +467,7 @@ describe('streamText', () => { it('should not send tool call deltas when toolCallStreaming is disabled', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { expect(mode).toStrictEqual({ type: 'regular', @@ -801,7 +801,7 @@ describe('streamText', () => { it('should forward error in doStream as error stream part', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => { throw new Error('test error'); }, @@ -2036,7 +2036,7 @@ describe('streamText', () => { const result: Array<{ error: unknown }> = []; const resultObject = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => { throw new Error('test error'); }, @@ -2145,7 +2145,7 @@ describe('streamText', () => { it('should not prevent error from being forwarded', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => { throw new Error('test error'); }, @@ -2244,7 +2244,7 @@ describe('streamText', () => { let responseCount = 0; result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { switch (responseCount++) { case 0: { @@ -2482,7 +2482,7 @@ describe('streamText', () => { let responseCount = 0; result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { switch (responseCount++) { case 0: { @@ -2831,7 +2831,7 @@ describe('streamText', () => { describe('options.headers', () => { it('should set headers', 
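The `streamText` tests here verify text deltas, tool calls, and error parts on `fullStream`. A consumption sketch; note that `doStream` errors surface as `error` parts rather than thrown exceptions, matching the 'swallow error to prevent server crash' tests above:

```ts
import { streamText } from 'ai';

const result = streamText({ model, prompt: 'hello' });

for await (const part of result.fullStream) {
  switch (part.type) {
    case 'text-delta':
      process.stdout.write(part.textDelta);
      break;
    case 'tool-call':
      console.log('tool call:', part.toolName);
      break;
    case 'error':
      console.error('stream error:', part.error);
      break;
  }
}
```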
async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ headers }) => { expect(headers).toStrictEqual({ 'custom-request-header': 'request-header-value', @@ -2867,7 +2867,7 @@ describe('streamText', () => { describe('options.providerMetadata', () => { it('should pass provider metadata to model', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ providerMetadata }) => { expect(providerMetadata).toStrictEqual({ aProvider: { someKey: 'someValue' }, @@ -3090,7 +3090,7 @@ describe('streamText', () => { describe('tools with custom schema', () => { it('should send tool calls', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt, mode }) => { expect(mode).toStrictEqual({ type: 'regular', @@ -3171,7 +3171,7 @@ describe('streamText', () => { describe('options.messages', () => { it('should detect and convert ui messages', async () => { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ prompt }) => { expect(prompt).toStrictEqual([ { @@ -3258,7 +3258,7 @@ describe('streamText', () => { it('should support models that use "this" context in supportsUrl', async () => { let supportsUrlCalled = false; - class MockLanguageModelWithImageSupport extends MockLanguageModelV1 { + class MockLanguageModelWithImageSupport extends MockLanguageModelV2 { readonly supportsImageUrls = false; constructor() { @@ -4342,10 +4342,10 @@ describe('streamText', () => { describe('object output', () => { it('should set responseFormat to json and send schema as part of the responseFormat', async () => { - let callOptions!: LanguageModelV1CallOptions; + let callOptions!: LanguageModelV2CallOptions; const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ supportsStructuredOutputs: false, doStream: async args => { callOptions = args; diff --git a/packages/ai/core/generate-text/stream-text.ts b/packages/ai/core/generate-text/stream-text.ts index b7d5c3a54efb..673d1b69f1b3 100644 --- a/packages/ai/core/generate-text/stream-text.ts +++ b/packages/ai/core/generate-text/stream-text.ts @@ -1,4 +1,4 @@ -import { AISDKError, LanguageModelV1Source } from '@ai-sdk/provider'; +import { AISDKError, LanguageModelV2Source } from '@ai-sdk/provider'; import { createIdGenerator, IDGenerator } from '@ai-sdk/provider-utils'; import { DataStreamString, formatDataStreamPart } from '@ai-sdk/ui-utils'; import { Span } from '@opentelemetry/api'; @@ -602,8 +602,8 @@ class DefaultStreamTextResult let activeReasoningText: undefined | (ReasoningDetail & { type: 'text' }) = undefined; - let recordedStepSources: LanguageModelV1Source[] = []; - const recordedSources: LanguageModelV1Source[] = []; + let recordedStepSources: LanguageModelV2Source[] = []; + const recordedSources: LanguageModelV2Source[] = []; const recordedResponse: LanguageModelResponseMetadata & { messages: Array; diff --git a/packages/ai/core/generate-text/tool-call-repair.ts b/packages/ai/core/generate-text/tool-call-repair.ts index 2396584d61f0..d19894774822 100644 --- a/packages/ai/core/generate-text/tool-call-repair.ts +++ b/packages/ai/core/generate-text/tool-call-repair.ts @@ -1,4 +1,4 @@ -import { JSONSchema7, LanguageModelV1FunctionToolCall } from '@ai-sdk/provider'; +import { JSONSchema7, LanguageModelV2FunctionToolCall } from '@ai-sdk/provider'; 
import { InvalidToolArgumentsError } from '../../errors/invalid-tool-arguments-error'; import { NoSuchToolError } from '../../errors/no-such-tool-error'; import { CoreMessage } from '../prompt'; @@ -20,8 +20,8 @@ import { ToolSet } from './tool-set'; export type ToolCallRepairFunction = (options: { system: string | undefined; messages: CoreMessage[]; - toolCall: LanguageModelV1FunctionToolCall; + toolCall: LanguageModelV2FunctionToolCall; tools: TOOLS; parameterSchema: (options: { toolName: string }) => JSONSchema7; error: NoSuchToolError | InvalidToolArgumentsError; -}) => Promise; +}) => Promise; diff --git a/packages/ai/core/middleware/default-settings-middleware.test.ts b/packages/ai/core/middleware/default-settings-middleware.test.ts index 02c2f3cb27c0..49e84568508c 100644 --- a/packages/ai/core/middleware/default-settings-middleware.test.ts +++ b/packages/ai/core/middleware/default-settings-middleware.test.ts @@ -1,7 +1,7 @@ -import { LanguageModelV1CallOptions } from '@ai-sdk/provider'; +import { LanguageModelV2CallOptions } from '@ai-sdk/provider'; import { defaultSettingsMiddleware } from './default-settings-middleware'; -const BASE_PARAMS: LanguageModelV1CallOptions = { +const BASE_PARAMS: LanguageModelV2CallOptions = { mode: { type: 'regular' }, prompt: [ { role: 'user', content: [{ type: 'text', text: 'Hello, world!' }] }, diff --git a/packages/ai/core/middleware/default-settings-middleware.ts b/packages/ai/core/middleware/default-settings-middleware.ts index 0ba609f0197d..f8ad8126cce9 100644 --- a/packages/ai/core/middleware/default-settings-middleware.ts +++ b/packages/ai/core/middleware/default-settings-middleware.ts @@ -1,8 +1,8 @@ import { - LanguageModelV1CallOptions, - LanguageModelV1ProviderMetadata, + LanguageModelV2CallOptions, + LanguageModelV2ProviderMetadata, } from '@ai-sdk/provider'; -import type { LanguageModelV1Middleware } from './language-model-v1-middleware'; +import type { LanguageModelV2Middleware } from './language-model-v2-middleware'; import { mergeObjects } from '../util/merge-objects'; /** @@ -12,11 +12,11 @@ export function defaultSettingsMiddleware({ settings, }: { settings: Partial< - LanguageModelV1CallOptions & { - providerMetadata?: LanguageModelV1ProviderMetadata; + LanguageModelV2CallOptions & { + providerMetadata?: LanguageModelV2ProviderMetadata; } >; -}): LanguageModelV1Middleware { +}): LanguageModelV2Middleware { return { middlewareVersion: 'v1', transformParams: async ({ params }) => { diff --git a/packages/ai/core/middleware/extract-reasoning-middleware.test.ts b/packages/ai/core/middleware/extract-reasoning-middleware.test.ts index a0862a937087..f78b9d7de671 100644 --- a/packages/ai/core/middleware/extract-reasoning-middleware.test.ts +++ b/packages/ai/core/middleware/extract-reasoning-middleware.test.ts @@ -5,13 +5,13 @@ import { } from '@ai-sdk/provider-utils/test'; import { generateText, streamText } from '../generate-text'; import { wrapLanguageModel } from '../middleware/wrap-language-model'; -import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../test/mock-language-model-v1'; import { extractReasoningMiddleware } from './extract-reasoning-middleware'; describe('extractReasoningMiddleware', () => { describe('wrapGenerate', () => { it('should extract reasoning from tags', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'analyzing the requestHere is the response', @@ -35,7 +35,7 @@ 
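`default-settings-middleware.ts` now merges defaults into `LanguageModelV2CallOptions` and V2 provider metadata. A usage sketch; the settings values are illustrative:

```ts
import { defaultSettingsMiddleware, wrapLanguageModel } from 'ai';

const modelWithDefaults = wrapLanguageModel({
  model,
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: 0.5, // applied to every call unless the caller overrides it
      providerMetadata: { aProvider: { someKey: 'someValue' } },
    },
  }),
});
```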
describe('extractReasoningMiddleware', () => { }); it('should extract reasoning from tags when there is no text', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'analyzing the request\n', @@ -59,7 +59,7 @@ describe('extractReasoningMiddleware', () => { }); it('should extract reasoning from multiple tags', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'analyzing the requestHere is the responsethinking about the responsemore', @@ -85,7 +85,7 @@ describe('extractReasoningMiddleware', () => { }); it('should preprend tag IFF startWithReasoning is true', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'analyzing the requestHere is the response', @@ -126,7 +126,7 @@ describe('extractReasoningMiddleware', () => { }); it('should preserve reasoning property even when rest contains other properties', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'analyzing the requestHere is the response', @@ -153,7 +153,7 @@ describe('extractReasoningMiddleware', () => { describe('wrapStream', () => { it('should extract reasoning from split tags', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doStream() { return { stream: convertArrayToReadableStream([ @@ -259,7 +259,7 @@ describe('extractReasoningMiddleware', () => { }); it('should extract reasoning from single chunk with multiple tags', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doStream() { return { stream: convertArrayToReadableStream([ @@ -364,7 +364,7 @@ describe('extractReasoningMiddleware', () => { }); it('should extract reasoning from when there is no text', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doStream() { return { stream: convertArrayToReadableStream([ @@ -460,7 +460,7 @@ describe('extractReasoningMiddleware', () => { }); it('should preprend tag IFF startWithReasoning is true', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doStream() { return { stream: convertArrayToReadableStream([ diff --git a/packages/ai/core/middleware/extract-reasoning-middleware.ts b/packages/ai/core/middleware/extract-reasoning-middleware.ts index 43d4eb1820cd..55b8625b1bc9 100644 --- a/packages/ai/core/middleware/extract-reasoning-middleware.ts +++ b/packages/ai/core/middleware/extract-reasoning-middleware.ts @@ -1,6 +1,6 @@ -import type { LanguageModelV1StreamPart } from '@ai-sdk/provider'; +import type { LanguageModelV2StreamPart } from '@ai-sdk/provider'; import { getPotentialStartIndex } from '../util/get-potential-start-index'; -import type { LanguageModelV1Middleware } from './language-model-v1-middleware'; +import type { LanguageModelV2Middleware } from './language-model-v2-middleware'; /** * Extract an XML-tagged reasoning section from the generated text and exposes it @@ -18,7 +18,7 @@ export function extractReasoningMiddleware({ tagName: string; separator?: string; startWithReasoning?: boolean; -}): LanguageModelV1Middleware { +}): LanguageModelV2Middleware { const openingTag = `<${tagName}>`; const closingTag = 
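The `extractReasoningMiddleware` tests pull XML-tagged reasoning sections out of generated text. A usage sketch; the tag name is illustrative, and `reasoning`/`text` are the result fields these tests assert on:

```ts
import { extractReasoningMiddleware, generateText, wrapLanguageModel } from 'ai';

const model = wrapLanguageModel({
  model: baseModel,
  middleware: extractReasoningMiddleware({ tagName: 'think' }),
});

const { text, reasoning } = await generateText({ model, prompt: '...' });
// reasoning: the content of the <think>...</think> section(s)
// text: the remaining response with the tags stripped
```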
`<\/${tagName}>`; @@ -72,8 +72,8 @@ export function extractReasoningMiddleware({ return { stream: stream.pipeThrough( new TransformStream< - LanguageModelV1StreamPart, - LanguageModelV1StreamPart + LanguageModelV2StreamPart, + LanguageModelV2StreamPart >({ transform: (chunk, controller) => { if (chunk.type !== 'text-delta') { diff --git a/packages/ai/core/middleware/index.ts b/packages/ai/core/middleware/index.ts index ebdabe1ac8b3..b4b30244c12f 100644 --- a/packages/ai/core/middleware/index.ts +++ b/packages/ai/core/middleware/index.ts @@ -1,9 +1,6 @@ export { defaultSettingsMiddleware } from './default-settings-middleware'; export { extractReasoningMiddleware } from './extract-reasoning-middleware'; -export type { - Experimental_LanguageModelV1Middleware, - LanguageModelV1Middleware, -} from './language-model-v1-middleware'; +export type { LanguageModelV2Middleware } from './language-model-v2-middleware'; export { simulateStreamingMiddleware } from './simulate-streaming-middleware'; export { experimental_wrapLanguageModel, diff --git a/packages/ai/core/middleware/language-model-v1-middleware.ts b/packages/ai/core/middleware/language-model-v2-middleware.ts similarity index 67% rename from packages/ai/core/middleware/language-model-v1-middleware.ts rename to packages/ai/core/middleware/language-model-v2-middleware.ts index 844de391a7b6..1a5a84ad1402 100644 --- a/packages/ai/core/middleware/language-model-v1-middleware.ts +++ b/packages/ai/core/middleware/language-model-v2-middleware.ts @@ -1,11 +1,11 @@ -import { LanguageModelV1, LanguageModelV1CallOptions } from '@ai-sdk/provider'; +import { LanguageModelV2, LanguageModelV2CallOptions } from '@ai-sdk/provider'; /** - * Experimental middleware for LanguageModelV1. + * Experimental middleware for LanguageModelV2. * This type defines the structure for middleware that can be used to modify - * the behavior of LanguageModelV1 operations. + * the behavior of LanguageModelV2 operations. */ -export type LanguageModelV1Middleware = { +export type LanguageModelV2Middleware = { /** * Middleware specification version. Use `v1` for the current version. */ @@ -20,8 +20,8 @@ export type LanguageModelV1Middleware = { */ transformParams?: (options: { type: 'generate' | 'stream'; - params: LanguageModelV1CallOptions; - }) => PromiseLike; + params: LanguageModelV2CallOptions; + }) => PromiseLike; /** * Wraps the generate operation of the language model. @@ -34,11 +34,11 @@ export type LanguageModelV1Middleware = { * @returns A promise that resolves to the result of the generate operation. */ wrapGenerate?: (options: { - doGenerate: () => ReturnType; - doStream: () => ReturnType; - params: LanguageModelV1CallOptions; - model: LanguageModelV1; - }) => Promise>>; + doGenerate: () => ReturnType; + doStream: () => ReturnType; + params: LanguageModelV2CallOptions; + model: LanguageModelV2; + }) => Promise>>; /** * Wraps the stream operation of the language model. @@ -52,15 +52,15 @@ export type LanguageModelV1Middleware = { * @returns A promise that resolves to the result of the stream operation. */ wrapStream?: (options: { - doGenerate: () => ReturnType; - doStream: () => ReturnType; - params: LanguageModelV1CallOptions; - model: LanguageModelV1; - }) => PromiseLike>>; + doGenerate: () => ReturnType; + doStream: () => ReturnType; + params: LanguageModelV2CallOptions; + model: LanguageModelV2; + }) => PromiseLike>>; }; /** - * @deprecated Use `LanguageModelV1Middleware` instead. + * @deprecated Use `LanguageModelV2Middleware` instead. 
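`language-model-v2-middleware.ts` defines the middleware contract that the rest of this patch consumes. A minimal custom middleware conforming to that type; the logging is illustrative, and note that `middlewareVersion` stays `'v1'` in this patch even though the type names move to V2:

```ts
import type { LanguageModelV2Middleware } from 'ai';

const loggingMiddleware: LanguageModelV2Middleware = {
  middlewareVersion: 'v1',

  transformParams: async ({ type, params }) => {
    console.log(`transformParams for ${type}`);
    return params; // pass call options through unchanged
  },

  wrapGenerate: async ({ doGenerate, model }) => {
    console.log(`doGenerate on ${model.modelId}`);
    const result = await doGenerate();
    console.log('doGenerate finished');
    return result;
  },
};
```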
*/ // TODO remove in v5 -export type Experimental_LanguageModelV1Middleware = LanguageModelV1Middleware; +export type Experimental_LanguageModelV2Middleware = LanguageModelV2Middleware; diff --git a/packages/ai/core/middleware/simulate-streaming-middleware.test.ts b/packages/ai/core/middleware/simulate-streaming-middleware.test.ts index a7508b48f219..c8a1d7008adc 100644 --- a/packages/ai/core/middleware/simulate-streaming-middleware.test.ts +++ b/packages/ai/core/middleware/simulate-streaming-middleware.test.ts @@ -4,7 +4,7 @@ import { } from '@ai-sdk/provider-utils/test'; import { streamText } from '../generate-text'; import { wrapLanguageModel } from '../middleware/wrap-language-model'; -import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../test/mock-language-model-v1'; import { simulateStreamingMiddleware } from './simulate-streaming-middleware'; const DEFAULT_SETTINGs = { @@ -18,7 +18,7 @@ const DEFAULT_SETTINGs = { describe('simulateStreamingMiddleware', () => { it('should simulate streaming with text response', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'This is a test response', @@ -43,7 +43,7 @@ describe('simulateStreamingMiddleware', () => { }); it('should simulate streaming with reasoning as string', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'This is a test response', @@ -69,7 +69,7 @@ describe('simulateStreamingMiddleware', () => { }); it('should simulate streaming with reasoning as array of text objects', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'This is a test response', @@ -98,7 +98,7 @@ describe('simulateStreamingMiddleware', () => { }); it('should simulate streaming with reasoning as array of mixed objects', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'This is a test response', @@ -127,7 +127,7 @@ describe('simulateStreamingMiddleware', () => { }); it('should simulate streaming with tool calls', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'This is a test response', @@ -166,7 +166,7 @@ describe('simulateStreamingMiddleware', () => { }); it('should preserve additional metadata in the response', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'This is a test response', @@ -192,7 +192,7 @@ describe('simulateStreamingMiddleware', () => { }); it('should handle empty text response', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: '', @@ -217,7 +217,7 @@ describe('simulateStreamingMiddleware', () => { }); it('should pass through warnings from the model', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ async doGenerate() { return { text: 'This is a test response', diff --git a/packages/ai/core/middleware/simulate-streaming-middleware.ts b/packages/ai/core/middleware/simulate-streaming-middleware.ts index e3355315eb60..756cd5109b5a 100644 --- 
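The `simulateStreamingMiddleware` tests replay one-shot `doGenerate` results as streams. A usage sketch that makes a generate-only model usable with `streamText`; the model variable is a placeholder:

```ts
import { simulateStreamingMiddleware, streamText, wrapLanguageModel } from 'ai';

const streamingModel = wrapLanguageModel({
  model: generateOnlyModel,
  middleware: simulateStreamingMiddleware(),
});

const result = streamText({ model: streamingModel, prompt: 'hello' });
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
```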
a/packages/ai/core/middleware/simulate-streaming-middleware.ts +++ b/packages/ai/core/middleware/simulate-streaming-middleware.ts @@ -1,16 +1,16 @@ -import type { LanguageModelV1StreamPart } from '@ai-sdk/provider'; -import type { LanguageModelV1Middleware } from './language-model-v1-middleware'; +import type { LanguageModelV2StreamPart } from '@ai-sdk/provider'; +import type { LanguageModelV2Middleware } from './language-model-v2-middleware'; /** * Simulates streaming chunks with the response from a generate call. */ -export function simulateStreamingMiddleware(): LanguageModelV1Middleware { +export function simulateStreamingMiddleware(): LanguageModelV2Middleware { return { middlewareVersion: 'v1', wrapStream: async ({ doGenerate }) => { const result = await doGenerate(); - const simulatedStream = new ReadableStream({ + const simulatedStream = new ReadableStream({ start(controller) { controller.enqueue({ type: 'response-metadata', ...result.response }); diff --git a/packages/ai/core/middleware/wrap-language-model.test.ts b/packages/ai/core/middleware/wrap-language-model.test.ts index 25691c80dd83..3e2e0fda4bbc 100644 --- a/packages/ai/core/middleware/wrap-language-model.test.ts +++ b/packages/ai/core/middleware/wrap-language-model.test.ts @@ -1,11 +1,11 @@ -import { LanguageModelV1, LanguageModelV1CallOptions } from '@ai-sdk/provider'; +import { LanguageModelV2, LanguageModelV2CallOptions } from '@ai-sdk/provider'; import { wrapLanguageModel } from '../middleware/wrap-language-model'; -import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../test/mock-language-model-v1'; describe('wrapLanguageModel', () => { it('should pass through model properties', () => { const wrappedModel = wrapLanguageModel({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ provider: 'test-provider', modelId: 'test-model', defaultObjectGenerationMode: 'json', @@ -24,7 +24,7 @@ describe('wrapLanguageModel', () => { it('should override provider and modelId if provided', () => { const wrappedModel = wrapLanguageModel({ - model: new MockLanguageModelV1(), + model: new MockLanguageModelV2(), middleware: { middlewareVersion: 'v1', }, @@ -37,7 +37,7 @@ describe('wrapLanguageModel', () => { }); it('should call transformParams middleware for doGenerate', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doGenerate: vi.fn().mockResolvedValue('mock result'), }); const transformParams = vi.fn().mockImplementation(({ params }) => ({ @@ -53,7 +53,7 @@ describe('wrapLanguageModel', () => { }, }); - const params: LanguageModelV1CallOptions = { + const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], mode: { type: 'regular' }, @@ -73,7 +73,7 @@ describe('wrapLanguageModel', () => { }); it('should call wrapGenerate middleware', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doGenerate: vi.fn().mockResolvedValue('mock result'), }); const wrapGenerate = vi @@ -88,7 +88,7 @@ describe('wrapLanguageModel', () => { }, }); - const params: LanguageModelV1CallOptions = { + const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], mode: { type: 'regular' }, @@ -105,7 +105,7 @@ describe('wrapLanguageModel', () => { }); it('should call transformParams middleware for doStream', async () => { 
- const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doStream: vi.fn().mockResolvedValue('mock stream'), }); const transformParams = vi.fn().mockImplementation(({ params }) => ({ @@ -121,7 +121,7 @@ describe('wrapLanguageModel', () => { }, }); - const params: LanguageModelV1CallOptions = { + const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], mode: { type: 'regular' }, @@ -140,7 +140,7 @@ describe('wrapLanguageModel', () => { }); it('should call wrapStream middleware', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doStream: vi.fn().mockResolvedValue('mock stream'), }); const wrapStream = vi.fn().mockImplementation(({ doStream }) => doStream()); @@ -153,7 +153,7 @@ describe('wrapLanguageModel', () => { }, }); - const params: LanguageModelV1CallOptions = { + const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], mode: { type: 'regular' }, @@ -170,7 +170,7 @@ describe('wrapLanguageModel', () => { }); it('should pass through empty supportsUrl', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doGenerate: vi.fn().mockResolvedValue('mock result'), }); @@ -185,7 +185,7 @@ describe('wrapLanguageModel', () => { }); it('should pass through supportsUrl when it is defined on the model', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doGenerate: vi.fn().mockResolvedValue('mock result'), supportsUrl: vi.fn().mockReturnValue(true), }); @@ -205,15 +205,15 @@ describe('wrapLanguageModel', () => { it('should support models that use "this" context in supportsUrl', async () => { let supportsUrlCalled = false; - class MockLanguageModelWithImageSupport implements LanguageModelV1 { + class MockLanguageModelWithImageSupport implements LanguageModelV2 { readonly specificationVersion = 'v1'; readonly provider = 'test-provider'; readonly modelId = 'test-model'; readonly defaultObjectGenerationMode = 'json'; readonly supportsImageUrls = false; - readonly doGenerate: LanguageModelV1['doGenerate'] = vi.fn(); - readonly doStream: LanguageModelV1['doStream'] = vi.fn(); + readonly doGenerate: LanguageModelV2['doGenerate'] = vi.fn(); + readonly doStream: LanguageModelV2['doStream'] = vi.fn(); private readonly value = true; @@ -239,7 +239,7 @@ describe('wrapLanguageModel', () => { describe('wrapLanguageModel with multiple middlewares', () => { it('should call multiple transformParams middlewares in sequence for doGenerate', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doGenerate: vi.fn().mockResolvedValue('final result'), }); @@ -266,7 +266,7 @@ describe('wrapLanguageModel', () => { ], }); - const params: LanguageModelV1CallOptions = { + const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], mode: { type: 'regular' }, @@ -293,7 +293,7 @@ describe('wrapLanguageModel', () => { }); it('should call multiple transformParams middlewares in sequence for doStream', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doStream: vi.fn().mockResolvedValue('final stream'), }); @@ -320,7 +320,7 @@ describe('wrapLanguageModel', () => { ], }); - const 
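The multiple-middleware tests pin down composition order. Per the `wrapLanguageModel` doc comment in this patch, the first middleware transforms the input first and the last middleware is wrapped directly around the model:

```ts
const wrapped = wrapLanguageModel({
  model,
  middleware: [firstMiddleware, secondMiddleware],
});

// transformParams order: firstMiddleware -> secondMiddleware -> model
// wrapping order:        firstMiddleware(secondMiddleware(model))
await wrapped.doGenerate(params);
```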
params: LanguageModelV1CallOptions = { + const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], mode: { type: 'regular' }, @@ -345,7 +345,7 @@ describe('wrapLanguageModel', () => { }); it('should chain multiple wrapGenerate middlewares in the correct order', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doGenerate: vi.fn().mockResolvedValue('final generate result'), }); @@ -376,7 +376,7 @@ describe('wrapLanguageModel', () => { ], }); - const params: LanguageModelV1CallOptions = { + const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], mode: { type: 'regular' }, @@ -393,7 +393,7 @@ describe('wrapLanguageModel', () => { }); it('should chain multiple wrapStream middlewares in the correct order', async () => { - const mockModel = new MockLanguageModelV1({ + const mockModel = new MockLanguageModelV2({ doStream: vi.fn().mockResolvedValue('final stream result'), }); @@ -424,7 +424,7 @@ describe('wrapLanguageModel', () => { ], }); - const params: LanguageModelV1CallOptions = { + const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], mode: { type: 'regular' }, diff --git a/packages/ai/core/middleware/wrap-language-model.ts b/packages/ai/core/middleware/wrap-language-model.ts index ee9825ead0ae..0912204b9e1d 100644 --- a/packages/ai/core/middleware/wrap-language-model.ts +++ b/packages/ai/core/middleware/wrap-language-model.ts @@ -1,18 +1,18 @@ -import { LanguageModelV1, LanguageModelV1CallOptions } from '@ai-sdk/provider'; -import { LanguageModelV1Middleware } from './language-model-v1-middleware'; +import { LanguageModelV2, LanguageModelV2CallOptions } from '@ai-sdk/provider'; +import { LanguageModelV2Middleware } from './language-model-v2-middleware'; import { asArray } from '../../util/as-array'; /** - * Wraps a LanguageModelV1 instance with middleware functionality. + * Wraps a LanguageModelV2 instance with middleware functionality. * This function allows you to apply middleware to transform parameters, * wrap generate operations, and wrap stream operations of a language model. * * @param options - Configuration options for wrapping the language model. - * @param options.model - The original LanguageModelV1 instance to be wrapped. + * @param options.model - The original LanguageModelV2 instance to be wrapped. * @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model. * @param options.modelId - Optional custom model ID to override the original model's ID. * @param options.providerId - Optional custom provider ID to override the original model's provider. - * @returns A new LanguageModelV1 instance with middleware applied. + * @returns A new LanguageModelV2 instance with middleware applied. 
*/ export const wrapLanguageModel = ({ model, @@ -20,11 +20,11 @@ export const wrapLanguageModel = ({ modelId, providerId, }: { - model: LanguageModelV1; - middleware: LanguageModelV1Middleware | LanguageModelV1Middleware[]; + model: LanguageModelV2; + middleware: LanguageModelV2Middleware | LanguageModelV2Middleware[]; modelId?: string; providerId?: string; -}): LanguageModelV1 => { +}): LanguageModelV2 => { return asArray(middlewareArg) .reverse() .reduce((wrappedModel, middleware) => { @@ -38,16 +38,16 @@ const doWrap = ({ modelId, providerId, }: { - model: LanguageModelV1; - middleware: LanguageModelV1Middleware; + model: LanguageModelV2; + middleware: LanguageModelV2Middleware; modelId?: string; providerId?: string; -}): LanguageModelV1 => { +}): LanguageModelV2 => { async function doTransform({ params, type, }: { - params: LanguageModelV1CallOptions; + params: LanguageModelV2CallOptions; type: 'generate' | 'stream'; }) { return transformParams ? await transformParams({ params, type }) : params; @@ -65,8 +65,8 @@ const doWrap = ({ supportsStructuredOutputs: model.supportsStructuredOutputs, async doGenerate( - params: LanguageModelV1CallOptions, - ): Promise>> { + params: LanguageModelV2CallOptions, + ): Promise>> { const transformedParams = await doTransform({ params, type: 'generate' }); const doGenerate = async () => model.doGenerate(transformedParams); const doStream = async () => model.doStream(transformedParams); @@ -81,8 +81,8 @@ const doWrap = ({ }, async doStream( - params: LanguageModelV1CallOptions, - ): Promise>> { + params: LanguageModelV2CallOptions, + ): Promise>> { const transformedParams = await doTransform({ params, type: 'stream' }); const doGenerate = async () => model.doGenerate(transformedParams); const doStream = async () => model.doStream(transformedParams); diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.ts index 61c9240362aa..d6ebece4b1a0 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.ts @@ -1,9 +1,9 @@ import { - LanguageModelV1FilePart, - LanguageModelV1ImagePart, - LanguageModelV1Message, - LanguageModelV1Prompt, - LanguageModelV1TextPart, + LanguageModelV2FilePart, + LanguageModelV2ImagePart, + LanguageModelV2Message, + LanguageModelV2Prompt, + LanguageModelV2TextPart, } from '@ai-sdk/provider'; import { download } from '../../util/download'; import { CoreMessage } from '../prompt/message'; @@ -28,7 +28,7 @@ export async function convertToLanguageModelPrompt({ modelSupportsImageUrls: boolean | undefined; modelSupportsUrl: undefined | ((url: URL) => boolean); downloadImplementation?: typeof download; -}): Promise { +}): Promise { const downloadedAssets = await downloadAssets( prompt.messages, downloadImplementation, @@ -47,7 +47,7 @@ export async function convertToLanguageModelPrompt({ } /** - * Convert a CoreMessage to a LanguageModelV1Message. + * Convert a CoreMessage to a LanguageModelV2Message. * * @param message The CoreMessage to convert. * @param downloadedAssets A map of URLs to their downloaded data. Only @@ -59,7 +59,7 @@ export function convertToLanguageModelMessage( string, { mimeType: string | undefined; data: Uint8Array } >, -): LanguageModelV1Message { +): LanguageModelV2Message { const role = message.role; switch (role) { case 'system': { @@ -245,7 +245,7 @@ async function downloadAssets( } /** - * Convert part of a message to a LanguageModelV1Part. 
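`convert-to-language-model-prompt.ts` maps `CoreMessage` values onto the V2 prompt types. A sketch of the converted shape for a user message with text plus a downloaded image; the part field names mirror the V1 spec, which this rename carries over unchanged, so treat them as an assumption rather than something this hunk states:

```ts
import type { LanguageModelV2Message } from '@ai-sdk/provider';

const message: LanguageModelV2Message = {
  role: 'user',
  content: [
    { type: 'text', text: 'Describe this image' },
    {
      type: 'image',
      image: new Uint8Array([137, 80, 78, 71]), // downloaded bytes
      mimeType: 'image/png',
    },
  ],
};
```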
+ * Convert part of a message to a LanguageModelV2Part. * @param part The part to convert. * @param downloadedAssets A map of URLs to their downloaded data. Only * available if the model does not support URLs, null otherwise. @@ -259,9 +259,9 @@ function convertPartToLanguageModelPart( { mimeType: string | undefined; data: Uint8Array } >, ): - | LanguageModelV1TextPart - | LanguageModelV1ImagePart - | LanguageModelV1FilePart { + | LanguageModelV2TextPart + | LanguageModelV2ImagePart + | LanguageModelV2FilePart { if (part.type === 'text') { return { type: 'text', @@ -333,7 +333,7 @@ function convertPartToLanguageModelPart( } // Now that we have the normalized data either as a URL or a Uint8Array, - // we can create the LanguageModelV1Part. + // we can create the LanguageModelV2Part. switch (type) { case 'image': { // When possible, try to detect the mimetype automatically diff --git a/packages/ai/core/prompt/prepare-tools-and-tool-choice.ts b/packages/ai/core/prompt/prepare-tools-and-tool-choice.ts index 92bb78752753..200c6175f3e4 100644 --- a/packages/ai/core/prompt/prepare-tools-and-tool-choice.ts +++ b/packages/ai/core/prompt/prepare-tools-and-tool-choice.ts @@ -1,7 +1,7 @@ import { - LanguageModelV1FunctionTool, - LanguageModelV1ProviderDefinedTool, - LanguageModelV1ToolChoice, + LanguageModelV2FunctionTool, + LanguageModelV2ProviderDefinedTool, + LanguageModelV2ToolChoice, } from '@ai-sdk/provider'; import { asSchema } from '@ai-sdk/ui-utils'; import { ToolSet } from '../generate-text'; @@ -18,9 +18,9 @@ export function prepareToolsAndToolChoice({ activeTools: Array | undefined; }): { tools: - | Array + | Array | undefined; - toolChoice: LanguageModelV1ToolChoice | undefined; + toolChoice: LanguageModelV2ToolChoice | undefined; } { if (!isNonEmptyObject(tools)) { return { diff --git a/packages/ai/core/registry/custom-provider.test.ts b/packages/ai/core/registry/custom-provider.test.ts index a41f813dba2a..c96c86d5cf31 100644 --- a/packages/ai/core/registry/custom-provider.test.ts +++ b/packages/ai/core/registry/custom-provider.test.ts @@ -2,10 +2,10 @@ import { NoSuchModelError } from '@ai-sdk/provider'; import { describe, expect, it, vi } from 'vitest'; import { MockEmbeddingModelV1 } from '../test/mock-embedding-model-v1'; import { MockImageModelV1 } from '../test/mock-image-model-v1'; -import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../test/mock-language-model-v1'; import { customProvider } from './custom-provider'; -const mockLanguageModel = new MockLanguageModelV1(); +const mockLanguageModel = new MockLanguageModelV2(); const mockEmbeddingModel = new MockEmbeddingModelV1(); const mockFallbackProvider = { languageModel: vi.fn(), diff --git a/packages/ai/core/registry/custom-provider.ts b/packages/ai/core/registry/custom-provider.ts index 9ace5d640314..ce4d74a31c8f 100644 --- a/packages/ai/core/registry/custom-provider.ts +++ b/packages/ai/core/registry/custom-provider.ts @@ -1,4 +1,4 @@ -import { NoSuchModelError, ProviderV1 } from '@ai-sdk/provider'; +import { NoSuchModelError, ProviderV2 } from '@ai-sdk/provider'; import { EmbeddingModel, ImageModel, LanguageModel, Provider } from '../types'; /** @@ -26,7 +26,7 @@ export function customProvider< languageModels?: LANGUAGE_MODELS; textEmbeddingModels?: EMBEDDING_MODELS; imageModels?: IMAGE_MODELS; - fallbackProvider?: ProviderV1; + fallbackProvider?: ProviderV2; }): Provider & { languageModel(modelId: ExtractModelId): LanguageModel; textEmbeddingModel( diff --git 
a/packages/ai/core/registry/provider-registry.test.ts b/packages/ai/core/registry/provider-registry.test.ts index 58ad82693c11..e8029e00d46c 100644 --- a/packages/ai/core/registry/provider-registry.test.ts +++ b/packages/ai/core/registry/provider-registry.test.ts @@ -1,13 +1,13 @@ import { NoSuchModelError } from '@ai-sdk/provider'; import { MockEmbeddingModelV1 } from '../test/mock-embedding-model-v1'; -import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../test/mock-language-model-v1'; import { NoSuchProviderError } from './no-such-provider-error'; import { createProviderRegistry } from './provider-registry'; import { MockImageModelV1 } from '../test/mock-image-model-v1'; describe('languageModel', () => { it('should return language model from provider', () => { - const model = new MockLanguageModelV1(); + const model = new MockLanguageModelV2(); const modelRegistry = createProviderRegistry({ provider: { @@ -18,6 +18,9 @@ describe('languageModel', () => { textEmbeddingModel: (id: string) => { return null as any; }, + imageModel: (id: string) => { + return null as any; + }, }, }); @@ -25,7 +28,7 @@ describe('languageModel', () => { }); it('should return language model with additional colon from provider', () => { - const model = new MockLanguageModelV1(); + const model = new MockLanguageModelV2(); const modelRegistry = createProviderRegistry({ provider: { @@ -36,6 +39,9 @@ describe('languageModel', () => { textEmbeddingModel: () => { return null as any; }, + imageModel: () => { + return null as any; + }, }, }); @@ -60,6 +66,9 @@ describe('languageModel', () => { textEmbeddingModel: () => { return null as any; }, + imageModel: () => { + return null as any; + }, }, }); @@ -78,7 +87,7 @@ describe('languageModel', () => { }); it('should support custom separator', () => { - const model = new MockLanguageModelV1(); + const model = new MockLanguageModelV2(); const modelRegistry = createProviderRegistry( { @@ -90,6 +99,9 @@ describe('languageModel', () => { textEmbeddingModel: () => { return null as any; }, + imageModel: () => { + return null as any; + }, }, }, { separator: '|' }, @@ -112,6 +124,9 @@ describe('textEmbeddingModel', () => { languageModel: () => { return null as any; }, + imageModel: () => { + return null as any; + }, }, }); @@ -136,6 +151,9 @@ describe('textEmbeddingModel', () => { languageModel: () => { return null as any; }, + imageModel: () => { + return null as any; + }, }, }); @@ -166,6 +184,9 @@ describe('textEmbeddingModel', () => { languageModel: () => { return null as any; }, + imageModel: () => { + return null as any; + }, }, }, { separator: '|' }, diff --git a/packages/ai/core/registry/provider-registry.ts b/packages/ai/core/registry/provider-registry.ts index 9a6bc74fe9bf..e1c4b377f692 100644 --- a/packages/ai/core/registry/provider-registry.ts +++ b/packages/ai/core/registry/provider-registry.ts @@ -1,4 +1,4 @@ -import { NoSuchModelError, ProviderV1 } from '@ai-sdk/provider'; +import { NoSuchModelError, ProviderV2 } from '@ai-sdk/provider'; import { EmbeddingModel, ImageModel, LanguageModel } from '../types'; import { NoSuchProviderError } from './no-such-provider-error'; @@ -9,7 +9,7 @@ type ExtractLiteralUnion = T extends string : never; export interface ProviderRegistryProvider< - PROVIDERS extends Record = Record, + PROVIDERS extends Record = Record, SEPARATOR extends string = ':', > { languageModel( @@ -44,7 +44,7 @@ export interface ProviderRegistryProvider< * Creates a registry for the given providers. 
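 *
 * @example
 * // usage sketch (the provider instances are assumed, not part of this
 * // change); model ids are namespaced as `providerId:modelId`:
 * const registry = createProviderRegistry({ anthropic, openai });
 * const model = registry.languageModel('openai:gpt-4o');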
*/ export function createProviderRegistry< - PROVIDERS extends Record, + PROVIDERS extends Record, SEPARATOR extends string = ':', >( providers: PROVIDERS, @@ -74,7 +74,7 @@ export function createProviderRegistry< export const experimental_createProviderRegistry = createProviderRegistry; class DefaultProviderRegistry< - PROVIDERS extends Record, + PROVIDERS extends Record, SEPARATOR extends string, > implements ProviderRegistryProvider { @@ -95,7 +95,7 @@ class DefaultProviderRegistry< this.providers[id] = provider; } - private getProvider(id: string): ProviderV1 { + private getProvider(id: string): ProviderV2 { const provider = this.providers[id as keyof PROVIDERS]; if (provider == null) { diff --git a/packages/ai/core/test/mock-language-model-v1.ts b/packages/ai/core/test/mock-language-model-v1.ts index 62258c413152..acbd42ad6d4c 100644 --- a/packages/ai/core/test/mock-language-model-v1.ts +++ b/packages/ai/core/test/mock-language-model-v1.ts @@ -1,18 +1,18 @@ -import { LanguageModelV1 } from '@ai-sdk/provider'; +import { LanguageModelV2 } from '@ai-sdk/provider'; import { notImplemented } from './not-implemented'; -export class MockLanguageModelV1 implements LanguageModelV1 { +export class MockLanguageModelV2 implements LanguageModelV2 { readonly specificationVersion = 'v1'; - readonly provider: LanguageModelV1['provider']; - readonly modelId: LanguageModelV1['modelId']; + readonly provider: LanguageModelV2['provider']; + readonly modelId: LanguageModelV2['modelId']; - supportsUrl: LanguageModelV1['supportsUrl']; - doGenerate: LanguageModelV1['doGenerate']; - doStream: LanguageModelV1['doStream']; + supportsUrl: LanguageModelV2['supportsUrl']; + doGenerate: LanguageModelV2['doGenerate']; + doStream: LanguageModelV2['doStream']; - readonly defaultObjectGenerationMode: LanguageModelV1['defaultObjectGenerationMode']; - readonly supportsStructuredOutputs: LanguageModelV1['supportsStructuredOutputs']; + readonly defaultObjectGenerationMode: LanguageModelV2['defaultObjectGenerationMode']; + readonly supportsStructuredOutputs: LanguageModelV2['supportsStructuredOutputs']; constructor({ provider = 'mock-provider', modelId = 'mock-model-id', @@ -22,13 +22,13 @@ export class MockLanguageModelV1 implements LanguageModelV1 { defaultObjectGenerationMode = undefined, supportsStructuredOutputs = undefined, }: { - provider?: LanguageModelV1['provider']; - modelId?: LanguageModelV1['modelId']; - supportsUrl?: LanguageModelV1['supportsUrl']; - doGenerate?: LanguageModelV1['doGenerate']; - doStream?: LanguageModelV1['doStream']; - defaultObjectGenerationMode?: LanguageModelV1['defaultObjectGenerationMode']; - supportsStructuredOutputs?: LanguageModelV1['supportsStructuredOutputs']; + provider?: LanguageModelV2['provider']; + modelId?: LanguageModelV2['modelId']; + supportsUrl?: LanguageModelV2['supportsUrl']; + doGenerate?: LanguageModelV2['doGenerate']; + doStream?: LanguageModelV2['doStream']; + defaultObjectGenerationMode?: LanguageModelV2['defaultObjectGenerationMode']; + supportsStructuredOutputs?: LanguageModelV2['supportsStructuredOutputs']; } = {}) { this.provider = provider; this.modelId = modelId; diff --git a/packages/ai/core/types/index.ts b/packages/ai/core/types/index.ts index 0aea0780458a..78c7d580d7b3 100644 --- a/packages/ai/core/types/index.ts +++ b/packages/ai/core/types/index.ts @@ -9,10 +9,10 @@ export type { CoreToolChoice, FinishReason, LanguageModel, - LanguageModelV1, - LanguageModelV1CallOptions, - LanguageModelV1Prompt, - LanguageModelV1StreamPart, + LanguageModelV2, + 
LanguageModelV2CallOptions, + LanguageModelV2Prompt, + LanguageModelV2StreamPart, LogProbs, ToolChoice, } from './language-model'; diff --git a/packages/ai/core/types/language-model.ts b/packages/ai/core/types/language-model.ts index d7c73dff46b9..808005b7d94d 100644 --- a/packages/ai/core/types/language-model.ts +++ b/packages/ai/core/types/language-model.ts @@ -1,36 +1,36 @@ import { - LanguageModelV1, - LanguageModelV1CallWarning, - LanguageModelV1FinishReason, - LanguageModelV1LogProbs, - LanguageModelV1Source, + LanguageModelV2, + LanguageModelV2CallWarning, + LanguageModelV2FinishReason, + LanguageModelV2LogProbs, + LanguageModelV2Source, } from '@ai-sdk/provider'; -// Re-export LanguageModelV1 types for the middleware: +// Re-export LanguageModelV2 types for the middleware: export type { - LanguageModelV1, - LanguageModelV1CallOptions, - LanguageModelV1CallWarning, - LanguageModelV1FilePart, - LanguageModelV1FinishReason, - LanguageModelV1FunctionToolCall, - LanguageModelV1ImagePart, - LanguageModelV1Message, - LanguageModelV1ObjectGenerationMode, - LanguageModelV1Prompt, - LanguageModelV1ProviderDefinedTool, - LanguageModelV1ProviderMetadata, - LanguageModelV1StreamPart, - LanguageModelV1TextPart, - LanguageModelV1ToolCallPart, - LanguageModelV1ToolChoice, - LanguageModelV1ToolResultPart, + LanguageModelV2, + LanguageModelV2CallOptions, + LanguageModelV2CallWarning, + LanguageModelV2FilePart, + LanguageModelV2FinishReason, + LanguageModelV2FunctionToolCall, + LanguageModelV2ImagePart, + LanguageModelV2Message, + LanguageModelV2ObjectGenerationMode, + LanguageModelV2Prompt, + LanguageModelV2ProviderDefinedTool, + LanguageModelV2ProviderMetadata, + LanguageModelV2StreamPart, + LanguageModelV2TextPart, + LanguageModelV2ToolCallPart, + LanguageModelV2ToolChoice, + LanguageModelV2ToolResultPart, } from '@ai-sdk/provider'; /** Language model that is used by the AI SDK Core functions. */ -export type LanguageModel = LanguageModelV1; +export type LanguageModel = LanguageModelV2; /** Reason why a language model finished generating a response. @@ -43,25 +43,25 @@ Can be one of the following: - `error`: model stopped because of an error - `other`: model stopped for other reasons */ -export type FinishReason = LanguageModelV1FinishReason; +export type FinishReason = LanguageModelV2FinishReason; /** Log probabilities for each token and its top log probabilities. @deprecated Will become a provider extension in the future. */ -export type LogProbs = LanguageModelV1LogProbs; +export type LogProbs = LanguageModelV2LogProbs; /** Warning from the model provider for this call. The call will proceed, but e.g. some settings might not be supported, which can lead to suboptimal results. */ -export type CallWarning = LanguageModelV1CallWarning; +export type CallWarning = LanguageModelV2CallWarning; /** A source that has been used as input to generate the response. */ -export type Source = LanguageModelV1Source; +export type Source = LanguageModelV2Source; /** Tool choice for the generation. 
It supports the following settings: diff --git a/packages/ai/core/types/provider-metadata.ts b/packages/ai/core/types/provider-metadata.ts index cd2193b8fc06..2489775ace67 100644 --- a/packages/ai/core/types/provider-metadata.ts +++ b/packages/ai/core/types/provider-metadata.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1ProviderMetadata } from '@ai-sdk/provider'; +import { LanguageModelV2ProviderMetadata } from '@ai-sdk/provider'; import { z } from 'zod'; import { jsonValueSchema } from './json-value'; @@ -8,7 +8,7 @@ Additional provider-specific metadata that is returned from the provider. This is needed to enable provider-specific functionality that can be fully encapsulated in the provider. */ -export type ProviderMetadata = LanguageModelV1ProviderMetadata; +export type ProviderMetadata = LanguageModelV2ProviderMetadata; /** Additional provider-specific options. @@ -17,7 +17,7 @@ They are passed through to the provider from the AI SDK and enable provider-specific functionality that can be fully encapsulated in the provider. */ // TODO change to LanguageModelV2ProviderOptions in language model v2 -export type ProviderOptions = LanguageModelV1ProviderMetadata; +export type ProviderOptions = LanguageModelV2ProviderMetadata; export const providerMetadataSchema: z.ZodType = z.record( z.string(), diff --git a/packages/ai/rsc/stream-ui/stream-ui.tsx b/packages/ai/rsc/stream-ui/stream-ui.tsx index 5404e6b36069..ff25c9f11669 100644 --- a/packages/ai/rsc/stream-ui/stream-ui.tsx +++ b/packages/ai/rsc/stream-ui/stream-ui.tsx @@ -1,4 +1,4 @@ -import { LanguageModelV1 } from '@ai-sdk/provider'; +import { LanguageModelV2 } from '@ai-sdk/provider'; import { safeParseJSON } from '@ai-sdk/provider-utils'; import { ReactNode } from 'react'; import { z } from 'zod'; @@ -74,7 +74,7 @@ type RenderText = Renderer< type RenderResult = { value: ReactNode; -} & Awaited>; +} & Awaited>; const defaultTextRenderer: RenderText = ({ content }: { content: string }) => content; @@ -105,7 +105,7 @@ export async function streamUI< /** * The language model to use. */ - model: LanguageModelV1; + model: LanguageModelV2; /** * The tools that the model can call. The model needs to support calling tools. 
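// Usage sketch for the updated streamUI signature (the model instance and the
// JSX renderer below are illustrative assumptions, not part of this change):
// any LanguageModelV2 implementation can now be passed as `model`.
//
//   const result = await streamUI({
//     model: myV2Model,
//     prompt: 'What is the weather in San Francisco?',
//     text: ({ content }) => <div>{content}</div>,
//   });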
diff --git a/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx b/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx index 02bd1107ae6d..8f98466ee9e5 100644 --- a/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx +++ b/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx @@ -1,6 +1,6 @@ import { delay } from '@ai-sdk/provider-utils'; import { convertArrayToReadableStream } from '@ai-sdk/provider-utils/test'; -import { MockLanguageModelV1 } from '../../core/test/mock-language-model-v1'; +import { MockLanguageModelV2 } from '../../core/test/mock-language-model-v1'; import { streamUI } from './stream-ui'; import { z } from 'zod'; @@ -50,7 +50,7 @@ async function simulateFlightServerRender(node: React.ReactNode) { return traverse(node); } -const mockTextModel = new MockLanguageModelV1({ +const mockTextModel = new MockLanguageModelV2({ doStream: async () => { return { stream: convertArrayToReadableStream([ @@ -72,7 +72,7 @@ const mockTextModel = new MockLanguageModelV1({ }, }); -const mockToolModel = new MockLanguageModelV1({ +const mockToolModel = new MockLanguageModelV2({ doStream: async () => { return { stream: convertArrayToReadableStream([ @@ -234,7 +234,7 @@ describe('rsc - streamUI() onFinish callback', () => { describe('options.headers', () => { it('should pass headers to model', async () => { const result = await streamUI({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ headers }) => { expect(headers).toStrictEqual({ 'custom-request-header': 'request-header-value', @@ -268,7 +268,7 @@ describe('options.headers', () => { describe('options.providerMetadata', () => { it('should pass provider metadata to model', async () => { const result = await streamUI({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async ({ providerMetadata }) => { expect(providerMetadata).toStrictEqual({ aProvider: { someKey: 'someValue' }, @@ -305,7 +305,7 @@ describe('model.supportsUrl binding', () => { it('should support models that use "this" context in supportsUrl', async () => { let supportsUrlCalled = false; - class MockLanguageModelWithImageSupport extends MockLanguageModelV1 { + class MockLanguageModelWithImageSupport extends MockLanguageModelV2 { readonly supportsImageUrls = false; constructor() { diff --git a/packages/ai/test/index.ts b/packages/ai/test/index.ts index 9cd5f65fd064..9c576ad9f6e8 100644 --- a/packages/ai/test/index.ts +++ b/packages/ai/test/index.ts @@ -3,7 +3,7 @@ export { mockId, } from '@ai-sdk/provider-utils/test'; export { MockEmbeddingModelV1 } from '../core/test/mock-embedding-model-v1'; -export { MockLanguageModelV1 } from '../core/test/mock-language-model-v1'; +export { MockLanguageModelV2 } from '../core/test/mock-language-model-v1'; export { mockValues } from '../core/test/mock-values'; import { simulateReadableStream as originalSimulateReadableStream } from '../core/util/simulate-readable-stream'; diff --git a/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts b/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts index f787c158eef0..8b8b45bb9bd9 100644 --- a/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts +++ b/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1Prompt } from '@ai-sdk/provider'; +import { LanguageModelV2Prompt } from '@ai-sdk/provider'; import { createTestServer, convertReadableStreamToArray, @@ -11,7 +11,7 @@ import { BedrockRedactedReasoningContentBlock, } from './bedrock-api-types'; -const TEST_PROMPT: 
LanguageModelV1Prompt = [ +const TEST_PROMPT: LanguageModelV2Prompt = [ { role: 'system', content: 'System Prompt' }, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, ]; diff --git a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts index adb81f8bf027..3f1a6b18654e 100644 --- a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts +++ b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts @@ -1,11 +1,11 @@ import { InvalidArgumentError, JSONObject, - LanguageModelV1, - LanguageModelV1CallWarning, - LanguageModelV1FinishReason, - LanguageModelV1ProviderMetadata, - LanguageModelV1StreamPart, + LanguageModelV2, + LanguageModelV2CallWarning, + LanguageModelV2FinishReason, + LanguageModelV2ProviderMetadata, + LanguageModelV2StreamPart, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { @@ -41,7 +41,7 @@ type BedrockChatConfig = { generateId: () => string; }; -export class BedrockChatLanguageModel implements LanguageModelV1 { +export class BedrockChatLanguageModel implements LanguageModelV2 { readonly specificationVersion = 'v1'; readonly provider = 'amazon-bedrock'; readonly defaultObjectGenerationMode = 'tool'; @@ -66,13 +66,13 @@ export class BedrockChatLanguageModel implements LanguageModelV1 { responseFormat, seed, providerMetadata, - }: Parameters[0]): { + }: Parameters[0]): { command: BedrockConverseInput; - warnings: LanguageModelV1CallWarning[]; + warnings: LanguageModelV2CallWarning[]; } { const type = mode.type; - const warnings: LanguageModelV1CallWarning[] = []; + const warnings: LanguageModelV2CallWarning[] = []; if (frequencyPenalty != null) { warnings.push({ @@ -235,8 +235,8 @@ export class BedrockChatLanguageModel implements LanguageModelV1 { } async doGenerate( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { command: args, warnings } = this.getArgs(options); const url = `${this.getUrl(this.modelId)}/converse`; @@ -338,8 +338,8 @@ export class BedrockChatLanguageModel implements LanguageModelV1 { } async doStream( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { command: args, warnings } = this.getArgs(options); const url = `${this.getUrl(this.modelId)}/converse-stream`; @@ -362,12 +362,12 @@ export class BedrockChatLanguageModel implements LanguageModelV1 { const { messages: rawPrompt, ...rawSettings } = args; - let finishReason: LanguageModelV1FinishReason = 'unknown'; + let finishReason: LanguageModelV2FinishReason = 'unknown'; let usage = { promptTokens: Number.NaN, completionTokens: Number.NaN, }; - let providerMetadata: LanguageModelV1ProviderMetadata | undefined = + let providerMetadata: LanguageModelV2ProviderMetadata | undefined = undefined; const toolCallContentBlocks: Record< @@ -383,7 +383,7 @@ export class BedrockChatLanguageModel implements LanguageModelV1 { stream: response.pipeThrough( new TransformStream< ParseResult>, - LanguageModelV1StreamPart + LanguageModelV2StreamPart >({ transform(chunk, controller) { function enqueueError(bedrockError: Record) { diff --git a/packages/amazon-bedrock/src/bedrock-prepare-tools.ts b/packages/amazon-bedrock/src/bedrock-prepare-tools.ts index 52c85a9668fb..c90b7a3dbe0b 100644 --- a/packages/amazon-bedrock/src/bedrock-prepare-tools.ts +++ b/packages/amazon-bedrock/src/bedrock-prepare-tools.ts @@ -1,18 +1,18 @@ import { JSONObject, - LanguageModelV1, - LanguageModelV1CallWarning, + LanguageModelV2, + LanguageModelV2CallWarning, 
UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { BedrockTool, BedrockToolConfiguration } from './bedrock-api-types'; export function prepareTools( - mode: Parameters[0]['mode'] & { + mode: Parameters[0]['mode'] & { type: 'regular'; }, ): { toolConfig: BedrockToolConfiguration; // note: do not rename, name required by Bedrock - toolWarnings: LanguageModelV1CallWarning[]; + toolWarnings: LanguageModelV2CallWarning[]; } { // when the tools array is empty, change it to undefined to prevent errors: const tools = mode.tools?.length ? mode.tools : undefined; @@ -24,7 +24,7 @@ export function prepareTools( }; } - const toolWarnings: LanguageModelV1CallWarning[] = []; + const toolWarnings: LanguageModelV2CallWarning[] = []; const bedrockTools: BedrockTool[] = []; for (const tool of tools) { diff --git a/packages/amazon-bedrock/src/bedrock-provider.ts b/packages/amazon-bedrock/src/bedrock-provider.ts index d5dfb12bb572..ebeeda40c23b 100644 --- a/packages/amazon-bedrock/src/bedrock-provider.ts +++ b/packages/amazon-bedrock/src/bedrock-provider.ts @@ -1,8 +1,8 @@ import { EmbeddingModelV1, ImageModelV1, - LanguageModelV1, - ProviderV1, + LanguageModelV2, + ProviderV2, } from '@ai-sdk/provider'; import { FetchFunction, @@ -83,16 +83,16 @@ and `sessionToken` settings. generateId?: () => string; } -export interface AmazonBedrockProvider extends ProviderV1 { +export interface AmazonBedrockProvider extends ProviderV2 { ( modelId: BedrockChatModelId, settings?: BedrockChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; languageModel( modelId: BedrockChatModelId, settings?: BedrockChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; embedding( modelId: BedrockEmbeddingModelId, diff --git a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts index eab5748f57a5..8e840812150e 100644 --- a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts +++ b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts @@ -10,9 +10,9 @@ import { } from './bedrock-api-types'; import { JSONObject, - LanguageModelV1Message, - LanguageModelV1Prompt, - LanguageModelV1ProviderMetadata, + LanguageModelV2Message, + LanguageModelV2Prompt, + LanguageModelV2ProviderMetadata, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { @@ -23,12 +23,12 @@ import { const generateFileId = createIdGenerator({ prefix: 'file', size: 16 }); function getCachePoint( - providerMetadata: LanguageModelV1ProviderMetadata | undefined, + providerMetadata: LanguageModelV2ProviderMetadata | undefined, ): BedrockCachePoint | undefined { return providerMetadata?.bedrock?.cachePoint as BedrockCachePoint | undefined; } -export function convertToBedrockChatMessages(prompt: LanguageModelV1Prompt): { +export function convertToBedrockChatMessages(prompt: LanguageModelV2Prompt): { system: BedrockSystemMessages; messages: BedrockMessages; } { @@ -297,19 +297,19 @@ function trimIfLast( type SystemBlock = { type: 'system'; - messages: Array; + messages: Array; }; type AssistantBlock = { type: 'assistant'; - messages: Array; + messages: Array; }; type UserBlock = { type: 'user'; - messages: Array; + messages: Array; }; function groupIntoBlocks( - prompt: LanguageModelV1Prompt, + prompt: LanguageModelV2Prompt, ): Array { const blocks: Array = []; let currentBlock: SystemBlock | AssistantBlock | UserBlock | undefined = diff --git a/packages/amazon-bedrock/src/map-bedrock-finish-reason.ts 
b/packages/amazon-bedrock/src/map-bedrock-finish-reason.ts index b10310efc05b..458b6d34a670 100644 --- a/packages/amazon-bedrock/src/map-bedrock-finish-reason.ts +++ b/packages/amazon-bedrock/src/map-bedrock-finish-reason.ts @@ -1,9 +1,9 @@ -import { LanguageModelV1FinishReason } from '@ai-sdk/provider'; +import { LanguageModelV2FinishReason } from '@ai-sdk/provider'; import { BedrockStopReason } from './bedrock-api-types'; export function mapBedrockFinishReason( finishReason?: BedrockStopReason, -): LanguageModelV1FinishReason { +): LanguageModelV2FinishReason { switch (finishReason) { case 'stop_sequence': case 'end_turn': diff --git a/packages/anthropic/src/anthropic-messages-language-model.test.ts b/packages/anthropic/src/anthropic-messages-language-model.test.ts index 6a1970946358..504649a5468d 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.test.ts +++ b/packages/anthropic/src/anthropic-messages-language-model.test.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1Prompt } from '@ai-sdk/provider'; +import { LanguageModelV2Prompt } from '@ai-sdk/provider'; import { convertReadableStreamToArray, createTestServer, @@ -6,7 +6,7 @@ import { import { createAnthropic } from './anthropic-provider'; import { AnthropicProviderOptions } from './anthropic-messages-language-model'; -const TEST_PROMPT: LanguageModelV1Prompt = [ +const TEST_PROMPT: LanguageModelV2Prompt = [ { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, ]; diff --git a/packages/anthropic/src/anthropic-messages-language-model.ts b/packages/anthropic/src/anthropic-messages-language-model.ts index 167a5a9b46af..87980fb3ea98 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.ts +++ b/packages/anthropic/src/anthropic-messages-language-model.ts @@ -1,10 +1,10 @@ import { - LanguageModelV1, - LanguageModelV1CallWarning, - LanguageModelV1FinishReason, - LanguageModelV1FunctionToolCall, - LanguageModelV1ProviderMetadata, - LanguageModelV1StreamPart, + LanguageModelV2, + LanguageModelV2CallWarning, + LanguageModelV2FinishReason, + LanguageModelV2FunctionToolCall, + LanguageModelV2ProviderMetadata, + LanguageModelV2StreamPart, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { @@ -38,7 +38,7 @@ type AnthropicMessagesConfig = { transformRequestBody?: (args: Record) => Record; }; -export class AnthropicMessagesLanguageModel implements LanguageModelV1 { +export class AnthropicMessagesLanguageModel implements LanguageModelV2 { readonly specificationVersion = 'v1'; readonly defaultObjectGenerationMode = 'tool'; @@ -78,10 +78,10 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { responseFormat, seed, providerMetadata: providerOptions, - }: Parameters[0]) { + }: Parameters[0]) { const type = mode.type; - const warnings: LanguageModelV1CallWarning[] = []; + const warnings: LanguageModelV2CallWarning[] = []; if (frequencyPenalty != null) { warnings.push({ @@ -256,8 +256,8 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { } async doGenerate( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { args, warnings, betas } = await this.getArgs(options); const { @@ -287,7 +287,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { } // extract tool calls - let toolCalls: LanguageModelV1FunctionToolCall[] | undefined = undefined; + let toolCalls: LanguageModelV2FunctionToolCall[] | undefined = undefined; if (response.content.some(content => content.type === 'tool_use')) { toolCalls 
= []; for (const content of response.content) { @@ -351,8 +351,8 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { } async doStream( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { args, warnings, betas } = await this.getArgs(options); const body = { ...args, stream: true }; @@ -370,7 +370,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { const { messages: rawPrompt, ...rawSettings } = args; - let finishReason: LanguageModelV1FinishReason = 'unknown'; + let finishReason: LanguageModelV2FinishReason = 'unknown'; const usage: { promptTokens: number; completionTokens: number } = { promptTokens: Number.NaN, completionTokens: Number.NaN, @@ -385,7 +385,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { } > = {}; - let providerMetadata: LanguageModelV1ProviderMetadata | undefined = + let providerMetadata: LanguageModelV2ProviderMetadata | undefined = undefined; let blockType: @@ -399,7 +399,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { stream: response.pipeThrough( new TransformStream< ParseResult>, - LanguageModelV1StreamPart + LanguageModelV2StreamPart >({ transform(chunk, controller) { if (!chunk.success) { diff --git a/packages/anthropic/src/anthropic-prepare-tools.ts b/packages/anthropic/src/anthropic-prepare-tools.ts index 645236722200..be0a11d3539d 100644 --- a/packages/anthropic/src/anthropic-prepare-tools.ts +++ b/packages/anthropic/src/anthropic-prepare-tools.ts @@ -1,24 +1,24 @@ import { - LanguageModelV1, - LanguageModelV1CallWarning, + LanguageModelV2, + LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { AnthropicTool, AnthropicToolChoice } from './anthropic-api-types'; export function prepareTools( - mode: Parameters[0]['mode'] & { + mode: Parameters[0]['mode'] & { type: 'regular'; }, ): { tools: Array | undefined; tool_choice: AnthropicToolChoice | undefined; - toolWarnings: LanguageModelV1CallWarning[]; + toolWarnings: LanguageModelV2CallWarning[]; betas: Set; } { // when the tools array is empty, change it to undefined to prevent errors: const tools = mode.tools?.length ? mode.tools : undefined; - const toolWarnings: LanguageModelV1CallWarning[] = []; + const toolWarnings: LanguageModelV2CallWarning[] = []; const betas = new Set(); if (tools == null) { diff --git a/packages/anthropic/src/anthropic-provider.ts b/packages/anthropic/src/anthropic-provider.ts index 7a598e834f22..a844fd9d3ecb 100644 --- a/packages/anthropic/src/anthropic-provider.ts +++ b/packages/anthropic/src/anthropic-provider.ts @@ -1,7 +1,7 @@ import { - LanguageModelV1, + LanguageModelV2, NoSuchModelError, - ProviderV1, + ProviderV2, } from '@ai-sdk/provider'; import { FetchFunction, @@ -15,14 +15,14 @@ import { } from './anthropic-messages-settings'; import { anthropicTools } from './anthropic-tools'; -export interface AnthropicProvider extends ProviderV1 { +export interface AnthropicProvider extends ProviderV2 { /** Creates a model for text generation. */ ( modelId: AnthropicMessagesModelId, settings?: AnthropicMessagesSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a model for text generation. @@ -30,7 +30,7 @@ Creates a model for text generation. languageModel( modelId: AnthropicMessagesModelId, settings?: AnthropicMessagesSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** @deprecated Use `.languageModel()` instead. @@ -38,7 +38,7 @@ Creates a model for text generation. 
chat( modelId: AnthropicMessagesModelId, settings?: AnthropicMessagesSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** @deprecated Use `.languageModel()` instead. @@ -46,7 +46,7 @@ Creates a model for text generation. messages( modelId: AnthropicMessagesModelId, settings?: AnthropicMessagesSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Anthropic-specific computer use tool. @@ -128,9 +128,13 @@ export function createAnthropic( provider.languageModel = createChatModel; provider.chat = createChatModel; provider.messages = createChatModel; + provider.textEmbeddingModel = (modelId: string) => { throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' }); }; + provider.imageModel = (modelId: string) => { + throw new NoSuchModelError({ modelId, modelType: 'imageModel' }); + }; provider.tools = anthropicTools; diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts index a0faf40e49e7..75b154ac2c58 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1CallWarning } from '@ai-sdk/provider'; +import { LanguageModelV2CallWarning } from '@ai-sdk/provider'; import { convertToAnthropicMessagesPrompt } from './convert-to-anthropic-messages-prompt'; describe('system messages', () => { @@ -551,7 +551,7 @@ describe('assistant messages', () => { }); it('should convert assistant message reasoning parts with signature into thinking parts when sendReasoning is true', async () => { - const warnings: LanguageModelV1CallWarning[] = []; + const warnings: LanguageModelV2CallWarning[] = []; const result = convertToAnthropicMessagesPrompt({ prompt: [ { @@ -599,7 +599,7 @@ describe('assistant messages', () => { }); it('should convert reasoning parts without signature into thinking parts when sendReasoning is true', async () => { - const warnings: LanguageModelV1CallWarning[] = []; + const warnings: LanguageModelV2CallWarning[] = []; const result = convertToAnthropicMessagesPrompt({ prompt: [ { @@ -645,7 +645,7 @@ describe('assistant messages', () => { }); it('should omit assistant message reasoning parts with signature when sendReasoning is false', async () => { - const warnings: LanguageModelV1CallWarning[] = []; + const warnings: LanguageModelV2CallWarning[] = []; const result = convertToAnthropicMessagesPrompt({ prompt: [ { @@ -692,7 +692,7 @@ describe('assistant messages', () => { }); it('should omit reasoning parts without signature when sendReasoning is false', async () => { - const warnings: LanguageModelV1CallWarning[] = []; + const warnings: LanguageModelV2CallWarning[] = []; const result = convertToAnthropicMessagesPrompt({ prompt: [ { diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts index 731ad6478f1b..72fa374c9dc8 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts @@ -1,8 +1,8 @@ import { - LanguageModelV1CallWarning, - LanguageModelV1Message, - LanguageModelV1Prompt, - LanguageModelV1ProviderMetadata, + LanguageModelV2CallWarning, + LanguageModelV2Message, + LanguageModelV2Prompt, + LanguageModelV2ProviderMetadata, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; @@ -18,9 +18,9 @@ 
export function convertToAnthropicMessagesPrompt({ sendReasoning, warnings, }: { - prompt: LanguageModelV1Prompt; + prompt: LanguageModelV2Prompt; sendReasoning: boolean; - warnings: LanguageModelV1CallWarning[]; + warnings: LanguageModelV2CallWarning[]; }): { prompt: AnthropicMessagesPrompt; betas: Set; @@ -32,7 +32,7 @@ export function convertToAnthropicMessagesPrompt({ const messages: AnthropicMessagesPrompt['messages'] = []; function getCacheControl( - providerMetadata: LanguageModelV1ProviderMetadata | undefined, + providerMetadata: LanguageModelV2ProviderMetadata | undefined, ): AnthropicCacheControl | undefined { const anthropic = providerMetadata?.anthropic; @@ -315,19 +315,19 @@ export function convertToAnthropicMessagesPrompt({ type SystemBlock = { type: 'system'; - messages: Array; + messages: Array; }; type AssistantBlock = { type: 'assistant'; - messages: Array; + messages: Array; }; type UserBlock = { type: 'user'; - messages: Array; + messages: Array; }; function groupIntoBlocks( - prompt: LanguageModelV1Prompt, + prompt: LanguageModelV2Prompt, ): Array { const blocks: Array = []; let currentBlock: SystemBlock | AssistantBlock | UserBlock | undefined = diff --git a/packages/anthropic/src/map-anthropic-stop-reason.ts b/packages/anthropic/src/map-anthropic-stop-reason.ts index 04eea2ad6bdb..e98bd66dec7f 100644 --- a/packages/anthropic/src/map-anthropic-stop-reason.ts +++ b/packages/anthropic/src/map-anthropic-stop-reason.ts @@ -1,8 +1,8 @@ -import { LanguageModelV1FinishReason } from '@ai-sdk/provider'; +import { LanguageModelV2FinishReason } from '@ai-sdk/provider'; export function mapAnthropicStopReason( finishReason: string | null | undefined, -): LanguageModelV1FinishReason { +): LanguageModelV2FinishReason { switch (finishReason) { case 'end_turn': case 'stop_sequence': diff --git a/packages/azure/src/azure-openai-provider.test.ts b/packages/azure/src/azure-openai-provider.test.ts index 4958b07d5981..22224020956e 100644 --- a/packages/azure/src/azure-openai-provider.test.ts +++ b/packages/azure/src/azure-openai-provider.test.ts @@ -1,11 +1,11 @@ import { EmbeddingModelV1Embedding, - LanguageModelV1Prompt, + LanguageModelV2Prompt, } from '@ai-sdk/provider'; import { createTestServer } from '@ai-sdk/provider-utils/test'; import { createAzure } from './azure-openai-provider'; -const TEST_PROMPT: LanguageModelV1Prompt = [ +const TEST_PROMPT: LanguageModelV2Prompt = [ { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, ]; diff --git a/packages/azure/src/azure-openai-provider.ts b/packages/azure/src/azure-openai-provider.ts index 8d51750f0727..d5b082e910ac 100644 --- a/packages/azure/src/azure-openai-provider.ts +++ b/packages/azure/src/azure-openai-provider.ts @@ -10,14 +10,14 @@ import { } from '@ai-sdk/openai/internal'; import { EmbeddingModelV1, - LanguageModelV1, - ProviderV1, + LanguageModelV2, + ProviderV2, ImageModelV1, } from '@ai-sdk/provider'; import { FetchFunction, loadApiKey, loadSetting } from '@ai-sdk/provider-utils'; -export interface AzureOpenAIProvider extends ProviderV1 { - (deploymentId: string, settings?: OpenAIChatSettings): LanguageModelV1; +export interface AzureOpenAIProvider extends ProviderV2 { + (deploymentId: string, settings?: OpenAIChatSettings): LanguageModelV2; /** Creates an Azure OpenAI chat model for text generation. @@ -25,12 +25,12 @@ Creates an Azure OpenAI chat model for text generation. 
languageModel( deploymentId: string, settings?: OpenAIChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates an Azure OpenAI chat model for text generation. */ - chat(deploymentId: string, settings?: OpenAIChatSettings): LanguageModelV1; + chat(deploymentId: string, settings?: OpenAIChatSettings): LanguageModelV2; /** Creates an Azure OpenAI completion model for text generation. @@ -38,7 +38,7 @@ Creates an Azure OpenAI completion model for text generation. completion( deploymentId: string, settings?: OpenAICompletionSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** @deprecated Use `textEmbeddingModel` instead. diff --git a/packages/cerebras/src/cerebras-provider.ts b/packages/cerebras/src/cerebras-provider.ts index fd31bbc13f1e..6a5ecd46cafc 100644 --- a/packages/cerebras/src/cerebras-provider.ts +++ b/packages/cerebras/src/cerebras-provider.ts @@ -1,8 +1,8 @@ import { OpenAICompatibleChatLanguageModel } from '@ai-sdk/openai-compatible'; import { - LanguageModelV1, + LanguageModelV2, NoSuchModelError, - ProviderV1, + ProviderV2, } from '@ai-sdk/provider'; import { FetchFunction, @@ -51,14 +51,14 @@ or to provide a custom fetch implementation for e.g. testing. fetch?: FetchFunction; } -export interface CerebrasProvider extends ProviderV1 { +export interface CerebrasProvider extends ProviderV2 { /** Creates a Cerebras model for text generation. */ ( modelId: CerebrasChatModelId, settings?: CerebrasChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a Cerebras model for text generation. @@ -66,7 +66,7 @@ Creates a Cerebras model for text generation. languageModel( modelId: CerebrasChatModelId, settings?: CerebrasChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a Cerebras chat model for text generation. @@ -74,7 +74,7 @@ Creates a Cerebras chat model for text generation. 
chat( modelId: CerebrasChatModelId, settings?: CerebrasChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; } export function createCerebras( @@ -113,9 +113,13 @@ export function createCerebras( provider.languageModel = createLanguageModel; provider.chat = createLanguageModel; + provider.textEmbeddingModel = (modelId: string) => { throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' }); }; + provider.imageModel = (modelId: string) => { + throw new NoSuchModelError({ modelId, modelType: 'imageModel' }); + }; return provider; } diff --git a/packages/cohere/src/cohere-chat-language-model.test.ts b/packages/cohere/src/cohere-chat-language-model.test.ts index fa71029e50bb..d8b8ce75fa2d 100644 --- a/packages/cohere/src/cohere-chat-language-model.test.ts +++ b/packages/cohere/src/cohere-chat-language-model.test.ts @@ -1,11 +1,11 @@ -import { LanguageModelV1Prompt } from '@ai-sdk/provider'; +import { LanguageModelV2Prompt } from '@ai-sdk/provider'; import { convertReadableStreamToArray, createTestServer, } from '@ai-sdk/provider-utils/test'; import { createCohere } from './cohere-provider'; -const TEST_PROMPT: LanguageModelV1Prompt = [ +const TEST_PROMPT: LanguageModelV2Prompt = [ { role: 'system', content: 'you are a friendly bot!', diff --git a/packages/cohere/src/cohere-chat-language-model.ts b/packages/cohere/src/cohere-chat-language-model.ts index cfc461b10544..4f9b024c5bf9 100644 --- a/packages/cohere/src/cohere-chat-language-model.ts +++ b/packages/cohere/src/cohere-chat-language-model.ts @@ -1,7 +1,7 @@ import { - LanguageModelV1, - LanguageModelV1FinishReason, - LanguageModelV1StreamPart, + LanguageModelV2, + LanguageModelV2FinishReason, + LanguageModelV2StreamPart, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { @@ -29,7 +29,7 @@ type CohereChatConfig = { fetch?: FetchFunction; }; -export class CohereChatLanguageModel implements LanguageModelV1 { +export class CohereChatLanguageModel implements LanguageModelV2 { readonly specificationVersion = 'v1'; readonly defaultObjectGenerationMode = 'json'; @@ -64,7 +64,7 @@ export class CohereChatLanguageModel implements LanguageModelV1 { stopSequences, responseFormat, seed, - }: Parameters[0]) { + }: Parameters[0]) { const type = mode.type; const chatPrompt = convertToCohereChatPrompt(prompt); @@ -150,8 +150,8 @@ export class CohereChatLanguageModel implements LanguageModelV1 { } async doGenerate( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { args, warnings } = this.getArgs(options); const { @@ -210,8 +210,8 @@ export class CohereChatLanguageModel implements LanguageModelV1 { } async doStream( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { args, warnings } = this.getArgs(options); const { responseHeaders, value: response } = await postJsonToApi({ @@ -228,7 +228,7 @@ export class CohereChatLanguageModel implements LanguageModelV1 { const { messages, ...rawSettings } = args; - let finishReason: LanguageModelV1FinishReason = 'unknown'; + let finishReason: LanguageModelV2FinishReason = 'unknown'; let usage: { promptTokens: number; completionTokens: number } = { promptTokens: Number.NaN, completionTokens: Number.NaN, @@ -248,7 +248,7 @@ export class CohereChatLanguageModel implements LanguageModelV1 { stream: response.pipeThrough( new TransformStream< ParseResult>, - LanguageModelV1StreamPart + LanguageModelV2StreamPart >({ transform(chunk, controller) { // handle failed chunk parsing / validation: diff --git 
a/packages/cohere/src/cohere-prepare-tools.ts b/packages/cohere/src/cohere-prepare-tools.ts index 3a23ffe1ce77..e27a53c5f75f 100644 --- a/packages/cohere/src/cohere-prepare-tools.ts +++ b/packages/cohere/src/cohere-prepare-tools.ts @@ -1,11 +1,11 @@ import { - LanguageModelV1, - LanguageModelV1CallWarning, + LanguageModelV2, + LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; export function prepareTools( - mode: Parameters[0]['mode'] & { + mode: Parameters[0]['mode'] & { type: 'regular'; }, ): { @@ -20,10 +20,10 @@ export function prepareTools( }> | undefined; toolChoice: 'NONE' | 'REQUIRED' | undefined; - toolWarnings: LanguageModelV1CallWarning[]; + toolWarnings: LanguageModelV2CallWarning[]; } { const tools = mode.tools?.length ? mode.tools : undefined; - const toolWarnings: LanguageModelV1CallWarning[] = []; + const toolWarnings: LanguageModelV2CallWarning[] = []; if (tools == null) { return { tools: undefined, toolChoice: undefined, toolWarnings }; diff --git a/packages/cohere/src/cohere-provider.ts b/packages/cohere/src/cohere-provider.ts index 457cd0d367d9..c5efd2472ed4 100644 --- a/packages/cohere/src/cohere-provider.ts +++ b/packages/cohere/src/cohere-provider.ts @@ -1,7 +1,8 @@ import { EmbeddingModelV1, - LanguageModelV1, - ProviderV1, + LanguageModelV2, + NoSuchModelError, + ProviderV2, } from '@ai-sdk/provider'; import { FetchFunction, @@ -16,8 +17,8 @@ import { CohereEmbeddingSettings, } from './cohere-embedding-settings'; -export interface CohereProvider extends ProviderV1 { - (modelId: CohereChatModelId, settings?: CohereChatSettings): LanguageModelV1; +export interface CohereProvider extends ProviderV2 { + (modelId: CohereChatModelId, settings?: CohereChatSettings): LanguageModelV2; /** Creates a model for text generation. @@ -25,7 +26,7 @@ Creates a model for text generation. 
languageModel( modelId: CohereChatModelId, settings?: CohereChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; embedding( modelId: CohereEmbeddingModelId, @@ -120,6 +121,10 @@ export function createCohere( provider.embedding = createTextEmbeddingModel; provider.textEmbeddingModel = createTextEmbeddingModel; + provider.imageModel = (modelId: string) => { + throw new NoSuchModelError({ modelId, modelType: 'imageModel' }); + }; + return provider; } diff --git a/packages/cohere/src/convert-to-cohere-chat-prompt.ts b/packages/cohere/src/convert-to-cohere-chat-prompt.ts index d6db681e6aee..80f9d8c5a1c4 100644 --- a/packages/cohere/src/convert-to-cohere-chat-prompt.ts +++ b/packages/cohere/src/convert-to-cohere-chat-prompt.ts @@ -1,11 +1,11 @@ import { - LanguageModelV1Prompt, + LanguageModelV2Prompt, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { CohereAssistantMessage, CohereChatPrompt } from './cohere-chat-prompt'; export function convertToCohereChatPrompt( - prompt: LanguageModelV1Prompt, + prompt: LanguageModelV2Prompt, ): CohereChatPrompt { const messages: CohereChatPrompt = []; diff --git a/packages/cohere/src/map-cohere-finish-reason.ts b/packages/cohere/src/map-cohere-finish-reason.ts index ef81596e590b..f7047b325102 100644 --- a/packages/cohere/src/map-cohere-finish-reason.ts +++ b/packages/cohere/src/map-cohere-finish-reason.ts @@ -1,8 +1,8 @@ -import { LanguageModelV1FinishReason } from '@ai-sdk/provider'; +import { LanguageModelV2FinishReason } from '@ai-sdk/provider'; export function mapCohereFinishReason( finishReason: string | null | undefined, -): LanguageModelV1FinishReason { +): LanguageModelV2FinishReason { switch (finishReason) { case 'COMPLETE': case 'STOP_SEQUENCE': diff --git a/packages/deepinfra/src/deepinfra-provider.test.ts b/packages/deepinfra/src/deepinfra-provider.test.ts index ca4678593054..b40945b2eb44 100644 --- a/packages/deepinfra/src/deepinfra-provider.test.ts +++ b/packages/deepinfra/src/deepinfra-provider.test.ts @@ -5,7 +5,7 @@ import { OpenAICompatibleCompletionLanguageModel, OpenAICompatibleEmbeddingModel, } from '@ai-sdk/openai-compatible'; -import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider'; +import { LanguageModelV2, EmbeddingModelV1 } from '@ai-sdk/provider'; import { loadApiKey } from '@ai-sdk/provider-utils'; import { describe, it, expect, vi, beforeEach, Mock } from 'vitest'; @@ -29,14 +29,14 @@ vi.mock('./deepinfra-image-model', () => ({ })); describe('DeepInfraProvider', () => { - let mockLanguageModel: LanguageModelV1; + let mockLanguageModel: LanguageModelV2; let mockEmbeddingModel: EmbeddingModelV1; beforeEach(() => { // Mock implementations of models mockLanguageModel = { - // Add any required methods for LanguageModelV1 - } as LanguageModelV1; + // Add any required methods for LanguageModelV2 + } as LanguageModelV2; mockEmbeddingModel = { // Add any required methods for EmbeddingModelV1 } as EmbeddingModelV1; diff --git a/packages/deepinfra/src/deepinfra-provider.ts b/packages/deepinfra/src/deepinfra-provider.ts index a737ba2db842..41c867523aa4 100644 --- a/packages/deepinfra/src/deepinfra-provider.ts +++ b/packages/deepinfra/src/deepinfra-provider.ts @@ -1,7 +1,7 @@ import { - LanguageModelV1, + LanguageModelV2, EmbeddingModelV1, - ProviderV1, + ProviderV2, ImageModelV1, } from '@ai-sdk/provider'; import { @@ -52,14 +52,14 @@ or to provide a custom fetch implementation for e.g. testing. 
fetch?: FetchFunction; } -export interface DeepInfraProvider extends ProviderV1 { +export interface DeepInfraProvider extends ProviderV2 { /** Creates a model for text generation. */ ( modelId: DeepInfraChatModelId, settings?: DeepInfraChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a chat model for text generation. @@ -67,7 +67,7 @@ Creates a chat model for text generation. chatModel( modelId: DeepInfraChatModelId, settings?: DeepInfraChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a model for image generation. @@ -91,7 +91,7 @@ Creates a chat model for text generation. languageModel( modelId: DeepInfraChatModelId, settings?: DeepInfraChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a completion model for text generation. @@ -99,7 +99,7 @@ Creates a completion model for text generation. completionModel( modelId: DeepInfraCompletionModelId, settings?: DeepInfraCompletionSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a text embedding model for text generation. diff --git a/packages/deepseek/src/deepseek-provider.ts b/packages/deepseek/src/deepseek-provider.ts index dff18e29f009..7a1e68a00573 100644 --- a/packages/deepseek/src/deepseek-provider.ts +++ b/packages/deepseek/src/deepseek-provider.ts @@ -1,8 +1,8 @@ import { OpenAICompatibleChatLanguageModel } from '@ai-sdk/openai-compatible'; import { - LanguageModelV1, + LanguageModelV2, NoSuchModelError, - ProviderV1, + ProviderV2, } from '@ai-sdk/provider'; import { FetchFunction, @@ -35,14 +35,14 @@ or to provide a custom fetch implementation for e.g. testing. fetch?: FetchFunction; } -export interface DeepSeekProvider extends ProviderV1 { +export interface DeepSeekProvider extends ProviderV2 { /** Creates a DeepSeek model for text generation. */ ( modelId: DeepSeekChatModelId, settings?: DeepSeekChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a DeepSeek model for text generation. @@ -50,7 +50,7 @@ Creates a DeepSeek model for text generation. languageModel( modelId: DeepSeekChatModelId, settings?: DeepSeekChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a DeepSeek chat model for text generation. @@ -58,7 +58,7 @@ Creates a DeepSeek chat model for text generation. chat( modelId: DeepSeekChatModelId, settings?: DeepSeekChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; } export function createDeepSeek( @@ -97,9 +97,13 @@ export function createDeepSeek( provider.languageModel = createLanguageModel; provider.chat = createLanguageModel; + provider.textEmbeddingModel = (modelId: string) => { throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' }); }; + provider.imageModel = (modelId: string) => { + throw new NoSuchModelError({ modelId, modelType: 'imageModel' }); + }; return provider; } diff --git a/packages/fal/src/fal-provider.ts b/packages/fal/src/fal-provider.ts index 2cf4caf76138..84b10516da80 100644 --- a/packages/fal/src/fal-provider.ts +++ b/packages/fal/src/fal-provider.ts @@ -1,4 +1,4 @@ -import { ImageModelV1, NoSuchModelError, ProviderV1 } from '@ai-sdk/provider'; +import { ImageModelV1, NoSuchModelError, ProviderV2 } from '@ai-sdk/provider'; import type { FetchFunction } from '@ai-sdk/provider-utils'; import { withoutTrailingSlash } from '@ai-sdk/provider-utils'; import { FalImageModel } from './fal-image-model'; @@ -29,7 +29,7 @@ requests, or to provide a custom fetch implementation for e.g. testing. 
fetch?: FetchFunction; } -export interface FalProvider extends ProviderV1 { +export interface FalProvider extends ProviderV2 { /** Creates a model for image generation. */ diff --git a/packages/fireworks/src/fireworks-provider.test.ts b/packages/fireworks/src/fireworks-provider.test.ts index 6132e47f8435..3c050f55713d 100644 --- a/packages/fireworks/src/fireworks-provider.test.ts +++ b/packages/fireworks/src/fireworks-provider.test.ts @@ -1,6 +1,6 @@ import { describe, it, expect, vi, beforeEach, Mock } from 'vitest'; import { createFireworks } from './fireworks-provider'; -import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider'; +import { LanguageModelV2, EmbeddingModelV1 } from '@ai-sdk/provider'; import { loadApiKey } from '@ai-sdk/provider-utils'; import { OpenAICompatibleChatLanguageModel, @@ -29,14 +29,14 @@ vi.mock('./fireworks-image-model', () => ({ })); describe('FireworksProvider', () => { - let mockLanguageModel: LanguageModelV1; + let mockLanguageModel: LanguageModelV2; let mockEmbeddingModel: EmbeddingModelV1; beforeEach(() => { // Mock implementations of models mockLanguageModel = { - // Add any required methods for LanguageModelV1 - } as LanguageModelV1; + // Add any required methods for LanguageModelV2 + } as LanguageModelV2; mockEmbeddingModel = { // Add any required methods for EmbeddingModelV1 } as EmbeddingModelV1; diff --git a/packages/fireworks/src/fireworks-provider.ts b/packages/fireworks/src/fireworks-provider.ts index 90dd5fcc6d81..8ca2dc3511f8 100644 --- a/packages/fireworks/src/fireworks-provider.ts +++ b/packages/fireworks/src/fireworks-provider.ts @@ -7,8 +7,8 @@ import { import { EmbeddingModelV1, ImageModelV1, - LanguageModelV1, - ProviderV1, + LanguageModelV2, + ProviderV2, } from '@ai-sdk/provider'; import { FetchFunction, @@ -66,14 +66,14 @@ or to provide a custom fetch implementation for e.g. testing. fetch?: FetchFunction; } -export interface FireworksProvider extends ProviderV1 { +export interface FireworksProvider extends ProviderV2 { /** Creates a model for text generation. */ ( modelId: FireworksChatModelId, settings?: FireworksChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a chat model for text generation. @@ -81,7 +81,7 @@ Creates a chat model for text generation. chatModel( modelId: FireworksChatModelId, settings?: FireworksChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a completion model for text generation. @@ -89,7 +89,7 @@ Creates a completion model for text generation. completionModel( modelId: FireworksCompletionModelId, settings?: FireworksCompletionSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a chat model for text generation. @@ -97,7 +97,7 @@ Creates a chat model for text generation. languageModel( modelId: FireworksChatModelId, settings?: FireworksChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a text embedding model for text generation. 
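// Usage sketch for the provider interface above (the model id is an
// assumption, not part of this change; the API key is read from the
// environment by default):
//
//   const fireworks = createFireworks();
//   const model = fireworks.languageModel(
//     'accounts/fireworks/models/llama-v3p1-8b-instruct',
//   );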
diff --git a/packages/google-vertex/src/anthropic/google-vertex-anthropic-provider.ts b/packages/google-vertex/src/anthropic/google-vertex-anthropic-provider.ts
index e80447d79852..2c0a281aff6c 100644
--- a/packages/google-vertex/src/anthropic/google-vertex-anthropic-provider.ts
+++ b/packages/google-vertex/src/anthropic/google-vertex-anthropic-provider.ts
@@ -1,7 +1,7 @@
 import {
-  LanguageModelV1,
+  LanguageModelV2,
   NoSuchModelError,
-  ProviderV1,
+  ProviderV2,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -18,14 +18,14 @@ import {
   GoogleVertexAnthropicMessagesModelId,
   GoogleVertexAnthropicMessagesSettings,
 } from './google-vertex-anthropic-messages-settings';

-export interface GoogleVertexAnthropicProvider extends ProviderV1 {
+export interface GoogleVertexAnthropicProvider extends ProviderV2 {
   /**
 Creates a model for text generation.
 */
   (
     modelId: GoogleVertexAnthropicMessagesModelId,
     settings?: GoogleVertexAnthropicMessagesSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
 Creates a model for text generation.
@@ -33,7 +33,7 @@ Creates a model for text generation.
   languageModel(
     modelId: GoogleVertexAnthropicMessagesModelId,
     settings?: GoogleVertexAnthropicMessagesSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
 Anthropic-specific computer use tool.
@@ -132,9 +132,13 @@ export function createVertexAnthropic(
   provider.languageModel = createChatModel;
   provider.chat = createChatModel;
   provider.messages = createChatModel;
+
   provider.textEmbeddingModel = (modelId: string) => {
     throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' });
   };
+  provider.imageModel = (modelId: string) => {
+    throw new NoSuchModelError({ modelId, modelType: 'imageModel' });
+  };

   provider.tools = anthropicTools;

diff --git a/packages/google-vertex/src/google-vertex-provider.ts b/packages/google-vertex/src/google-vertex-provider.ts
index 268a28f5cf7a..23dc16644e3d 100644
--- a/packages/google-vertex/src/google-vertex-provider.ts
+++ b/packages/google-vertex/src/google-vertex-provider.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1, ProviderV1, ImageModelV1 } from '@ai-sdk/provider';
+import { LanguageModelV2, ProviderV2, ImageModelV1 } from '@ai-sdk/provider';
 import {
   FetchFunction,
   generateId,
@@ -24,19 +24,19 @@ import {
 import { GoogleVertexConfig } from './google-vertex-config';
 import { isSupportedFileUrl } from './google-vertex-supported-file-url';

-export interface GoogleVertexProvider extends ProviderV1 {
+export interface GoogleVertexProvider extends ProviderV2 {
   /**
 Creates a model for text generation.
 */
   (
     modelId: GoogleVertexModelId,
     settings?: GoogleVertexSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   languageModel: (
     modelId: GoogleVertexModelId,
     settings?: GoogleVertexSettings,
-  ) => LanguageModelV1;
+  ) => LanguageModelV2;

   /**
    * Creates a model for image generation.
diff --git a/packages/google/src/convert-to-google-generative-ai-messages.ts b/packages/google/src/convert-to-google-generative-ai-messages.ts
index 69d56a16ad72..5ded69ff3e67 100644
--- a/packages/google/src/convert-to-google-generative-ai-messages.ts
+++ b/packages/google/src/convert-to-google-generative-ai-messages.ts
@@ -1,5 +1,5 @@
 import {
-  LanguageModelV1Prompt,
+  LanguageModelV2Prompt,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';
@@ -10,7 +10,7 @@
 } from './google-generative-ai-prompt';

 export function convertToGoogleGenerativeAIMessages(
-  prompt: LanguageModelV1Prompt,
+  prompt: LanguageModelV2Prompt,
 ): GoogleGenerativeAIPrompt {
   const systemInstructionParts: Array<{ text: string }> = [];
   const contents: Array<GoogleGenerativeAIContent> = [];
diff --git a/packages/google/src/google-generative-ai-language-model.test.ts b/packages/google/src/google-generative-ai-language-model.test.ts
index 4a10794a8c2a..ea81f7966b69 100644
--- a/packages/google/src/google-generative-ai-language-model.test.ts
+++ b/packages/google/src/google-generative-ai-language-model.test.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1Prompt } from '@ai-sdk/provider';
+import { LanguageModelV2Prompt } from '@ai-sdk/provider';
 import {
   convertReadableStreamToArray,
   createTestServer,
@@ -10,7 +10,7 @@
 import { GoogleGenerativeAIGroundingMetadata } from './google-generative-ai-prompt';
 import { createGoogleGenerativeAI } from './google-provider';

-const TEST_PROMPT: LanguageModelV1Prompt = [
+const TEST_PROMPT: LanguageModelV2Prompt = [
   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
 ];

diff --git a/packages/google/src/google-generative-ai-language-model.ts b/packages/google/src/google-generative-ai-language-model.ts
index 8f2a9bf1c356..90290faad6cc 100644
--- a/packages/google/src/google-generative-ai-language-model.ts
+++ b/packages/google/src/google-generative-ai-language-model.ts
@@ -1,10 +1,10 @@
 import {
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
-  LanguageModelV1FinishReason,
-  LanguageModelV1ProviderMetadata,
-  LanguageModelV1Source,
-  LanguageModelV1StreamPart,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
+  LanguageModelV2FinishReason,
+  LanguageModelV2ProviderMetadata,
+  LanguageModelV2Source,
+  LanguageModelV2StreamPart,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -39,7 +39,7 @@ type GoogleGenerativeAIConfig = {
   isSupportedUrl: (url: URL) => boolean;
 };

-export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
+export class GoogleGenerativeAILanguageModel implements LanguageModelV2 {
   readonly specificationVersion = 'v1';
   readonly defaultObjectGenerationMode = 'json';
   readonly supportsImageUrls = false;
@@ -80,10 +80,10 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
     responseFormat,
     seed,
     providerMetadata,
-  }: Parameters<LanguageModelV1['doGenerate']>[0]) {
+  }: Parameters<LanguageModelV2['doGenerate']>[0]) {
     const type = mode.type;

-    const warnings: LanguageModelV1CallWarning[] = [];
+    const warnings: LanguageModelV2CallWarning[] = [];

     const googleOptions = parseProviderOptions({
       provider: 'google',
@@ -208,8 +208,8 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
   }

   async doGenerate(
-    options: Parameters<LanguageModelV1['doGenerate']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
+    options: Parameters<LanguageModelV2['doGenerate']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
     const { args, warnings } = await this.getArgs(options);

     const body = JSON.stringify(args);
@@ -284,8 +284,8 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
   }

   async doStream(
-    options: Parameters<LanguageModelV1['doStream']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
+    options: Parameters<LanguageModelV2['doStream']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
     const { args, warnings } = await this.getArgs(options);

     const body = JSON.stringify(args);
@@ -308,12 +308,12 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {

     const { contents: rawPrompt, ...rawSettings } = args;

-    let finishReason: LanguageModelV1FinishReason = 'unknown';
+    let finishReason: LanguageModelV2FinishReason = 'unknown';
     let usage: { promptTokens: number; completionTokens: number } = {
       promptTokens: Number.NaN,
       completionTokens: Number.NaN,
     };
-    let providerMetadata: LanguageModelV1ProviderMetadata | undefined =
+    let providerMetadata: LanguageModelV2ProviderMetadata | undefined =
       undefined;

     const generateId = this.config.generateId;
@@ -323,7 +323,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult>,
-          LanguageModelV1StreamPart
+          LanguageModelV2StreamPart
         >({
           transform(chunk, controller) {
             if (!chunk.success) {
@@ -494,7 +494,7 @@ function extractSources({
 }: {
   groundingMetadata: z.infer | undefined | null;
   generateId: () => string;
-}): undefined | LanguageModelV1Source[] {
+}): undefined | LanguageModelV2Source[] {
   return groundingMetadata?.groundingChunks
     ?.filter(
       (
diff --git a/packages/google/src/google-prepare-tools.ts b/packages/google/src/google-prepare-tools.ts
index 1ad89aaa170d..a50f23888527 100644
--- a/packages/google/src/google-prepare-tools.ts
+++ b/packages/google/src/google-prepare-tools.ts
@@ -1,6 +1,6 @@
 import {
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import { convertJSONSchemaToOpenAPISchema } from './convert-json-schema-to-openapi-schema';
@@ -10,7 +10,7 @@ import {
 } from './google-generative-ai-settings';

 export function prepareTools(
-  mode: Parameters<LanguageModelV1['doGenerate']>[0]['mode'] & {
+  mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & {
     type: 'regular';
   },
   useSearchGrounding: boolean,
@@ -40,10 +40,10 @@ export function prepareTools(
       allowedFunctionNames?: string[];
     };
   };
-  toolWarnings: LanguageModelV1CallWarning[];
+  toolWarnings: LanguageModelV2CallWarning[];
 } {
   const tools = mode.tools?.length ? mode.tools : undefined;

-  const toolWarnings: LanguageModelV1CallWarning[] = [];
+  const toolWarnings: LanguageModelV2CallWarning[] = [];

   const isGemini2 = modelId.includes('gemini-2');
   const supportsDynamicRetrieval =
diff --git a/packages/google/src/google-provider.ts b/packages/google/src/google-provider.ts
index 27e680a8e07c..bffb2d28e3e6 100644
--- a/packages/google/src/google-provider.ts
+++ b/packages/google/src/google-provider.ts
@@ -1,41 +1,42 @@
+import {
+  EmbeddingModelV1,
+  LanguageModelV2,
+  NoSuchModelError,
+  ProviderV2,
+} from '@ai-sdk/provider';
 import {
   FetchFunction,
   generateId,
   loadApiKey,
   withoutTrailingSlash,
 } from '@ai-sdk/provider-utils';
-import { GoogleGenerativeAILanguageModel } from './google-generative-ai-language-model';
-import {
-  GoogleGenerativeAIModelId,
-  GoogleGenerativeAISettings,
-} from './google-generative-ai-settings';
 import { GoogleGenerativeAIEmbeddingModel } from './google-generative-ai-embedding-model';
 import {
   GoogleGenerativeAIEmbeddingModelId,
   GoogleGenerativeAIEmbeddingSettings,
 } from './google-generative-ai-embedding-settings';
+import { GoogleGenerativeAILanguageModel } from './google-generative-ai-language-model';
 import {
-  EmbeddingModelV1,
-  LanguageModelV1,
-  ProviderV1,
-} from '@ai-sdk/provider';
+  GoogleGenerativeAIModelId,
+  GoogleGenerativeAISettings,
+} from './google-generative-ai-settings';
 import { isSupportedFileUrl } from './google-supported-file-url';

-export interface GoogleGenerativeAIProvider extends ProviderV1 {
+export interface GoogleGenerativeAIProvider extends ProviderV2 {
   (
     modelId: GoogleGenerativeAIModelId,
     settings?: GoogleGenerativeAISettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   languageModel(
     modelId: GoogleGenerativeAIModelId,
     settings?: GoogleGenerativeAISettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   chat(
     modelId: GoogleGenerativeAIModelId,
     settings?: GoogleGenerativeAISettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
    * @deprecated Use `chat()` instead.
@@ -43,7 +44,7 @@ export interface GoogleGenerativeAIProvider extends ProviderV1 {
   generativeAI(
     modelId: GoogleGenerativeAIModelId,
     settings?: GoogleGenerativeAISettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
 @deprecated Use `textEmbeddingModel()` instead.
@@ -160,6 +161,10 @@ export function createGoogleGenerativeAI(
   provider.textEmbedding = createEmbeddingModel;
   provider.textEmbeddingModel = createEmbeddingModel;

+  provider.imageModel = (modelId: string) => {
+    throw new NoSuchModelError({ modelId, modelType: 'imageModel' });
+  };
+
   return provider;
 }
diff --git a/packages/google/src/map-google-generative-ai-finish-reason.ts b/packages/google/src/map-google-generative-ai-finish-reason.ts
index 274e30539b37..3bc9a583a980 100644
--- a/packages/google/src/map-google-generative-ai-finish-reason.ts
+++ b/packages/google/src/map-google-generative-ai-finish-reason.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1FinishReason } from '@ai-sdk/provider';
+import { LanguageModelV2FinishReason } from '@ai-sdk/provider';

 export function mapGoogleGenerativeAIFinishReason({
   finishReason,
@@ -6,7 +6,7 @@ export function mapGoogleGenerativeAIFinishReason({
 }: {
   finishReason: string | null | undefined;
   hasToolCalls: boolean;
-}): LanguageModelV1FinishReason {
+}): LanguageModelV2FinishReason {
   switch (finishReason) {
     case 'STOP':
       return hasToolCalls ? 'tool-calls' : 'stop';
diff --git a/packages/groq/src/convert-to-groq-chat-messages.ts b/packages/groq/src/convert-to-groq-chat-messages.ts
index 8e5e6bd1543e..ed23555c3733 100644
--- a/packages/groq/src/convert-to-groq-chat-messages.ts
+++ b/packages/groq/src/convert-to-groq-chat-messages.ts
@@ -1,12 +1,12 @@
 import {
-  LanguageModelV1Prompt,
+  LanguageModelV2Prompt,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';
 import { GroqChatPrompt } from './groq-api-types';

 export function convertToGroqChatMessages(
-  prompt: LanguageModelV1Prompt,
+  prompt: LanguageModelV2Prompt,
 ): GroqChatPrompt {
   const messages: GroqChatPrompt = [];

diff --git a/packages/groq/src/groq-chat-language-model.test.ts b/packages/groq/src/groq-chat-language-model.test.ts
index eec24740df7d..d998efa0af19 100644
--- a/packages/groq/src/groq-chat-language-model.test.ts
+++ b/packages/groq/src/groq-chat-language-model.test.ts
@@ -1,11 +1,11 @@
-import { LanguageModelV1Prompt } from '@ai-sdk/provider';
+import { LanguageModelV2Prompt } from '@ai-sdk/provider';
 import {
   convertReadableStreamToArray,
   createTestServer,
 } from '@ai-sdk/provider-utils/test';
 import { createGroq } from './groq-provider';

-const TEST_PROMPT: LanguageModelV1Prompt = [
+const TEST_PROMPT: LanguageModelV2Prompt = [
   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
 ];

diff --git a/packages/groq/src/groq-chat-language-model.ts b/packages/groq/src/groq-chat-language-model.ts
index eafbefc214c5..65f7687e73ea 100644
--- a/packages/groq/src/groq-chat-language-model.ts
+++ b/packages/groq/src/groq-chat-language-model.ts
@@ -1,10 +1,10 @@
 import {
   InvalidResponseDataError,
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
-  LanguageModelV1FinishReason,
-  LanguageModelV1ProviderMetadata,
-  LanguageModelV1StreamPart,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
+  LanguageModelV2FinishReason,
+  LanguageModelV2ProviderMetadata,
+  LanguageModelV2StreamPart,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -32,7 +32,7 @@ type GroqChatConfig = {
   fetch?: FetchFunction;
 };

-export class GroqChatLanguageModel implements LanguageModelV1 {
+export class GroqChatLanguageModel implements LanguageModelV2 {
   readonly specificationVersion = 'v1';

   readonly supportsStructuredOutputs = false;
@@ -76,12 +76,12 @@ export class GroqChatLanguageModel implements LanguageModelV1 {
     seed,
     stream,
     providerMetadata,
-  }: Parameters<LanguageModelV1['doGenerate']>[0] & {
+  }: Parameters<LanguageModelV2['doGenerate']>[0] & {
     stream: boolean;
   }) {
     const type = mode.type;

-    const warnings: LanguageModelV1CallWarning[] = [];
+    const warnings: LanguageModelV2CallWarning[] = [];

     if (topK != null) {
       warnings.push({
@@ -197,8 +197,8 @@ export class GroqChatLanguageModel implements LanguageModelV1 {
   }

   async doGenerate(
-    options: Parameters<LanguageModelV1['doGenerate']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
+    options: Parameters<LanguageModelV2['doGenerate']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
     const { args, warnings } = this.getArgs({ ...options, stream: false });

     const body = JSON.stringify(args);
@@ -248,8 +248,8 @@ export class GroqChatLanguageModel implements LanguageModelV1 {
   }

   async doStream(
-    options: Parameters<LanguageModelV1['doStream']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
+    options: Parameters<LanguageModelV2['doStream']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
     const { args, warnings } = this.getArgs({ ...options, stream: true });

     const body = JSON.stringify({ ...args, stream: true });
@@ -283,7 +283,7 @@ export class GroqChatLanguageModel implements LanguageModelV1 {
       hasFinished: boolean;
     }> = [];

-    let finishReason: LanguageModelV1FinishReason = 'unknown';
+    let finishReason: LanguageModelV2FinishReason = 'unknown';
     let usage: {
       promptTokens: number | undefined;
       completionTokens: number | undefined;
@@ -293,12 +293,12 @@
     };
     let isFirstChunk = true;

-    let providerMetadata: LanguageModelV1ProviderMetadata | undefined;
+    let providerMetadata: LanguageModelV2ProviderMetadata | undefined;

     return {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult>,
-          LanguageModelV1StreamPart
+          LanguageModelV2StreamPart
         >({
           transform(chunk, controller) {
             // handle failed chunk parsing / validation:
diff --git a/packages/groq/src/groq-prepare-tools.ts b/packages/groq/src/groq-prepare-tools.ts
index 0cfae2ccf3c6..8e16a421f3b4 100644
--- a/packages/groq/src/groq-prepare-tools.ts
+++ b/packages/groq/src/groq-prepare-tools.ts
@@ -1,13 +1,13 @@
 import {
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';

 export function prepareTools({
   mode,
 }: {
-  mode: Parameters<LanguageModelV1['doGenerate']>[0]['mode'] & {
+  mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & {
     type: 'regular';
   };
 }): {
@@ -27,11 +27,11 @@ export function prepareTools({
     | 'none'
     | 'required'
     | undefined;
-  toolWarnings: LanguageModelV1CallWarning[];
+  toolWarnings: LanguageModelV2CallWarning[];
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
   const tools = mode.tools?.length ? mode.tools : undefined;

-  const toolWarnings: LanguageModelV1CallWarning[] = [];
+  const toolWarnings: LanguageModelV2CallWarning[] = [];

   if (tools == null) {
     return { tools: undefined, tool_choice: undefined, toolWarnings };
diff --git a/packages/groq/src/groq-provider.ts b/packages/groq/src/groq-provider.ts
index 048288910414..f047c1a461e1 100644
--- a/packages/groq/src/groq-provider.ts
+++ b/packages/groq/src/groq-provider.ts
@@ -1,7 +1,7 @@
 import {
-  LanguageModelV1,
+  LanguageModelV2,
   NoSuchModelError,
-  ProviderV1,
+  ProviderV2,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -11,11 +11,11 @@ import {
 import { GroqChatLanguageModel } from './groq-chat-language-model';
 import { GroqChatModelId, GroqChatSettings } from './groq-chat-settings';

-export interface GroqProvider extends ProviderV1 {
+export interface GroqProvider extends ProviderV2 {
   /**
 Creates a model for text generation.
 */
-  (modelId: GroqChatModelId, settings?: GroqChatSettings): LanguageModelV1;
+  (modelId: GroqChatModelId, settings?: GroqChatSettings): LanguageModelV2;

   /**
 Creates an Groq chat model for text generation.
@@ -23,7 +23,7 @@ Creates an Groq chat model for text generation.
   languageModel(
     modelId: GroqChatModelId,
     settings?: GroqChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;
 }

 export interface GroqProviderSettings {
@@ -98,9 +98,13 @@ export function createGroq(options: GroqProviderSettings = {}): GroqProvider {
   provider.languageModel = createLanguageModel;
   provider.chat = createChatModel;
+
   provider.textEmbeddingModel = (modelId: string) => {
     throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' });
   };
+  provider.imageModel = (modelId: string) => {
+    throw new NoSuchModelError({ modelId, modelType: 'imageModel' });
+  };

   return provider;
 }
diff --git a/packages/groq/src/map-groq-finish-reason.ts b/packages/groq/src/map-groq-finish-reason.ts
index 5839e1eeebc3..a0411842ca00 100644
--- a/packages/groq/src/map-groq-finish-reason.ts
+++ b/packages/groq/src/map-groq-finish-reason.ts
@@ -1,8 +1,8 @@
-import { LanguageModelV1FinishReason } from '@ai-sdk/provider';
+import { LanguageModelV2FinishReason } from '@ai-sdk/provider';

 export function mapGroqFinishReason(
   finishReason: string | null | undefined,
-): LanguageModelV1FinishReason {
+): LanguageModelV2FinishReason {
   switch (finishReason) {
     case 'stop':
       return 'stop';
diff --git a/packages/luma/src/luma-provider.ts b/packages/luma/src/luma-provider.ts
index cf7a56824a02..309c91756cef 100644
--- a/packages/luma/src/luma-provider.ts
+++ b/packages/luma/src/luma-provider.ts
@@ -1,4 +1,4 @@
-import { ImageModelV1, NoSuchModelError, ProviderV1 } from '@ai-sdk/provider';
+import { ImageModelV1, NoSuchModelError, ProviderV2 } from '@ai-sdk/provider';
 import {
   FetchFunction,
   loadApiKey,
@@ -28,7 +28,7 @@ or to provide a custom fetch implementation for e.g. testing.
   fetch?: FetchFunction;
 }

-export interface LumaProvider extends ProviderV1 {
+export interface LumaProvider extends ProviderV2 {
   /**
 Creates a model for image generation.
 */
diff --git a/packages/mistral/src/convert-to-mistral-chat-messages.ts b/packages/mistral/src/convert-to-mistral-chat-messages.ts
index 4d24266a0135..1f2e80aa3892 100644
--- a/packages/mistral/src/convert-to-mistral-chat-messages.ts
+++ b/packages/mistral/src/convert-to-mistral-chat-messages.ts
@@ -1,12 +1,12 @@
 import {
-  LanguageModelV1Prompt,
+  LanguageModelV2Prompt,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';
 import { MistralPrompt } from './mistral-chat-prompt';

 export function convertToMistralChatMessages(
-  prompt: LanguageModelV1Prompt,
+  prompt: LanguageModelV2Prompt,
 ): MistralPrompt {
   const messages: MistralPrompt = [];

diff --git a/packages/mistral/src/map-mistral-finish-reason.ts b/packages/mistral/src/map-mistral-finish-reason.ts
index 251d1b0644b0..aea0cba7f776 100644
--- a/packages/mistral/src/map-mistral-finish-reason.ts
+++ b/packages/mistral/src/map-mistral-finish-reason.ts
@@ -1,8 +1,8 @@
-import { LanguageModelV1FinishReason } from '@ai-sdk/provider';
+import { LanguageModelV2FinishReason } from '@ai-sdk/provider';

 export function mapMistralFinishReason(
   finishReason: string | null | undefined,
-): LanguageModelV1FinishReason {
+): LanguageModelV2FinishReason {
   switch (finishReason) {
     case 'stop':
       return 'stop';
diff --git a/packages/mistral/src/mistral-chat-language-model.test.ts b/packages/mistral/src/mistral-chat-language-model.test.ts
index fe98e2718689..db826d0a9aef 100644
--- a/packages/mistral/src/mistral-chat-language-model.test.ts
+++ b/packages/mistral/src/mistral-chat-language-model.test.ts
@@ -1,11 +1,11 @@
-import { LanguageModelV1Prompt } from '@ai-sdk/provider';
+import { LanguageModelV2Prompt } from '@ai-sdk/provider';
 import {
   convertReadableStreamToArray,
   createTestServer,
 } from '@ai-sdk/provider-utils/test';
 import { createMistral } from './mistral-provider';

-const TEST_PROMPT: LanguageModelV1Prompt = [
+const TEST_PROMPT: LanguageModelV2Prompt = [
   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
 ];

diff --git a/packages/mistral/src/mistral-chat-language-model.ts b/packages/mistral/src/mistral-chat-language-model.ts
index 3be16988ab9f..25ebe48cfafb 100644
--- a/packages/mistral/src/mistral-chat-language-model.ts
+++ b/packages/mistral/src/mistral-chat-language-model.ts
@@ -1,8 +1,8 @@
 import {
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
-  LanguageModelV1FinishReason,
-  LanguageModelV1StreamPart,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
+  LanguageModelV2FinishReason,
+  LanguageModelV2StreamPart,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -30,7 +30,7 @@ type MistralChatConfig = {
   fetch?: FetchFunction;
 };

-export class MistralChatLanguageModel implements LanguageModelV1 {
+export class MistralChatLanguageModel implements LanguageModelV2 {
   readonly specificationVersion = 'v1';
   readonly defaultObjectGenerationMode = 'json';
   readonly supportsImageUrls = false;
@@ -71,10 +71,10 @@ export class MistralChatLanguageModel implements LanguageModelV1 {
     responseFormat,
     seed,
     providerMetadata,
-  }: Parameters<LanguageModelV1['doGenerate']>[0]) {
+  }: Parameters<LanguageModelV2['doGenerate']>[0]) {
     const type = mode.type;

-    const warnings: LanguageModelV1CallWarning[] = [];
+    const warnings: LanguageModelV2CallWarning[] = [];

     if (topK != null) {
       warnings.push({
@@ -180,8 +180,8 @@ export class MistralChatLanguageModel implements LanguageModelV1 {
   }

   async doGenerate(
-    options: Parameters<LanguageModelV1['doGenerate']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
+    options: Parameters<LanguageModelV2['doGenerate']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
     const { args, warnings } = this.getArgs(options);

     const {
@@ -243,8 +243,8 @@ export class MistralChatLanguageModel implements LanguageModelV1 {
   }

   async doStream(
-    options: Parameters<LanguageModelV1['doStream']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
+    options: Parameters<LanguageModelV2['doStream']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
     const { args, warnings } = this.getArgs(options);

     const body = { ...args, stream: true };
@@ -263,7 +263,7 @@ export class MistralChatLanguageModel implements LanguageModelV1 {

     const { messages: rawPrompt, ...rawSettings } = args;

-    let finishReason: LanguageModelV1FinishReason = 'unknown';
+    let finishReason: LanguageModelV2FinishReason = 'unknown';
     let usage: { promptTokens: number; completionTokens: number } = {
       promptTokens: Number.NaN,
       completionTokens: Number.NaN,
@@ -275,7 +275,7 @@ export class MistralChatLanguageModel implements LanguageModelV1 {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult>,
-          LanguageModelV1StreamPart
+          LanguageModelV2StreamPart
         >({
           transform(chunk, controller) {
             if (!chunk.success) {
diff --git a/packages/mistral/src/mistral-prepare-tools.ts b/packages/mistral/src/mistral-prepare-tools.ts
index 12b17d7a23b5..669184b241c3 100644
--- a/packages/mistral/src/mistral-prepare-tools.ts
+++ b/packages/mistral/src/mistral-prepare-tools.ts
@@ -1,11 +1,11 @@
 import {
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';

 export function prepareTools(
-  mode: Parameters<LanguageModelV1['doGenerate']>[0]['mode'] & {
+  mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & {
     type: 'regular';
   },
 ): {
@@ -25,11 +25,11 @@ export function prepareTools(
     | 'none'
     | 'any'
     | undefined;
-  toolWarnings: LanguageModelV1CallWarning[];
+  toolWarnings: LanguageModelV2CallWarning[];
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
   const tools = mode.tools?.length ? mode.tools : undefined;

-  const toolWarnings: LanguageModelV1CallWarning[] = [];
+  const toolWarnings: LanguageModelV2CallWarning[] = [];

   if (tools == null) {
     return { tools: undefined, tool_choice: undefined, toolWarnings };
diff --git a/packages/mistral/src/mistral-provider.ts b/packages/mistral/src/mistral-provider.ts
index ef08c308a40b..706856e3e1ba 100644
--- a/packages/mistral/src/mistral-provider.ts
+++ b/packages/mistral/src/mistral-provider.ts
@@ -1,7 +1,8 @@
 import {
   EmbeddingModelV1,
-  LanguageModelV1,
-  ProviderV1,
+  LanguageModelV2,
+  NoSuchModelError,
+  ProviderV2,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -19,11 +20,11 @@ import {
   MistralEmbeddingSettings,
 } from './mistral-embedding-settings';

-export interface MistralProvider extends ProviderV1 {
+export interface MistralProvider extends ProviderV2 {
   (
     modelId: MistralChatModelId,
     settings?: MistralChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
 Creates a model for text generation.
@@ -31,7 +32,7 @@ Creates a model for text generation.
   languageModel(
     modelId: MistralChatModelId,
     settings?: MistralChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
 Creates a model for text generation.
@@ -39,7 +40,7 @@ Creates a model for text generation.
   chat(
     modelId: MistralChatModelId,
     settings?: MistralChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
 @deprecated Use `textEmbeddingModel()` instead.
@@ -147,6 +148,10 @@ export function createMistral(
   provider.textEmbedding = createEmbeddingModel;
   provider.textEmbeddingModel = createEmbeddingModel;

+  provider.imageModel = (modelId: string) => {
+    throw new NoSuchModelError({ modelId, modelType: 'imageModel' });
+  };
+
   return provider;
 }
diff --git a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts
index 617e0a51e5ad..85c271e41cd0 100644
--- a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts
+++ b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts
@@ -1,19 +1,19 @@
 import {
-  LanguageModelV1Prompt,
-  LanguageModelV1ProviderMetadata,
+  LanguageModelV2Prompt,
+  LanguageModelV2ProviderMetadata,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';
 import { OpenAICompatibleChatPrompt } from './openai-compatible-api-types';

 function getOpenAIMetadata(message: {
-  providerMetadata?: LanguageModelV1ProviderMetadata;
+  providerMetadata?: LanguageModelV2ProviderMetadata;
 }) {
   return message?.providerMetadata?.openaiCompatible ?? {};
 }

 export function convertToOpenAICompatibleChatMessages(
-  prompt: LanguageModelV1Prompt,
+  prompt: LanguageModelV2Prompt,
 ): OpenAICompatibleChatPrompt {
   const messages: OpenAICompatibleChatPrompt = [];
   for (const { role, content, ...message } of prompt) {
diff --git a/packages/openai-compatible/src/convert-to-openai-compatible-completion-prompt.ts b/packages/openai-compatible/src/convert-to-openai-compatible-completion-prompt.ts
index 8b13f10b1ef3..fa473118510e 100644
--- a/packages/openai-compatible/src/convert-to-openai-compatible-completion-prompt.ts
+++ b/packages/openai-compatible/src/convert-to-openai-compatible-completion-prompt.ts
@@ -1,6 +1,6 @@
 import {
   InvalidPromptError,
-  LanguageModelV1Prompt,
+  LanguageModelV2Prompt,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';

@@ -10,7 +10,7 @@ export function convertToOpenAICompatibleCompletionPrompt({
   user = 'user',
   assistant = 'assistant',
 }: {
-  prompt: LanguageModelV1Prompt;
+  prompt: LanguageModelV2Prompt;
   inputFormat: 'prompt' | 'messages';
   user?: string;
   assistant?: string;
diff --git a/packages/openai-compatible/src/map-openai-compatible-finish-reason.ts b/packages/openai-compatible/src/map-openai-compatible-finish-reason.ts
index 6d2b232aba23..a130c2ac75b6 100644
--- a/packages/openai-compatible/src/map-openai-compatible-finish-reason.ts
+++ b/packages/openai-compatible/src/map-openai-compatible-finish-reason.ts
@@ -1,8 +1,8 @@
-import { LanguageModelV1FinishReason } from '@ai-sdk/provider';
+import { LanguageModelV2FinishReason } from '@ai-sdk/provider';

 export function mapOpenAICompatibleFinishReason(
   finishReason: string | null | undefined,
-): LanguageModelV1FinishReason {
+): LanguageModelV2FinishReason {
   switch (finishReason) {
     case 'stop':
       return 'stop';
diff --git a/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts b/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts
index 9a3d06bd5e95..b8597a3cee65 100644
--- a/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts
+++ b/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1Prompt } from '@ai-sdk/provider';
+import { LanguageModelV2Prompt } from '@ai-sdk/provider';
 import {
   convertReadableStreamToArray,
   createTestServer,
@@ -6,7 +6,7 @@ import {
 import { OpenAICompatibleChatLanguageModel } from './openai-compatible-chat-language-model';
 import { createOpenAICompatible } from './openai-compatible-provider';

-const TEST_PROMPT: LanguageModelV1Prompt = [
+const TEST_PROMPT: LanguageModelV2Prompt = [
   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
 ];

diff --git a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts
index ff1874fa16c2..b61c8572a578 100644
--- a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts
+++ b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts
@@ -1,12 +1,12 @@
 import {
   APICallError,
   InvalidResponseDataError,
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
-  LanguageModelV1FinishReason,
-  LanguageModelV1ObjectGenerationMode,
-  LanguageModelV1ProviderMetadata,
-  LanguageModelV1StreamPart,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
+  LanguageModelV2FinishReason,
+  LanguageModelV2ObjectGenerationMode,
+  LanguageModelV2ProviderMetadata,
+  LanguageModelV2StreamPart,
 } from '@ai-sdk/provider';
 import {
   combineHeaders,
@@ -48,7 +48,7 @@ Default object generation mode that should be used with this model when
 no mode is specified. Should be the mode with the best results for this
 model. `undefined` can be specified if object generation is not supported.
 */
-  defaultObjectGenerationMode?: LanguageModelV1ObjectGenerationMode;
+  defaultObjectGenerationMode?: LanguageModelV2ObjectGenerationMode;

   /**
    * Whether the model supports structured outputs.
@@ -56,7 +56,7 @@ model. `undefined` can be specified if object generation is not supported.
   supportsStructuredOutputs?: boolean;
 };

-export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
+export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
   readonly specificationVersion = 'v1';

   readonly supportsStructuredOutputs: boolean;
@@ -113,10 +113,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
     stopSequences,
     responseFormat,
     seed,
-  }: Parameters<LanguageModelV1['doGenerate']>[0]) {
+  }: Parameters<LanguageModelV2['doGenerate']>[0]) {
     const type = mode.type;

-    const warnings: LanguageModelV1CallWarning[] = [];
+    const warnings: LanguageModelV2CallWarning[] = [];

     if (topK != null) {
       warnings.push({
@@ -238,8 +238,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
   }

   async doGenerate(
-    options: Parameters<LanguageModelV1['doGenerate']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
+    options: Parameters<LanguageModelV2['doGenerate']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
     const { args, warnings } = this.getArgs({ ...options });

     const body = JSON.stringify(args);
@@ -267,7 +267,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
     const choice = responseBody.choices[0];

     // provider metadata:
-    const providerMetadata: LanguageModelV1ProviderMetadata = {
+    const providerMetadata: LanguageModelV2ProviderMetadata = {
       [this.providerOptionsName]: {},
       ...this.config.metadataExtractor?.extractMetadata?.({
         parsedBody: rawResponse,
@@ -317,11 +317,11 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
   }

   async doStream(
-    options: Parameters<LanguageModelV1['doStream']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
+    options: Parameters<LanguageModelV2['doStream']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
     if (this.settings.simulateStreaming) {
       const result = await this.doGenerate(options);

-      const simulatedStream = new ReadableStream<LanguageModelV1StreamPart>({
+      const simulatedStream = new ReadableStream<LanguageModelV2StreamPart>({
         start(controller) {
           controller.enqueue({ type: 'response-metadata', ...result.response });
           if (result.reasoning) {
@@ -409,7 +409,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
       hasFinished: boolean;
     }> = [];

-    let finishReason: LanguageModelV1FinishReason = 'unknown';
+    let finishReason: LanguageModelV2FinishReason = 'unknown';
     let usage: {
       completionTokens: number | undefined;
       completionTokensDetails: {
@@ -440,7 +440,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult>,
-          LanguageModelV1StreamPart
+          LanguageModelV2StreamPart
         >({
           // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
           transform(chunk, controller) {
@@ -643,7 +643,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
           },

           flush(controller) {
-            const providerMetadata: LanguageModelV1ProviderMetadata = {
+            const providerMetadata: LanguageModelV2ProviderMetadata = {
               [providerOptionsName]: {},
               ...metadataExtractor?.buildMetadata(),
             };
diff --git a/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts b/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts
index 601e706cea17..accef4e986a4 100644
--- a/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts
+++ b/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1Prompt } from '@ai-sdk/provider';
+import { LanguageModelV2Prompt } from '@ai-sdk/provider';
 import {
   convertReadableStreamToArray,
   createTestServer,
@@ -6,7 +6,7 @@ import {
 import { OpenAICompatibleChatLanguageModel } from './openai-compatible-chat-language-model';
 import { createOpenAICompatible } from './openai-compatible-provider';

-const TEST_PROMPT: LanguageModelV1Prompt = [
+const TEST_PROMPT: LanguageModelV2Prompt = [
   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
 ];

diff --git a/packages/openai-compatible/src/openai-compatible-completion-language-model.ts b/packages/openai-compatible/src/openai-compatible-completion-language-model.ts
index 1ba1b6dfe6d2..b4eccee4bb2d 100644
--- a/packages/openai-compatible/src/openai-compatible-completion-language-model.ts
+++ b/packages/openai-compatible/src/openai-compatible-completion-language-model.ts
@@ -1,9 +1,9 @@
 import {
   APICallError,
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
-  LanguageModelV1FinishReason,
-  LanguageModelV1StreamPart,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
+  LanguageModelV2FinishReason,
+  LanguageModelV2StreamPart,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import {
@@ -38,7 +38,7 @@ type OpenAICompatibleCompletionConfig = {
 };

 export class OpenAICompatibleCompletionLanguageModel
-  implements LanguageModelV1
+  implements LanguageModelV2
 {
   readonly specificationVersion = 'v1';
   readonly defaultObjectGenerationMode = undefined;
@@ -90,10 +90,10 @@ export class OpenAICompatibleCompletionLanguageModel
     responseFormat,
     seed,
     providerMetadata,
-  }: Parameters<LanguageModelV1['doGenerate']>[0]) {
+  }: Parameters<LanguageModelV2['doGenerate']>[0]) {
     const type = mode.type;

-    const warnings: LanguageModelV1CallWarning[] = [];
+    const warnings: LanguageModelV2CallWarning[] = [];

     if (topK != null) {
       warnings.push({
@@ -178,8 +178,8 @@ export class OpenAICompatibleCompletionLanguageModel
   }

   async doGenerate(
-    options: Parameters<LanguageModelV1['doGenerate']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
+    options: Parameters<LanguageModelV2['doGenerate']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
     const { args, warnings } = this.getArgs(options);

     const {
@@ -220,8 +220,8 @@ export class OpenAICompatibleCompletionLanguageModel
   }

   async doStream(
-    options: Parameters<LanguageModelV1['doStream']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
+    options: Parameters<LanguageModelV2['doStream']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
     const { args, warnings } = this.getArgs(options);

     const body = {
@@ -246,7 +246,7 @@ export class OpenAICompatibleCompletionLanguageModel

     const { prompt: rawPrompt, ...rawSettings } = args;

-    let finishReason: LanguageModelV1FinishReason = 'unknown';
+    let finishReason: LanguageModelV2FinishReason = 'unknown';
     let usage: { promptTokens: number; completionTokens: number } = {
       promptTokens: Number.NaN,
       completionTokens: Number.NaN,
@@ -257,7 +257,7 @@ export class OpenAICompatibleCompletionLanguageModel
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult>,
-          LanguageModelV1StreamPart
+          LanguageModelV2StreamPart
         >({
           transform(chunk, controller) {
             // handle failed chunk parsing / validation:
diff --git a/packages/openai-compatible/src/openai-compatible-metadata-extractor.ts b/packages/openai-compatible/src/openai-compatible-metadata-extractor.ts
index 0b8e25b80bca..15005b102bfc 100644
--- a/packages/openai-compatible/src/openai-compatible-metadata-extractor.ts
+++ b/packages/openai-compatible/src/openai-compatible-metadata-extractor.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1ProviderMetadata } from '@ai-sdk/provider';
+import { LanguageModelV2ProviderMetadata } from '@ai-sdk/provider';

 /**
 Extracts provider-specific metadata from API responses.
@@ -18,7 +18,7 @@ export type MetadataExtractor = {
     parsedBody,
   }: {
     parsedBody: unknown;
-  }) => LanguageModelV1ProviderMetadata | undefined;
+  }) => LanguageModelV2ProviderMetadata | undefined;

   /**
    * Creates an extractor for handling streaming responses. The returned object provides
@@ -43,6 +43,6 @@ export type MetadataExtractor = {
    * @returns Provider-specific metadata or undefined if no metadata is available.
    *          The metadata should be under a key indicating the provider id.
    */
-    buildMetadata(): LanguageModelV1ProviderMetadata | undefined;
+    buildMetadata(): LanguageModelV2ProviderMetadata | undefined;
   };
 };
diff --git a/packages/openai-compatible/src/openai-compatible-prepare-tools.ts b/packages/openai-compatible/src/openai-compatible-prepare-tools.ts
index 54c25708337c..ad5d53cdcc6b 100644
--- a/packages/openai-compatible/src/openai-compatible-prepare-tools.ts
+++ b/packages/openai-compatible/src/openai-compatible-prepare-tools.ts
@@ -1,6 +1,6 @@
 import {
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';

@@ -8,7 +8,7 @@ export function prepareTools({
   mode,
   structuredOutputs,
 }: {
-  mode: Parameters<LanguageModelV1['doGenerate']>[0]['mode'] & {
+  mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & {
     type: 'regular';
   };
   structuredOutputs: boolean;
@@ -29,11 +29,11 @@ export function prepareTools({
     | 'none'
     | 'required'
     | undefined;
-  toolWarnings: LanguageModelV1CallWarning[];
+  toolWarnings: LanguageModelV2CallWarning[];
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
   const tools = mode.tools?.length ? mode.tools : undefined;

-  const toolWarnings: LanguageModelV1CallWarning[] = [];
+  const toolWarnings: LanguageModelV2CallWarning[] = [];

   if (tools == null) {
     return { tools: undefined, tool_choice: undefined, toolWarnings };
diff --git a/packages/openai-compatible/src/openai-compatible-provider.ts b/packages/openai-compatible/src/openai-compatible-provider.ts
index 75c4c9530bee..a137004fe672 100644
--- a/packages/openai-compatible/src/openai-compatible-provider.ts
+++ b/packages/openai-compatible/src/openai-compatible-provider.ts
@@ -1,8 +1,8 @@
 import {
   EmbeddingModelV1,
   ImageModelV1,
-  LanguageModelV1,
-  ProviderV1,
+  LanguageModelV2,
+  ProviderV2,
 } from '@ai-sdk/provider';
 import { FetchFunction, withoutTrailingSlash } from '@ai-sdk/provider-utils';
 import { OpenAICompatibleChatLanguageModel } from './openai-compatible-chat-language-model';
@@ -19,26 +19,26 @@ export interface OpenAICompatibleProvider<
   COMPLETION_MODEL_IDS extends string = string,
   EMBEDDING_MODEL_IDS extends string = string,
   IMAGE_MODEL_IDS extends string = string,
-> extends Omit {
+> extends Omit {
   (
     modelId: CHAT_MODEL_IDS,
     settings?: OpenAICompatibleChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   languageModel(
     modelId: CHAT_MODEL_IDS,
     settings?: OpenAICompatibleChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   chatModel(
     modelId: CHAT_MODEL_IDS,
     settings?: OpenAICompatibleChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   completionModel(
     modelId: COMPLETION_MODEL_IDS,
     settings?: OpenAICompatibleCompletionSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   textEmbeddingModel(
     modelId: EMBEDDING_MODEL_IDS,
diff --git a/packages/openai/src/convert-to-openai-chat-messages.ts b/packages/openai/src/convert-to-openai-chat-messages.ts
index fc284b29d0a6..10c29145c062 100644
--- a/packages/openai/src/convert-to-openai-chat-messages.ts
+++ b/packages/openai/src/convert-to-openai-chat-messages.ts
@@ -1,6 +1,6 @@
 import {
-  LanguageModelV1CallWarning,
-  LanguageModelV1Prompt,
+  LanguageModelV2CallWarning,
+  LanguageModelV2Prompt,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';
@@ -11,15 +11,15 @@ export function convertToOpenAIChatMessages({
   useLegacyFunctionCalling = false,
   systemMessageMode = 'system',
 }: {
-  prompt: LanguageModelV1Prompt;
+  prompt: LanguageModelV2Prompt;
   useLegacyFunctionCalling?: boolean;
   systemMessageMode?: 'system' | 'developer' | 'remove';
 }): {
   messages: OpenAIChatPrompt;
-  warnings: Array<LanguageModelV1CallWarning>;
+  warnings: Array<LanguageModelV2CallWarning>;
 } {
   const messages: OpenAIChatPrompt = [];
-  const warnings: Array<LanguageModelV1CallWarning> = [];
+  const warnings: Array<LanguageModelV2CallWarning> = [];

   for (const { role, content } of prompt) {
     switch (role) {
diff --git a/packages/openai/src/convert-to-openai-completion-prompt.ts b/packages/openai/src/convert-to-openai-completion-prompt.ts
index bf93884c3ae8..fc41184022ad 100644
--- a/packages/openai/src/convert-to-openai-completion-prompt.ts
+++ b/packages/openai/src/convert-to-openai-completion-prompt.ts
@@ -1,6 +1,6 @@
 import {
   InvalidPromptError,
-  LanguageModelV1Prompt,
+  LanguageModelV2Prompt,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';

@@ -10,7 +10,7 @@ export function convertToOpenAICompletionPrompt({
   user = 'user',
   assistant = 'assistant',
 }: {
-  prompt: LanguageModelV1Prompt;
+  prompt: LanguageModelV2Prompt;
   inputFormat: 'prompt' | 'messages';
   user?: string;
   assistant?: string;
diff --git a/packages/openai/src/map-openai-chat-logprobs.ts b/packages/openai/src/map-openai-chat-logprobs.ts
index 25ff45638577..77d0abb6122d 100644
--- a/packages/openai/src/map-openai-chat-logprobs.ts
+++ b/packages/openai/src/map-openai-chat-logprobs.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1LogProbs } from '@ai-sdk/provider';
+import { LanguageModelV2LogProbs } from '@ai-sdk/provider';

 type OpenAIChatLogProbs = {
   content:
@@ -17,7 +17,7 @@ type OpenAIChatLogProbs = {

 export function mapOpenAIChatLogProbsOutput(
   logprobs: OpenAIChatLogProbs | null | undefined,
-): LanguageModelV1LogProbs | undefined {
+): LanguageModelV2LogProbs | undefined {
   return (
     logprobs?.content?.map(({ token, logprob, top_logprobs }) => ({
       token,
diff --git a/packages/openai/src/map-openai-completion-logprobs.ts b/packages/openai/src/map-openai-completion-logprobs.ts
index 4cbd821e406e..761ac3d0c73a 100644
--- a/packages/openai/src/map-openai-completion-logprobs.ts
+++ b/packages/openai/src/map-openai-completion-logprobs.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1LogProbs } from '@ai-sdk/provider';
+import { LanguageModelV2LogProbs } from '@ai-sdk/provider';

 type OpenAICompletionLogProps = {
   tokens: string[];
@@ -8,7 +8,7 @@ type OpenAICompletionLogProps = {

 export function mapOpenAICompletionLogProbs(
   logprobs: OpenAICompletionLogProps | null | undefined,
-): LanguageModelV1LogProbs | undefined {
+): LanguageModelV2LogProbs | undefined {
   return logprobs?.tokens.map((token, index) => ({
     token,
     logprob: logprobs.token_logprobs[index],
diff --git a/packages/openai/src/map-openai-finish-reason.ts b/packages/openai/src/map-openai-finish-reason.ts
index f27414f9b407..85b5b7c056b1 100644
--- a/packages/openai/src/map-openai-finish-reason.ts
+++ b/packages/openai/src/map-openai-finish-reason.ts
@@ -1,8 +1,8 @@
-import { LanguageModelV1FinishReason } from '@ai-sdk/provider';
+import { LanguageModelV2FinishReason } from '@ai-sdk/provider';

 export function mapOpenAIFinishReason(
   finishReason: string | null | undefined,
-): LanguageModelV1FinishReason {
+): LanguageModelV2FinishReason {
   switch (finishReason) {
     case 'stop':
       return 'stop';
diff --git a/packages/openai/src/openai-chat-language-model.test.ts b/packages/openai/src/openai-chat-language-model.test.ts
index c8bf86f7d45d..e69ae35bf5b0 100644
--- a/packages/openai/src/openai-chat-language-model.test.ts
+++ b/packages/openai/src/openai-chat-language-model.test.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1, LanguageModelV1Prompt } from '@ai-sdk/provider';
+import { LanguageModelV2, LanguageModelV2Prompt } from '@ai-sdk/provider';
 import {
   convertReadableStreamToArray,
   createTestServer,
@@ -6,7 +6,7 @@ import {
 import { mapOpenAIChatLogProbsOutput } from './map-openai-chat-logprobs';
 import { createOpenAI } from './openai-provider';

-const TEST_PROMPT: LanguageModelV1Prompt = [
+const TEST_PROMPT: LanguageModelV2Prompt = [
   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
 ];

@@ -608,7 +608,7 @@ describe('doGenerate', () => {
   });

   describe('when useLegacyFunctionCalling is enabled', () => {
-    let result: Awaited<ReturnType<LanguageModelV1['doGenerate']>>;
+    let result: Awaited<ReturnType<LanguageModelV2['doGenerate']>>;

     beforeEach(async () => {
       prepareJsonResponse({
diff --git a/packages/openai/src/openai-chat-language-model.ts b/packages/openai/src/openai-chat-language-model.ts
index 247d9d4a44fb..4aba40834230 100644
--- a/packages/openai/src/openai-chat-language-model.ts
+++ b/packages/openai/src/openai-chat-language-model.ts
@@ -1,11 +1,11 @@
 import {
   InvalidResponseDataError,
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
-  LanguageModelV1FinishReason,
-  LanguageModelV1LogProbs,
-  LanguageModelV1ProviderMetadata,
-  LanguageModelV1StreamPart,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
+  LanguageModelV2FinishReason,
+  LanguageModelV2LogProbs,
+  LanguageModelV2ProviderMetadata,
+  LanguageModelV2StreamPart,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import {
@@ -38,7 +38,7 @@ type OpenAIChatConfig = {
   fetch?: FetchFunction;
 };

-export class OpenAIChatLanguageModel implements LanguageModelV1 {
+export class OpenAIChatLanguageModel implements LanguageModelV2 {
   readonly specificationVersion = 'v1';

   readonly modelId: OpenAIChatModelId;
@@ -94,10 +94,10 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
     responseFormat,
     seed,
     providerMetadata,
-  }: Parameters<LanguageModelV1['doGenerate']>[0]) {
+  }: Parameters<LanguageModelV2['doGenerate']>[0]) {
     const type = mode.type;

-    const warnings: LanguageModelV1CallWarning[] = [];
+    const warnings: LanguageModelV2CallWarning[] = [];

     if (topK != null) {
       warnings.push({
@@ -356,8 +356,8 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
   }

   async doGenerate(
-    options: Parameters<LanguageModelV1['doGenerate']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
+    options: Parameters<LanguageModelV2['doGenerate']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
     const { args: body, warnings } = this.getArgs(options);

     const {
@@ -385,7 +385,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
     // provider metadata:
     const completionTokenDetails = response.usage?.completion_tokens_details;
     const promptTokenDetails = response.usage?.prompt_tokens_details;
-    const providerMetadata: LanguageModelV1ProviderMetadata = { openai: {} };
+    const providerMetadata: LanguageModelV2ProviderMetadata = { openai: {} };
     if (completionTokenDetails?.reasoning_tokens != null) {
       providerMetadata.openai.reasoningTokens =
         completionTokenDetails?.reasoning_tokens;
@@ -437,12 +437,12 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
   }

   async doStream(
-    options: Parameters<LanguageModelV1['doStream']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
+    options: Parameters<LanguageModelV2['doStream']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
     if (this.settings.simulateStreaming) {
       const result = await this.doGenerate(options);

-      const simulatedStream = new ReadableStream<LanguageModelV1StreamPart>({
+      const simulatedStream = new ReadableStream<LanguageModelV2StreamPart>({
         start(controller) {
           controller.enqueue({ type: 'response-metadata', ...result.response });
           if (result.text) {
@@ -525,7 +525,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
       hasFinished: boolean;
     }> = [];

-    let finishReason: LanguageModelV1FinishReason = 'unknown';
+    let finishReason: LanguageModelV2FinishReason = 'unknown';
     let usage: {
       promptTokens: number | undefined;
       completionTokens: number | undefined;
@@ -533,18 +533,18 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
       promptTokens: undefined,
       completionTokens: undefined,
     };
-    let logprobs: LanguageModelV1LogProbs;
+    let logprobs: LanguageModelV2LogProbs;
     let isFirstChunk = true;

     const { useLegacyFunctionCalling } = this.settings;

-    const providerMetadata: LanguageModelV1ProviderMetadata = { openai: {} };
+    const providerMetadata: LanguageModelV2ProviderMetadata = { openai: {} };

     return {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult>,
-          LanguageModelV1StreamPart
+          LanguageModelV2StreamPart
         >({
           transform(chunk, controller) {
             // handle failed chunk parsing / validation:
diff --git a/packages/openai/src/openai-completion-language-model.test.ts b/packages/openai/src/openai-completion-language-model.test.ts
index f17a8d558f99..e0dce07c98c7 100644
--- a/packages/openai/src/openai-completion-language-model.test.ts
+++ b/packages/openai/src/openai-completion-language-model.test.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV1Prompt } from '@ai-sdk/provider';
+import { LanguageModelV2Prompt } from '@ai-sdk/provider';
 import {
   convertReadableStreamToArray,
   createTestServer,
@@ -6,7 +6,7 @@ import {
 import { mapOpenAICompletionLogProbs } from './map-openai-completion-logprobs';
 import { createOpenAI } from './openai-provider';

-const TEST_PROMPT: LanguageModelV1Prompt = [
+const TEST_PROMPT: LanguageModelV2Prompt = [
   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
 ];

diff --git a/packages/openai/src/openai-completion-language-model.ts b/packages/openai/src/openai-completion-language-model.ts
index 8bd3a80ba625..e69d4e472139 100644
--- a/packages/openai/src/openai-completion-language-model.ts
+++ b/packages/openai/src/openai-completion-language-model.ts
@@ -1,9 +1,9 @@
 import {
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
-  LanguageModelV1FinishReason,
-  LanguageModelV1LogProbs,
-  LanguageModelV1StreamPart,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
+  LanguageModelV2FinishReason,
+  LanguageModelV2LogProbs,
+  LanguageModelV2StreamPart,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import {
@@ -36,7 +36,7 @@ type OpenAICompletionConfig = {
   fetch?: FetchFunction;
 };

-export class OpenAICompletionLanguageModel implements LanguageModelV1 {
+export class OpenAICompletionLanguageModel implements LanguageModelV2 {
   readonly specificationVersion = 'v1';
   readonly defaultObjectGenerationMode = undefined;

@@ -72,10 +72,10 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 {
     stopSequences: userStopSequences,
     responseFormat,
     seed,
-  }: Parameters<LanguageModelV1['doGenerate']>[0]) {
+  }: Parameters<LanguageModelV2['doGenerate']>[0]) {
     const type = mode.type;

-    const warnings: LanguageModelV1CallWarning[] = [];
+    const warnings: LanguageModelV2CallWarning[] = [];

     if (topK != null) {
       warnings.push({
@@ -167,8 +167,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 {
   }

   async doGenerate(
-    options: Parameters<LanguageModelV1['doGenerate']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
+    options: Parameters<LanguageModelV2['doGenerate']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
     const { args, warnings } = this.getArgs(options);

     const {
@@ -210,8 +210,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 {
   }

   async doStream(
-    options: Parameters<LanguageModelV1['doStream']>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
+    options: Parameters<LanguageModelV2['doStream']>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
     const { args, warnings } = this.getArgs(options);

     const body = {
@@ -242,19 +242,19 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 {

     const { prompt: rawPrompt, ...rawSettings } = args;

-    let finishReason: LanguageModelV1FinishReason = 'unknown';
+    let finishReason: LanguageModelV2FinishReason = 'unknown';
     let usage: { promptTokens: number; completionTokens: number } = {
       promptTokens: Number.NaN,
       completionTokens: Number.NaN,
     };
-    let logprobs: LanguageModelV1LogProbs;
+    let logprobs: LanguageModelV2LogProbs;
     let isFirstChunk = true;

     return {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult>,
-          LanguageModelV1StreamPart
+          LanguageModelV2StreamPart
         >({
           transform(chunk, controller) {
             // handle failed chunk parsing / validation:
diff --git a/packages/openai/src/openai-prepare-tools.ts b/packages/openai/src/openai-prepare-tools.ts
index ed232e8cb9d0..8ff7442f6476 100644
--- a/packages/openai/src/openai-prepare-tools.ts
+++ b/packages/openai/src/openai-prepare-tools.ts
@@ -1,7 +1,7 @@
 import {
   JSONSchema7,
-  LanguageModelV1,
-  LanguageModelV1CallWarning,
+  LanguageModelV2,
+  LanguageModelV2CallWarning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';

@@ -10,7 +10,7 @@ export function prepareTools({
   useLegacyFunctionCalling = false,
   structuredOutputs,
 }: {
-  mode: Parameters<LanguageModelV1['doGenerate']>[0]['mode'] & {
+  mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & {
     type: 'regular';
   };
   useLegacyFunctionCalling: boolean | undefined;
@@ -38,12 +38,12 @@ export function prepareTools({
     parameters: JSONSchema7;
   }[];
   function_call?: { name: string };
-  toolWarnings: Array<LanguageModelV1CallWarning>;
+  toolWarnings: Array<LanguageModelV2CallWarning>;
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
   const tools = mode.tools?.length ? mode.tools : undefined;

-  const toolWarnings: LanguageModelV1CallWarning[] = [];
+  const toolWarnings: LanguageModelV2CallWarning[] = [];

   if (tools == null) {
     return { tools: undefined, tool_choice: undefined, toolWarnings };
diff --git a/packages/openai/src/openai-provider.ts b/packages/openai/src/openai-provider.ts
index 8b403073d94a..8383bdb101f6 100644
--- a/packages/openai/src/openai-provider.ts
+++ b/packages/openai/src/openai-provider.ts
@@ -1,8 +1,8 @@
 import {
   EmbeddingModelV1,
   ImageModelV1,
-  LanguageModelV1,
-  ProviderV1,
+  LanguageModelV2,
+  ProviderV2,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -26,16 +26,16 @@ import {
   OpenAIImageModelId,
   OpenAIImageSettings,
 } from './openai-image-settings';
+import { openaiTools } from './openai-tools';
 import { OpenAIResponsesLanguageModel } from './responses/openai-responses-language-model';
 import { OpenAIResponsesModelId } from './responses/openai-responses-settings';
-import { openaiTools } from './openai-tools';

-export interface OpenAIProvider extends ProviderV1 {
+export interface OpenAIProvider extends ProviderV2 {
   (
     modelId: 'gpt-3.5-turbo-instruct',
     settings?: OpenAICompletionSettings,
   ): OpenAICompletionLanguageModel;
-  (modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV1;
+  (modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV2;

   /**
 Creates an OpenAI model for text generation.
@@ -47,7 +47,7 @@ Creates an OpenAI model for text generation.
   languageModel(
     modelId: OpenAIChatModelId,
     settings?: OpenAIChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
 Creates an OpenAI chat model for text generation.
@@ -55,12 +55,12 @@ Creates an OpenAI chat model for text generation.
   chat(
     modelId: OpenAIChatModelId,
     settings?: OpenAIChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
 Creates an OpenAI responses API model for text generation.
 */
-  responses(modelId: OpenAIResponsesModelId): LanguageModelV1;
+  responses(modelId: OpenAIResponsesModelId): LanguageModelV2;

   /**
 Creates an OpenAI completion model for text generation.
@@ -68,7 +68,7 @@ Creates an OpenAI completion model for text generation.
   completion(
     modelId: OpenAICompletionModelId,
     settings?: OpenAICompletionSettings,
-  ): LanguageModelV1;
+  ): LanguageModelV2;

   /**
 Creates a model for text embeddings.
diff --git a/packages/openai/src/responses/convert-to-openai-responses-messages.ts b/packages/openai/src/responses/convert-to-openai-responses-messages.ts index 46ca2fea3775..3f0dc99cbe3a 100644 --- a/packages/openai/src/responses/convert-to-openai-responses-messages.ts +++ b/packages/openai/src/responses/convert-to-openai-responses-messages.ts @@ -1,6 +1,6 @@ import { - LanguageModelV1CallWarning, - LanguageModelV1Prompt, + LanguageModelV2CallWarning, + LanguageModelV2Prompt, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; @@ -10,14 +10,14 @@ export function convertToOpenAIResponsesMessages({ prompt, systemMessageMode, }: { - prompt: LanguageModelV1Prompt; + prompt: LanguageModelV2Prompt; systemMessageMode: 'system' | 'developer' | 'remove'; }): { messages: OpenAIResponsesPrompt; - warnings: Array; + warnings: Array; } { const messages: OpenAIResponsesPrompt = []; - const warnings: Array = []; + const warnings: Array = []; for (const { role, content } of prompt) { switch (role) { diff --git a/packages/openai/src/responses/map-openai-responses-finish-reason.ts b/packages/openai/src/responses/map-openai-responses-finish-reason.ts index 8db30a46005d..6ea717cd5a53 100644 --- a/packages/openai/src/responses/map-openai-responses-finish-reason.ts +++ b/packages/openai/src/responses/map-openai-responses-finish-reason.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1FinishReason } from '@ai-sdk/provider'; +import { LanguageModelV2FinishReason } from '@ai-sdk/provider'; export function mapOpenAIResponseFinishReason({ finishReason, @@ -6,7 +6,7 @@ export function mapOpenAIResponseFinishReason({ }: { finishReason: string | null | undefined; hasToolCalls: boolean; -}): LanguageModelV1FinishReason { +}): LanguageModelV2FinishReason { switch (finishReason) { case undefined: case null: diff --git a/packages/openai/src/responses/openai-responses-language-model.test.ts b/packages/openai/src/responses/openai-responses-language-model.test.ts index 616da0ae3b15..ce1e5b35638c 100644 --- a/packages/openai/src/responses/openai-responses-language-model.test.ts +++ b/packages/openai/src/responses/openai-responses-language-model.test.ts @@ -1,6 +1,6 @@ import { - LanguageModelV1FunctionTool, - LanguageModelV1Prompt, + LanguageModelV2FunctionTool, + LanguageModelV2Prompt, } from '@ai-sdk/provider'; import { convertReadableStreamToArray, @@ -9,11 +9,11 @@ import { } from '@ai-sdk/provider-utils/test'; import { OpenAIResponsesLanguageModel } from './openai-responses-language-model'; -const TEST_PROMPT: LanguageModelV1Prompt = [ +const TEST_PROMPT: LanguageModelV2Prompt = [ { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, ]; -const TEST_TOOLS: Array = [ +const TEST_TOOLS: Array = [ { type: 'function', name: 'weather', diff --git a/packages/openai/src/responses/openai-responses-language-model.ts b/packages/openai/src/responses/openai-responses-language-model.ts index b3da02d0c1fe..4d6f46816bfd 100644 --- a/packages/openai/src/responses/openai-responses-language-model.ts +++ b/packages/openai/src/responses/openai-responses-language-model.ts @@ -1,8 +1,8 @@ import { - LanguageModelV1, - LanguageModelV1CallWarning, - LanguageModelV1FinishReason, - LanguageModelV1StreamPart, + LanguageModelV2, + LanguageModelV2CallWarning, + LanguageModelV2FinishReason, + LanguageModelV2StreamPart, } from '@ai-sdk/provider'; import { combineHeaders, @@ -21,7 +21,7 @@ import { mapOpenAIResponseFinishReason } from './map-openai-responses-finish-rea import { 
prepareResponsesTools } from './openai-responses-prepare-tools'; import { OpenAIResponsesModelId } from './openai-responses-settings'; -export class OpenAIResponsesLanguageModel implements LanguageModelV1 { +export class OpenAIResponsesLanguageModel implements LanguageModelV2 { readonly specificationVersion = 'v1'; readonly defaultObjectGenerationMode = 'json'; @@ -51,8 +51,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV1 { prompt, providerMetadata, responseFormat, - }: Parameters[0]) { - const warnings: LanguageModelV1CallWarning[] = []; + }: Parameters[0]) { + const warnings: LanguageModelV2CallWarning[] = []; const modelConfig = getResponsesModelConfig(this.modelId); const type = mode.type; @@ -234,8 +234,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV1 { } async doGenerate( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { args: body, warnings } = this.getArgs(options); const { @@ -364,8 +364,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV1 { } async doStream( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { args: body, warnings } = this.getArgs(options); const { responseHeaders, value: response } = await postJsonToApi({ @@ -388,7 +388,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV1 { const self = this; - let finishReason: LanguageModelV1FinishReason = 'unknown'; + let finishReason: LanguageModelV2FinishReason = 'unknown'; let promptTokens = NaN; let completionTokens = NaN; let cachedPromptTokens: number | null = null; @@ -404,7 +404,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV1 { stream: response.pipeThrough( new TransformStream< ParseResult>, - LanguageModelV1StreamPart + LanguageModelV2StreamPart >({ transform(chunk, controller) { // handle failed chunk parsing / validation: diff --git a/packages/openai/src/responses/openai-responses-prepare-tools.ts b/packages/openai/src/responses/openai-responses-prepare-tools.ts index abc583f14b31..60fbb5facf1e 100644 --- a/packages/openai/src/responses/openai-responses-prepare-tools.ts +++ b/packages/openai/src/responses/openai-responses-prepare-tools.ts @@ -1,6 +1,6 @@ import { - LanguageModelV1, - LanguageModelV1CallWarning, + LanguageModelV2, + LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { OpenAIResponsesTool } from './openai-responses-api-types'; @@ -9,7 +9,7 @@ export function prepareResponsesTools({ mode, strict, }: { - mode: Parameters[0]['mode'] & { + mode: Parameters[0]['mode'] & { type: 'regular'; }; strict: boolean; @@ -21,12 +21,12 @@ export function prepareResponsesTools({ | 'required' | { type: 'web_search_preview' } | { type: 'function'; name: string }; - toolWarnings: LanguageModelV1CallWarning[]; + toolWarnings: LanguageModelV2CallWarning[]; } { // when the tools array is empty, change it to undefined to prevent errors: const tools = mode.tools?.length ? 
mode.tools : undefined; - const toolWarnings: LanguageModelV1CallWarning[] = []; + const toolWarnings: LanguageModelV2CallWarning[] = []; if (tools == null) { return { tools: undefined, tool_choice: undefined, toolWarnings }; diff --git a/packages/perplexity/src/convert-to-perplexity-messages.ts b/packages/perplexity/src/convert-to-perplexity-messages.ts index d022a3188da6..14b41ebd1c16 100644 --- a/packages/perplexity/src/convert-to-perplexity-messages.ts +++ b/packages/perplexity/src/convert-to-perplexity-messages.ts @@ -1,11 +1,11 @@ import { - LanguageModelV1Prompt, + LanguageModelV2Prompt, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { PerplexityPrompt } from './perplexity-language-model-prompt'; export function convertToPerplexityMessages( - prompt: LanguageModelV1Prompt, + prompt: LanguageModelV2Prompt, ): PerplexityPrompt { const messages: PerplexityPrompt = []; diff --git a/packages/perplexity/src/map-perplexity-finish-reason.ts b/packages/perplexity/src/map-perplexity-finish-reason.ts index aee2004d5547..61538a5a0958 100644 --- a/packages/perplexity/src/map-perplexity-finish-reason.ts +++ b/packages/perplexity/src/map-perplexity-finish-reason.ts @@ -1,8 +1,8 @@ -import { LanguageModelV1FinishReason } from '@ai-sdk/provider'; +import { LanguageModelV2FinishReason } from '@ai-sdk/provider'; export function mapPerplexityFinishReason( finishReason: string | null | undefined, -): LanguageModelV1FinishReason { +): LanguageModelV2FinishReason { switch (finishReason) { case 'stop': case 'length': diff --git a/packages/perplexity/src/perplexity-language-model.test.ts b/packages/perplexity/src/perplexity-language-model.test.ts index 5c0a6a1c4243..39972430d3d6 100644 --- a/packages/perplexity/src/perplexity-language-model.test.ts +++ b/packages/perplexity/src/perplexity-language-model.test.ts @@ -1,5 +1,5 @@ import { - LanguageModelV1Prompt, + LanguageModelV2Prompt, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { @@ -13,7 +13,7 @@ import { } from './perplexity-language-model'; import { z } from 'zod'; -const TEST_PROMPT: LanguageModelV1Prompt = [ +const TEST_PROMPT: LanguageModelV2Prompt = [ { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, ]; diff --git a/packages/perplexity/src/perplexity-language-model.ts b/packages/perplexity/src/perplexity-language-model.ts index 0f189bf72041..b3538deea37a 100644 --- a/packages/perplexity/src/perplexity-language-model.ts +++ b/packages/perplexity/src/perplexity-language-model.ts @@ -1,8 +1,8 @@ import { - LanguageModelV1, - LanguageModelV1CallWarning, - LanguageModelV1FinishReason, - LanguageModelV1StreamPart, + LanguageModelV2, + LanguageModelV2CallWarning, + LanguageModelV2FinishReason, + LanguageModelV2StreamPart, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { @@ -26,7 +26,7 @@ type PerplexityChatConfig = { fetch?: FetchFunction; }; -export class PerplexityLanguageModel implements LanguageModelV1 { +export class PerplexityLanguageModel implements LanguageModelV2 { readonly specificationVersion = 'v1'; readonly defaultObjectGenerationMode = 'json'; readonly supportsStructuredOutputs = true; @@ -58,10 +58,10 @@ export class PerplexityLanguageModel implements LanguageModelV1 { responseFormat, seed, providerMetadata, - }: Parameters[0]) { + }: Parameters[0]) { const type = mode.type; - const warnings: LanguageModelV1CallWarning[] = []; + const warnings: LanguageModelV2CallWarning[] = []; if (topK != null) { warnings.push({ @@ -144,8 +144,8 @@ export class PerplexityLanguageModel 
implements LanguageModelV1 { } async doGenerate( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { args, warnings } = this.getArgs(options); const { @@ -208,8 +208,8 @@ export class PerplexityLanguageModel implements LanguageModelV1 { } async doStream( - options: Parameters[0], - ): Promise>> { + options: Parameters[0], + ): Promise>> { const { args, warnings } = this.getArgs(options); const body = { ...args, stream: true }; @@ -231,7 +231,7 @@ export class PerplexityLanguageModel implements LanguageModelV1 { const { messages: rawPrompt, ...rawSettings } = args; - let finishReason: LanguageModelV1FinishReason = 'unknown'; + let finishReason: LanguageModelV2FinishReason = 'unknown'; let usage: { promptTokens: number; completionTokens: number } = { promptTokens: Number.NaN, completionTokens: Number.NaN, @@ -266,7 +266,7 @@ export class PerplexityLanguageModel implements LanguageModelV1 { stream: response.pipeThrough( new TransformStream< ParseResult>, - LanguageModelV1StreamPart + LanguageModelV2StreamPart >({ transform(chunk, controller) { if (!chunk.success) { diff --git a/packages/perplexity/src/perplexity-provider.ts b/packages/perplexity/src/perplexity-provider.ts index 862b18738d88..fac4f63bfc49 100644 --- a/packages/perplexity/src/perplexity-provider.ts +++ b/packages/perplexity/src/perplexity-provider.ts @@ -1,7 +1,7 @@ import { - LanguageModelV1, + LanguageModelV2, NoSuchModelError, - ProviderV1, + ProviderV2, } from '@ai-sdk/provider'; import { FetchFunction, @@ -12,16 +12,16 @@ import { import { PerplexityLanguageModel } from './perplexity-language-model'; import { PerplexityLanguageModelId } from './perplexity-language-model-settings'; -export interface PerplexityProvider extends ProviderV1 { +export interface PerplexityProvider extends ProviderV2 { /** Creates an Perplexity chat model for text generation. */ - (modelId: PerplexityLanguageModelId): LanguageModelV1; + (modelId: PerplexityLanguageModelId): LanguageModelV2; /** Creates an Perplexity language model for text generation. 
*/ - languageModel(modelId: PerplexityLanguageModelId): LanguageModelV1; + languageModel(modelId: PerplexityLanguageModelId): LanguageModelV2; } export interface PerplexityProviderSettings { @@ -78,6 +78,9 @@ export function createPerplexity( provider.textEmbeddingModel = (modelId: string) => { throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' }); }; + provider.imageModel = (modelId: string) => { + throw new NoSuchModelError({ modelId, modelType: 'imageModel' }); + }; return provider; } diff --git a/packages/provider/src/language-model/index.ts b/packages/provider/src/language-model/index.ts index e69fb44d6835..1e418c92a257 100644 --- a/packages/provider/src/language-model/index.ts +++ b/packages/provider/src/language-model/index.ts @@ -1 +1,2 @@ export * from './v1/index'; +export * from './v2/index'; diff --git a/packages/provider/src/language-model/v2/index.ts b/packages/provider/src/language-model/v2/index.ts new file mode 100644 index 000000000000..673a8d0f53cf --- /dev/null +++ b/packages/provider/src/language-model/v2/index.ts @@ -0,0 +1,12 @@ +export * from './language-model-v2-source'; +export * from './language-model-v2'; +export * from './language-model-v2-call-options'; +export * from './language-model-v2-call-warning'; +export * from './language-model-v2-finish-reason'; +export * from './language-model-v2-function-tool'; +export * from './language-model-v2-function-tool-call'; +export * from './language-model-v2-logprobs'; +export * from './language-model-v2-prompt'; +export * from './language-model-v2-provider-defined-tool'; +export * from './language-model-v2-provider-metadata'; +export * from './language-model-v2-tool-choice'; diff --git a/packages/provider/src/language-model/v2/language-model-v2-call-options.ts b/packages/provider/src/language-model/v2/language-model-v2-call-options.ts new file mode 100644 index 000000000000..23aa48e9d2f0 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-call-options.ts @@ -0,0 +1,90 @@ +import { JSONSchema7 } from 'json-schema'; +import { LanguageModelV2CallSettings } from './language-model-v2-call-settings'; +import { LanguageModelV2FunctionTool } from './language-model-v2-function-tool'; +import { LanguageModelV2Prompt } from './language-model-v2-prompt'; +import { LanguageModelV2ProviderDefinedTool } from './language-model-v2-provider-defined-tool'; +import { LanguageModelV2ProviderMetadata } from './language-model-v2-provider-metadata'; +import { LanguageModelV2ToolChoice } from './language-model-v2-tool-choice'; + +export type LanguageModelV2CallOptions = LanguageModelV2CallSettings & { + /** +Whether the user provided the input as messages or as +a prompt. This can help guide non-chat models in the +expansion, bc different expansions can be needed for +chat/non-chat use cases. + */ + inputFormat: 'messages' | 'prompt'; + + /** +The mode affects the behavior of the language model. It is required to +support provider-independent streaming and generation of structured objects. +The model can take this information and e.g. configure json mode, the correct +low level grammar, etc. It can also be used to optimize the efficiency of the +streaming, e.g. tool-delta stream parts are only needed in the +object-tool mode. + +@deprecated mode will be removed in v2. +All necessary settings will be directly supported through the call settings, +in particular responseFormat, toolChoice, and tools. 
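For concreteness, a hedged sketch of a complete `LanguageModelV2CallOptions` value in the `'regular'` mode described below; the tool, prompt, and setting values are illustrative, while the field shapes follow the definitions in this file:

```ts
import { LanguageModelV2CallOptions } from '@ai-sdk/provider';

const options: LanguageModelV2CallOptions = {
  inputFormat: 'messages',
  mode: {
    type: 'regular',
    tools: [
      {
        type: 'function',
        name: 'weather',
        description: 'Get the weather for a location.',
        parameters: {
          type: 'object',
          properties: { location: { type: 'string' } },
          required: ['location'],
        },
      },
    ],
    toolChoice: { type: 'auto' },
  },
  prompt: [
    { role: 'user', content: [{ type: 'text', text: 'Weather in Berlin?' }] },
  ],
  // call settings are merged in via LanguageModelV2CallSettings:
  temperature: 0,
  maxTokens: 1024,
};
```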
+ */ + mode: + | { + // stream text & complete tool calls + type: 'regular'; + + /** +The tools that are available for the model. + */ + // TODO Spec V2: move to call settings + tools?: Array< + LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool + >; + + /** +Specifies how the tool should be selected. Defaults to 'auto'. + */ + // TODO Spec V2: move to call settings + toolChoice?: LanguageModelV2ToolChoice; + } + | { + // object generation with json mode enabled (streaming: text delta) + type: 'object-json'; + + /** + * JSON schema that the generated output should conform to. + */ + schema?: JSONSchema7; + + /** + * Name of output that should be generated. Used by some providers for additional LLM guidance. + */ + name?: string; + + /** + * Description of the output that should be generated. Used by some providers for additional LLM guidance. + */ + description?: string; + } + | { + // object generation with tool mode enabled (streaming: tool call deltas) + type: 'object-tool'; + tool: LanguageModelV2FunctionTool; + }; + + /** +A language mode prompt is a standardized prompt type. + +Note: This is **not** the user-facing prompt. The AI SDK methods will map the +user-facing prompt types such as chat or instruction prompts to this format. +That approach allows us to evolve the user facing prompts without breaking +the language model interface. + */ + prompt: LanguageModelV2Prompt; + + /** +Additional provider-specific metadata. +The metadata is passed through to the provider from the AI SDK and enables +provider-specific functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; +}; diff --git a/packages/provider/src/language-model/v2/language-model-v2-call-settings.ts b/packages/provider/src/language-model/v2/language-model-v2-call-settings.ts new file mode 100644 index 000000000000..98a47b73ffdb --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-call-settings.ts @@ -0,0 +1,92 @@ +import { JSONSchema7 } from 'json-schema'; + +export type LanguageModelV2CallSettings = { + /** +Maximum number of tokens to generate. + */ + maxTokens?: number; + + /** +Temperature setting. + +It is recommended to set either `temperature` or `topP`, but not both. + */ + temperature?: number; + + /** +Stop sequences. +If set, the model will stop generating text when one of the stop sequences is generated. +Providers may have limits on the number of stop sequences. + */ + stopSequences?: string[]; + + /** +Nucleus sampling. + +It is recommended to set either `temperature` or `topP`, but not both. + */ + topP?: number; + + /** +Only sample from the top K options for each subsequent token. + +Used to remove "long tail" low probability responses. +Recommended for advanced use cases only. You usually only need to use temperature. + */ + topK?: number; + + /** +Presence penalty setting. It affects the likelihood of the model to +repeat information that is already in the prompt. + */ + presencePenalty?: number; + + /** +Frequency penalty setting. It affects the likelihood of the model +to repeatedly use the same words or phrases. + */ + frequencyPenalty?: number; + + /** +Response format. The output can either be text or JSON. Default is text. + +If JSON is selected, a schema can optionally be provided to guide the LLM. + */ + responseFormat?: + | { type: 'text' } + | { + type: 'json'; + + /** + * JSON schema that the generated output should conform to. 
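The `responseFormat` branch here pairs `type: 'json'` with an optional schema plus a name and description. A small sketch of settings requesting schema-guided JSON output (all concrete values are illustrative):

```ts
import { LanguageModelV2CallOptions } from '@ai-sdk/provider';

// the settings keys are part of the call options via the intersection type
const settings: Pick<
  LanguageModelV2CallOptions,
  'maxTokens' | 'temperature' | 'stopSequences' | 'responseFormat' | 'seed'
> = {
  maxTokens: 512,
  temperature: 0.2, // set either temperature or topP, not both
  stopSequences: ['\n\n'],
  responseFormat: {
    type: 'json',
    schema: {
      type: 'object',
      properties: { city: { type: 'string' }, tempC: { type: 'number' } },
      required: ['city', 'tempC'],
    },
    name: 'weather_report',
    description: 'A simple weather report.',
  },
  seed: 42, // deterministic sampling where the model supports it
};
```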
+ */ + schema?: JSONSchema7; + + /** + * Name of output that should be generated. Used by some providers for additional LLM guidance. + */ + name?: string; + + /** + * Description of the output that should be generated. Used by some providers for additional LLM guidance. + */ + description?: string; + }; + + /** +The seed (integer) to use for random sampling. If set and supported +by the model, calls will generate deterministic results. + */ + seed?: number; + + /** +Abort signal for cancelling the operation. + */ + abortSignal?: AbortSignal; + + /** +Additional HTTP headers to be sent with the request. +Only applicable for HTTP-based providers. + */ + headers?: Record; +}; diff --git a/packages/provider/src/language-model/v2/language-model-v2-call-warning.ts b/packages/provider/src/language-model/v2/language-model-v2-call-warning.ts new file mode 100644 index 000000000000..a442549ae5c4 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-call-warning.ts @@ -0,0 +1,23 @@ +import { LanguageModelV2CallSettings } from './language-model-v2-call-settings'; +import { LanguageModelV2FunctionTool } from './language-model-v2-function-tool'; +import { LanguageModelV2ProviderDefinedTool } from './language-model-v2-provider-defined-tool'; + +/** +Warning from the model provider for this call. The call will proceed, but e.g. +some settings might not be supported, which can lead to suboptimal results. + */ +export type LanguageModelV2CallWarning = + | { + type: 'unsupported-setting'; + setting: keyof LanguageModelV2CallSettings; + details?: string; + } + | { + type: 'unsupported-tool'; + tool: LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool; + details?: string; + } + | { + type: 'other'; + message: string; + }; diff --git a/packages/provider/src/language-model/v2/language-model-v2-finish-reason.ts b/packages/provider/src/language-model/v2/language-model-v2-finish-reason.ts new file mode 100644 index 000000000000..d4cc0494f5da --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-finish-reason.ts @@ -0,0 +1,20 @@ +/** +Reason why a language model finished generating a response. + +Can be one of the following: +- `stop`: model generated stop sequence +- `length`: model generated maximum number of tokens +- `content-filter`: content filter violation stopped the model +- `tool-calls`: model triggered tool calls +- `error`: model stopped because of an error +- `other`: model stopped for other reasons +- `unknown`: the model has not transmitted a finish reason + */ +export type LanguageModelV2FinishReason = + | 'stop' // model generated stop sequence + | 'length' // model generated maximum number of tokens + | 'content-filter' // content filter violation stopped the model + | 'tool-calls' // model triggered tool calls + | 'error' // model stopped because of an error + | 'other' // model stopped for other reasons + | 'unknown'; // the model has not transmitted a finish reason diff --git a/packages/provider/src/language-model/v2/language-model-v2-function-tool-call.ts b/packages/provider/src/language-model/v2/language-model-v2-function-tool-call.ts new file mode 100644 index 000000000000..016c816964b9 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-function-tool-call.ts @@ -0,0 +1,11 @@ +export type LanguageModelV2FunctionToolCall = { + toolCallType: 'function'; + toolCallId: string; + toolName: string; + + /** +Stringified JSON object with the tool call arguments. Must match the +parameters schema of the tool. 
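Because `args` travels as a JSON string, a consumer parses it before dispatching to the tool implementation. A hedged sketch (the `weather` tool and the `lookUpWeather` helper are hypothetical):

```ts
import { LanguageModelV2FunctionToolCall } from '@ai-sdk/provider';

function handleToolCall(call: LanguageModelV2FunctionToolCall) {
  // args is a stringified JSON object that must match the tool's parameter schema
  const args = JSON.parse(call.args) as Record<string, unknown>;

  switch (call.toolName) {
    case 'weather':
      return { toolCallId: call.toolCallId, result: lookUpWeather(args) };
    default:
      throw new Error(`Unknown tool: ${call.toolName}`);
  }
}

// illustrative stand-in for an application-defined tool implementation
declare function lookUpWeather(args: Record<string, unknown>): unknown;
```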
+ */ + args: string; +}; diff --git a/packages/provider/src/language-model/v2/language-model-v2-function-tool.ts b/packages/provider/src/language-model/v2/language-model-v2-function-tool.ts new file mode 100644 index 000000000000..e77590b18a30 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-function-tool.ts @@ -0,0 +1,31 @@ +import { JSONSchema7 } from 'json-schema'; + +/** +A tool has a name, a description, and a set of parameters. + +Note: this is **not** the user-facing tool definition. The AI SDK methods will +map the user-facing tool definitions to this format. + */ +export type LanguageModelV2FunctionTool = { + /** +The type of the tool (always 'function'). + */ + type: 'function'; + + /** +The name of the tool. Unique within this model call. + */ + name: string; + + /** +A description of the tool. The language model uses this to understand the +tool's purpose and to provide better completion suggestions. + */ + description?: string; + + /** +The parameters that the tool expects. The language model uses this to +understand the tool's input requirements and to provide matching suggestions. + */ + parameters: JSONSchema7; +}; diff --git a/packages/provider/src/language-model/v2/language-model-v2-logprobs.ts b/packages/provider/src/language-model/v2/language-model-v2-logprobs.ts new file mode 100644 index 000000000000..0920e299e134 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-logprobs.ts @@ -0,0 +1,11 @@ +/** +Log probabilities for each token and its top log probabilities. + */ +export type LanguageModelV2LogProbs = Array<{ + token: string; + logprob: number; + topLogprobs: Array<{ + token: string; + logprob: number; + }>; +}>; diff --git a/packages/provider/src/language-model/v2/language-model-v2-prompt.ts b/packages/provider/src/language-model/v2/language-model-v2-prompt.ts new file mode 100644 index 000000000000..bdd145646412 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-prompt.ts @@ -0,0 +1,261 @@ +import { LanguageModelV2ProviderMetadata } from './language-model-v2-provider-metadata'; + +/** +A prompt is a list of messages. + +Note: Not all models and prompt formats support multi-modal inputs and +tool calls. The validation happens at runtime. + +Note: This is not a user-facing prompt. The AI SDK methods will map the +user-facing prompt types such as chat or instruction prompts to this format. + */ +export type LanguageModelV2Prompt = Array; + +export type LanguageModelV2Message = + // Note: there could be additional parts for each role in the future, + // e.g. when the assistant can return images or the user can share files + // such as PDFs. + ( + | { + role: 'system'; + content: string; + } + | { + role: 'user'; + content: Array< + | LanguageModelV2TextPart + | LanguageModelV2ImagePart + | LanguageModelV2FilePart + >; + } + | { + role: 'assistant'; + content: Array< + | LanguageModelV2TextPart + | LanguageModelV2FilePart + | LanguageModelV2ReasoningPart + | LanguageModelV2RedactedReasoningPart + | LanguageModelV2ToolCallPart + >; + } + | { + role: 'tool'; + content: Array; + } + ) & { + /** + * Additional provider-specific metadata. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; + }; + +/** +Text content part of a prompt. It contains a string of text. 
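A minimal prompt built from this message union, with a system message and a multi-part user message (the image part type is defined just below; the content itself is illustrative):

```ts
import { LanguageModelV2Prompt } from '@ai-sdk/provider';

const prompt: LanguageModelV2Prompt = [
  { role: 'system', content: 'You are a concise assistant.' },
  {
    role: 'user',
    content: [
      { type: 'text', text: 'What is in this image?' },
      // image parts accept raw bytes (Uint8Array) or a URL; mimeType is optional
      { type: 'image', image: new URL('https://example.com/photo.jpg') },
    ],
  },
];
```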
+ */ +export interface LanguageModelV2TextPart { + type: 'text'; + + /** +The text content. + */ + text: string; + + /** + * Additional provider-specific metadata. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; +} + +/** +Reasoning content part of a prompt. It contains a string of reasoning text. + */ +export interface LanguageModelV2ReasoningPart { + type: 'reasoning'; + + /** +The reasoning text. + */ + text: string; + + /** +An optional signature for verifying that the reasoning originated from the model. + */ + signature?: string; + + /** +Additional provider-specific metadata. They are passed through +to the provider from the AI SDK and enable provider-specific +functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; +} + +/** +Redacted reasoning content part of a prompt. + */ +export interface LanguageModelV2RedactedReasoningPart { + type: 'redacted-reasoning'; + + /** +Redacted reasoning data. + */ + data: string; + + /** +Additional provider-specific metadata. They are passed through +to the provider from the AI SDK and enable provider-specific +functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; +} + +/** +Image content part of a prompt. It contains an image. + */ +// TODO merge into file part in language model v2 +export interface LanguageModelV2ImagePart { + type: 'image'; + + /** +Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL. + */ + image: Uint8Array | URL; + + /** +Optional mime type of the image. + */ + mimeType?: string; + + /** + * Additional provider-specific metadata. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; +} + +/** +File content part of a prompt. It contains a file. + */ +export interface LanguageModelV2FilePart { + type: 'file'; + + /** + * Optional filename of the file. + */ + filename?: string; + + /** +File data as base64 encoded string or as a URL. + */ + // Note: base64-encoded strings are used to prevent + // unnecessary conversions from string to buffer to string + data: string | URL; + + /** +Mime type of the file. + */ + mimeType: string; + + /** + * Additional provider-specific metadata. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; +} + +/** +Tool call content part of a prompt. It contains a tool call (usually generated by the AI model). + */ +export interface LanguageModelV2ToolCallPart { + type: 'tool-call'; + + /** +ID of the tool call. This ID is used to match the tool call with the tool result. + */ + toolCallId: string; + + /** +Name of the tool that is being called. + */ + toolName: string; + + /** +Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema. + */ + args: unknown; + + /** + * Additional provider-specific metadata. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. 
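The tool-call part above and the tool-result part defined below form one request/response round trip. A hedged sketch of such an exchange (ids and payloads are illustrative):

```ts
import { LanguageModelV2Message } from '@ai-sdk/provider';

// the assistant requests a tool invocation...
const assistantTurn: LanguageModelV2Message = {
  role: 'assistant',
  content: [
    { type: 'text', text: 'Let me check the weather.' },
    {
      type: 'tool-call',
      toolCallId: 'call-1',
      toolName: 'weather',
      args: { location: 'Berlin' }, // JSON-serializable, matches the tool schema
    },
  ],
};

// ...and the tool message answers it with the matching toolCallId
const toolTurn: LanguageModelV2Message = {
  role: 'tool',
  content: [
    {
      type: 'tool-result',
      toolCallId: 'call-1',
      toolName: 'weather',
      result: { tempC: 18, condition: 'cloudy' },
    },
  ],
};
```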
+ */ + providerMetadata?: LanguageModelV2ProviderMetadata; +} + +/** +Tool result content part of a prompt. It contains the result of the tool call with the matching ID. + */ +export interface LanguageModelV2ToolResultPart { + type: 'tool-result'; + + /** +ID of the tool call that this result is associated with. + */ + toolCallId: string; + + /** +Name of the tool that generated this result. + */ + toolName: string; + + /** +Result of the tool call. This is a JSON-serializable object. + */ + result: unknown; + + /** +Optional flag if the result is an error or an error message. + */ + isError?: boolean; + + /** +Tool results as an array of parts. This enables advanced tool results including images. +When this is used, the `result` field should be ignored (if the provider supports content). + */ + content?: Array< + | { + type: 'text'; + + /** +Text content. + */ + text: string; + } + | { + type: 'image'; + + /** +base-64 encoded image data + */ + data: string; + + /** +Mime type of the image. + */ + mimeType?: string; + } + >; + + /** + * Additional provider-specific metadata. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; +} diff --git a/packages/provider/src/language-model/v2/language-model-v2-provider-defined-tool.ts b/packages/provider/src/language-model/v2/language-model-v2-provider-defined-tool.ts new file mode 100644 index 000000000000..a7d3fff54b84 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-provider-defined-tool.ts @@ -0,0 +1,24 @@ +/** +The configuration of a tool that is defined by the provider. + */ +export type LanguageModelV2ProviderDefinedTool = { + /** +The type of the tool (always 'provider-defined'). + */ + type: 'provider-defined'; + + /** +The ID of the tool. Should follow the format `.`. + */ + id: `${string}.${string}`; + + /** +The name of the tool. Unique within this model call. + */ + name: string; + + /** +The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool. + */ + args: Record; +}; diff --git a/packages/provider/src/language-model/v2/language-model-v2-provider-metadata.ts b/packages/provider/src/language-model/v2/language-model-v2-provider-metadata.ts new file mode 100644 index 000000000000..47979b00d677 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-provider-metadata.ts @@ -0,0 +1,26 @@ +import { JSONValue } from '../../json-value/json-value'; + +/** + * Additional provider-specific metadata. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. + * + * This enables us to quickly ship provider-specific functionality + * without affecting the core AI SDK. + * + * The outer record is keyed by the provider name, and the inner + * record is keyed by the provider-specific metadata key. 
+ * + * ```ts + * { + * "anthropic": { + * "cacheControl": { "type": "ephemeral" } + * } + * } + * ``` + */ +// TODO language model v2 separate provider metadata (output) from provider options (input) +export type LanguageModelV2ProviderMetadata = Record< + string, + Record +>; diff --git a/packages/provider/src/language-model/v2/language-model-v2-source.ts b/packages/provider/src/language-model/v2/language-model-v2-source.ts new file mode 100644 index 000000000000..89abbc53ef62 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-source.ts @@ -0,0 +1,31 @@ +import { LanguageModelV2ProviderMetadata } from './language-model-v2-provider-metadata'; + +/** + * A source that has been used as input to generate the response. + */ +export type LanguageModelV2Source = { + /** + * A URL source. This is return by web search RAG models. + */ + sourceType: 'url'; + + /** + * The ID of the source. + */ + id: string; + + /** + * The URL of the source. + */ + url: string; + + /** + * The title of the source. + */ + title?: string; + + /** + * Additional provider metadata for the source. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; +}; diff --git a/packages/provider/src/language-model/v2/language-model-v2-tool-choice.ts b/packages/provider/src/language-model/v2/language-model-v2-tool-choice.ts new file mode 100644 index 000000000000..2c7d00c6392e --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-tool-choice.ts @@ -0,0 +1,5 @@ +export type LanguageModelV2ToolChoice = + | { type: 'auto' } // the tool selection is automatic (can be no tool) + | { type: 'none' } // no tool must be selected + | { type: 'required' } // one of the available tools must be selected + | { type: 'tool'; toolName: string }; // a specific tool must be selected: diff --git a/packages/provider/src/language-model/v2/language-model-v2.ts b/packages/provider/src/language-model/v2/language-model-v2.ts new file mode 100644 index 000000000000..c1b0808e2225 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2.ts @@ -0,0 +1,363 @@ +import { LanguageModelV2CallOptions } from './language-model-v2-call-options'; +import { LanguageModelV2CallWarning } from './language-model-v2-call-warning'; +import { LanguageModelV2FinishReason } from './language-model-v2-finish-reason'; +import { LanguageModelV2FunctionToolCall } from './language-model-v2-function-tool-call'; +import { LanguageModelV2LogProbs } from './language-model-v2-logprobs'; +import { LanguageModelV2ProviderMetadata } from './language-model-v2-provider-metadata'; +import { LanguageModelV2Source } from './language-model-v2-source'; + +/** +Specification for a language model that implements the language model interface version 1. + */ +export type LanguageModelV2 = { + /** +The language model must specify which language model interface +version it implements. This will allow us to evolve the language +model interface and retain backwards compatibility. The different +implementation versions can be handled as a discriminated union +on our side. + */ + readonly specificationVersion: 'v1'; + + /** +Name of the provider for logging purposes. + */ + readonly provider: string; + + /** +Provider-specific model ID for logging purposes. + */ + readonly modelId: string; + + /** +Default object generation mode that should be used with this model when +no mode is specified. Should be the mode with the best results for this +model. `undefined` can be returned if object generation is not supported. 
+ +This is needed to generate the best objects possible w/o requiring the +user to explicitly specify the object generation mode. + */ + readonly defaultObjectGenerationMode: LanguageModelV2ObjectGenerationMode; + + /** +Flag whether this model supports image URLs. Default is `true`. + +When the flag is set to `false`, the AI SDK will download the image and +pass the image data to the model. + */ + // TODO generalize to file urls in language model v2 + readonly supportsImageUrls?: boolean; + + /** +Flag whether this model supports grammar-guided generation, +i.e. follows JSON schemas for object generation +when the response format is set to 'json' or +when the `object-json` mode is used. + +This means that the model guarantees that the generated JSON +will be a valid JSON object AND that the object will match the +JSON schema. + +Please note that `generateObject` and `streamObject` will work +regardless of this flag, but might send different prompts and +use further optimizations if this flag is set to `true`. + +Defaults to `false`. +*/ + // TODO v2: rename to supportsGrammarGuidedGeneration? + readonly supportsStructuredOutputs?: boolean; + + /** +Checks if the model supports the given URL for file parts natively. +If the model does not support the URL, +the AI SDK will download the file and pass the file data to the model. + +When undefined, the AI SDK will download the file. + */ + supportsUrl?(url: URL): boolean; + + /** +Generates a language model output (non-streaming). + +Naming: "do" prefix to prevent accidental direct usage of the method +by the user. + */ + doGenerate(options: LanguageModelV2CallOptions): PromiseLike<{ + // TODO v2: switch to a composite content array with text, tool calls, reasoning, files + + /** +Text that the model has generated. +Can be undefined if the model did not generate any text. + */ + text?: string; + + /** +Reasoning that the model has generated. +Can be undefined if the model does not support reasoning. + */ + // TODO v2: remove string option + reasoning?: + | string + | Array< + | { + type: 'text'; + text: string; + + /** +An optional signature for verifying that the reasoning originated from the model. + */ + signature?: string; + } + | { + type: 'redacted'; + data: string; + } + >; + + /** +Generated files as base64 encoded strings or binary data. +The files should be returned without any unnecessary conversion. +If the API returns base64 encoded strings, the files should be returned +as base64 encoded strings. If the API returns binary data, the files should +be returned as binary data. + */ + files?: Array<{ + data: string | Uint8Array; + mimeType: string; + }>; + + /** +Tool calls that the model has generated. +Can be undefined if the model did not generate any tool calls. + */ + toolCalls?: Array; + + /** +Finish reason. + */ + finishReason: LanguageModelV2FinishReason; + + /** + Usage information. + */ + usage: { + promptTokens: number; + completionTokens: number; + }; + + /** +Raw prompt and setting information for observability provider integration. + */ + // TODO v2: remove in v2 (now there is request) + rawCall: { + /** +Raw prompt after expansion and conversion to the format that the +provider uses to send the information to their API. + */ + rawPrompt: unknown; + + /** +Raw settings that are used for the API call. Includes provider-specific +settings. + */ + rawSettings: Record; + }; + + /** +Optional response information for telemetry and debugging purposes. 
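Taken together, a minimal sketch of invoking `doGenerate` and reading the required result fields; the prompt is illustrative, and application code would normally go through the AI SDK core functions rather than calling this method directly:

```ts
import { LanguageModelV2 } from '@ai-sdk/provider';

async function generateOnce(model: LanguageModelV2) {
  const result = await model.doGenerate({
    inputFormat: 'prompt',
    mode: { type: 'regular' },
    prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
  });

  // finishReason and usage are always present; text and toolCalls are optional
  console.log(result.finishReason);
  console.log(result.usage.promptTokens, result.usage.completionTokens);
  console.log(result.text ?? '(no text generated)');
  return result;
}
```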
+ */ + // TODO rename to `response` in v2 + rawResponse?: { + /** +Response headers. + */ + headers?: Record; + + /** +Response body. + */ + body?: unknown; + }; + + /** +Optional request information for telemetry and debugging purposes. + */ + request?: { + /** +Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified). +Non-HTTP(s) providers should not set this. + */ + body?: string; + }; + + /** +Optional response information for telemetry and debugging purposes. + */ + response?: { + /** +ID for the generated response, if the provider sends one. + */ + id?: string; + + /** +Timestamp for the start of the generated response, if the provider sends one. + */ + timestamp?: Date; + + /** +The ID of the response model that was used to generate the response, if the provider sends one. + */ + modelId?: string; + }; + + warnings?: LanguageModelV2CallWarning[]; + + /** +Additional provider-specific metadata. They are passed through +from the provider to the AI SDK and enable provider-specific +results that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV2ProviderMetadata; + + /** +Sources that have been used as input to generate the response. + */ + sources?: LanguageModelV2Source[]; + + /** +Logprobs for the completion. +`undefined` if the mode does not support logprobs or if was not enabled + +@deprecated will be changed into a provider-specific extension in v2 + */ + // TODO change in language model v2 + logprobs?: LanguageModelV2LogProbs; + }>; + + /** +Generates a language model output (streaming). + +Naming: "do" prefix to prevent accidental direct usage of the method +by the user. + * +@return A stream of higher-level language model output parts. + */ + doStream(options: LanguageModelV2CallOptions): PromiseLike<{ + stream: ReadableStream; + + /** +Raw prompt and setting information for observability provider integration. + */ + // TODO remove in v2 (there is now request) + rawCall: { + /** +Raw prompt after expansion and conversion to the format that the +provider uses to send the information to their API. + */ + rawPrompt: unknown; + + /** +Raw settings that are used for the API call. Includes provider-specific +settings. + */ + rawSettings: Record; + }; + + /** +Optional raw response data. + */ + // TODO rename to response in v2 + rawResponse?: { + /** +Response headers. + */ + headers?: Record; + }; + + /** +Optional request information for telemetry and debugging purposes. + */ + request?: { + /** +Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified). +Non-HTTP(s) providers should not set this. + */ + body?: string; + }; + + /** +Warnings for the call, e.g. unsupported settings. + */ + warnings?: Array; + }>; +}; + +export type LanguageModelV2StreamPart = + // Basic text deltas: + | { type: 'text-delta'; textDelta: string } + + // Reasoning text deltas: + | { type: 'reasoning'; textDelta: string } + | { type: 'reasoning-signature'; signature: string } + | { type: 'redacted-reasoning'; data: string } + + // Sources: + | { type: 'source'; source: LanguageModelV2Source } + + // Files: + | { + type: 'file'; + mimeType: string; + + /** +Generated file data as base64 encoded strings or binary data. +The file data should be returned without any unnecessary conversion. +If the API returns base64 encoded strings, the file data should be returned +as base64 encoded strings. If the API returns binary data, the file data should +be returned as binary data. 
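A sketch of draining the `doStream` result and handling a few members of the stream-part union that follows (text deltas are accumulated until the final `finish` part arrives):

```ts
import { LanguageModelV2 } from '@ai-sdk/provider';

async function streamOnce(model: LanguageModelV2) {
  const { stream } = await model.doStream({
    inputFormat: 'prompt',
    mode: { type: 'regular' },
    prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
  });

  let text = '';
  const reader = stream.getReader();
  while (true) {
    const { done, value: part } = await reader.read();
    if (done) break;

    switch (part.type) {
      case 'text-delta':
        text += part.textDelta;
        break;
      case 'finish':
        // last content part: finish reason and usage arrive here
        console.log(part.finishReason, part.usage);
        break;
      case 'error':
        // error parts are streamed, so multiple can arrive
        throw part.error;
    }
  }
  return text;
}
```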
+ */ + data: string | Uint8Array; + } + + // Complete tool calls: + | ({ type: 'tool-call' } & LanguageModelV2FunctionToolCall) + + // Tool call deltas are only needed for object generation modes. + // The tool call deltas must be partial JSON strings. + | { + type: 'tool-call-delta'; + toolCallType: 'function'; + toolCallId: string; + toolName: string; + argsTextDelta: string; + } + + // metadata for the response. + // separate stream part so it can be sent once it is available. + | { + type: 'response-metadata'; + id?: string; + timestamp?: Date; + modelId?: string; + } + + // the usage stats, finish reason and logprobs should be the last part of the + // stream: + | { + type: 'finish'; + finishReason: LanguageModelV2FinishReason; + providerMetadata?: LanguageModelV2ProviderMetadata; + usage: { promptTokens: number; completionTokens: number }; + + // @deprecated - will be changed into a provider-specific extension in v2 + logprobs?: LanguageModelV2LogProbs; + } + + // error parts are streamed, allowing for multiple errors + | { type: 'error'; error: unknown }; + +/** +The object generation modes available for use with a model. `undefined` +represents no support for object generation. + */ +export type LanguageModelV2ObjectGenerationMode = 'json' | 'tool' | undefined; diff --git a/packages/provider/src/provider/index.ts b/packages/provider/src/provider/index.ts index e69fb44d6835..1e418c92a257 100644 --- a/packages/provider/src/provider/index.ts +++ b/packages/provider/src/provider/index.ts @@ -1 +1,2 @@ export * from './v1/index'; +export * from './v2/index'; diff --git a/packages/provider/src/provider/v2/index.ts b/packages/provider/src/provider/v2/index.ts new file mode 100644 index 000000000000..08d9d6c4bd75 --- /dev/null +++ b/packages/provider/src/provider/v2/index.ts @@ -0,0 +1 @@ +export type { ProviderV2 } from './provider-v2'; diff --git a/packages/provider/src/provider/v2/provider-v2.ts b/packages/provider/src/provider/v2/provider-v2.ts new file mode 100644 index 000000000000..fb33607d9457 --- /dev/null +++ b/packages/provider/src/provider/v2/provider-v2.ts @@ -0,0 +1,42 @@ +import { EmbeddingModelV1 } from '../../embedding-model/v1/embedding-model-v1'; +import { ImageModelV1 } from '../../image-model/v1/image-model-v1'; +import { LanguageModelV2 } from '../../language-model/v2/language-model-v2'; + +/** + * Provider for language, text embedding, and image generation models. + */ +export interface ProviderV2 { + /** +Returns the language model with the given id. +The model id is then passed to the provider function to get the model. + +@param {string} modelId - The id of the model to return. + +@returns {LanguageModel} The language model associated with the id + +@throws {NoSuchModelError} If no such model exists. + */ + languageModel(modelId: string): LanguageModelV2; + + /** +Returns the text embedding model with the given id. +The model id is then passed to the provider function to get the model. + +@param {string} modelId - The id of the model to return. + +@returns {LanguageModel} The language model associated with the id + +@throws {NoSuchModelError} If no such model exists. + */ + textEmbeddingModel(modelId: string): EmbeddingModelV1; + + /** +Returns the image model with the given id. +The model id is then passed to the provider function to get the model. + +@param {string} modelId - The id of the model to return. 
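`ProviderV2` makes `imageModel` a required member, which is why the Perplexity hunk above adds a throwing stub. A hedged sketch of a provider object that only supports language models (the factory shape is illustrative):

```ts
import {
  LanguageModelV2,
  NoSuchModelError,
  ProviderV2,
} from '@ai-sdk/provider';

function createExampleProvider(
  languageModel: (modelId: string) => LanguageModelV2,
): ProviderV2 {
  return {
    languageModel,
    // unsupported model types throw NoSuchModelError, mirroring the patch above
    textEmbeddingModel: (modelId: string) => {
      throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' });
    },
    imageModel: (modelId: string) => {
      throw new NoSuchModelError({ modelId, modelType: 'imageModel' });
    },
  };
}
```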
+ +@returns {ImageModel} The image model associated with the id +*/ + readonly imageModel: (modelId: string) => ImageModelV1; +} diff --git a/packages/replicate/src/replicate-provider.ts b/packages/replicate/src/replicate-provider.ts index 5bcc539c9ed8..3271dd8a4161 100644 --- a/packages/replicate/src/replicate-provider.ts +++ b/packages/replicate/src/replicate-provider.ts @@ -1,4 +1,4 @@ -import { NoSuchModelError, ProviderV1 } from '@ai-sdk/provider'; +import { NoSuchModelError, ProviderV2 } from '@ai-sdk/provider'; import type { FetchFunction } from '@ai-sdk/provider-utils'; import { loadApiKey } from '@ai-sdk/provider-utils'; import { ReplicateImageModel } from './replicate-image-model'; @@ -32,7 +32,7 @@ or to provide a custom fetch implementation for e.g. testing. fetch?: FetchFunction; } -export interface ReplicateProvider extends ProviderV1 { +export interface ReplicateProvider extends ProviderV2 { /** * Creates a Replicate image generation model. */ diff --git a/packages/togetherai/src/togetherai-provider.test.ts b/packages/togetherai/src/togetherai-provider.test.ts index 576669d97187..ec45a3ea4bb7 100644 --- a/packages/togetherai/src/togetherai-provider.test.ts +++ b/packages/togetherai/src/togetherai-provider.test.ts @@ -3,7 +3,7 @@ import { OpenAICompatibleCompletionLanguageModel, OpenAICompatibleEmbeddingModel, } from '@ai-sdk/openai-compatible'; -import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider'; +import { LanguageModelV2, EmbeddingModelV1 } from '@ai-sdk/provider'; import { loadApiKey } from '@ai-sdk/provider-utils'; import { TogetherAIImageModel } from './togetherai-image-model'; import { createTogetherAI } from './togetherai-provider'; @@ -29,15 +29,15 @@ vi.mock('./togetherai-image-model', () => ({ })); describe('TogetherAIProvider', () => { - let mockLanguageModel: LanguageModelV1; + let mockLanguageModel: LanguageModelV2; let mockEmbeddingModel: EmbeddingModelV1; let createOpenAICompatibleMock: Mock; beforeEach(() => { // Mock implementations of models mockLanguageModel = { - // Add any required methods for LanguageModelV1 - } as LanguageModelV1; + // Add any required methods for LanguageModelV2 + } as LanguageModelV2; mockEmbeddingModel = { // Add any required methods for EmbeddingModelV1 } as EmbeddingModelV1; diff --git a/packages/togetherai/src/togetherai-provider.ts b/packages/togetherai/src/togetherai-provider.ts index a81071fcdd9c..26fb018f13d6 100644 --- a/packages/togetherai/src/togetherai-provider.ts +++ b/packages/togetherai/src/togetherai-provider.ts @@ -1,7 +1,7 @@ import { - LanguageModelV1, + LanguageModelV2, EmbeddingModelV1, - ProviderV1, + ProviderV2, ImageModelV1, } from '@ai-sdk/provider'; import { @@ -52,14 +52,14 @@ or to provide a custom fetch implementation for e.g. testing. fetch?: FetchFunction; } -export interface TogetherAIProvider extends ProviderV1 { +export interface TogetherAIProvider extends ProviderV2 { /** Creates a model for text generation. */ ( modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a chat model for text generation. @@ -67,7 +67,7 @@ Creates a chat model for text generation. chatModel( modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a chat model for text generation. @@ -75,7 +75,7 @@ Creates a chat model for text generation. 
languageModel( modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a completion model for text generation. @@ -83,7 +83,7 @@ Creates a completion model for text generation. completionModel( modelId: TogetherAICompletionModelId, settings?: TogetherAICompletionSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates a text embedding model for text generation. diff --git a/packages/ui-utils/src/data-stream-parts.ts b/packages/ui-utils/src/data-stream-parts.ts index 37b8a032beaf..4fe395fe84c8 100644 --- a/packages/ui-utils/src/data-stream-parts.ts +++ b/packages/ui-utils/src/data-stream-parts.ts @@ -1,6 +1,6 @@ import { - LanguageModelV1FinishReason, - LanguageModelV1Source, + LanguageModelV2FinishReason, + LanguageModelV2Source, } from '@ai-sdk/provider'; import { ToolCall, ToolResult } from '@ai-sdk/provider-utils'; import { JSONValue } from './types'; @@ -191,7 +191,7 @@ const finishMessageStreamPart: DataStreamPart< 'd', 'finish_message', { - finishReason: LanguageModelV1FinishReason; + finishReason: LanguageModelV2FinishReason; // TODO v5 remove usage from finish event (only on step-finish) usage?: { promptTokens: number; @@ -214,13 +214,13 @@ const finishMessageStreamPart: DataStreamPart< } const result: { - finishReason: LanguageModelV1FinishReason; + finishReason: LanguageModelV2FinishReason; usage?: { promptTokens: number; completionTokens: number; }; } = { - finishReason: value.finishReason as LanguageModelV1FinishReason, + finishReason: value.finishReason as LanguageModelV2FinishReason, }; if ( @@ -254,7 +254,7 @@ const finishStepStreamPart: DataStreamPart< 'finish_step', { isContinued: boolean; - finishReason: LanguageModelV1FinishReason; + finishReason: LanguageModelV2FinishReason; usage?: { promptTokens: number; completionTokens: number; @@ -277,13 +277,13 @@ const finishStepStreamPart: DataStreamPart< const result: { isContinued: boolean; - finishReason: LanguageModelV1FinishReason; + finishReason: LanguageModelV2FinishReason; usage?: { promptTokens: number; completionTokens: number; }; } = { - finishReason: value.finishReason as LanguageModelV1FinishReason, + finishReason: value.finishReason as LanguageModelV2FinishReason, isContinued: false, }; @@ -358,7 +358,7 @@ const reasoningStreamPart: DataStreamPart<'g', 'reasoning', string> = { }, }; -const sourcePart: DataStreamPart<'h', 'source', LanguageModelV1Source> = { +const sourcePart: DataStreamPart<'h', 'source', LanguageModelV2Source> = { code: 'h', name: 'source', parse: (value: JSONValue) => { @@ -368,7 +368,7 @@ const sourcePart: DataStreamPart<'h', 'source', LanguageModelV1Source> = { return { type: 'source', - value: value as LanguageModelV1Source, + value: value as LanguageModelV2Source, }; }, }; diff --git a/packages/ui-utils/src/process-chat-response.test.ts b/packages/ui-utils/src/process-chat-response.test.ts index 1c191828d62f..f7805ee3e343 100644 --- a/packages/ui-utils/src/process-chat-response.test.ts +++ b/packages/ui-utils/src/process-chat-response.test.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1FinishReason } from '@ai-sdk/provider'; +import { LanguageModelV2FinishReason } from '@ai-sdk/provider'; import { convertArrayToReadableStream } from '@ai-sdk/provider-utils/test'; import { describe, expect, it, vi } from 'vitest'; import { DataStreamString, formatDataStreamPart } from './data-stream-parts'; @@ -30,12 +30,12 @@ const update = (options: { let finishCalls: Array<{ message: Message | undefined; - finishReason: 
LanguageModelV1FinishReason; + finishReason: LanguageModelV2FinishReason; usage: LanguageModelUsage; }> = []; const onFinish = (options: { message: Message | undefined; - finishReason: LanguageModelV1FinishReason; + finishReason: LanguageModelV2FinishReason; usage: LanguageModelUsage; }) => { // clone to preserve the original object diff --git a/packages/ui-utils/src/process-chat-response.ts b/packages/ui-utils/src/process-chat-response.ts index fd0196145ad5..4c74f59ffd78 100644 --- a/packages/ui-utils/src/process-chat-response.ts +++ b/packages/ui-utils/src/process-chat-response.ts @@ -1,4 +1,4 @@ -import { LanguageModelV1FinishReason } from '@ai-sdk/provider'; +import { LanguageModelV2FinishReason } from '@ai-sdk/provider'; import { generateId as generateIdFunction } from '@ai-sdk/provider-utils'; import { calculateLanguageModelUsage, @@ -34,7 +34,7 @@ export async function processChatResponse({ onToolCall?: UseChatOptions['onToolCall']; onFinish?: (options: { message: UIMessage | undefined; - finishReason: LanguageModelV1FinishReason; + finishReason: LanguageModelV2FinishReason; usage: LanguageModelUsage; }) => void; generateId?: () => string; @@ -104,7 +104,7 @@ export async function processChatResponse({ promptTokens: NaN, totalTokens: NaN, }; - let finishReason: LanguageModelV1FinishReason = 'unknown'; + let finishReason: LanguageModelV2FinishReason = 'unknown'; function execUpdate() { // make a copy of the data array to ensure UI is updated (SWR) diff --git a/packages/ui-utils/src/types.ts b/packages/ui-utils/src/types.ts index 2c08bc41a0bb..bfc67a517fb8 100644 --- a/packages/ui-utils/src/types.ts +++ b/packages/ui-utils/src/types.ts @@ -1,6 +1,6 @@ import { - LanguageModelV1FinishReason, - LanguageModelV1Source, + LanguageModelV2FinishReason, + LanguageModelV2Source, } from '@ai-sdk/provider'; import { FetchFunction, ToolCall, ToolResult } from '@ai-sdk/provider-utils'; import { LanguageModelUsage } from './duplicated/usage'; @@ -183,7 +183,7 @@ export type SourceUIPart = { /** * The source. */ - source: LanguageModelV1Source; + source: LanguageModelV2Source; }; /** @@ -328,7 +328,7 @@ either synchronously or asynchronously. message: Message, options: { usage: LanguageModelUsage; - finishReason: LanguageModelV1FinishReason; + finishReason: LanguageModelV2FinishReason; }, ) => void; diff --git a/packages/xai/src/xai-provider.ts b/packages/xai/src/xai-provider.ts index 2984d31e685e..5875e9374219 100644 --- a/packages/xai/src/xai-provider.ts +++ b/packages/xai/src/xai-provider.ts @@ -1,8 +1,8 @@ import { ImageModelV1, - LanguageModelV1, + LanguageModelV2, NoSuchModelError, - ProviderV1, + ProviderV2, } from '@ai-sdk/provider'; import { OpenAICompatibleChatLanguageModel, @@ -28,11 +28,11 @@ const xaiErrorStructure: ProviderErrorStructure = { errorToMessage: data => data.error, }; -export interface XaiProvider extends ProviderV1 { +export interface XaiProvider extends ProviderV2 { /** Creates an Xai chat model for text generation. */ - (modelId: XaiChatModelId, settings?: XaiChatSettings): LanguageModelV1; + (modelId: XaiChatModelId, settings?: XaiChatSettings): LanguageModelV2; /** Creates an Xai language model for text generation. @@ -40,7 +40,7 @@ Creates an Xai language model for text generation. languageModel( modelId: XaiChatModelId, settings?: XaiChatSettings, - ): LanguageModelV1; + ): LanguageModelV2; /** Creates an Xai chat model for text generation. @@ -48,7 +48,7 @@ Creates an Xai chat model for text generation. 
chat: ( modelId: XaiChatModelId, settings?: XaiChatSettings, - ) => LanguageModelV1; + ) => LanguageModelV2; /** Creates an Xai image model for image generation. From b7eae2d42e7c40c848409f8fe2397101ed8681d6 Mon Sep 17 00:00:00 2001 From: BramMeerten Date: Fri, 4 Apr 2025 16:15:00 +0200 Subject: [PATCH 0022/1307] feat (core): Add finishReason field to NoObjectGeneratedError (#5541) Co-authored-by: Bram Meerten --- .changeset/flat-plums-bake.md | 5 +++++ .../ai-no-object-generated-error.mdx | 2 ++ .../ai/core/generate-object/generate-object.test.ts | 1 + packages/ai/core/generate-object/generate-object.ts | 4 ++++ packages/ai/core/generate-object/output-strategy.ts | 8 +++++++- .../ai/core/generate-object/stream-object.test.ts | 6 ++++++ packages/ai/core/generate-object/stream-object.ts | 1 + packages/ai/core/generate-text/generate-text.ts | 6 +++++- packages/ai/core/generate-text/output.test.ts | 4 ++++ packages/ai/core/generate-text/output.ts | 5 +++++ packages/ai/errors/no-object-generated-error.ts | 13 +++++++++++++ 11 files changed, 53 insertions(+), 2 deletions(-) create mode 100644 .changeset/flat-plums-bake.md diff --git a/.changeset/flat-plums-bake.md b/.changeset/flat-plums-bake.md new file mode 100644 index 000000000000..d7092eca4757 --- /dev/null +++ b/.changeset/flat-plums-bake.md @@ -0,0 +1,5 @@ +--- +'ai': minor +--- + +feat (core): Add finishReason field to NoObjectGeneratedError diff --git a/content/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx b/content/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx index bac1c98474ff..ef98315a0d31 100644 --- a/content/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +++ b/content/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx @@ -18,6 +18,7 @@ It can arise due to the following reasons: - `text`: The text that was generated by the model. This can be the raw text or the tool call text, depending on the object generation mode. - `response`: Metadata about the language model response, including response id, timestamp, and model. - `usage`: Request token usage. +- `finishReason`: Request finish reason. For example 'length' if model generated maximum number of tokens, this could result in a JSON parsing error. - `cause`: The cause of the error (e.g. a JSON parsing error). You can use this for more detailed error handling. 
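The new `finishReason` field lets callers distinguish truncation from genuine schema failures. A minimal sketch of branching on it, assuming `model`, `schema`, and `prompt` are in scope; the retry advice is an illustrative suggestion, not part of this change:

```ts
import { generateObject, NoObjectGeneratedError } from 'ai';

try {
  await generateObject({ model, schema, prompt });
} catch (error) {
  if (NoObjectGeneratedError.isInstance(error)) {
    if (error.finishReason === 'length') {
      // the model hit its token limit mid-object, so the JSON was cut off;
      // retrying with a larger token budget is a reasonable recovery here
    } else {
      // parsing or schema validation failed for another reason
    }
  }
}
```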
## Checking for this Error @@ -36,6 +37,7 @@ try { console.log('Text:', error.text); console.log('Response:', error.response); console.log('Usage:', error.usage); + console.log('Finish Reason:', error.finishReason); } } ``` diff --git a/packages/ai/core/generate-object/generate-object.test.ts b/packages/ai/core/generate-object/generate-object.test.ts index 305485391df4..c341cb36f3e1 100644 --- a/packages/ai/core/generate-object/generate-object.test.ts +++ b/packages/ai/core/generate-object/generate-object.test.ts @@ -801,6 +801,7 @@ describe('output = "object"', () => { promptTokens: 10, totalTokens: 30, }, + finishReason: 'stop', }); } diff --git a/packages/ai/core/generate-object/generate-object.ts b/packages/ai/core/generate-object/generate-object.ts index 6284a12d8262..1a2e279d3726 100644 --- a/packages/ai/core/generate-object/generate-object.ts +++ b/packages/ai/core/generate-object/generate-object.ts @@ -556,6 +556,7 @@ export async function generateObject({ 'No object generated: the model did not return a response.', response: responseData, usage: calculateLanguageModelUsage(result.usage), + finishReason: result.finishReason, }); } @@ -681,6 +682,7 @@ export async function generateObject({ message: 'No object generated: the tool was not called.', response: responseData, usage: calculateLanguageModelUsage(result.usage), + finishReason: result.finishReason, }); } @@ -751,6 +753,7 @@ export async function generateObject({ text: result, response, usage: calculateLanguageModelUsage(usage), + finishReason: finishReason, }); } @@ -770,6 +773,7 @@ export async function generateObject({ text: result, response, usage: calculateLanguageModelUsage(usage), + finishReason: finishReason, }); } diff --git a/packages/ai/core/generate-object/output-strategy.ts b/packages/ai/core/generate-object/output-strategy.ts index 07c9d70ecd38..6da624bd331c 100644 --- a/packages/ai/core/generate-object/output-strategy.ts +++ b/packages/ai/core/generate-object/output-strategy.ts @@ -16,7 +16,11 @@ import { createAsyncIterableStream, } from '../util/async-iterable-stream'; import { ObjectStreamPart } from './stream-object-result'; -import { LanguageModelResponseMetadata, LanguageModelUsage } from '../types'; +import { + FinishReason, + LanguageModelResponseMetadata, + LanguageModelUsage, +} from '../types'; export interface OutputStrategy { readonly type: 'object' | 'array' | 'enum' | 'no-schema'; @@ -64,6 +68,7 @@ const noSchemaOutputStrategy: OutputStrategy = { text: string; response: LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: FinishReason; }, ): ValidationResult { return value === undefined @@ -74,6 +79,7 @@ const noSchemaOutputStrategy: OutputStrategy = { text: context.text, response: context.response, usage: context.usage, + finishReason: context.finishReason, }), } : { success: true, value }; diff --git a/packages/ai/core/generate-object/stream-object.test.ts b/packages/ai/core/generate-object/stream-object.test.ts index 68a43f17690e..7f3c7129a24a 100644 --- a/packages/ai/core/generate-object/stream-object.test.ts +++ b/packages/ai/core/generate-object/stream-object.test.ts @@ -1311,6 +1311,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1354,6 +1355,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1403,6 +1405,7 @@ describe('streamObject', () => 
{ modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1446,6 +1449,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1488,6 +1492,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1530,6 +1535,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); diff --git a/packages/ai/core/generate-object/stream-object.ts b/packages/ai/core/generate-object/stream-object.ts index 8113adc3ce78..e166d21e9c10 100644 --- a/packages/ai/core/generate-object/stream-object.ts +++ b/packages/ai/core/generate-object/stream-object.ts @@ -898,6 +898,7 @@ class DefaultStreamObjectResult text: accumulatedText, response, usage, + finishReason: finishReason, }); self.objectPromise.reject(error); } diff --git a/packages/ai/core/generate-text/generate-text.ts b/packages/ai/core/generate-text/generate-text.ts index 58b82664b47b..3f4293114b3f 100644 --- a/packages/ai/core/generate-text/generate-text.ts +++ b/packages/ai/core/generate-text/generate-text.ts @@ -576,7 +576,11 @@ A function that attempts to repair a tool call that failed to parse. return output.parseOutput( { text }, - { response: currentModelResponse.response, usage }, + { + response: currentModelResponse.response, + usage, + finishReason: currentModelResponse.finishReason, + }, ); }, toolCalls: currentToolCalls, diff --git a/packages/ai/core/generate-text/output.test.ts b/packages/ai/core/generate-text/output.test.ts index adde6bb5b610..d0a839036008 100644 --- a/packages/ai/core/generate-text/output.test.ts +++ b/packages/ai/core/generate-text/output.test.ts @@ -2,6 +2,7 @@ import { fail } from 'assert'; import { z } from 'zod'; import { verifyNoObjectGeneratedError } from '../../errors/no-object-generated-error'; import { object } from './output'; +import { FinishReason } from '../types'; const context = { response: { @@ -14,6 +15,7 @@ const context = { completionTokens: 2, totalTokens: 3, }, + finishReason: 'length' as FinishReason, }; describe('Output.object', () => { @@ -37,6 +39,7 @@ describe('Output.object', () => { message: 'No object generated: could not parse the response.', response: context.response, usage: context.usage, + finishReason: context.finishReason, }); } }); @@ -50,6 +53,7 @@ describe('Output.object', () => { message: 'No object generated: response did not match schema.', response: context.response, usage: context.usage, + finishReason: context.finishReason, }); } }); diff --git a/packages/ai/core/generate-text/output.ts b/packages/ai/core/generate-text/output.ts index 03fa79363d1a..1ca8ab5e4634 100644 --- a/packages/ai/core/generate-text/output.ts +++ b/packages/ai/core/generate-text/output.ts @@ -9,6 +9,7 @@ import { z } from 'zod'; import { NoObjectGeneratedError } from '../../errors'; import { injectJsonInstruction } from '../generate-object/inject-json-instruction'; import { + FinishReason, LanguageModel, LanguageModelV2CallOptions, } from '../types/language-model'; @@ -33,6 +34,7 @@ export interface Output { context: { response: LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: FinishReason; }, ): OUTPUT; } @@ -108,6 +110,7 @@ export const object = ({ context: { response: 
LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: FinishReason; }, ) { const parseResult = safeParseJSON({ text }); @@ -119,6 +122,7 @@ export const object = ({ text, response: context.response, usage: context.usage, + finishReason: context.finishReason, }); } @@ -134,6 +138,7 @@ export const object = ({ text, response: context.response, usage: context.usage, + finishReason: context.finishReason, }); } diff --git a/packages/ai/errors/no-object-generated-error.ts b/packages/ai/errors/no-object-generated-error.ts index 77ec7f5f1d52..06b7c05846c9 100644 --- a/packages/ai/errors/no-object-generated-error.ts +++ b/packages/ai/errors/no-object-generated-error.ts @@ -1,6 +1,7 @@ import { AISDKError } from '@ai-sdk/provider'; import { LanguageModelResponseMetadata } from '../core/types/language-model-response-metadata'; import { LanguageModelUsage } from '../core/types/usage'; +import { FinishReason } from '../core'; const name = 'AI_NoObjectGeneratedError'; const marker = `vercel.ai.error.${name}`; @@ -35,24 +36,32 @@ export class NoObjectGeneratedError extends AISDKError { */ readonly usage: LanguageModelUsage | undefined; + /** + Reason why the model finished generating a response. + */ + readonly finishReason: FinishReason | undefined; + constructor({ message = 'No object generated.', cause, text, response, usage, + finishReason, }: { message?: string; cause?: Error; text?: string; response: LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: FinishReason; }) { super({ name, message, cause }); this.text = text; this.response = response; this.usage = usage; + this.finishReason = finishReason; } static isInstance(error: unknown): error is NoObjectGeneratedError { @@ -66,6 +75,7 @@ export function verifyNoObjectGeneratedError( message: string; response: LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: FinishReason; }, ) { expect(NoObjectGeneratedError.isInstance(error)).toBeTruthy(); @@ -73,4 +83,7 @@ export function verifyNoObjectGeneratedError( expect(noObjectGeneratedError.message).toStrictEqual(expected.message); expect(noObjectGeneratedError.response).toStrictEqual(expected.response); expect(noObjectGeneratedError.usage).toStrictEqual(expected.usage); + expect(noObjectGeneratedError.finishReason).toStrictEqual( + expected.finishReason, + ); } From 80ff9da74c7ba15e93914acfef5db124bf7e8554 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Fri, 4 Apr 2025 17:51:02 +0200 Subject: [PATCH 0023/1307] chore: move language model middleware to provider (#5549) --- .../ai-core/src/e2e/feature-test-suite.ts | 33 +++++++++-------- .../openai-log-metadata-middleware.ts | 3 +- .../src/middleware/your-cache-middleware.ts | 2 +- .../middleware/your-guardrail-middleware.ts | 2 +- .../src/middleware/your-log-middleware.ts | 5 ++- .../src/middleware/your-rag-middleware.ts | 2 +- .../middleware/default-settings-middleware.ts | 4 +-- .../extract-reasoning-middleware.ts | 8 +++-- packages/ai/core/middleware/index.ts | 1 - .../simulate-streaming-middleware.ts | 8 +++-- .../middleware/wrap-language-model.test.ts | 36 +++++++++---------- .../ai/core/middleware/wrap-language-model.ts | 9 +++-- .../ai/core/test/mock-language-model-v1.ts | 2 +- .../src/bedrock-chat-language-model.ts | 2 +- .../src/anthropic-messages-language-model.ts | 2 +- .../cohere/src/cohere-chat-language-model.ts | 2 +- .../google-generative-ai-language-model.ts | 2 +- packages/groq/src/groq-chat-language-model.ts | 2 +- .../src/mistral-chat-language-model.ts | 2 +- 
.../openai-compatible-chat-language-model.ts | 2 +- ...ai-compatible-completion-language-model.ts | 2 +- .../src/openai-compatible-image-model.ts | 6 ++-- .../openai/src/openai-chat-language-model.ts | 2 +- .../src/openai-completion-language-model.ts | 2 +- .../openai-responses-language-model.ts | 2 +- .../src/perplexity-language-model.ts | 2 +- packages/provider/src/index.ts | 1 + .../src/language-model-middleware/index.ts | 1 + .../src/language-model-middleware/v2/index.ts | 1 + .../v2}/language-model-v2-middleware.ts | 7 ++-- .../language-model/v2/language-model-v2.ts | 4 +-- 31 files changed, 87 insertions(+), 72 deletions(-) create mode 100644 packages/provider/src/language-model-middleware/index.ts create mode 100644 packages/provider/src/language-model-middleware/v2/index.ts rename packages/{ai/core/middleware => provider/src/language-model-middleware/v2}/language-model-v2-middleware.ts (89%) diff --git a/examples/ai-core/src/e2e/feature-test-suite.ts b/examples/ai-core/src/e2e/feature-test-suite.ts index 5e05434c2466..b9b768fa6917 100644 --- a/examples/ai-core/src/e2e/feature-test-suite.ts +++ b/examples/ai-core/src/e2e/feature-test-suite.ts @@ -1,23 +1,22 @@ -import { z } from 'zod'; +import type { GoogleGenerativeAIProviderMetadata } from '@ai-sdk/google'; +import type { + EmbeddingModelV1, + ImageModelV1, + LanguageModelV2, +} from '@ai-sdk/provider'; import { + APICallError, + embed, + embedMany, experimental_generateImage as generateImage, - generateText, generateObject, - streamText, + generateText, streamObject, - embed, - embedMany, - APICallError, - ToolExecutionError, + streamText, } from 'ai'; import fs from 'fs'; import { describe, expect, it, vi } from 'vitest'; -import type { - EmbeddingModelV1, - ImageModelV1, - LanguageModelV1, -} from '@ai-sdk/provider'; -import type { GoogleGenerativeAIProviderMetadata } from '@ai-sdk/google'; +import { z } from 'zod'; export type Capability = | 'audioInput' @@ -50,9 +49,9 @@ export const defaultChatModelCapabilities: ModelCapabilities = [ ]; export const createLanguageModelWithCapabilities = ( - model: LanguageModelV1, + model: LanguageModelV2, capabilities: ModelCapabilities = defaultChatModelCapabilities, -): ModelWithCapabilities => ({ +): ModelWithCapabilities => ({ model, capabilities, }); @@ -74,8 +73,8 @@ export const createImageModelWithCapabilities = ( }); export interface ModelVariants { - invalidModel?: LanguageModelV1; - languageModels?: ModelWithCapabilities[]; + invalidModel?: LanguageModelV2; + languageModels?: ModelWithCapabilities[]; embeddingModels?: ModelWithCapabilities>[]; invalidImageModel?: ImageModelV1; imageModels?: ModelWithCapabilities[]; diff --git a/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts b/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts index e353193989e7..6507c2a12ce6 100644 --- a/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts +++ b/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts @@ -1,5 +1,6 @@ import { openai } from '@ai-sdk/openai'; -import { generateText, LanguageModelV2Middleware, wrapLanguageModel } from 'ai'; +import { LanguageModelV2Middleware } from '@ai-sdk/provider'; +import { generateText, wrapLanguageModel } from 'ai'; import 'dotenv/config'; const logProviderMetadataMiddleware: LanguageModelV2Middleware = { diff --git a/examples/ai-core/src/middleware/your-cache-middleware.ts b/examples/ai-core/src/middleware/your-cache-middleware.ts index 17a43c9bce02..1692998fe337 100644 --- 
a/examples/ai-core/src/middleware/your-cache-middleware.ts +++ b/examples/ai-core/src/middleware/your-cache-middleware.ts @@ -1,4 +1,4 @@ -import type { LanguageModelV2Middleware } from 'ai'; +import { LanguageModelV2Middleware } from '@ai-sdk/provider'; const cache = new Map(); diff --git a/examples/ai-core/src/middleware/your-guardrail-middleware.ts b/examples/ai-core/src/middleware/your-guardrail-middleware.ts index 191a6929683a..22ba857387c6 100644 --- a/examples/ai-core/src/middleware/your-guardrail-middleware.ts +++ b/examples/ai-core/src/middleware/your-guardrail-middleware.ts @@ -1,4 +1,4 @@ -import type { LanguageModelV2Middleware } from 'ai'; +import { LanguageModelV2Middleware } from '@ai-sdk/provider'; export const yourGuardrailMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate }) => { diff --git a/examples/ai-core/src/middleware/your-log-middleware.ts b/examples/ai-core/src/middleware/your-log-middleware.ts index 2fe934ea3f67..8da502e8829d 100644 --- a/examples/ai-core/src/middleware/your-log-middleware.ts +++ b/examples/ai-core/src/middleware/your-log-middleware.ts @@ -1,4 +1,7 @@ -import type { LanguageModelV2Middleware, LanguageModelV2StreamPart } from 'ai'; +import { + LanguageModelV2Middleware, + LanguageModelV2StreamPart, +} from '@ai-sdk/provider'; export const yourLogMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate, params }) => { diff --git a/examples/ai-core/src/middleware/your-rag-middleware.ts b/examples/ai-core/src/middleware/your-rag-middleware.ts index 63be3068a5d7..52378e75e5fa 100644 --- a/examples/ai-core/src/middleware/your-rag-middleware.ts +++ b/examples/ai-core/src/middleware/your-rag-middleware.ts @@ -1,6 +1,6 @@ +import { LanguageModelV2Middleware } from '@ai-sdk/provider'; import { addToLastUserMessage } from './add-to-last-user-message'; import { getLastUserMessageText } from './get-last-user-message-text'; -import type { LanguageModelV2Middleware } from 'ai'; export const yourRagMiddleware: LanguageModelV2Middleware = { transformParams: async ({ params }) => { diff --git a/packages/ai/core/middleware/default-settings-middleware.ts b/packages/ai/core/middleware/default-settings-middleware.ts index f8ad8126cce9..dbbb446f00e4 100644 --- a/packages/ai/core/middleware/default-settings-middleware.ts +++ b/packages/ai/core/middleware/default-settings-middleware.ts @@ -1,8 +1,8 @@ import { LanguageModelV2CallOptions, + LanguageModelV2Middleware, LanguageModelV2ProviderMetadata, } from '@ai-sdk/provider'; -import type { LanguageModelV2Middleware } from './language-model-v2-middleware'; import { mergeObjects } from '../util/merge-objects'; /** @@ -18,7 +18,7 @@ export function defaultSettingsMiddleware({ >; }): LanguageModelV2Middleware { return { - middlewareVersion: 'v1', + middlewareVersion: 'v2', transformParams: async ({ params }) => { return { ...settings, diff --git a/packages/ai/core/middleware/extract-reasoning-middleware.ts b/packages/ai/core/middleware/extract-reasoning-middleware.ts index 55b8625b1bc9..d640079485e0 100644 --- a/packages/ai/core/middleware/extract-reasoning-middleware.ts +++ b/packages/ai/core/middleware/extract-reasoning-middleware.ts @@ -1,6 +1,8 @@ -import type { LanguageModelV2StreamPart } from '@ai-sdk/provider'; +import type { + LanguageModelV2Middleware, + LanguageModelV2StreamPart, +} from '@ai-sdk/provider'; import { getPotentialStartIndex } from '../util/get-potential-start-index'; -import type { LanguageModelV2Middleware } from './language-model-v2-middleware'; /** * 
Extract an XML-tagged reasoning section from the generated text and exposes it @@ -23,7 +25,7 @@ export function extractReasoningMiddleware({ const closingTag = `<\/${tagName}>`; return { - middlewareVersion: 'v1', + middlewareVersion: 'v2', wrapGenerate: async ({ doGenerate }) => { const { text: rawText, ...rest } = await doGenerate(); diff --git a/packages/ai/core/middleware/index.ts b/packages/ai/core/middleware/index.ts index b4b30244c12f..ad92fbe1fccf 100644 --- a/packages/ai/core/middleware/index.ts +++ b/packages/ai/core/middleware/index.ts @@ -1,6 +1,5 @@ export { defaultSettingsMiddleware } from './default-settings-middleware'; export { extractReasoningMiddleware } from './extract-reasoning-middleware'; -export type { LanguageModelV2Middleware } from './language-model-v2-middleware'; export { simulateStreamingMiddleware } from './simulate-streaming-middleware'; export { experimental_wrapLanguageModel, diff --git a/packages/ai/core/middleware/simulate-streaming-middleware.ts b/packages/ai/core/middleware/simulate-streaming-middleware.ts index 756cd5109b5a..89e599af6d57 100644 --- a/packages/ai/core/middleware/simulate-streaming-middleware.ts +++ b/packages/ai/core/middleware/simulate-streaming-middleware.ts @@ -1,12 +1,14 @@ -import type { LanguageModelV2StreamPart } from '@ai-sdk/provider'; -import type { LanguageModelV2Middleware } from './language-model-v2-middleware'; +import type { + LanguageModelV2Middleware, + LanguageModelV2StreamPart, +} from '@ai-sdk/provider'; /** * Simulates streaming chunks with the response from a generate call. */ export function simulateStreamingMiddleware(): LanguageModelV2Middleware { return { - middlewareVersion: 'v1', + middlewareVersion: 'v2', wrapStream: async ({ doGenerate }) => { const result = await doGenerate(); diff --git a/packages/ai/core/middleware/wrap-language-model.test.ts b/packages/ai/core/middleware/wrap-language-model.test.ts index 3e2e0fda4bbc..efb3a82ecdeb 100644 --- a/packages/ai/core/middleware/wrap-language-model.test.ts +++ b/packages/ai/core/middleware/wrap-language-model.test.ts @@ -12,7 +12,7 @@ describe('wrapLanguageModel', () => { supportsStructuredOutputs: true, }), middleware: { - middlewareVersion: 'v1', + middlewareVersion: 'v2', }, }); @@ -26,7 +26,7 @@ describe('wrapLanguageModel', () => { const wrappedModel = wrapLanguageModel({ model: new MockLanguageModelV2(), middleware: { - middlewareVersion: 'v1', + middlewareVersion: 'v2', }, providerId: 'override-provider', modelId: 'override-model', @@ -48,7 +48,7 @@ describe('wrapLanguageModel', () => { const wrappedModel = wrapLanguageModel({ model: mockModel, middleware: { - middlewareVersion: 'v1', + middlewareVersion: 'v2', transformParams, }, }); @@ -83,7 +83,7 @@ describe('wrapLanguageModel', () => { const wrappedModel = wrapLanguageModel({ model: mockModel, middleware: { - middlewareVersion: 'v1', + middlewareVersion: 'v2', wrapGenerate, }, }); @@ -116,7 +116,7 @@ describe('wrapLanguageModel', () => { const wrappedModel = wrapLanguageModel({ model: mockModel, middleware: { - middlewareVersion: 'v1', + middlewareVersion: 'v2', transformParams, }, }); @@ -148,7 +148,7 @@ describe('wrapLanguageModel', () => { const wrappedModel = wrapLanguageModel({ model: mockModel, middleware: { - middlewareVersion: 'v1', + middlewareVersion: 'v2', wrapStream, }, }); @@ -177,7 +177,7 @@ describe('wrapLanguageModel', () => { const wrappedModel = wrapLanguageModel({ model: mockModel, middleware: { - middlewareVersion: 'v1', + middlewareVersion: 'v2', }, }); @@ -193,7 +193,7 @@ 
describe('wrapLanguageModel', () => { const wrappedModel = wrapLanguageModel({ model: mockModel, middleware: { - middlewareVersion: 'v1', + middlewareVersion: 'v2', }, }); @@ -206,7 +206,7 @@ describe('wrapLanguageModel', () => { let supportsUrlCalled = false; class MockLanguageModelWithImageSupport implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly provider = 'test-provider'; readonly modelId = 'test-model'; readonly defaultObjectGenerationMode = 'json'; @@ -227,7 +227,7 @@ describe('wrapLanguageModel', () => { const wrappedModel = wrapLanguageModel({ model: new MockLanguageModelWithImageSupport(), middleware: { - middlewareVersion: 'v1', + middlewareVersion: 'v2', }, }); @@ -256,11 +256,11 @@ describe('wrapLanguageModel', () => { model: mockModel, middleware: [ { - middlewareVersion: 'v1', + middlewareVersion: 'v2', transformParams: transformParams1, }, { - middlewareVersion: 'v1', + middlewareVersion: 'v2', transformParams: transformParams2, }, ], @@ -310,11 +310,11 @@ describe('wrapLanguageModel', () => { model: mockModel, middleware: [ { - middlewareVersion: 'v1', + middlewareVersion: 'v2', transformParams: transformParams1, }, { - middlewareVersion: 'v1', + middlewareVersion: 'v2', transformParams: transformParams2, }, ], @@ -366,11 +366,11 @@ describe('wrapLanguageModel', () => { model: mockModel, middleware: [ { - middlewareVersion: 'v1', + middlewareVersion: 'v2', wrapGenerate: wrapGenerate1, }, { - middlewareVersion: 'v1', + middlewareVersion: 'v2', wrapGenerate: wrapGenerate2, }, ], @@ -414,11 +414,11 @@ describe('wrapLanguageModel', () => { model: mockModel, middleware: [ { - middlewareVersion: 'v1', + middlewareVersion: 'v2', wrapStream: wrapStream1, }, { - middlewareVersion: 'v1', + middlewareVersion: 'v2', wrapStream: wrapStream2, }, ], diff --git a/packages/ai/core/middleware/wrap-language-model.ts b/packages/ai/core/middleware/wrap-language-model.ts index 0912204b9e1d..21971875a050 100644 --- a/packages/ai/core/middleware/wrap-language-model.ts +++ b/packages/ai/core/middleware/wrap-language-model.ts @@ -1,5 +1,8 @@ -import { LanguageModelV2, LanguageModelV2CallOptions } from '@ai-sdk/provider'; -import { LanguageModelV2Middleware } from './language-model-v2-middleware'; +import { + LanguageModelV2, + LanguageModelV2CallOptions, + LanguageModelV2Middleware, +} from '@ai-sdk/provider'; import { asArray } from '../../util/as-array'; /** @@ -54,7 +57,7 @@ const doWrap = ({ } return { - specificationVersion: 'v1', + specificationVersion: 'v2', provider: providerId ?? model.provider, modelId: modelId ?? 
model.modelId, diff --git a/packages/ai/core/test/mock-language-model-v1.ts b/packages/ai/core/test/mock-language-model-v1.ts index acbd42ad6d4c..8d273b50c504 100644 --- a/packages/ai/core/test/mock-language-model-v1.ts +++ b/packages/ai/core/test/mock-language-model-v1.ts @@ -2,7 +2,7 @@ import { LanguageModelV2 } from '@ai-sdk/provider'; import { notImplemented } from './not-implemented'; export class MockLanguageModelV2 implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly provider: LanguageModelV2['provider']; readonly modelId: LanguageModelV2['modelId']; diff --git a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts index 3f1a6b18654e..3bbab5140304 100644 --- a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts +++ b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts @@ -42,7 +42,7 @@ type BedrockChatConfig = { }; export class BedrockChatLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly provider = 'amazon-bedrock'; readonly defaultObjectGenerationMode = 'tool'; readonly supportsImageUrls = false; diff --git a/packages/anthropic/src/anthropic-messages-language-model.ts b/packages/anthropic/src/anthropic-messages-language-model.ts index 87980fb3ea98..1e21fa8b5469 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.ts +++ b/packages/anthropic/src/anthropic-messages-language-model.ts @@ -39,7 +39,7 @@ type AnthropicMessagesConfig = { }; export class AnthropicMessagesLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly defaultObjectGenerationMode = 'tool'; readonly modelId: AnthropicMessagesModelId; diff --git a/packages/cohere/src/cohere-chat-language-model.ts b/packages/cohere/src/cohere-chat-language-model.ts index 4f9b024c5bf9..65ec0e1f3be7 100644 --- a/packages/cohere/src/cohere-chat-language-model.ts +++ b/packages/cohere/src/cohere-chat-language-model.ts @@ -30,7 +30,7 @@ type CohereChatConfig = { }; export class CohereChatLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly defaultObjectGenerationMode = 'json'; readonly modelId: CohereChatModelId; diff --git a/packages/google/src/google-generative-ai-language-model.ts b/packages/google/src/google-generative-ai-language-model.ts index 90290faad6cc..cb7875e2f233 100644 --- a/packages/google/src/google-generative-ai-language-model.ts +++ b/packages/google/src/google-generative-ai-language-model.ts @@ -40,7 +40,7 @@ type GoogleGenerativeAIConfig = { }; export class GoogleGenerativeAILanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly defaultObjectGenerationMode = 'json'; readonly supportsImageUrls = false; diff --git a/packages/groq/src/groq-chat-language-model.ts b/packages/groq/src/groq-chat-language-model.ts index 65f7687e73ea..9236323b815e 100644 --- a/packages/groq/src/groq-chat-language-model.ts +++ b/packages/groq/src/groq-chat-language-model.ts @@ -33,7 +33,7 @@ type GroqChatConfig = { }; export class GroqChatLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly supportsStructuredOutputs = false; readonly defaultObjectGenerationMode = 'json'; diff --git 
a/packages/mistral/src/mistral-chat-language-model.ts b/packages/mistral/src/mistral-chat-language-model.ts index 25ebe48cfafb..9acd8a729fed 100644 --- a/packages/mistral/src/mistral-chat-language-model.ts +++ b/packages/mistral/src/mistral-chat-language-model.ts @@ -31,7 +31,7 @@ type MistralChatConfig = { }; export class MistralChatLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly defaultObjectGenerationMode = 'json'; readonly supportsImageUrls = false; diff --git a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts index b61c8572a578..caef59f1d6aa 100644 --- a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts +++ b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts @@ -57,7 +57,7 @@ model. `undefined` can be specified if object generation is not supported. }; export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly supportsStructuredOutputs: boolean; diff --git a/packages/openai-compatible/src/openai-compatible-completion-language-model.ts b/packages/openai-compatible/src/openai-compatible-completion-language-model.ts index b4eccee4bb2d..530c8fd7c720 100644 --- a/packages/openai-compatible/src/openai-compatible-completion-language-model.ts +++ b/packages/openai-compatible/src/openai-compatible-completion-language-model.ts @@ -40,7 +40,7 @@ type OpenAICompatibleCompletionConfig = { export class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly defaultObjectGenerationMode = undefined; readonly modelId: OpenAICompatibleCompletionModelId; diff --git a/packages/openai-compatible/src/openai-compatible-image-model.ts b/packages/openai-compatible/src/openai-compatible-image-model.ts index a37f35d0e0a8..abaf8b8c41c7 100644 --- a/packages/openai-compatible/src/openai-compatible-image-model.ts +++ b/packages/openai-compatible/src/openai-compatible-image-model.ts @@ -7,12 +7,14 @@ import { postJsonToApi, } from '@ai-sdk/provider-utils'; import { z } from 'zod'; -import { OpenAICompatibleImageModelId } from './openai-compatible-image-settings'; -import { OpenAICompatibleImageSettings } from './openai-compatible-image-settings'; import { defaultOpenAICompatibleErrorStructure, ProviderErrorStructure, } from './openai-compatible-error'; +import { + OpenAICompatibleImageModelId, + OpenAICompatibleImageSettings, +} from './openai-compatible-image-settings'; export type OpenAICompatibleImageModelConfig = { provider: string; diff --git a/packages/openai/src/openai-chat-language-model.ts b/packages/openai/src/openai-chat-language-model.ts index 4aba40834230..7b25ab229643 100644 --- a/packages/openai/src/openai-chat-language-model.ts +++ b/packages/openai/src/openai-chat-language-model.ts @@ -39,7 +39,7 @@ type OpenAIChatConfig = { }; export class OpenAIChatLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly modelId: OpenAIChatModelId; readonly settings: OpenAIChatSettings; diff --git a/packages/openai/src/openai-completion-language-model.ts b/packages/openai/src/openai-completion-language-model.ts index e69d4e472139..43428e9cbd92 100644 --- a/packages/openai/src/openai-completion-language-model.ts +++ 
b/packages/openai/src/openai-completion-language-model.ts @@ -37,7 +37,7 @@ type OpenAICompletionConfig = { }; export class OpenAICompletionLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly defaultObjectGenerationMode = undefined; readonly modelId: OpenAICompletionModelId; diff --git a/packages/openai/src/responses/openai-responses-language-model.ts b/packages/openai/src/responses/openai-responses-language-model.ts index 4d6f46816bfd..41ae402013a1 100644 --- a/packages/openai/src/responses/openai-responses-language-model.ts +++ b/packages/openai/src/responses/openai-responses-language-model.ts @@ -22,7 +22,7 @@ import { prepareResponsesTools } from './openai-responses-prepare-tools'; import { OpenAIResponsesModelId } from './openai-responses-settings'; export class OpenAIResponsesLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly defaultObjectGenerationMode = 'json'; readonly modelId: OpenAIResponsesModelId; diff --git a/packages/perplexity/src/perplexity-language-model.ts b/packages/perplexity/src/perplexity-language-model.ts index b3538deea37a..62e66d7952a7 100644 --- a/packages/perplexity/src/perplexity-language-model.ts +++ b/packages/perplexity/src/perplexity-language-model.ts @@ -27,7 +27,7 @@ type PerplexityChatConfig = { }; export class PerplexityLanguageModel implements LanguageModelV2 { - readonly specificationVersion = 'v1'; + readonly specificationVersion = 'v2'; readonly defaultObjectGenerationMode = 'json'; readonly supportsStructuredOutputs = true; readonly supportsImageUrls = false; diff --git a/packages/provider/src/index.ts b/packages/provider/src/index.ts index 1e258cf9baa8..117003c809d8 100644 --- a/packages/provider/src/index.ts +++ b/packages/provider/src/index.ts @@ -3,6 +3,7 @@ export * from './errors/index'; export * from './image-model/index'; export * from './json-value/index'; export * from './language-model/index'; +export * from './language-model-middleware/index'; export * from './provider/index'; export type { JSONSchema7, JSONSchema7Definition } from 'json-schema'; diff --git a/packages/provider/src/language-model-middleware/index.ts b/packages/provider/src/language-model-middleware/index.ts new file mode 100644 index 000000000000..5640476d8b82 --- /dev/null +++ b/packages/provider/src/language-model-middleware/index.ts @@ -0,0 +1 @@ +export * from './v2/index'; diff --git a/packages/provider/src/language-model-middleware/v2/index.ts b/packages/provider/src/language-model-middleware/v2/index.ts new file mode 100644 index 000000000000..d41cfbd28f3d --- /dev/null +++ b/packages/provider/src/language-model-middleware/v2/index.ts @@ -0,0 +1 @@ +export * from './language-model-v2-middleware'; diff --git a/packages/ai/core/middleware/language-model-v2-middleware.ts b/packages/provider/src/language-model-middleware/v2/language-model-v2-middleware.ts similarity index 89% rename from packages/ai/core/middleware/language-model-v2-middleware.ts rename to packages/provider/src/language-model-middleware/v2/language-model-v2-middleware.ts index 1a5a84ad1402..132c36f14716 100644 --- a/packages/ai/core/middleware/language-model-v2-middleware.ts +++ b/packages/provider/src/language-model-middleware/v2/language-model-v2-middleware.ts @@ -1,4 +1,5 @@ -import { LanguageModelV2, LanguageModelV2CallOptions } from '@ai-sdk/provider'; +import { LanguageModelV2 } from '../../language-model/v2/language-model-v2'; +import { 
LanguageModelV2CallOptions } from '../../language-model/v2/language-model-v2-call-options'; /** * Experimental middleware for LanguageModelV2. @@ -7,9 +8,9 @@ import { LanguageModelV2, LanguageModelV2CallOptions } from '@ai-sdk/provider'; */ export type LanguageModelV2Middleware = { /** - * Middleware specification version. Use `v1` for the current version. + * Middleware specification version. Use `v2` for the current version. */ - middlewareVersion?: 'v1' | undefined; // backwards compatibility + middlewareVersion?: 'v2' | undefined; // backwards compatibility /** * Transforms the parameters before they are passed to the language model. diff --git a/packages/provider/src/language-model/v2/language-model-v2.ts b/packages/provider/src/language-model/v2/language-model-v2.ts index c1b0808e2225..82f960971605 100644 --- a/packages/provider/src/language-model/v2/language-model-v2.ts +++ b/packages/provider/src/language-model/v2/language-model-v2.ts @@ -7,7 +7,7 @@ import { LanguageModelV2ProviderMetadata } from './language-model-v2-provider-me import { LanguageModelV2Source } from './language-model-v2-source'; /** -Specification for a language model that implements the language model interface version 1. +Specification for a language model that implements the language model interface version 2. */ export type LanguageModelV2 = { /** @@ -17,7 +17,7 @@ model interface and retain backwards compatibility. The different implementation versions can be handled as a discriminated union on our side. */ - readonly specificationVersion: 'v1'; + readonly specificationVersion: 'v2'; /** Name of the provider for logging purposes. From 5cbd957200f5cda93698fc782c7fac58d6028338 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Fri, 4 Apr 2025 18:57:07 +0200 Subject: [PATCH 0024/1307] fix (docs): troubleshooting page description (#5551) --- .../09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx b/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx index 9ca39a8792af..d7e83986229f 100644 --- a/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +++ b/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx @@ -1,6 +1,6 @@ --- title: "Jest: cannot find module 'ai/rsc'" -description: "Troubleshooting AI SDK errors related to the "Jest: cannot find module 'ai/rsc'" error". +description: "Troubleshooting AI SDK errors related to the Jest: cannot find module 'ai/rsc' error". 
--- # Jest: cannot find module 'ai/rsc' From 779d9168912b6d4744692740cfb55932ccf1fa3b Mon Sep 17 00:00:00 2001 From: Grace Yun <74513600+iteratetograceness@users.noreply.github.com> Date: Fri, 4 Apr 2025 20:21:54 -0400 Subject: [PATCH 0025/1307] feat (provider/{google, google-vertex}): expose type for validating provider options (#5491) --- .changeset/pink-deers-switch.md | 6 ++ .../15-google-generative-ai.mdx | 25 ++++++- .../01-ai-sdk-providers/16-google-vertex.mdx | 35 ++++++++++ .../src/generate-image/google-vertex.ts | 10 +-- .../ai-core/src/generate-text/google-image.ts | 7 +- .../src/google-vertex-image-model.test.ts | 66 ++++++++++--------- .../src/google-vertex-image-model.ts | 29 +++++++- packages/google-vertex/src/index.ts | 1 + ...oogle-generative-ai-language-model.test.ts | 58 ++++++++++++++++ .../google-generative-ai-language-model.ts | 11 +++- packages/google/src/index.ts | 3 +- 11 files changed, 210 insertions(+), 41 deletions(-) create mode 100644 .changeset/pink-deers-switch.md diff --git a/.changeset/pink-deers-switch.md b/.changeset/pink-deers-switch.md new file mode 100644 index 000000000000..84d524aeb623 --- /dev/null +++ b/.changeset/pink-deers-switch.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/google-vertex': patch +'@ai-sdk/google': patch +--- + +feat: add provider option schemas for vertex imagegen and google genai diff --git a/content/providers/01-ai-sdk-providers/15-google-generative-ai.mdx b/content/providers/01-ai-sdk-providers/15-google-generative-ai.mdx index 980fb3ccdc4d..2fba02438350 100644 --- a/content/providers/01-ai-sdk-providers/15-google-generative-ai.mdx +++ b/content/providers/01-ai-sdk-providers/15-google-generative-ai.mdx @@ -80,7 +80,7 @@ const model = google('gemini-1.5-pro-latest'); e.g. `tunedModels/my-model`. -Google Generative AI models support also some model specific settings that are not part of the [standard call settings](/docs/ai-sdk-core/settings). +Google Generative AI also supports some model specific settings that are not part of the [standard call settings](/docs/ai-sdk-core/settings). You can pass them as an options argument: ```ts @@ -132,6 +132,29 @@ The following optional settings are available for Google Generative AI models: - `BLOCK_ONLY_HIGH` - `BLOCK_NONE` +Further configuration can be done using Google Generative AI provider options. You can validate the provider options using the `GoogleGenerativeAIProviderOptions` type. + +```ts +import { google } from '@ai-sdk/google'; +import { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'; +import { generateText } from 'ai'; + +const { text } = await generateText({ + model: google('gemini-1.5-pro-latest'), + providerOptions: { + google: { + responseModalities: ['TEXT', 'IMAGE'], + } satisfies GoogleGenerativeAIProviderOptions, + }, + // ... +}); +``` + +The following provider options are available: + +- **responseModalities** _string[]_ + The modalities to use for the response. The following modalities are supported: `TEXT`, `IMAGE`. When not defined or empty, the model defaults to returning only text. 
+ You can use Google Generative AI language models to generate text with the `generateText` function: ```ts diff --git a/content/providers/01-ai-sdk-providers/16-google-vertex.mdx b/content/providers/01-ai-sdk-providers/16-google-vertex.mdx index b326449fd6fe..b65b0f6b0819 100644 --- a/content/providers/01-ai-sdk-providers/16-google-vertex.mdx +++ b/content/providers/01-ai-sdk-providers/16-google-vertex.mdx @@ -599,6 +599,41 @@ const { image } = await generateImage({ }); ``` +Further configuration can be done using Google Vertex provider options. You can validate the provider options using the `GoogleVertexImageProviderOptions` type. + +```ts +import { vertex } from '@ai-sdk/google-vertex'; +import { GoogleVertexImageProviderOptions } from '@ai-sdk/google-vertex'; +import { generateImage } from 'ai'; + +const { image } = await generateImage({ + model: vertex.image('imagen-3.0-generate-001'), + providerOptions: { + vertex: { + negativePrompt: 'pixelated, blurry, low-quality', + } satisfies GoogleVertexImageProviderOptions, + }, + // ... +}); +``` + +The following provider options are available: + +- **negativePrompt** _string_ + A description of what to discourage in the generated images. + +- **personGeneration** `allow_adult` | `allow_all` | `dont_allow` + Whether to allow person generation. Defaults to `allow_adult`. + +- **safetySetting** `block_low_and_above` | `block_medium_and_above` | `block_only_high` | `block_none` + Whether to block unsafe content. Defaults to `block_medium_and_above`. + +- **addWatermark** _boolean_ + Whether to add an invisible watermark to the generated images. Defaults to `true`. + +- **storageUri** _string_ + Cloud Storage URI to store the generated images. + Imagen models do not support the `size` parameter. Use the `aspectRatio` parameter instead. 
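Because Imagen models reject the `size` parameter, dimensions are expressed through `aspectRatio` instead. A minimal sketch with a placeholder prompt; `'16:9'` is one of the ratios exercised by the tests in this change:

```ts
import { vertex } from '@ai-sdk/google-vertex';
import { experimental_generateImage as generateImage } from 'ai';

const { image } = await generateImage({
  model: vertex.image('imagen-3.0-generate-001'),
  prompt: 'A watercolor lighthouse at dusk', // placeholder prompt
  aspectRatio: '16:9', // Imagen models take aspectRatio, not size
});
```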
diff --git a/examples/ai-core/src/generate-image/google-vertex.ts b/examples/ai-core/src/generate-image/google-vertex.ts index 4635fcda81dd..d21043e1005b 100644 --- a/examples/ai-core/src/generate-image/google-vertex.ts +++ b/examples/ai-core/src/generate-image/google-vertex.ts @@ -1,7 +1,10 @@ -import { vertex } from '@ai-sdk/google-vertex'; +import { + GoogleVertexImageProviderOptions, + vertex, +} from '@ai-sdk/google-vertex'; import { experimental_generateImage as generateImage } from 'ai'; -import { presentImages } from '../lib/present-image'; import 'dotenv/config'; +import { presentImages } from '../lib/present-image'; async function main() { const { image } = await generateImage({ @@ -10,9 +13,8 @@ async function main() { aspectRatio: '1:1', providerOptions: { vertex: { - // https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api#parameter_list addWatermark: false, - }, + } satisfies GoogleVertexImageProviderOptions, }, }); diff --git a/examples/ai-core/src/generate-text/google-image.ts b/examples/ai-core/src/generate-text/google-image.ts index d7c66219ecdf..f5f32d72134a 100644 --- a/examples/ai-core/src/generate-text/google-image.ts +++ b/examples/ai-core/src/generate-text/google-image.ts @@ -1,4 +1,4 @@ -import { google } from '@ai-sdk/google'; +import { google, GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'; import { generateText } from 'ai'; import 'dotenv/config'; import fs from 'node:fs'; @@ -15,6 +15,11 @@ async function main() { ], }, ], + providerOptions: { + google: { + responseModalities: ['TEXT', 'IMAGE'], + } satisfies GoogleGenerativeAIProviderOptions, + }, }); console.log(result.text); diff --git a/packages/google-vertex/src/google-vertex-image-model.test.ts b/packages/google-vertex/src/google-vertex-image-model.test.ts index 0a7dab1c6fe9..c9be8cb4839a 100644 --- a/packages/google-vertex/src/google-vertex-image-model.test.ts +++ b/packages/google-vertex/src/google-vertex-image-model.test.ts @@ -38,27 +38,6 @@ describe('GoogleVertexImageModel', () => { }; } - it('should pass the correct parameters', async () => { - prepareJsonResponse(); - - await model.doGenerate({ - prompt, - n: 2, - size: undefined, - aspectRatio: undefined, - seed: undefined, - providerOptions: { vertex: { aspectRatio: '1:1' } }, - }); - - expect(await server.calls[0].requestBody).toStrictEqual({ - instances: [{ prompt }], - parameters: { - sampleCount: 2, - aspectRatio: '1:1', - }, - }); - }); - it('should pass headers', async () => { prepareJsonResponse(); @@ -143,13 +122,9 @@ describe('GoogleVertexImageModel', () => { prompt: 'test prompt', n: 1, size: undefined, - aspectRatio: undefined, + aspectRatio: '16:9', seed: undefined, - providerOptions: { - vertex: { - aspectRatio: '16:9', - }, - }, + providerOptions: {}, }); expect(await server.calls[0].requestBody).toStrictEqual({ @@ -214,7 +189,7 @@ describe('GoogleVertexImageModel', () => { seed: 42, providerOptions: { vertex: { - temperature: 0.8, + addWatermark: false, }, }, }); @@ -225,7 +200,7 @@ describe('GoogleVertexImageModel', () => { sampleCount: 1, aspectRatio: '1:1', seed: 42, - temperature: 0.8, + addWatermark: false, }, }); }); @@ -302,7 +277,7 @@ describe('GoogleVertexImageModel', () => { const result = await model.doGenerate({ prompt, - n: 1, + n: 2, size: undefined, aspectRatio: undefined, seed: undefined, @@ -319,5 +294,36 @@ describe('GoogleVertexImageModel', () => { ); expect(result.response.modelId).toBe('imagen-3.0-generate-001'); }); + + it('should only pass valid provider options', async 
() => { + prepareJsonResponse(); + + await model.doGenerate({ + prompt, + n: 2, + size: undefined, + aspectRatio: '16:9', + seed: undefined, + providerOptions: { + vertex: { + addWatermark: false, + negativePrompt: 'negative prompt', + personGeneration: 'allow_all', + foo: 'bar', + }, + }, + }); + + expect(await server.calls[0].requestBody).toStrictEqual({ + instances: [{ prompt }], + parameters: { + sampleCount: 2, + addWatermark: false, + negativePrompt: 'negative prompt', + personGeneration: 'allow_all', + aspectRatio: '16:9', + }, + }); + }); }); }); diff --git a/packages/google-vertex/src/google-vertex-image-model.ts b/packages/google-vertex/src/google-vertex-image-model.ts index c94d5ddc32d5..635b50df5445 100644 --- a/packages/google-vertex/src/google-vertex-image-model.ts +++ b/packages/google-vertex/src/google-vertex-image-model.ts @@ -3,6 +3,7 @@ import { Resolvable, combineHeaders, createJsonResponseHandler, + parseProviderOptions, postJsonToApi, resolve, } from '@ai-sdk/provider-utils'; @@ -65,13 +66,19 @@ export class GoogleVertexImageModel implements ImageModelV1 { }); } + const vertexImageOptions = parseProviderOptions({ + provider: 'vertex', + providerOptions, + schema: vertexImageProviderOptionsSchema, + }); + const body = { instances: [{ prompt }], parameters: { sampleCount: n, ...(aspectRatio != null ? { aspectRatio } : {}), ...(seed != null ? { seed } : {}), - ...(providerOptions.vertex ?? {}), + ...(vertexImageOptions ?? {}), }, }; @@ -108,3 +115,23 @@ export class GoogleVertexImageModel implements ImageModelV1 { const vertexImageResponseSchema = z.object({ predictions: z.array(z.object({ bytesBase64Encoded: z.string() })).nullish(), }); + +const vertexImageProviderOptionsSchema = z.object({ + negativePrompt: z.string().nullish(), + personGeneration: z + .enum(['dont_allow', 'allow_adult', 'allow_all']) + .nullish(), + safetySetting: z + .enum([ + 'block_low_and_above', + 'block_medium_and_above', + 'block_only_high', + 'block_none', + ]) + .nullish(), + addWatermark: z.boolean().nullish(), + storageUri: z.string().nullish(), +}); +export type GoogleVertexImageProviderOptions = z.infer< + typeof vertexImageProviderOptionsSchema +>; diff --git a/packages/google-vertex/src/index.ts b/packages/google-vertex/src/index.ts index bf1e0fad1033..e84f8e06d72f 100644 --- a/packages/google-vertex/src/index.ts +++ b/packages/google-vertex/src/index.ts @@ -1,3 +1,4 @@ +export type { GoogleVertexImageProviderOptions } from './google-vertex-image-model'; export { createVertex, vertex } from './google-vertex-provider-node'; export type { GoogleVertexProvider, diff --git a/packages/google/src/google-generative-ai-language-model.test.ts b/packages/google/src/google-generative-ai-language-model.test.ts index ea81f7966b69..75dcc2661b8e 100644 --- a/packages/google/src/google-generative-ai-language-model.test.ts +++ b/packages/google/src/google-generative-ai-language-model.test.ts @@ -414,6 +414,39 @@ describe('doGenerate', () => { }); }); + it('should only pass valid provider options', async () => { + prepareJsonResponse({}); + + await model.doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: [ + { role: 'system', content: 'test system instruction' }, + { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, + ], + seed: 123, + temperature: 0.5, + providerMetadata: { + google: { foo: 'bar', responseModalities: ['TEXT', 'IMAGE'] }, + }, + }); + + expect(await server.calls[0].requestBody).toStrictEqual({ + contents: [ + { + role: 'user', + parts: [{ text: 
'Hello' }], + }, + ], + systemInstruction: { parts: [{ text: 'test system instruction' }] }, + generationConfig: { + seed: 123, + temperature: 0.5, + responseModalities: ['TEXT', 'IMAGE'], + }, + }); + }); + it('should pass tools and toolChoice', async () => { prepareJsonResponse({}); @@ -1869,4 +1902,29 @@ describe('doStream', () => { 'tool-calls', ); }); + + it('should only pass valid provider options', async () => { + prepareStreamResponse({ content: [''] }); + + await model.doStream({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + providerMetadata: { + google: { foo: 'bar', responseModalities: ['TEXT', 'IMAGE'] }, + }, + }); + + expect(await server.calls[0].requestBody).toMatchObject({ + contents: [ + { + role: 'user', + parts: [{ text: 'Hello' }], + }, + ], + generationConfig: { + responseModalities: ['TEXT', 'IMAGE'], + }, + }); + }); }); diff --git a/packages/google/src/google-generative-ai-language-model.ts b/packages/google/src/google-generative-ai-language-model.ts index cb7875e2f233..23685bac9afe 100644 --- a/packages/google/src/google-generative-ai-language-model.ts +++ b/packages/google/src/google-generative-ai-language-model.ts @@ -88,9 +88,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV2 { const googleOptions = parseProviderOptions({ provider: 'google', providerOptions: providerMetadata, - schema: z.object({ - responseModalities: z.array(z.enum(['TEXT', 'IMAGE'])).nullish(), - }), + schema: googleGenerativeAIProviderOptionsSchema, }); const generationConfig = { @@ -623,3 +621,10 @@ const chunkSchema = z.object({ }) .nullish(), }); + +const googleGenerativeAIProviderOptionsSchema = z.object({ + responseModalities: z.array(z.enum(['TEXT', 'IMAGE'])).nullish(), +}); +export type GoogleGenerativeAIProviderOptions = z.infer< + typeof googleGenerativeAIProviderOptionsSchema +>; diff --git a/packages/google/src/index.ts b/packages/google/src/index.ts index bf9c08c46346..8220c6c80ba0 100644 --- a/packages/google/src/index.ts +++ b/packages/google/src/index.ts @@ -1,6 +1,7 @@ -export { createGoogleGenerativeAI, google } from './google-provider'; export type { GoogleErrorData } from './google-error'; +export type { GoogleGenerativeAIProviderOptions } from './google-generative-ai-language-model'; export type { GoogleGenerativeAIProviderMetadata } from './google-generative-ai-prompt'; +export { createGoogleGenerativeAI, google } from './google-provider'; export type { GoogleGenerativeAIProvider, GoogleGenerativeAIProviderSettings, From 9efb22331959b918e1d6fe47725f4dc9f1a77fe4 Mon Sep 17 00:00:00 2001 From: Walter Korman Date: Fri, 4 Apr 2025 23:01:24 -0700 Subject: [PATCH 0026/1307] fix (docs): correct content outside description in jest docs (#5559) --- .../09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx b/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx index d7e83986229f..6626d31c1bf0 100644 --- a/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +++ b/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx @@ -1,6 +1,6 @@ --- title: "Jest: cannot find module 'ai/rsc'" -description: "Troubleshooting AI SDK errors related to the Jest: cannot find module 'ai/rsc' error". 
+description: "Troubleshooting AI SDK errors related to the Jest: cannot find module 'ai/rsc' error" --- # Jest: cannot find module 'ai/rsc' From 0c0c0b308490d75827728c2fd021560b0b58cfa5 Mon Sep 17 00:00:00 2001 From: Gregor Martynus <39992+gr2m@users.noreply.github.com> Date: Sat, 5 Apr 2025 01:53:12 -0700 Subject: [PATCH 0027/1307] refactor (provider-utils): move `customAlphabet()` method from `nanoid` into codebase (#5543) --- .changeset/eleven-lobsters-rescue.md | 5 +++ packages/provider-utils/package.json | 1 - .../src/generate-id-custom-alphabet.ts | 34 +++++++++++++++++++ packages/provider-utils/src/generate-id.ts | 2 +- pnpm-lock.yaml | 3 -- 5 files changed, 40 insertions(+), 5 deletions(-) create mode 100644 .changeset/eleven-lobsters-rescue.md create mode 100644 packages/provider-utils/src/generate-id-custom-alphabet.ts diff --git a/.changeset/eleven-lobsters-rescue.md b/.changeset/eleven-lobsters-rescue.md new file mode 100644 index 000000000000..591a7d207840 --- /dev/null +++ b/.changeset/eleven-lobsters-rescue.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': patch +--- + +refactor (provider-utils): move `customAlphabet()` method from `nanoid` into codebase diff --git a/packages/provider-utils/package.json b/packages/provider-utils/package.json index d760ec3592e1..2c5efde10370 100644 --- a/packages/provider-utils/package.json +++ b/packages/provider-utils/package.json @@ -38,7 +38,6 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "nanoid": "^3.3.8", "secure-json-parse": "^2.7.0" }, "devDependencies": { diff --git a/packages/provider-utils/src/generate-id-custom-alphabet.ts b/packages/provider-utils/src/generate-id-custom-alphabet.ts new file mode 100644 index 000000000000..0761e6b08bfe --- /dev/null +++ b/packages/provider-utils/src/generate-id-custom-alphabet.ts @@ -0,0 +1,34 @@ +// License for this File only. +// Based on https://github.com/ai/nanoid/blob/c49cc113499f2b44ab52b80314e18043ad0f3f79/non-secure/index.js +// +// MIT License +// +// Copyright (c) 2017 Andrey Sitnik +// Copyright (c) Vercel, Inc. (https://vercel.com) +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +// to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF +// CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +// IN THE SOFTWARE. + +export function customAlphabet(alphabet: string, defaultSize: number) { + return (size = defaultSize) => { + let id = ''; + // A compact alternative for `for (var i = 0; i < step; i++)`. + let i = size | 0; + while (i--) { + // `| 0` is more compact and faster than `Math.floor()`. 
+ id += alphabet[(Math.random() * alphabet.length) | 0]; + } + return id; + }; +} diff --git a/packages/provider-utils/src/generate-id.ts b/packages/provider-utils/src/generate-id.ts index 1c6cd6cb80e9..646b8c51fe5e 100644 --- a/packages/provider-utils/src/generate-id.ts +++ b/packages/provider-utils/src/generate-id.ts @@ -1,5 +1,5 @@ import { InvalidArgumentError } from '@ai-sdk/provider'; -import { customAlphabet } from 'nanoid/non-secure'; +import { customAlphabet } from './generate-id-custom-alphabet'; /** Creates an ID generator. diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 53b2743fdc2e..c9015d1b9a7f 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1701,9 +1701,6 @@ importers: '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../provider - nanoid: - specifier: ^3.3.8 - version: 3.3.8 secure-json-parse: specifier: ^2.7.0 version: 2.7.0 From a4f3007f25c2617a2fcd32d2df5afbc2a9b1083b Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Sat, 5 Apr 2025 10:54:55 +0200 Subject: [PATCH 0028/1307] chore: remove ai/react (#5560) --- .changeset/twelve-kids-travel.md | 5 ++++ packages/ai/package.json | 11 +-------- packages/ai/react/index.ts | 42 -------------------------------- packages/ai/react/package.json | 10 -------- packages/ai/tsup.config.ts | 12 --------- pnpm-lock.yaml | 3 --- 6 files changed, 6 insertions(+), 77 deletions(-) create mode 100644 .changeset/twelve-kids-travel.md delete mode 100644 packages/ai/react/index.ts delete mode 100644 packages/ai/react/package.json diff --git a/.changeset/twelve-kids-travel.md b/.changeset/twelve-kids-travel.md new file mode 100644 index 000000000000..9238be102309 --- /dev/null +++ b/.changeset/twelve-kids-travel.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore: remove ai/react diff --git a/packages/ai/package.json b/packages/ai/package.json index dd4e07a4d058..0b342f7f7d43 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -10,7 +10,6 @@ "files": [ "dist/**/*", "mcp-stdio/**/*", - "react/dist/**/*", "rsc/dist/**/*", "test/dist/**/*", "CHANGELOG.md" @@ -18,7 +17,7 @@ "scripts": { "build": "tsup", "build:watch": "tsup --watch", - "clean": "rm -rf dist && rm -rf react/dist && rm -rf rsc/dist", + "clean": "rm -rf dist && rm -rf rsc/dist", "lint": "eslint \"./**/*.ts*\"", "type-check": "tsc --noEmit", "prettier-check": "prettier --check \"./**/*.ts*\"", @@ -53,12 +52,6 @@ "react-server": "./rsc/dist/rsc-server.mjs", "import": "./rsc/dist/rsc-client.mjs" }, - "./react": { - "types": "./react/dist/index.d.ts", - "react-server": "./react/dist/index.server.mjs", - "import": "./react/dist/index.mjs", - "require": "./react/dist/index.js" - }, "./mcp-stdio": { "types": "./mcp-stdio/dist/index.d.ts", "import": "./mcp-stdio/dist/index.mjs", @@ -68,7 +61,6 @@ "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", "@ai-sdk/provider-utils": "3.0.0-canary.0", - "@ai-sdk/react": "2.0.0-canary.0", "@ai-sdk/ui-utils": "2.0.0-canary.0", "@opentelemetry/api": "1.9.0", "jsondiffpatch": "0.6.0" @@ -114,7 +106,6 @@ "keywords": [ "ai", "vercel", - "react", "next", "nextjs" ] diff --git a/packages/ai/react/index.ts b/packages/ai/react/index.ts deleted file mode 100644 index b3f79af12e0d..000000000000 --- a/packages/ai/react/index.ts +++ /dev/null @@ -1,42 +0,0 @@ -import { - useChat as useChatReact, - useCompletion as useCompletionReact, - experimental_useObject as experimental_useObjectReact, -} from '@ai-sdk/react'; - -/** - * @deprecated Use `@ai-sdk/react` instead. 
- */ -export const useChat = useChatReact; - -/** - * @deprecated Use `@ai-sdk/react` instead. - */ -export const useCompletion = useCompletionReact; - -/** - * @deprecated Use `@ai-sdk/react` instead. - */ -export const experimental_useObject = experimental_useObjectReact; - -export type { - /** - * @deprecated Use `@ai-sdk/react` instead. - */ - CreateMessage, - - /** - * @deprecated Use `@ai-sdk/react` instead. - */ - Message, - - /** - * @deprecated Use `@ai-sdk/react` instead. - */ - UseChatOptions, - - /** - * @deprecated Use `@ai-sdk/react` instead. - */ - UseChatHelpers, -} from '@ai-sdk/react'; diff --git a/packages/ai/react/package.json b/packages/ai/react/package.json deleted file mode 100644 index e93684423ec2..000000000000 --- a/packages/ai/react/package.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "main": "./dist/index.js", - "module": "./dist/index.mjs", - "types": "./dist/index.d.ts", - "exports": "./dist/index.mjs", - "private": true, - "peerDependencies": { - "react": ">=18" - } -} diff --git a/packages/ai/tsup.config.ts b/packages/ai/tsup.config.ts index 920257426bbd..b2d726105e08 100644 --- a/packages/ai/tsup.config.ts +++ b/packages/ai/tsup.config.ts @@ -17,18 +17,6 @@ export default defineConfig([ dts: true, sourcemap: true, }, - // React APIs - { - entry: ['react/index.ts'], - outDir: 'react/dist', - banner: { - js: "'use client'", - }, - format: ['cjs', 'esm'], - external: ['react'], - dts: true, - sourcemap: true, - }, // RSC APIs - shared client { // Entry is `.mts` as the entrypoints that import it will be ESM so it needs exact imports that includes the `.mjs` extension. diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c9015d1b9a7f..c5dd76526282 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1096,9 +1096,6 @@ importers: '@ai-sdk/provider-utils': specifier: 3.0.0-canary.0 version: link:../provider-utils - '@ai-sdk/react': - specifier: 2.0.0-canary.0 - version: link:../react '@ai-sdk/ui-utils': specifier: 2.0.0-canary.0 version: link:../ui-utils From c22ad54c71f0ac3b2a2a7fd0cf932d8fd5bfe2f4 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Sat, 5 Apr 2025 10:58:54 +0200 Subject: [PATCH 0029/1307] feat (core): add chunking functions support to smoothStream (#5548) Co-authored-by: Sam Denty Co-authored-by: Sam Denty --- .changeset/beige-ligers-kneel.md | 5 + .../22-smooth-stream-japanese.mdx | 19 ++ .../04-ai-sdk-ui/23-smooth-stream-chinese.mdx | 19 ++ .../01-ai-sdk-core/80-smooth-stream.mdx | 57 +++++- .../src/stream-text/smooth-stream-chinese.ts | 39 ++++ .../src/stream-text/smooth-stream-japanese.ts | 39 ++++ packages/ai/core/generate-text/index.ts | 2 +- .../core/generate-text/smooth-stream.test.ts | 169 ++++++++++++++++++ .../ai/core/generate-text/smooth-stream.ts | 73 ++++++-- 9 files changed, 404 insertions(+), 18 deletions(-) create mode 100644 .changeset/beige-ligers-kneel.md create mode 100644 content/docs/04-ai-sdk-ui/22-smooth-stream-japanese.mdx create mode 100644 content/docs/04-ai-sdk-ui/23-smooth-stream-chinese.mdx create mode 100644 examples/ai-core/src/stream-text/smooth-stream-chinese.ts create mode 100644 examples/ai-core/src/stream-text/smooth-stream-japanese.ts diff --git a/.changeset/beige-ligers-kneel.md b/.changeset/beige-ligers-kneel.md new file mode 100644 index 000000000000..fe3a654a5dc5 --- /dev/null +++ b/.changeset/beige-ligers-kneel.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat(smooth-stream): chunking callbacks diff --git a/content/docs/04-ai-sdk-ui/22-smooth-stream-japanese.mdx 
b/content/docs/04-ai-sdk-ui/22-smooth-stream-japanese.mdx
new file mode 100644
index 000000000000..8faabe199b0c
--- /dev/null
+++ b/content/docs/04-ai-sdk-ui/22-smooth-stream-japanese.mdx
@@ -0,0 +1,19 @@
+---
+title: Smooth streaming Japanese text
+description: Learn how to smooth stream Japanese text
+---
+
+# Smooth streaming Japanese text
+
+You can smooth stream Japanese text by using the `smoothStream` function with the following regex, which splits either on individual Japanese characters or on whitespace-delimited words:
+
+```tsx filename="page.tsx"
+import { smoothStream } from 'ai';
+import { useChat } from '@ai-sdk/react';
+
+const { data } = useChat({
+  experimental_transform: smoothStream({
+    chunking: /[\u3040-\u309F\u30A0-\u30FF]|\S+\s+/,
+  }),
+});
+```
diff --git a/content/docs/04-ai-sdk-ui/23-smooth-stream-chinese.mdx b/content/docs/04-ai-sdk-ui/23-smooth-stream-chinese.mdx
new file mode 100644
index 000000000000..7f7533397f5c
--- /dev/null
+++ b/content/docs/04-ai-sdk-ui/23-smooth-stream-chinese.mdx
@@ -0,0 +1,19 @@
+---
+title: Smooth streaming Chinese text
+description: Learn how to smooth stream Chinese text
+---
+
+# Smooth streaming Chinese text
+
+You can smooth stream Chinese text by using the `smoothStream` function with the following regex, which splits either on individual Chinese characters or on whitespace-delimited words:
+
+```tsx filename="page.tsx"
+import { smoothStream } from 'ai';
+import { useChat } from '@ai-sdk/react';
+
+const { data } = useChat({
+  experimental_transform: smoothStream({
+    chunking: /[\u4E00-\u9FFF]|\S+\s+/,
+  }),
+});
+```
diff --git a/content/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx b/content/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx
index 5e644487a284..474ea5875e8c 100644
--- a/content/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx
+++ b/content/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx
@@ -42,14 +42,67 @@ const result = streamText({
   },
   {
     name: 'chunking',
-    type: '"word" | "line" | RegExp',
+    type: '"word" | "line" | RegExp | (buffer: string) => string | undefined | null',
     isOptional: true,
     description:
-      'Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.',
+      'Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom callback or RegExp pattern for custom chunking.',
   },
 ]}
 />

+#### Word chunking caveats with non-Latin languages
+
+<Note>
+  Word-based chunking **does not work well** with languages that do not
+  delimit words with spaces.
+
+  For Chinese and Japanese, we recommend using a custom regex like the
+  following:
+
+  - Chinese - `/[\u4E00-\u9FFF]|\S+\s+/`
+  - Japanese - `/[\u3040-\u309F\u30A0-\u30FF]|\S+\s+/`
+
+  For languages such as the following, you could pass your own
+  language-aware chunking function:
+
+  - Vietnamese
+  - Thai
+  - Javanese (Aksara Jawa)
+</Note>
+
+#### Regex-based chunking
+
+To use regex-based chunking, pass a `RegExp` to the `chunking` option. Text is buffered until the regex matches; each match, together with any buffered text before it, is emitted as one chunk.
+
+```ts
+// To split on underscores:
+smoothStream({
+  chunking: /_+/,
+});
+
+// You can also do it like this; the behavior is the same:
+smoothStream({
+  chunking: /[^_]*_/,
+});
+```
+
+#### Custom callback chunking
+
+To use a custom callback for chunking, pass a function to the `chunking` option.
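+The callback receives the buffered text and must return the next chunk to emit, which must be a non-empty prefix of the buffer; return `null` or `undefined` when no complete chunk is available yet.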
+ +```ts +smoothStream({ + chunking: text => { + const findString = 'some string'; + const index = text.indexOf(findString); + + if (index === -1) { + return null; + } + + return text.slice(0, index) + findString; + }, +}); +``` + ### Returns Returns a `TransformStream` that: diff --git a/examples/ai-core/src/stream-text/smooth-stream-chinese.ts b/examples/ai-core/src/stream-text/smooth-stream-chinese.ts new file mode 100644 index 000000000000..71b8d1e99cae --- /dev/null +++ b/examples/ai-core/src/stream-text/smooth-stream-chinese.ts @@ -0,0 +1,39 @@ +import { simulateReadableStream, smoothStream, streamText } from 'ai'; +import { MockLanguageModelV1 } from 'ai/test'; + +async function main() { + const result = streamText({ + model: new MockLanguageModelV1({ + doStream: async () => ({ + stream: simulateReadableStream({ + chunks: [ + { type: 'text-delta', textDelta: '你好你好你好你好你好' }, + { type: 'text-delta', textDelta: '你好你好你好你好你好' }, + { type: 'text-delta', textDelta: '你好你好你好你好你好' }, + { type: 'text-delta', textDelta: '你好你好你好你好你好' }, + { type: 'text-delta', textDelta: '你好你好你好你好你好' }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ], + chunkDelayInMs: 400, + }), + rawCall: { rawPrompt: null, rawSettings: {} }, + }), + }), + + prompt: 'Say hello in Chinese!', + experimental_transform: smoothStream({ + chunking: /[\u4E00-\u9FFF]|\S+\s+/, + }), + }); + + for await (const textPart of result.textStream) { + process.stdout.write(textPart); + } +} + +main().catch(console.error); diff --git a/examples/ai-core/src/stream-text/smooth-stream-japanese.ts b/examples/ai-core/src/stream-text/smooth-stream-japanese.ts new file mode 100644 index 000000000000..5139d835be3b --- /dev/null +++ b/examples/ai-core/src/stream-text/smooth-stream-japanese.ts @@ -0,0 +1,39 @@ +import { simulateReadableStream, smoothStream, streamText } from 'ai'; +import { MockLanguageModelV1 } from 'ai/test'; + +async function main() { + const result = streamText({ + model: new MockLanguageModelV1({ + doStream: async () => ({ + stream: simulateReadableStream({ + chunks: [ + { type: 'text-delta', textDelta: 'こんにちは' }, + { type: 'text-delta', textDelta: 'こんにちは' }, + { type: 'text-delta', textDelta: 'こんにちは' }, + { type: 'text-delta', textDelta: 'こんにちは' }, + { type: 'text-delta', textDelta: 'こんにちは' }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ], + chunkDelayInMs: 400, + }), + rawCall: { rawPrompt: null, rawSettings: {} }, + }), + }), + + prompt: 'Say hello in Japanese!', + experimental_transform: smoothStream({ + chunking: /[\u3040-\u309F\u30A0-\u30FF]|\S+\s+/, + }), + }); + + for await (const textPart of result.textStream) { + process.stdout.write(textPart); + } +} + +main().catch(console.error); diff --git a/packages/ai/core/generate-text/index.ts b/packages/ai/core/generate-text/index.ts index 8ca67561e4c9..ab53b626fc47 100644 --- a/packages/ai/core/generate-text/index.ts +++ b/packages/ai/core/generate-text/index.ts @@ -6,7 +6,7 @@ export type { GeneratedFile, } from './generated-file'; export * as Output from './output'; -export { smoothStream } from './smooth-stream'; +export { smoothStream, type ChunkDetector } from './smooth-stream'; export type { StepResult } from './step-result'; export { streamText } from './stream-text'; export type { diff --git a/packages/ai/core/generate-text/smooth-stream.test.ts b/packages/ai/core/generate-text/smooth-stream.test.ts index 
7d5bbd5db00d..b2bc8257fe2a 100644 --- a/packages/ai/core/generate-text/smooth-stream.test.ts +++ b/packages/ai/core/generate-text/smooth-stream.test.ts @@ -23,6 +23,24 @@ describe('smoothStream', () => { return Promise.resolve(); } + describe('throws error if chunking option is invalid', async () => { + it('throws error if chunking strategy is invalid', async () => { + expect(() => { + smoothStream({ + chunking: 'foo' as any, + }); + }).toThrowError(); + }); + + it('throws error if chunking option is null', async () => { + expect(() => { + smoothStream({ + chunking: null as any, + }); + }).toThrowError(); + }); + }); + describe('word chunking', () => { it('should combine partial words', async () => { const stream = convertArrayToReadableStream([ @@ -333,6 +351,40 @@ describe('smoothStream', () => { ] `); }); + + it(`doesn't return chunks with just spaces`, async () => { + const stream = convertArrayToReadableStream([ + { type: 'text-delta', textDelta: ' ' }, + { type: 'text-delta', textDelta: ' ' }, + { type: 'text-delta', textDelta: ' ' }, + { type: 'text-delta', textDelta: 'foo' }, + + { type: 'step-finish' }, + { type: 'finish' }, + ]).pipeThrough( + smoothStream({ + delayInMs: 10, + _internal: { delay }, + })({ tools: {} }), + ); + + await consumeStream(stream); + + expect(events).toMatchInlineSnapshot(` + [ + { + "textDelta": " foo", + "type": "text-delta", + }, + { + "type": "step-finish", + }, + { + "type": "finish", + }, + ] + `); + }); }); describe('line chunking', () => { @@ -423,6 +475,42 @@ describe('smoothStream', () => { }); describe('custom chunking', () => { + it(`should return correct result for regexes that don't match from the exact start onwards`, async () => { + const stream = convertArrayToReadableStream([ + { textDelta: 'Hello_, world!', type: 'text-delta' }, + { type: 'step-finish' }, + { type: 'finish' }, + ]).pipeThrough( + smoothStream({ + chunking: /_/, + delayInMs: 10, + _internal: { delay }, + })({ tools: {} }), + ); + + await consumeStream(stream); + + expect(events).toMatchInlineSnapshot(` + [ + "delay 10", + { + "textDelta": "Hello_", + "type": "text-delta", + }, + { + "textDelta": ", world!", + "type": "text-delta", + }, + { + "type": "step-finish", + }, + { + "type": "finish", + }, + ] + `); + }); + it('should support custom chunking regexps (character-level)', async () => { const stream = convertArrayToReadableStream([ { textDelta: 'Hello, world!', type: 'text-delta' }, @@ -471,6 +559,87 @@ describe('smoothStream', () => { }); }); + describe('custom callback chunking', () => { + it('should support custom chunking callback', async () => { + const stream = convertArrayToReadableStream([ + { textDelta: 'He_llo, ', type: 'text-delta' }, + { textDelta: 'w_orld!', type: 'text-delta' }, + { type: 'step-finish' }, + { type: 'finish' }, + ]).pipeThrough( + smoothStream({ + chunking: buffer => /[^_]*_/.exec(buffer)?.[0], + _internal: { delay }, + })({ tools: {} }), + ); + + await consumeStream(stream); + + expect(events).toMatchInlineSnapshot(` + [ + "delay 10", + { + "textDelta": "He_", + "type": "text-delta", + }, + "delay 10", + { + "textDelta": "llo, w_", + "type": "text-delta", + }, + { + "textDelta": "orld!", + "type": "text-delta", + }, + { + "type": "step-finish", + }, + { + "type": "finish", + }, + ] + `); + }); + + describe('throws errors if the chunking function invalid matches', async () => { + it('throws empty match error', async () => { + const stream = convertArrayToReadableStream([ + { textDelta: 'Hello, world!', type: 'text-delta' }, + { type: 
'step-finish' },
+          { type: 'finish' },
+        ]).pipeThrough(
+          smoothStream({ chunking: () => '', _internal: { delay } })({
+            tools: {},
+          }),
+        );
+
+        await expect(
+          consumeStream(stream),
+        ).rejects.toThrowErrorMatchingInlineSnapshot(
+          `[Error: Chunking function must return a non-empty string.]`,
+        );
+      });
+
+      it('throws match prefix error', async () => {
+        const stream = convertArrayToReadableStream([
+          { textDelta: 'Hello, world!', type: 'text-delta' },
+          { type: 'step-finish' },
+          { type: 'finish' },
+        ]).pipeThrough(
+          smoothStream({ chunking: () => 'world', _internal: { delay } })({
+            tools: {},
+          }),
+        );
+
+        await expect(
+          consumeStream(stream),
+        ).rejects.toThrowErrorMatchingInlineSnapshot(
+          `[Error: Chunking function must return a match that is a prefix of the buffer. Received: "world" expected to start with "Hello, world!"]`,
+        );
+      });
+    });
+  });
+
   describe('delay', () => {
     it('should default to 10ms', async () => {
       const stream = convertArrayToReadableStream([
diff --git a/packages/ai/core/generate-text/smooth-stream.ts b/packages/ai/core/generate-text/smooth-stream.ts
index 3cca81c15ee9..86be60ca70ca 100644
--- a/packages/ai/core/generate-text/smooth-stream.ts
+++ b/packages/ai/core/generate-text/smooth-stream.ts
@@ -1,13 +1,22 @@
-import { InvalidArgumentError } from '@ai-sdk/provider';
 import { delay as originalDelay } from '@ai-sdk/provider-utils';
 import { TextStreamPart } from './stream-text-result';
 import { ToolSet } from './tool-set';
+import { InvalidArgumentError } from '@ai-sdk/provider';

 const CHUNKING_REGEXPS = {
-  word: /\s*\S+\s+/m,
-  line: /[^\n]*\n/m,
+  word: /\S+\s+/m,
+  line: /\n+/m,
 };

+/**
+ * Detects the first chunk in a buffer.
+ *
+ * @param buffer - The buffer to detect the first chunk in.
+ *
+ * @returns The first detected chunk, or `undefined` if no chunk was detected.
+ */
+export type ChunkDetector = (buffer: string) => string | undefined | null;
+
 /**
  * Smooths text streaming output.
  *
@@ -22,7 +31,7 @@ export function smoothStream<TOOLS extends ToolSet>({
   _internal: { delay = originalDelay } = {},
 }: {
   delayInMs?: number | null;
-  chunking?: 'word' | 'line' | RegExp;
+  chunking?: 'word' | 'line' | RegExp | ChunkDetector;
   /**
    * Internal. For test use only. May change without notice.
    */
@@ -32,14 +41,48 @@ export function smoothStream<TOOLS extends ToolSet>({
 } = {}): (options: {
   tools: TOOLS;
 }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>> {
-  const chunkingRegexp =
-    typeof chunking === 'string' ? CHUNKING_REGEXPS[chunking] : chunking;
+  let detectChunk: ChunkDetector;

-  if (chunkingRegexp == null) {
-    throw new InvalidArgumentError({
-      argument: 'chunking',
-      message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`,
-    });
+  if (typeof chunking === 'function') {
+    detectChunk = buffer => {
+      const match = chunking(buffer);
+
+      if (match == null) {
+        return null;
+      }
+
+      if (!match.length) {
+        throw new Error(`Chunking function must return a non-empty string.`);
+      }
+
+      if (!buffer.startsWith(match)) {
+        throw new Error(
+          `Chunking function must return a match that is a prefix of the buffer. Received: "${match}" expected to start with "${buffer}"`,
+        );
+      }
+
+      return match;
+    };
+  } else {
+    const chunkingRegex =
+      typeof chunking === 'string' ? CHUNKING_REGEXPS[chunking] : chunking;
+
+    if (chunkingRegex == null) {
+      throw new InvalidArgumentError({
+        argument: 'chunking',
+        message: `Chunking must be "word" or "line" or a RegExp. 
Received: ${chunking}`, + }); + } + + detectChunk = buffer => { + const match = chunkingRegex.exec(buffer); + + if (!match) { + return null; + } + + return buffer.slice(0, match.index) + match?.[0]; + }; } return () => { @@ -60,10 +103,10 @@ export function smoothStream({ buffer += chunk.textDelta; let match; - while ((match = chunkingRegexp.exec(buffer)) != null) { - const chunk = match[0]; - controller.enqueue({ type: 'text-delta', textDelta: chunk }); - buffer = buffer.slice(chunk.length); + + while ((match = detectChunk(buffer)) != null) { + controller.enqueue({ type: 'text-delta', textDelta: match }); + buffer = buffer.slice(match.length); await delay(delayInMs); } From c8136b46629381d11b6956e89c7411f2badc3f6b Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Sat, 5 Apr 2025 11:21:44 +0200 Subject: [PATCH 0030/1307] fix (examples): LanguageModelV2 (#5562) --- examples/ai-core/src/stream-text/smooth-stream-chinese.ts | 4 ++-- examples/ai-core/src/stream-text/smooth-stream-japanese.ts | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/ai-core/src/stream-text/smooth-stream-chinese.ts b/examples/ai-core/src/stream-text/smooth-stream-chinese.ts index 71b8d1e99cae..be0f86eb8ef8 100644 --- a/examples/ai-core/src/stream-text/smooth-stream-chinese.ts +++ b/examples/ai-core/src/stream-text/smooth-stream-chinese.ts @@ -1,9 +1,9 @@ import { simulateReadableStream, smoothStream, streamText } from 'ai'; -import { MockLanguageModelV1 } from 'ai/test'; +import { MockLanguageModelV2 } from 'ai/test'; async function main() { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: simulateReadableStream({ chunks: [ diff --git a/examples/ai-core/src/stream-text/smooth-stream-japanese.ts b/examples/ai-core/src/stream-text/smooth-stream-japanese.ts index 5139d835be3b..272027e72e25 100644 --- a/examples/ai-core/src/stream-text/smooth-stream-japanese.ts +++ b/examples/ai-core/src/stream-text/smooth-stream-japanese.ts @@ -1,9 +1,9 @@ import { simulateReadableStream, smoothStream, streamText } from 'ai'; -import { MockLanguageModelV1 } from 'ai/test'; +import { MockLanguageModelV2 } from 'ai/test'; async function main() { const result = streamText({ - model: new MockLanguageModelV1({ + model: new MockLanguageModelV2({ doStream: async () => ({ stream: simulateReadableStream({ chunks: [ From 69f88e579b50bfb85976b9d760c67fa3ef63ff90 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 5 Apr 2025 11:35:45 +0200 Subject: [PATCH 0031/1307] Version Packages (canary) (#5537) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .changeset/pre.json | 9 +- examples/ai-core/package.json | 44 ++-- examples/express/package.json | 4 +- examples/fastify/package.json | 4 +- examples/hono/package.json | 4 +- examples/mcp/package.json | 4 +- examples/nest/package.json | 4 +- examples/next-fastapi/package.json | 6 +- examples/next-google-vertex/package.json | 4 +- examples/next-langchain/package.json | 4 +- .../package.json | 6 +- examples/next-openai-pages/package.json | 6 +- .../next-openai-telemetry-sentry/package.json | 6 +- examples/next-openai-telemetry/package.json | 6 +- .../package.json | 6 +- examples/next-openai/package.json | 20 +- examples/node-http-server/package.json | 4 +- examples/nuxt-openai/package.json | 6 +- examples/sveltekit-openai/package.json | 10 +- packages/ai/CHANGELOG.md | 16 ++ 
packages/ai/package.json | 6 +- .../ai/tests/e2e/next-server/CHANGELOG.md | 9 + packages/amazon-bedrock/CHANGELOG.md | 9 + packages/amazon-bedrock/package.json | 4 +- packages/anthropic/CHANGELOG.md | 9 + packages/anthropic/package.json | 4 +- packages/azure/CHANGELOG.md | 10 + packages/azure/package.json | 6 +- packages/cerebras/CHANGELOG.md | 10 + packages/cerebras/package.json | 6 +- packages/cohere/CHANGELOG.md | 9 + packages/cohere/package.json | 4 +- packages/deepinfra/CHANGELOG.md | 10 + packages/deepinfra/package.json | 6 +- packages/deepseek/CHANGELOG.md | 10 + packages/deepseek/package.json | 6 +- packages/fal/CHANGELOG.md | 9 + packages/fal/package.json | 4 +- packages/fireworks/CHANGELOG.md | 10 + packages/fireworks/package.json | 6 +- packages/google-vertex/CHANGELOG.md | 13 + packages/google-vertex/package.json | 8 +- packages/google/CHANGELOG.md | 10 + packages/google/package.json | 4 +- packages/groq/CHANGELOG.md | 9 + packages/groq/package.json | 4 +- packages/luma/CHANGELOG.md | 9 + packages/luma/package.json | 4 +- packages/mistral/CHANGELOG.md | 9 + packages/mistral/package.json | 4 +- packages/openai-compatible/CHANGELOG.md | 9 + packages/openai-compatible/package.json | 4 +- packages/openai/CHANGELOG.md | 9 + packages/openai/package.json | 4 +- packages/perplexity/CHANGELOG.md | 9 + packages/perplexity/package.json | 4 +- packages/provider-utils/CHANGELOG.md | 8 + packages/provider-utils/package.json | 2 +- packages/react/CHANGELOG.md | 10 + packages/react/package.json | 6 +- packages/replicate/CHANGELOG.md | 9 + packages/replicate/package.json | 4 +- packages/svelte/CHANGELOG.md | 10 + packages/svelte/package.json | 6 +- packages/togetherai/CHANGELOG.md | 10 + packages/togetherai/package.json | 6 +- packages/ui-utils/CHANGELOG.md | 10 + packages/ui-utils/package.json | 4 +- packages/valibot/CHANGELOG.md | 9 + packages/valibot/package.json | 4 +- packages/vue/CHANGELOG.md | 10 + packages/vue/package.json | 6 +- packages/xai/CHANGELOG.md | 10 + packages/xai/package.json | 6 +- pnpm-lock.yaml | 226 +++++++++--------- 75 files changed, 535 insertions(+), 254 deletions(-) diff --git a/.changeset/pre.json b/.changeset/pre.json index 3a069d09c502..b6d9c9c49471 100644 --- a/.changeset/pre.json +++ b/.changeset/pre.json @@ -56,9 +56,16 @@ "generate-llms-txt": "0.0.0" }, "changesets": [ + "beige-ligers-kneel", + "clean-numbers-cover", "cuddly-icons-kick", + "eleven-lobsters-rescue", + "flat-plums-bake", "green-deers-scream", + "pink-deers-switch", + "seven-pens-itch", "silent-nails-taste", - "smooth-mirrors-kneel" + "smooth-mirrors-kneel", + "twelve-kids-travel" ] } diff --git a/examples/ai-core/package.json b/examples/ai-core/package.json index ff6b006ee995..912951386dc8 100644 --- a/examples/ai-core/package.json +++ b/examples/ai-core/package.json @@ -3,33 +3,33 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/amazon-bedrock": "3.0.0-canary.0", - "@ai-sdk/anthropic": "2.0.0-canary.0", - "@ai-sdk/azure": "2.0.0-canary.0", - "@ai-sdk/cerebras": "1.0.0-canary.0", - "@ai-sdk/cohere": "2.0.0-canary.0", - "@ai-sdk/deepinfra": "1.0.0-canary.0", - "@ai-sdk/deepseek": "1.0.0-canary.0", - "@ai-sdk/fal": "1.0.0-canary.0", - "@ai-sdk/fireworks": "1.0.0-canary.0", - "@ai-sdk/google": "2.0.0-canary.0", - "@ai-sdk/google-vertex": "3.0.0-canary.0", - "@ai-sdk/groq": "2.0.0-canary.0", - "@ai-sdk/luma": "1.0.0-canary.0", - "@ai-sdk/mistral": "2.0.0-canary.0", - "@ai-sdk/openai": "2.0.0-canary.0", - "@ai-sdk/openai-compatible": "1.0.0-canary.0", - "@ai-sdk/perplexity": 
"2.0.0-canary.0", + "@ai-sdk/amazon-bedrock": "3.0.0-canary.1", + "@ai-sdk/anthropic": "2.0.0-canary.1", + "@ai-sdk/azure": "2.0.0-canary.1", + "@ai-sdk/cerebras": "1.0.0-canary.1", + "@ai-sdk/cohere": "2.0.0-canary.1", + "@ai-sdk/deepinfra": "1.0.0-canary.1", + "@ai-sdk/deepseek": "1.0.0-canary.1", + "@ai-sdk/fal": "1.0.0-canary.1", + "@ai-sdk/fireworks": "1.0.0-canary.1", + "@ai-sdk/google": "2.0.0-canary.1", + "@ai-sdk/google-vertex": "3.0.0-canary.1", + "@ai-sdk/groq": "2.0.0-canary.1", + "@ai-sdk/luma": "1.0.0-canary.1", + "@ai-sdk/mistral": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/openai-compatible": "1.0.0-canary.1", + "@ai-sdk/perplexity": "2.0.0-canary.1", "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/replicate": "1.0.0-canary.0", - "@ai-sdk/togetherai": "1.0.0-canary.0", - "@ai-sdk/xai": "2.0.0-canary.0", - "@ai-sdk/valibot": "1.0.0-canary.0", + "@ai-sdk/replicate": "1.0.0-canary.1", + "@ai-sdk/togetherai": "1.0.0-canary.1", + "@ai-sdk/xai": "2.0.0-canary.1", + "@ai-sdk/valibot": "1.0.0-canary.1", "@google/generative-ai": "0.21.0", "@opentelemetry/auto-instrumentations-node": "0.54.0", "@opentelemetry/sdk-node": "0.54.2", "@opentelemetry/sdk-trace-node": "1.28.0", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "dotenv": "16.4.5", "image-type": "^5.2.0", "mathjs": "14.0.0", diff --git a/examples/express/package.json b/examples/express/package.json index b90610da1b18..5e9a2b130849 100644 --- a/examples/express/package.json +++ b/examples/express/package.json @@ -7,8 +7,8 @@ "type-check": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", - "ai": "5.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", + "ai": "5.0.0-canary.1", "dotenv": "16.4.5", "express": "5.0.1" }, diff --git a/examples/fastify/package.json b/examples/fastify/package.json index 8e137218dde0..b6b36c68ff04 100644 --- a/examples/fastify/package.json +++ b/examples/fastify/package.json @@ -3,8 +3,8 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", - "ai": "5.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", + "ai": "5.0.0-canary.1", "dotenv": "16.4.5", "fastify": "5.1.0" }, diff --git a/examples/hono/package.json b/examples/hono/package.json index 2dc55889e0fd..3753abe22e2e 100644 --- a/examples/hono/package.json +++ b/examples/hono/package.json @@ -3,9 +3,9 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", "@hono/node-server": "1.13.7", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "dotenv": "16.4.5", "hono": "4.6.9" }, diff --git a/examples/mcp/package.json b/examples/mcp/package.json index c73449d76dc4..1783ad4d963d 100644 --- a/examples/mcp/package.json +++ b/examples/mcp/package.json @@ -12,9 +12,9 @@ "type-check": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", "@modelcontextprotocol/sdk": "^1.7.0", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "dotenv": "16.4.5", "express": "5.0.1", "zod": "3.23.8" diff --git a/examples/nest/package.json b/examples/nest/package.json index 6c4f85ca4f35..b29dd73b9310 100644 --- a/examples/nest/package.json +++ b/examples/nest/package.json @@ -15,11 +15,11 @@ "lint": "eslint \"{src,apps,libs,test}/**/*.ts\" --fix" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", "@nestjs/common": "^10.4.15", "@nestjs/core": "^10.0.0", "@nestjs/platform-express": "^10.4.9", - "ai": 
"5.0.0-canary.0", + "ai": "5.0.0-canary.1", "reflect-metadata": "^0.2.0", "rxjs": "^7.8.1" }, diff --git a/examples/next-fastapi/package.json b/examples/next-fastapi/package.json index a6f061afb721..f88d4d2abc25 100644 --- a/examples/next-fastapi/package.json +++ b/examples/next-fastapi/package.json @@ -11,9 +11,9 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/ui-utils": "2.0.0-canary.0", - "@ai-sdk/react": "2.0.0-canary.0", - "ai": "5.0.0-canary.0", + "@ai-sdk/ui-utils": "2.0.0-canary.1", + "@ai-sdk/react": "2.0.0-canary.1", + "ai": "5.0.0-canary.1", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-google-vertex/package.json b/examples/next-google-vertex/package.json index 37a9b405d834..95c46e4cb8c8 100644 --- a/examples/next-google-vertex/package.json +++ b/examples/next-google-vertex/package.json @@ -9,8 +9,8 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/google-vertex": "3.0.0-canary.0", - "ai": "5.0.0-canary.0", + "@ai-sdk/google-vertex": "3.0.0-canary.1", + "ai": "5.0.0-canary.1", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-langchain/package.json b/examples/next-langchain/package.json index 0b0cf59ee4f0..4ceb88db54d7 100644 --- a/examples/next-langchain/package.json +++ b/examples/next-langchain/package.json @@ -9,10 +9,10 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/react": "2.0.0-canary.0", + "@ai-sdk/react": "2.0.0-canary.1", "@langchain/openai": "0.0.28", "@langchain/core": "0.1.63", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "langchain": "0.1.36", "next": "latest", "react": "^18", diff --git a/examples/next-openai-kasada-bot-protection/package.json b/examples/next-openai-kasada-bot-protection/package.json index e5258f289576..dd4c889c7e4f 100644 --- a/examples/next-openai-kasada-bot-protection/package.json +++ b/examples/next-openai-kasada-bot-protection/package.json @@ -9,10 +9,10 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", - "@ai-sdk/react": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/react": "2.0.0-canary.1", "@vercel/functions": "latest", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai-pages/package.json b/examples/next-openai-pages/package.json index 1eae5e1f38ec..4195fa98e4d7 100644 --- a/examples/next-openai-pages/package.json +++ b/examples/next-openai-pages/package.json @@ -9,9 +9,9 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", - "@ai-sdk/react": "2.0.0-canary.0", - "ai": "5.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/react": "2.0.0-canary.1", + "ai": "5.0.0-canary.1", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry-sentry/package.json b/examples/next-openai-telemetry-sentry/package.json index cf2a6a800201..c63d6b9fd8bc 100644 --- a/examples/next-openai-telemetry-sentry/package.json +++ b/examples/next-openai-telemetry-sentry/package.json @@ -9,15 +9,15 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", - "@ai-sdk/react": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/react": "2.0.0-canary.1", "@opentelemetry/api-logs": "0.55.0", "@opentelemetry/instrumentation": "0.52.1", "@opentelemetry/sdk-logs": "0.55.0", "@sentry/nextjs": "^8.42.0", "@sentry/opentelemetry": "8.22.0", "@vercel/otel": "1.10.0", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "next": "latest", 
"openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry/package.json b/examples/next-openai-telemetry/package.json index 5e1e221350c0..345b767efa62 100644 --- a/examples/next-openai-telemetry/package.json +++ b/examples/next-openai-telemetry/package.json @@ -9,13 +9,13 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", - "@ai-sdk/react": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/react": "2.0.0-canary.1", "@opentelemetry/api-logs": "0.55.0", "@opentelemetry/sdk-logs": "0.55.0", "@opentelemetry/instrumentation": "0.52.1", "@vercel/otel": "1.10.0", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-upstash-rate-limits/package.json b/examples/next-openai-upstash-rate-limits/package.json index 50cbf8010b11..a2cd2cda115e 100644 --- a/examples/next-openai-upstash-rate-limits/package.json +++ b/examples/next-openai-upstash-rate-limits/package.json @@ -9,11 +9,11 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", - "@ai-sdk/react": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/react": "2.0.0-canary.1", "@upstash/ratelimit": "^0.4.3", "@vercel/kv": "^0.2.2", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json index 30ba065323dc..4c514bf1f999 100644 --- a/examples/next-openai/package.json +++ b/examples/next-openai/package.json @@ -9,17 +9,17 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/anthropic": "2.0.0-canary.0", - "@ai-sdk/deepseek": "1.0.0-canary.0", - "@ai-sdk/fireworks": "1.0.0-canary.0", - "@ai-sdk/openai": "2.0.0-canary.0", - "@ai-sdk/google": "2.0.0-canary.0", - "@ai-sdk/google-vertex": "3.0.0-canary.0", - "@ai-sdk/perplexity": "2.0.0-canary.0", - "@ai-sdk/ui-utils": "2.0.0-canary.0", - "@ai-sdk/react": "2.0.0-canary.0", + "@ai-sdk/anthropic": "2.0.0-canary.1", + "@ai-sdk/deepseek": "1.0.0-canary.1", + "@ai-sdk/fireworks": "1.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/google": "2.0.0-canary.1", + "@ai-sdk/google-vertex": "3.0.0-canary.1", + "@ai-sdk/perplexity": "2.0.0-canary.1", + "@ai-sdk/ui-utils": "2.0.0-canary.1", + "@ai-sdk/react": "2.0.0-canary.1", "@vercel/blob": "^0.26.0", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/node-http-server/package.json b/examples/node-http-server/package.json index a1e9df859fe4..d4bbcd581cac 100644 --- a/examples/node-http-server/package.json +++ b/examples/node-http-server/package.json @@ -3,8 +3,8 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", - "ai": "5.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", + "ai": "5.0.0-canary.1", "dotenv": "16.4.5", "zod": "3.23.8", "zod-to-json-schema": "3.23.5" diff --git a/examples/nuxt-openai/package.json b/examples/nuxt-openai/package.json index 03b2908d6455..65a3b3449185 100644 --- a/examples/nuxt-openai/package.json +++ b/examples/nuxt-openai/package.json @@ -9,9 +9,9 @@ "postinstall": "nuxt prepare" }, "dependencies": { - "@ai-sdk/vue": "2.0.0-canary.0", - "@ai-sdk/openai": "2.0.0-canary.0", - "ai": "5.0.0-canary.0", + "@ai-sdk/vue": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.1", + "ai": "5.0.0-canary.1", "zod": "3.23.8" }, "devDependencies": { diff --git 
a/examples/sveltekit-openai/package.json b/examples/sveltekit-openai/package.json index b478d62aba0c..6292ddeb1e68 100644 --- a/examples/sveltekit-openai/package.json +++ b/examples/sveltekit-openai/package.json @@ -16,16 +16,16 @@ }, "type": "module", "devDependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0", - "@ai-sdk/svelte": "3.0.0-canary.0", - "@ai-sdk/ui-utils": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.1", + "@ai-sdk/svelte": "3.0.0-canary.1", + "@ai-sdk/ui-utils": "2.0.0-canary.1", "@eslint/compat": "^1.2.5", "@eslint/js": "^9.18.0", "@sveltejs/adapter-vercel": "^5.5.2", "@sveltejs/kit": "^2.16.0", "@sveltejs/vite-plugin-svelte": "^5.0.0", - "ai": "5.0.0-canary.0", + "ai": "5.0.0-canary.1", "autoprefixer": "^10.4.20", "bits-ui": "^1.3.9", "clsx": "^2.1.1", diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index 57feb8b448a1..cb9416ef3f18 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -1,5 +1,21 @@ # ai +## 5.0.0-canary.1 + +### Minor Changes + +- b7eae2d: feat (core): Add finishReason field to NoObjectGeneratedError + +### Patch Changes + +- c22ad54: feat(smooth-stream): chunking callbacks +- a4f3007: chore: remove ai/react +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/ui-utils@2.0.0-canary.1 + ## 5.0.0-canary.0 ### Major Changes diff --git a/packages/ai/package.json b/packages/ai/package.json index 0b342f7f7d43..707c6b8da40e 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -1,6 +1,6 @@ { "name": "ai", - "version": "5.0.0-canary.0", + "version": "5.0.0-canary.1", "description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript", "license": "Apache-2.0", "sideEffects": false, @@ -60,8 +60,8 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0", - "@ai-sdk/ui-utils": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.1", + "@ai-sdk/ui-utils": "2.0.0-canary.1", "@opentelemetry/api": "1.9.0", "jsondiffpatch": "0.6.0" }, diff --git a/packages/ai/tests/e2e/next-server/CHANGELOG.md b/packages/ai/tests/e2e/next-server/CHANGELOG.md index 0033698732f5..a589a799538a 100644 --- a/packages/ai/tests/e2e/next-server/CHANGELOG.md +++ b/packages/ai/tests/e2e/next-server/CHANGELOG.md @@ -4,6 +4,15 @@ ### Patch Changes +- Updated dependencies [c22ad54] +- Updated dependencies [b7eae2d] +- Updated dependencies [a4f3007] + - ai@5.0.0-canary.1 + +## 0.0.1-canary.0 + +### Patch Changes + - Updated dependencies [d5f588f] - Updated dependencies [9477ebb] - Updated dependencies [8026705] diff --git a/packages/amazon-bedrock/CHANGELOG.md b/packages/amazon-bedrock/CHANGELOG.md index 8813ddc85b83..97f4f8cbd3c4 100644 --- a/packages/amazon-bedrock/CHANGELOG.md +++ b/packages/amazon-bedrock/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/amazon-bedrock +## 3.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 3.0.0-canary.0 ### Major Changes diff --git a/packages/amazon-bedrock/package.json b/packages/amazon-bedrock/package.json index cbed052d4462..90db80c87596 100644 --- a/packages/amazon-bedrock/package.json +++ b/packages/amazon-bedrock/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/amazon-bedrock", - "version": "3.0.0-canary.0", + 
"version": "3.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,7 +31,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.1", "@smithy/eventstream-codec": "^4.0.1", "@smithy/util-utf8": "^4.0.0", "aws4fetch": "^1.0.20" diff --git a/packages/anthropic/CHANGELOG.md b/packages/anthropic/CHANGELOG.md index ebcb9c88bff8..d8291776725b 100644 --- a/packages/anthropic/CHANGELOG.md +++ b/packages/anthropic/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/anthropic +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/anthropic/package.json b/packages/anthropic/package.json index abd8b4ecbc17..ffe6ae31bb65 100644 --- a/packages/anthropic/package.json +++ b/packages/anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/anthropic", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,7 +38,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/azure/CHANGELOG.md b/packages/azure/CHANGELOG.md index db0b22deb542..12b1a3d4ed0f 100644 --- a/packages/azure/CHANGELOG.md +++ b/packages/azure/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/azure +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/openai@2.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/azure/package.json b/packages/azure/package.json index 9b1a23387535..92fc41b5fe02 100644 --- a/packages/azure/package.json +++ b/packages/azure/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/azure", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,9 +31,9 @@ } }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.0", + "@ai-sdk/openai": "2.0.0-canary.1", "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/cerebras/CHANGELOG.md b/packages/cerebras/CHANGELOG.md index 07ea5234e859..9ad0d71029f7 100644 --- a/packages/cerebras/CHANGELOG.md +++ b/packages/cerebras/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/cerebras +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/cerebras/package.json b/packages/cerebras/package.json index 3ab7366ca219..af5418c4f904 100644 --- a/packages/cerebras/package.json +++ b/packages/cerebras/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/cerebras", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/openai-compatible": "1.0.0-canary.1", 
"@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/cohere/CHANGELOG.md b/packages/cohere/CHANGELOG.md index 8585748aaed8..73c315a548fd 100644 --- a/packages/cohere/CHANGELOG.md +++ b/packages/cohere/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/cohere +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/cohere/package.json b/packages/cohere/package.json index 947929c5dfca..709179bbf3a8 100644 --- a/packages/cohere/package.json +++ b/packages/cohere/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/cohere", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -32,7 +32,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/deepinfra/CHANGELOG.md b/packages/deepinfra/CHANGELOG.md index fb4d7a390f6e..c6eb5426ebf2 100644 --- a/packages/deepinfra/CHANGELOG.md +++ b/packages/deepinfra/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/deepinfra +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/deepinfra/package.json b/packages/deepinfra/package.json index cf25cf3e27c6..729ccf146a10 100644 --- a/packages/deepinfra/package.json +++ b/packages/deepinfra/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/deepinfra", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/openai-compatible": "1.0.0-canary.1", "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/deepseek/CHANGELOG.md b/packages/deepseek/CHANGELOG.md index b06004773799..c288796d44a3 100644 --- a/packages/deepseek/CHANGELOG.md +++ b/packages/deepseek/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/deepseek +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/deepseek/package.json b/packages/deepseek/package.json index 74dc804436a2..a3934ecce999 100644 --- a/packages/deepseek/package.json +++ b/packages/deepseek/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/deepseek", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/openai-compatible": "1.0.0-canary.1", "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, 
"devDependencies": { "@types/node": "20.17.24", diff --git a/packages/fal/CHANGELOG.md b/packages/fal/CHANGELOG.md index 5683f4faab4a..e20646e82b40 100644 --- a/packages/fal/CHANGELOG.md +++ b/packages/fal/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/fal +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/fal/package.json b/packages/fal/package.json index 66b0f77da1c7..3f8ed292c82f 100644 --- a/packages/fal/package.json +++ b/packages/fal/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/fal", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,7 +31,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/fireworks/CHANGELOG.md b/packages/fireworks/CHANGELOG.md index 15cf7cdffa18..85cbb7aff9f4 100644 --- a/packages/fireworks/CHANGELOG.md +++ b/packages/fireworks/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/fireworks +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/fireworks/package.json b/packages/fireworks/package.json index b7626e554d3a..728e7b6673c4 100644 --- a/packages/fireworks/package.json +++ b/packages/fireworks/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/fireworks", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/openai-compatible": "1.0.0-canary.1", "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/google-vertex/CHANGELOG.md b/packages/google-vertex/CHANGELOG.md index 075e29612fc0..9b91181f7660 100644 --- a/packages/google-vertex/CHANGELOG.md +++ b/packages/google-vertex/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/google-vertex +## 3.0.0-canary.1 + +### Patch Changes + +- 779d916: feat: add provider option schemas for vertex imagegen and google genai +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [779d916] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/google@2.0.0-canary.1 + - @ai-sdk/anthropic@2.0.0-canary.1 + ## 3.0.0-canary.0 ### Major Changes diff --git a/packages/google-vertex/package.json b/packages/google-vertex/package.json index bf84d39f2782..4f065070082b 100644 --- a/packages/google-vertex/package.json +++ b/packages/google-vertex/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google-vertex", - "version": "3.0.0-canary.0", + "version": "3.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -49,10 +49,10 @@ } }, "dependencies": { - "@ai-sdk/anthropic": "2.0.0-canary.0", - "@ai-sdk/google": "2.0.0-canary.0", + "@ai-sdk/anthropic": "2.0.0-canary.1", + "@ai-sdk/google": "2.0.0-canary.1", "@ai-sdk/provider": "2.0.0-canary.0", - 
"@ai-sdk/provider-utils": "3.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.1", "google-auth-library": "^9.15.0" }, "devDependencies": { diff --git a/packages/google/CHANGELOG.md b/packages/google/CHANGELOG.md index 059921036f0a..dcdfe76e8fae 100644 --- a/packages/google/CHANGELOG.md +++ b/packages/google/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/google +## 2.0.0-canary.1 + +### Patch Changes + +- 779d916: feat: add provider option schemas for vertex imagegen and google genai +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/google/package.json b/packages/google/package.json index b77ba4b124a9..6a04682cb5c3 100644 --- a/packages/google/package.json +++ b/packages/google/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -39,7 +39,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/groq/CHANGELOG.md b/packages/groq/CHANGELOG.md index 4274cb7d8cf1..648ca563ce8c 100644 --- a/packages/groq/CHANGELOG.md +++ b/packages/groq/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/groq +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/groq/package.json b/packages/groq/package.json index 312ae518efd7..1af250006c95 100644 --- a/packages/groq/package.json +++ b/packages/groq/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/groq", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -32,7 +32,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/luma/CHANGELOG.md b/packages/luma/CHANGELOG.md index e35bcbe886da..542fb0bad325 100644 --- a/packages/luma/CHANGELOG.md +++ b/packages/luma/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/luma +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/luma/package.json b/packages/luma/package.json index f0fefe0e3ecd..0f808711c89a 100644 --- a/packages/luma/package.json +++ b/packages/luma/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/luma", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,7 +31,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/mistral/CHANGELOG.md b/packages/mistral/CHANGELOG.md index 946965a5237a..345d6733e7c3 100644 --- a/packages/mistral/CHANGELOG.md +++ b/packages/mistral/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/mistral +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] 
+- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/mistral/package.json b/packages/mistral/package.json index 28390f995dff..ce0861769338 100644 --- a/packages/mistral/package.json +++ b/packages/mistral/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/mistral", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -32,7 +32,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/openai-compatible/CHANGELOG.md b/packages/openai-compatible/CHANGELOG.md index ae846be5a219..bbb3eed81019 100644 --- a/packages/openai-compatible/CHANGELOG.md +++ b/packages/openai-compatible/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/openai-compatible +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/openai-compatible/package.json b/packages/openai-compatible/package.json index 09e4a0410967..e2869bf97843 100644 --- a/packages/openai-compatible/package.json +++ b/packages/openai-compatible/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai-compatible", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -39,7 +39,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/openai/CHANGELOG.md b/packages/openai/CHANGELOG.md index 3d949a7c9b92..afd5f37f537d 100644 --- a/packages/openai/CHANGELOG.md +++ b/packages/openai/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/openai +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/openai/package.json b/packages/openai/package.json index ed76d103b09c..c710358479a8 100644 --- a/packages/openai/package.json +++ b/packages/openai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -39,7 +39,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/perplexity/CHANGELOG.md b/packages/perplexity/CHANGELOG.md index 766ae3773e4d..152560f423bd 100644 --- a/packages/perplexity/CHANGELOG.md +++ b/packages/perplexity/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/perplexity +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/perplexity/package.json b/packages/perplexity/package.json index 1ebbf3c199a8..f5c8fbbe096a 100644 --- a/packages/perplexity/package.json +++ b/packages/perplexity/package.json @@ 
-1,6 +1,6 @@ { "name": "@ai-sdk/perplexity", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,7 +31,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/provider-utils/CHANGELOG.md b/packages/provider-utils/CHANGELOG.md index a11b1c1788fd..9a1b040baccf 100644 --- a/packages/provider-utils/CHANGELOG.md +++ b/packages/provider-utils/CHANGELOG.md @@ -1,5 +1,13 @@ # @ai-sdk/provider-utils +## 3.0.0-canary.1 + +### Patch Changes + +- 060370c: feat(provider-utils): add TestServerCall#requestCredentials +- 0c0c0b3: refactor (provider-utils): move `customAlphabet()` method from `nanoid` into codebase +- 63d791d: chore (utils): remove unused test helpers + ## 3.0.0-canary.0 ### Major Changes diff --git a/packages/provider-utils/package.json b/packages/provider-utils/package.json index 2c5efde10370..c8f603c576a9 100644 --- a/packages/provider-utils/package.json +++ b/packages/provider-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/provider-utils", - "version": "3.0.0-canary.0", + "version": "3.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", diff --git a/packages/react/CHANGELOG.md b/packages/react/CHANGELOG.md index 34425d33b860..5231598e92ea 100644 --- a/packages/react/CHANGELOG.md +++ b/packages/react/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/react +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/ui-utils@2.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/react/package.json b/packages/react/package.json index aeff131688fa..7011b36c5a42 100644 --- a/packages/react/package.json +++ b/packages/react/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/react", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "3.0.0-canary.0", - "@ai-sdk/ui-utils": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.1", + "@ai-sdk/ui-utils": "2.0.0-canary.1", "swr": "^2.2.5", "throttleit": "2.1.0" }, diff --git a/packages/replicate/CHANGELOG.md b/packages/replicate/CHANGELOG.md index 4576643441cd..460450d6c028 100644 --- a/packages/replicate/CHANGELOG.md +++ b/packages/replicate/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/replicate +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/replicate/package.json b/packages/replicate/package.json index 2c02e51c7659..20063125d09d 100644 --- a/packages/replicate/package.json +++ b/packages/replicate/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/replicate", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,7 +31,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/svelte/CHANGELOG.md 
b/packages/svelte/CHANGELOG.md index 2955dd330a02..615d33c3f250 100644 --- a/packages/svelte/CHANGELOG.md +++ b/packages/svelte/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/svelte +## 3.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/ui-utils@2.0.0-canary.1 + ## 3.0.0-canary.0 ### Major Changes diff --git a/packages/svelte/package.json b/packages/svelte/package.json index a22860d8edf1..10b2a0d07db5 100644 --- a/packages/svelte/package.json +++ b/packages/svelte/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/svelte", - "version": "3.0.0-canary.0", + "version": "3.0.0-canary.1", "license": "Apache-2.0", "scripts": { "build": "pnpm prepack", @@ -51,8 +51,8 @@ } }, "dependencies": { - "@ai-sdk/provider-utils": "3.0.0-canary.0", - "@ai-sdk/ui-utils": "2.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1", + "@ai-sdk/ui-utils": "2.0.0-canary.1" }, "devDependencies": { "@eslint/compat": "^1.2.5", diff --git a/packages/togetherai/CHANGELOG.md b/packages/togetherai/CHANGELOG.md index 9c3fe0531343..c163d5bce55e 100644 --- a/packages/togetherai/CHANGELOG.md +++ b/packages/togetherai/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/togetherai +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/togetherai/package.json b/packages/togetherai/package.json index b10ff4a117c7..d8afb23fc753 100644 --- a/packages/togetherai/package.json +++ b/packages/togetherai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/togetherai", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/openai-compatible": "1.0.0-canary.1", "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/ui-utils/CHANGELOG.md b/packages/ui-utils/CHANGELOG.md index b9a9b8bc3ba9..c3b6a5de862b 100644 --- a/packages/ui-utils/CHANGELOG.md +++ b/packages/ui-utils/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/ui-utils +## 2.0.0-canary.1 + +### Patch Changes + +- 63d791d: chore (utils): remove unused test helpers +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/ui-utils/package.json b/packages/ui-utils/package.json index 4579503ea454..965046b1fbf7 100644 --- a/packages/ui-utils/package.json +++ b/packages/ui-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/ui-utils", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -32,7 +32,7 @@ }, "dependencies": { "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.1", "zod-to-json-schema": "^3.24.1" }, "devDependencies": { diff --git a/packages/valibot/CHANGELOG.md b/packages/valibot/CHANGELOG.md index bba2e8bae06f..3ddbd465638d 100644 --- a/packages/valibot/CHANGELOG.md +++ b/packages/valibot/CHANGELOG.md @@ -1,5 
+1,14 @@ # @ai-sdk/valibot +## 1.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [c22ad54] +- Updated dependencies [b7eae2d] +- Updated dependencies [a4f3007] + - ai@5.0.0-canary.1 + ## 1.0.0-canary.0 ### Major Changes diff --git a/packages/valibot/package.json b/packages/valibot/package.json index d9c964928f15..954187cd2731 100644 --- a/packages/valibot/package.json +++ b/packages/valibot/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/valibot", - "version": "1.0.0-canary.0", + "version": "1.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -27,7 +27,7 @@ } }, "dependencies": { - "ai": "5.0.0-canary.0" + "ai": "5.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/vue/CHANGELOG.md b/packages/vue/CHANGELOG.md index 8039ef331024..fa8bad444e0b 100644 --- a/packages/vue/CHANGELOG.md +++ b/packages/vue/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/vue +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/ui-utils@2.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/vue/package.json b/packages/vue/package.json index 2891e060ed66..c4dbb5f2e1a4 100644 --- a/packages/vue/package.json +++ b/packages/vue/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/vue", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "3.0.0-canary.0", - "@ai-sdk/ui-utils": "2.0.0-canary.0", + "@ai-sdk/provider-utils": "3.0.0-canary.1", + "@ai-sdk/ui-utils": "2.0.0-canary.1", "swrv": "^1.0.4" }, "devDependencies": { diff --git a/packages/xai/CHANGELOG.md b/packages/xai/CHANGELOG.md index 50074b1f2b6f..f2a9fec66b76 100644 --- a/packages/xai/CHANGELOG.md +++ b/packages/xai/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/xai +## 2.0.0-canary.1 + +### Patch Changes + +- Updated dependencies [060370c] +- Updated dependencies [0c0c0b3] +- Updated dependencies [63d791d] + - @ai-sdk/provider-utils@3.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.1 + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/xai/package.json b/packages/xai/package.json index c38ebffe4bf0..505eceec111a 100644 --- a/packages/xai/package.json +++ b/packages/xai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/xai", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.0", + "@ai-sdk/openai-compatible": "1.0.0-canary.1", "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.0" + "@ai-sdk/provider-utils": "3.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c5dd76526282..fa601e6e694d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -60,70 +60,70 @@ importers: examples/ai-core: dependencies: '@ai-sdk/amazon-bedrock': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../../packages/amazon-bedrock '@ai-sdk/anthropic': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/anthropic '@ai-sdk/azure': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/azure '@ai-sdk/cerebras': - specifier: 1.0.0-canary.0 + 
specifier: 1.0.0-canary.1 version: link:../../packages/cerebras '@ai-sdk/cohere': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/cohere '@ai-sdk/deepinfra': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/deepinfra '@ai-sdk/deepseek': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/deepseek '@ai-sdk/fal': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/fal '@ai-sdk/fireworks': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../../packages/google-vertex '@ai-sdk/groq': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/groq '@ai-sdk/luma': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/luma '@ai-sdk/mistral': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/mistral '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/openai-compatible '@ai-sdk/perplexity': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/perplexity '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../../packages/provider '@ai-sdk/replicate': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/replicate '@ai-sdk/togetherai': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/togetherai '@ai-sdk/valibot': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/valibot '@ai-sdk/xai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/xai '@google/generative-ai': specifier: 0.21.0 @@ -138,7 +138,7 @@ importers: specifier: 1.28.0 version: 1.28.0(@opentelemetry/api@1.9.0) ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -178,10 +178,10 @@ importers: examples/express: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -206,10 +206,10 @@ importers: examples/fastify: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -231,13 +231,13 @@ importers: examples/hono: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@hono/node-server': specifier: 1.13.7 version: 1.13.7(hono@4.6.9) ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -259,13 +259,13 @@ importers: examples/mcp: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@modelcontextprotocol/sdk': specifier: ^1.7.0 version: 1.7.0 ai: - specifier: 5.0.0-canary.0 + specifier: 
5.0.0-canary.1 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -293,7 +293,7 @@ importers: examples/nest: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@nestjs/common': specifier: ^10.4.15 @@ -305,7 +305,7 @@ importers: specifier: ^10.4.9 version: 10.4.9(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2) ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai reflect-metadata: specifier: ^0.2.0 @@ -381,13 +381,13 @@ importers: examples/next-fastapi: dependencies: '@ai-sdk/react': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/react '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/ui-utils ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -436,10 +436,10 @@ importers: examples/next-google-vertex: dependencies: '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../../packages/google-vertex ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -479,7 +479,7 @@ importers: examples/next-langchain: dependencies: '@ai-sdk/react': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/react '@langchain/core': specifier: 0.1.63 @@ -488,7 +488,7 @@ importers: specifier: 0.0.28 version: 0.0.28 ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai langchain: specifier: 0.1.36 @@ -534,37 +534,37 @@ importers: examples/next-openai: dependencies: '@ai-sdk/anthropic': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/anthropic '@ai-sdk/deepseek': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/deepseek '@ai-sdk/fireworks': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../../packages/google-vertex '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@ai-sdk/perplexity': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/perplexity '@ai-sdk/react': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/react '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/ui-utils '@vercel/blob': specifier: ^0.26.0 version: 0.26.0 ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai next: specifier: latest @@ -616,16 +616,16 @@ importers: examples/next-openai-kasada-bot-protection: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/react '@vercel/functions': specifier: latest version: 2.0.0(@aws-sdk/credential-provider-web-identity@3.662.0(@aws-sdk/client-sts@3.662.0)) ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai next: specifier: latest @@ -671,13 +671,13 @@ importers: 
examples/next-openai-pages: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/react ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai next: specifier: latest @@ -726,10 +726,10 @@ importers: examples/next-openai-telemetry: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/react '@opentelemetry/api-logs': specifier: 0.55.0 @@ -744,7 +744,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.29.0(@opentelemetry/api@1.9.0)) ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai next: specifier: latest @@ -793,10 +793,10 @@ importers: examples/next-openai-telemetry-sentry: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/react '@opentelemetry/api-logs': specifier: 0.55.0 @@ -817,7 +817,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.28.0(@opentelemetry/api@1.9.0)) ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai next: specifier: latest @@ -866,10 +866,10 @@ importers: examples/next-openai-upstash-rate-limits: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/react '@upstash/ratelimit': specifier: ^0.4.3 @@ -878,7 +878,7 @@ importers: specifier: ^0.2.2 version: 0.2.4 ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai next: specifier: latest @@ -924,10 +924,10 @@ importers: examples/node-http-server: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -952,13 +952,13 @@ importers: examples/nuxt-openai: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/openai '@ai-sdk/vue': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/vue ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai zod: specifier: 3.23.8 @@ -1007,16 +1007,16 @@ importers: examples/sveltekit-openai: devDependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: 
link:../../packages/openai '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../../packages/provider-utils '@ai-sdk/svelte': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../../packages/svelte '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/ui-utils '@eslint/compat': specifier: ^1.2.5 @@ -1034,7 +1034,7 @@ importers: specifier: ^5.0.0 version: 5.0.3(svelte@5.22.4)(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../../packages/ai autoprefixer: specifier: ^10.4.20 @@ -1094,10 +1094,10 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../ui-utils '@opentelemetry/api': specifier: 1.9.0 @@ -1170,7 +1170,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils '@smithy/eventstream-codec': specifier: ^4.0.1 @@ -1204,7 +1204,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1226,13 +1226,13 @@ importers: packages/azure: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../openai '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1254,13 +1254,13 @@ importers: packages/cerebras: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../openai-compatible '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1334,7 +1334,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1356,13 +1356,13 @@ importers: packages/deepinfra: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../openai-compatible '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1384,13 +1384,13 @@ importers: packages/deepseek: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../openai-compatible '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1415,7 +1415,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils 
devDependencies: '@types/node': @@ -1437,13 +1437,13 @@ importers: packages/fireworks: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../openai-compatible '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1468,7 +1468,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1490,16 +1490,16 @@ importers: packages/google-vertex: dependencies: '@ai-sdk/anthropic': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../anthropic '@ai-sdk/google': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../google '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils google-auth-library: specifier: ^9.15.0 @@ -1527,7 +1527,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1552,7 +1552,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1577,7 +1577,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1602,7 +1602,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1627,7 +1627,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1652,7 +1652,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1724,10 +1724,10 @@ importers: packages/react: dependencies: '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../ui-utils react: specifier: ^18 || ^19 || ^19.0.0-rc @@ -1791,7 +1791,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1813,10 +1813,10 @@ importers: packages/svelte: dependencies: '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../ui-utils devDependencies: '@eslint/compat': @@ -1877,13 +1877,13 @@ importers: packages/togetherai: dependencies: '@ai-sdk/openai-compatible': - specifier: 
1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../openai-compatible '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': @@ -1908,7 +1908,7 @@ importers: specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils zod-to-json-schema: specifier: ^3.24.1 @@ -1942,7 +1942,7 @@ importers: specifier: ^1.0.0-rc.0 || ^1.0.0 version: 1.0.0-rc.0(valibot@1.0.0-rc.0(typescript@5.6.3)) ai: - specifier: 5.0.0-canary.0 + specifier: 5.0.0-canary.1 version: link:../ai devDependencies: '@types/node': @@ -1964,10 +1964,10 @@ importers: packages/vue: dependencies: '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../ui-utils swrv: specifier: ^1.0.4 @@ -2019,13 +2019,13 @@ importers: packages/xai: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.0 + specifier: 1.0.0-canary.1 version: link:../openai-compatible '@ai-sdk/provider': specifier: 2.0.0-canary.0 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.0 + specifier: 3.0.0-canary.1 version: link:../provider-utils devDependencies: '@types/node': From bd398e4bba42bd76f886db0078f3218c0cb6c3ad Mon Sep 17 00:00:00 2001 From: Grace Yun <74513600+iteratetograceness@users.noreply.github.com> Date: Sat, 5 Apr 2025 15:43:17 -0400 Subject: [PATCH 0032/1307] fix (core): consume stream on abort (#5492) --- .changeset/thin-numbers-shave.md | 5 ++ .../01-ai-sdk-core/02-stream-text.mdx | 19 +++- .../use-chat-resilient-persistence/route.ts | 9 +- .../[id]/chat.tsx | 26 ++++-- .../core/generate-text/stream-text-result.ts | 8 +- .../ai/core/generate-text/stream-text.test.ts | 86 +++++++++++++++++++ packages/ai/core/generate-text/stream-text.ts | 14 ++- packages/ai/util/consume-stream.ts | 20 ++++- 8 files changed, 165 insertions(+), 22 deletions(-) create mode 100644 .changeset/thin-numbers-shave.md diff --git a/.changeset/thin-numbers-shave.md b/.changeset/thin-numbers-shave.md new file mode 100644 index 000000000000..e57732de3101 --- /dev/null +++ b/.changeset/thin-numbers-shave.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (core): improve error handling in streamText's consumeStream method diff --git a/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx b/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx index 450f77c9e619..8b1215e5aba1 100644 --- a/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx @@ -2274,13 +2274,26 @@ To see `streamText` in action, check out [these examples](#examples). }, { name: 'consumeStream', - type: '() => Promise', + type: '(options?: ConsumeStreamOptions) => Promise', description: - 'Consumes the stream without processing the parts. This is useful to force the stream to finish.', + 'Consumes the stream without processing the parts. This is useful to force the stream to finish. 
If an error occurs, it is passed to the optional `onError` callback.', + properties: [ + { + type: 'ConsumeStreamOptions', + parameters: [ + { + name: 'onError', + type: '(error: unknown) => void', + isOptional: true, + description: 'The error callback.', + }, + ], + }, + ], }, { name: 'pipeDataStreamToResponse', - type: '(response: ServerResponse, options: PipeDataStreamToResponseOptions } => void', + type: '(response: ServerResponse, options: PipeDataStreamToResponseOptions) => void', description: 'Writes stream data output to a Node.js response-like object. It sets a `Content-Type` header to `text/plain; charset=utf-8` and writes each stream data part as a separate chunk.', properties: [ diff --git a/examples/next-openai/app/api/use-chat-resilient-persistence/route.ts b/examples/next-openai/app/api/use-chat-resilient-persistence/route.ts index 92a72e6fcaad..7665e5adbcce 100644 --- a/examples/next-openai/app/api/use-chat-resilient-persistence/route.ts +++ b/examples/next-openai/app/api/use-chat-resilient-persistence/route.ts @@ -1,6 +1,6 @@ import { openai } from '@ai-sdk/openai'; -import { appendResponseMessages, createIdGenerator, streamText } from 'ai'; import { saveChat } from '@util/chat-store'; +import { appendResponseMessages, createIdGenerator, streamText } from 'ai'; export async function POST(req: Request) { const { messages, id } = await req.json(); @@ -26,7 +26,12 @@ export async function POST(req: Request) { // consume the stream to ensure it runs to completion and triggers onFinish // even when the client response is aborted (e.g. when the browser tab is closed). - result.consumeStream(); // no await + // no await + result.consumeStream({ + onError: error => { + console.log('Error during background stream consumption: ', error); // optional error callback + }, + }); return result.toDataStreamResponse(); } diff --git a/examples/next-openai/app/use-chat-resilient-persistence/[id]/chat.tsx b/examples/next-openai/app/use-chat-resilient-persistence/[id]/chat.tsx index 90428815a508..c589443d864f 100644 --- a/examples/next-openai/app/use-chat-resilient-persistence/[id]/chat.tsx +++ b/examples/next-openai/app/use-chat-resilient-persistence/[id]/chat.tsx @@ -1,19 +1,20 @@ 'use client'; -import { createIdGenerator } from 'ai'; import { Message, useChat } from '@ai-sdk/react'; +import { createIdGenerator } from 'ai'; export default function Chat({ id, initialMessages, }: { id?: string | undefined; initialMessages?: Message[] } = {}) { - const { input, status, handleInputChange, handleSubmit, messages } = useChat({ - api: '/api/use-chat-resilient-persistence', - id, // use the provided chatId - initialMessages, // initial messages if provided - sendExtraMessageFields: true, // send id and createdAt for each message - generateId: createIdGenerator({ prefix: 'msgc', size: 16 }), // id format for client-side messages - }); + const { input, status, handleInputChange, handleSubmit, messages, stop } = + useChat({ + api: '/api/use-chat-resilient-persistence', + id, // use the provided chatId + initialMessages, // initial messages if provided + sendExtraMessageFields: true, // send id and createdAt for each message + generateId: createIdGenerator({ prefix: 'msgc', size: 16 }), // id format for client-side messages + }); return (
@@ -32,6 +33,15 @@ export default function Chat({ onChange={handleInputChange} disabled={status !== 'ready'} /> + {status === 'streaming' && ( + + )}
); diff --git a/packages/ai/core/generate-text/stream-text-result.ts b/packages/ai/core/generate-text/stream-text-result.ts index 3dde6d3b765e..181518dac5d2 100644 --- a/packages/ai/core/generate-text/stream-text-result.ts +++ b/packages/ai/core/generate-text/stream-text-result.ts @@ -61,6 +61,10 @@ export type DataStreamOptions = { experimental_sendStart?: boolean; }; +export type ConsumeStreamOptions = { + onError?: (error: unknown) => void; +}; + /** A result object for accessing different stream types and additional information. */ @@ -203,8 +207,10 @@ Consumes the stream without processing the parts. This is useful to force the stream to finish. It effectively removes the backpressure and allows the stream to finish, triggering the `onFinish` callback and the promise resolution. + +If an error occurs, it is passed to the optional `onError` callback. */ - consumeStream(): Promise; + consumeStream(options?: ConsumeStreamOptions): Promise; /** Converts the result to a data stream. diff --git a/packages/ai/core/generate-text/stream-text.test.ts b/packages/ai/core/generate-text/stream-text.test.ts index 00de777bf73c..52f621010968 100644 --- a/packages/ai/core/generate-text/stream-text.test.ts +++ b/packages/ai/core/generate-text/stream-text.test.ts @@ -1542,6 +1542,92 @@ describe('streamText', () => { }); }); + describe('result.consumeStream', () => { + it('should ignore AbortError during stream consumption', async () => { + const result = streamText({ + model: createTestModel({ + stream: new ReadableStream({ + start(controller) { + controller.enqueue({ type: 'text-delta', textDelta: 'Hello' }); + queueMicrotask(() => { + controller.error( + Object.assign(new Error('Stream aborted'), { + name: 'AbortError', + }), + ); + }); + }, + }), + }), + prompt: 'test-input', + }); + + await expect(result.consumeStream()).resolves.not.toThrow(); + }); + + it('should ignore ResponseAborted error during stream consumption', async () => { + const result = streamText({ + model: createTestModel({ + stream: new ReadableStream({ + start(controller) { + controller.enqueue({ type: 'text-delta', textDelta: 'Hello' }); + queueMicrotask(() => { + controller.error( + Object.assign(new Error('Response aborted'), { + name: 'ResponseAborted', + }), + ); + }); + }, + }), + }), + prompt: 'test-input', + }); + + await expect(result.consumeStream()).resolves.not.toThrow(); + }); + + it('should ignore any errors during stream consumption', async () => { + const result = streamText({ + model: createTestModel({ + stream: new ReadableStream({ + start(controller) { + controller.enqueue({ type: 'text-delta', textDelta: 'Hello' }); + queueMicrotask(() => { + controller.error(Object.assign(new Error('Some error'))); + }); + }, + }), + }), + prompt: 'test-input', + }); + + await expect(result.consumeStream()).resolves.not.toThrow(); + }); + + it('should call the onError callback with the error', async () => { + const onErrorCallback = vi.fn(); + const result = streamText({ + model: createTestModel({ + stream: new ReadableStream({ + start(controller) { + controller.enqueue({ type: 'text-delta', textDelta: 'Hello' }); + queueMicrotask(() => { + controller.error(Object.assign(new Error('Some error'))); + }); + }, + }), + }), + prompt: 'test-input', + }); + + await expect( + result.consumeStream({ onError: onErrorCallback }), + ).resolves.not.toThrow(); + expect(onErrorCallback).toHaveBeenCalledWith(new Error('Some error')); + }); + }); + describe('multiple stream consumption', () => { it('should support text stream, ai stream, full 
stream on single result object', async () => { const result = streamText({ diff --git a/packages/ai/core/generate-text/stream-text.ts b/packages/ai/core/generate-text/stream-text.ts index 673d1b69f1b3..d918e7a1bc88 100644 --- a/packages/ai/core/generate-text/stream-text.ts +++ b/packages/ai/core/generate-text/stream-text.ts @@ -8,6 +8,7 @@ import { InvalidStreamPartError } from '../../errors/invalid-stream-part-error'; import { NoOutputSpecifiedError } from '../../errors/no-output-specified-error'; import { StreamData } from '../../streams/stream-data'; import { asArray } from '../../util/as-array'; +import { consumeStream } from '../../util/consume-stream'; import { DelayedPromise } from '../../util/delayed-promise'; import { DataStreamWriter } from '../data-stream/data-stream-writer'; import { CallSettings } from '../prompt/call-settings'; @@ -53,6 +54,7 @@ import { } from './run-tools-transformation'; import { ResponseMessage, StepResult } from './step-result'; import { + ConsumeStreamOptions, DataStreamOptions, StreamTextResult, TextStreamPart, @@ -1591,10 +1593,14 @@ However, the LLM results are expected to be small enough to not cause issues. ); } - async consumeStream(): Promise { - const stream = this.fullStream; - for await (const part of stream) { - // no op + async consumeStream(options?: ConsumeStreamOptions): Promise { + try { + await consumeStream({ + stream: this.fullStream, + onError: options?.onError, + }); + } catch (error) { + options?.onError?.(error); } } diff --git a/packages/ai/util/consume-stream.ts b/packages/ai/util/consume-stream.ts index b38ab2fbb16d..c540696d72e3 100644 --- a/packages/ai/util/consume-stream.ts +++ b/packages/ai/util/consume-stream.ts @@ -8,10 +8,22 @@ * @param {ReadableStream} stream - The ReadableStream to be consumed. * @returns {Promise} A promise that resolves when the stream is fully consumed. 
*/ -export async function consumeStream(stream: ReadableStream): Promise { +export async function consumeStream({ + stream, + onError, +}: { + stream: ReadableStream; + onError?: (error: unknown) => void; +}): Promise { const reader = stream.getReader(); - while (true) { - const { done } = await reader.read(); - if (done) break; + try { + while (true) { + const { done } = await reader.read(); + if (done) break; + } + } catch (error) { + onError?.(error); + } finally { + reader.releaseLock(); } } From 67b9604c738d2bd959898eb4082aad05fe7932e3 Mon Sep 17 00:00:00 2001 From: Walter Korman Date: Sat, 5 Apr 2025 13:59:12 -0700 Subject: [PATCH 0033/1307] feat (provider/groq): add llama 4 model (#5565) --- .changeset/tall-rice-flash.md | 5 ++ .../02-providers-and-models.mdx | 75 ++++++++++--------- .../providers/01-ai-sdk-providers/09-groq.mdx | 29 +++---- .../providers/01-ai-sdk-providers/index.mdx | 1 + packages/groq/src/groq-chat-settings.ts | 1 + 5 files changed, 60 insertions(+), 51 deletions(-) create mode 100644 .changeset/tall-rice-flash.md diff --git a/.changeset/tall-rice-flash.md b/.changeset/tall-rice-flash.md new file mode 100644 index 000000000000..20c49b454ede --- /dev/null +++ b/.changeset/tall-rice-flash.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/groq': patch +--- + +feat (provider/groq): add llama 4 model diff --git a/content/docs/02-foundations/02-providers-and-models.mdx b/content/docs/02-foundations/02-providers-and-models.mdx index a1902d339608..fcafe7cf6a2f 100644 --- a/content/docs/02-foundations/02-providers-and-models.mdx +++ b/content/docs/02-foundations/02-providers-and-models.mdx @@ -79,43 +79,44 @@ Additionally, any self-hosted provider that supports the OpenAI specification ca The AI providers support different language models with various capabilities. 
Here are the capabilities of popular models: -| Provider | Model | Image Input | Object Generation | Tool Usage | Tool Streaming | -| ------------------------------------------------------------------------ | ---------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-1212` | | | | | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-vision-1212` | | | | | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-beta` | | | | | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-vision-beta` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4-turbo` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o3-mini` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-mini` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-preview` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-7-sonnet-20250219` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20241022` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20240620` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-haiku-20241022` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-large-latest` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-large-latest` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-small-latest` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-12b-2409` | | | | | -| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-2.0-flash-exp` | | | | | -| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-flash` | | | | | -| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-pro` | | | | | -| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-2.0-flash-exp` | | | | | -| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-flash` | | | | | -| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-pro` | | | | | -| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-chat` | | | | | -| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-reasoner` | | | | | -| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-8b` | | | | | -| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-70b` | | | | | -| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.3-70b` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.3-70b-versatile` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.1-8b-instant` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `mixtral-8x7b-32768` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `gemma2-9b-it` | | | | | +| Provider | Model | Image Input | Object Generation | Tool Usage | Tool Streaming | +| ------------------------------------------------------------------------ | ------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| [xAI 
Grok](/providers/ai-sdk-providers/xai) | `grok-2-1212` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-vision-1212` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-beta` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-vision-beta` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4-turbo` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o3-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-preview` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-7-sonnet-20250219` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20241022` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20240620` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-haiku-20241022` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-large-latest` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-large-latest` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-small-latest` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-12b-2409` | | | | | +| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-2.0-flash-exp` | | | | | +| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-flash` | | | | | +| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-pro` | | | | | +| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-2.0-flash-exp` | | | | | +| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-flash` | | | | | +| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-pro` | | | | | +| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-chat` | | | | | +| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-reasoner` | | | | | +| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-8b` | | | | | +| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-70b` | | | | | +| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.3-70b` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `meta-llama/llama-4-scout-17b-16e-instruct` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.3-70b-versatile` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.1-8b-instant` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `mixtral-8x7b-32768` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `gemma2-9b-it` | | | | | This table is not exhaustive. 
Additional models can be found in the provider diff --git a/content/providers/01-ai-sdk-providers/09-groq.mdx b/content/providers/01-ai-sdk-providers/09-groq.mdx index 96049593f133..6c8cc1c0e75a 100644 --- a/content/providers/01-ai-sdk-providers/09-groq.mdx +++ b/content/providers/01-ai-sdk-providers/09-groq.mdx @@ -112,20 +112,21 @@ const { text } = await generateText({ ## Model Capabilities -| Model | Image Input | Object Generation | Tool Usage | Tool Streaming | -| ------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `gemma2-9b-it` | | | | | -| `llama-3.3-70b-versatile` | | | | | -| `llama-3.1-8b-instant` | | | | | -| `llama-guard-3-8b` | | | | | -| `llama3-70b-8192` | | | | | -| `llama3-8b-8192` | | | | | -| `mixtral-8x7b-32768` | | | | | -| `qwen-qwq-32b` | | | | | -| `mistral-saba-24b` | | | | | -| `qwen-2.5-32b` | | | | | -| `deepseek-r1-distill-qwen-32b` | | | | | -| `deepseek-r1-distill-llama-70b` | | | | | +| Model | Image Input | Object Generation | Tool Usage | Tool Streaming | +| ------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| `meta-llama/llama-4-scout-17b-16e-instruct` | | | | | +| `gemma2-9b-it` | | | | | +| `llama-3.3-70b-versatile` | | | | | +| `llama-3.1-8b-instant` | | | | | +| `llama-guard-3-8b` | | | | | +| `llama3-70b-8192` | | | | | +| `llama3-8b-8192` | | | | | +| `mixtral-8x7b-32768` | | | | | +| `qwen-qwq-32b` | | | | | +| `mistral-saba-24b` | | | | | +| `qwen-2.5-32b` | | | | | +| `deepseek-r1-distill-qwen-32b` | | | | | +| `deepseek-r1-distill-llama-70b` | | | | | The table above lists popular models. Please see the [Groq diff --git a/content/providers/01-ai-sdk-providers/index.mdx b/content/providers/01-ai-sdk-providers/index.mdx index 0aa13c6b99d7..503a3119e3de 100644 --- a/content/providers/01-ai-sdk-providers/index.mdx +++ b/content/providers/01-ai-sdk-providers/index.mdx @@ -34,6 +34,7 @@ Not all providers support all AI SDK features. 
Here's a quick comparison of the | [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20241022` | | | | | | [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20240620` | | | | | | [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-haiku-20241022` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `meta-llama/llama-4-scout-17b-16e-instruct` | | | | | | [Groq](/providers/ai-sdk-providers/groq) | `deepseek-r1-distill-llama-70b` | | | | | | [Groq](/providers/ai-sdk-providers/groq) | `llama-3.3-70b-versatile` | | | | | | [Groq](/providers/ai-sdk-providers/groq) | `llama-3.1-8b-instant` | | | | | diff --git a/packages/groq/src/groq-chat-settings.ts b/packages/groq/src/groq-chat-settings.ts index ed70c1ec7527..2d8104d52cdc 100644 --- a/packages/groq/src/groq-chat-settings.ts +++ b/packages/groq/src/groq-chat-settings.ts @@ -9,6 +9,7 @@ export type GroqChatModelId = | 'llama3-8b-8192' | 'mixtral-8x7b-32768' // preview models (selection) + | 'meta-llama/llama-4-scout-17b-16e-instruct' | 'qwen-qwq-32b' | 'mistral-saba-24b' | 'qwen-2.5-32b' From 5cf30ea5ec6179f734e68d5a6b37683b64db99d8 Mon Sep 17 00:00:00 2001 From: Kevin Ang <51168758+kvnang@users.noreply.github.com> Date: Sun, 6 Apr 2025 04:08:48 -0400 Subject: [PATCH 0034/1307] fix (provider/google): allow "OFF" for Google HarmBlockThreshold (#5550) --- .changeset/huge-cloths-burn.md | 5 +++++ packages/google/src/google-generative-ai-settings.ts | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 .changeset/huge-cloths-burn.md diff --git a/.changeset/huge-cloths-burn.md b/.changeset/huge-cloths-burn.md new file mode 100644 index 000000000000..bdb13e564d46 --- /dev/null +++ b/.changeset/huge-cloths-burn.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google': patch +--- + +fix (provider/google): allow "OFF" for Google HarmBlockThreshold diff --git a/packages/google/src/google-generative-ai-settings.ts b/packages/google/src/google-generative-ai-settings.ts index 02f57d68e797..a2c8f32f6982 100644 --- a/packages/google/src/google-generative-ai-settings.ts +++ b/packages/google/src/google-generative-ai-settings.ts @@ -72,7 +72,8 @@ Optional. A list of unique safety settings for blocking unsafe content. | 'BLOCK_LOW_AND_ABOVE' | 'BLOCK_MEDIUM_AND_ABOVE' | 'BLOCK_ONLY_HIGH' - | 'BLOCK_NONE'; + | 'BLOCK_NONE' + | 'OFF'; }>; /** * Optional. Enables timestamp understanding for audio-only files. 
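The `'OFF'` value added above lets callers turn a category's safety filter off entirely via `safetySettings`. A minimal sketch of how it could be passed (model id and harm category are illustrative, not part of the patch):

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const { text } = await generateText({
  model: google('gemini-1.5-pro', {
    safetySettings: [
      // 'OFF' disables the safety filter for this category entirely.
      { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'OFF' },
    ],
  }),
  prompt: 'Explain how car airbags deploy.',
});

console.log(text);
```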
From c467b38e2de3a7aaf49ad9abb5c6e319ced24fca Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Sun, 6 Apr 2025 10:30:47 +0200 Subject: [PATCH 0035/1307] feat (provider/azure): add OpenAI responses API support (#5461) (#5571) Co-authored-by: ANKIT VARSHNEY <132201033+AVtheking@users.noreply.github.com> --- .changeset/angry-poems-learn.md | 5 + .../01-ai-sdk-providers/03-azure.mdx | 140 ++++++++++++++++++ .../src/generate-text/azure-responses.ts | 22 +++ .../azure/src/azure-openai-provider.test.ts | 120 +++++++++++++-- packages/azure/src/azure-openai-provider.ts | 30 +++- 5 files changed, 304 insertions(+), 13 deletions(-) create mode 100644 .changeset/angry-poems-learn.md create mode 100644 examples/ai-core/src/generate-text/azure-responses.ts diff --git a/.changeset/angry-poems-learn.md b/.changeset/angry-poems-learn.md new file mode 100644 index 000000000000..22456f013a8d --- /dev/null +++ b/.changeset/angry-poems-learn.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/azure': patch +--- + +feat (provider/azure): add OpenAI responses API support diff --git a/content/providers/01-ai-sdk-providers/03-azure.mdx b/content/providers/01-ai-sdk-providers/03-azure.mdx index cbd23429c853..46e03042a833 100644 --- a/content/providers/01-ai-sdk-providers/03-azure.mdx +++ b/content/providers/01-ai-sdk-providers/03-azure.mdx @@ -223,6 +223,146 @@ The following optional settings are available for OpenAI chat models: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more. +### Responses Models + +You can use the Azure OpenAI responses API with the `azure.responses(deploymentName)` factory method. + +```ts +const model = azure.responses('your-deployment-name'); +``` + +Further configuration can be done using OpenAI provider options. +You can validate the provider options using the `OpenAIResponsesProviderOptions` type. + +```ts +import { azure, OpenAIResponsesProviderOptions } from '@ai-sdk/azure'; +import { generateText } from 'ai'; + +const result = await generateText({ + model: azure.responses('your-deployment-name'), + providerOptions: { + openai: { + parallelToolCalls: false, + store: false, + user: 'user_123', + // ... + } satisfies OpenAIResponsesProviderOptions, + }, + // ... +}); +``` + +The following provider options are available: + +- **parallelToolCalls** _boolean_ + Whether to use parallel tool calls. Defaults to `true`. + +- **store** _boolean_ + Whether to store the generation. Defaults to `true`. + +- **metadata** _Record<string, string>_ + Additional metadata to store with the generation. + +- **previousResponseId** _string_ + The ID of the previous response. You can use it to continue a conversation. Defaults to `undefined`. + +- **instructions** _string_ + Instructions for the model. + They can be used to change the system or developer message when continuing a conversation using the `previousResponseId` option. + Defaults to `undefined`. + +- **user** _string_ + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Defaults to `undefined`. + +- **reasoningEffort** _'low' | 'medium' | 'high'_ + Reasoning effort for reasoning models. Defaults to `medium`. If you use `providerOptions` to set the `reasoningEffort` option, this model setting will be ignored. + +- **strictSchemas** _boolean_ + Whether to use strict JSON schemas in tools and when generating JSON outputs. Defaults to `true`. 
+ +The Azure OpenAI responses provider also returns provider-specific metadata: + +```ts +const { providerMetadata } = await generateText({ + model: azure.responses('your-deployment-name'), +}); + +const openaiMetadata = providerMetadata?.openai; +``` + +The following OpenAI-specific metadata is returned: + +- **responseId** _string_ + The ID of the response. Can be used to continue a conversation. + +- **cachedPromptTokens** _number_ + The number of prompt tokens that were a cache hit. + +- **reasoningTokens** _number_ + The number of reasoning tokens that the model generated. + +#### Web Search + +The Azure OpenAI responses provider supports web search through the `azure.tools.webSearchPreview` tool. + +You can force the use of the web search tool by setting the `toolChoice` parameter to `{ type: 'tool', toolName: 'web_search_preview' }`. + +```ts +const result = await generateText({ + model: azure.responses('your-deployment-name'), + prompt: 'What happened in San Francisco last week?', + tools: { + web_search_preview: azure.tools.webSearchPreview({ + // optional configuration: + searchContextSize: 'high', + userLocation: { + type: 'approximate', + city: 'San Francisco', + region: 'California', + }, + }), + }, + // Force web search tool: + toolChoice: { type: 'tool', toolName: 'web_search_preview' }, +}); + +// URL sources +const sources = result.sources; +``` + +#### PDF support + +The Azure OpenAI Responses API supports reading PDF files. +You can pass PDF files as part of the message content using the `file` type: + +```ts +const result = await generateText({ + model: azure.responses('your-deployment-name'), + messages: [ + { + role: 'user', + content: [ + { + type: 'text', + text: 'What is an embedding model?', + }, + { + type: 'file', + data: fs.readFileSync('./data/ai.pdf'), + mimeType: 'application/pdf', + filename: 'ai.pdf', // optional + }, + ], + }, + ], +}); +``` + +The model will have access to the contents of the PDF file and +respond to questions about it. +The PDF file should be passed using the `data` field, +and the `mimeType` should be set to `'application/pdf'`. + ### Completion Models You can create models that call the completions API using the `.completion()` factory method. 
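To connect the `previousResponseId` provider option with the `responseId` metadata described above, a minimal two-turn sketch, assuming a deployed responses model (the deployment name is a placeholder, and the cast reflects that provider metadata values are loosely typed):

```ts
import { azure } from '@ai-sdk/azure';
import { generateText } from 'ai';

// First turn: capture the response id from the provider metadata.
const first = await generateText({
  model: azure.responses('your-deployment-name'),
  prompt: 'Pick a random city.',
});

const responseId = first.providerMetadata?.openai?.responseId as string;

// Second turn: continue the conversation server-side via
// previousResponseId instead of resending the message history.
const second = await generateText({
  model: azure.responses('your-deployment-name'),
  prompt: 'What country is that city in?',
  providerOptions: {
    openai: { previousResponseId: responseId },
  },
});

console.log(second.text);
```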
diff --git a/examples/ai-core/src/generate-text/azure-responses.ts b/examples/ai-core/src/generate-text/azure-responses.ts new file mode 100644 index 000000000000..9f7a587b99ae --- /dev/null +++ b/examples/ai-core/src/generate-text/azure-responses.ts @@ -0,0 +1,22 @@ +import { createAzure } from '@ai-sdk/azure'; +import { generateText } from 'ai'; +import 'dotenv/config'; + +// Initialize Azure OpenAI provider +const azure = createAzure({ + apiKey: process.env.AZURE_API_KEY, + baseURL: process.env.AZURE_BASE_URL, +}); + +async function main() { + // Basic text generation + const basicResult = await generateText({ + model: azure.responses('gpt-4o-mini'), + prompt: 'What is quantum computing?', + }); + + console.log('\n=== Basic Text Generation ==='); + console.log(basicResult.text); +} + +main().catch(console.error); diff --git a/packages/azure/src/azure-openai-provider.test.ts b/packages/azure/src/azure-openai-provider.test.ts index 22224020956e..3c65f7487fa7 100644 --- a/packages/azure/src/azure-openai-provider.test.ts +++ b/packages/azure/src/azure-openai-provider.test.ts @@ -29,6 +29,7 @@ const server = createTestServer({ {}, 'https://test-resource.openai.azure.com/openai/deployments/dalle-deployment/images/generations': {}, + 'https://test-resource.openai.azure.com/openai/responses': {}, }); describe('chat', () => { @@ -74,7 +75,7 @@ describe('chat', () => { expect( server.calls[0].requestUrlSearchParams.get('api-version'), - ).toStrictEqual('2024-10-01-preview'); + ).toStrictEqual('2025-03-01-preview'); }); it('should set the correct modified api version', async () => { @@ -132,9 +133,8 @@ describe('chat', () => { mode: { type: 'regular' }, prompt: TEST_PROMPT, }); - expect(server.calls[0].requestUrl).toStrictEqual( - 'https://test-resource.openai.azure.com/openai/deployments/test-deployment/chat/completions?api-version=2024-10-01-preview', + 'https://test-resource.openai.azure.com/openai/deployments/test-deployment/chat/completions?api-version=2025-03-01-preview', ); }); }); @@ -195,10 +195,9 @@ describe('completion', () => { mode: { type: 'regular' }, prompt: TEST_PROMPT, }); - expect( server.calls[0].requestUrlSearchParams.get('api-version'), - ).toStrictEqual('2024-10-01-preview'); + ).toStrictEqual('2025-03-01-preview'); }); it('should pass headers', async () => { @@ -269,10 +268,9 @@ describe('embedding', () => { await model.doEmbed({ values: testValues, }); - expect( server.calls[0].requestUrlSearchParams.get('api-version'), - ).toStrictEqual('2024-10-01-preview'); + ).toStrictEqual('2025-03-01-preview'); }); it('should pass headers', async () => { @@ -342,7 +340,7 @@ describe('image', () => { expect( server.calls[0].requestUrlSearchParams.get('api-version'), - ).toStrictEqual('2024-10-01-preview'); + ).toStrictEqual('2025-03-01-preview'); }); it('should set the correct modified api version', async () => { @@ -413,7 +411,7 @@ describe('image', () => { }); expect(server.calls[0].requestUrl).toStrictEqual( - 'https://test-resource.openai.azure.com/openai/deployments/dalle-deployment/images/generations?api-version=2024-10-01-preview', + 'https://test-resource.openai.azure.com/openai/deployments/dalle-deployment/images/generations?api-version=2025-03-01-preview', ); }); @@ -465,3 +463,107 @@ describe('image', () => { }); }); }); + +describe('responses', () => { + describe('doGenerate', () => { + function prepareJsonResponse({ + content = '', + usage = { + input_tokens: 4, + output_tokens: 30, + total_tokens: 34, + }, + } = {}) { + server.urls[ + 
'https://test-resource.openai.azure.com/openai/responses' + ].response = { + type: 'json-value', + body: { + id: 'resp_67c97c0203188190a025beb4a75242bc', + object: 'response', + created_at: 1741257730, + status: 'completed', + model: 'test-deployment', + output: [ + { + id: 'msg_67c97c02656c81908e080dfdf4a03cd1', + type: 'message', + status: 'completed', + role: 'assistant', + content: [ + { + type: 'output_text', + text: content, + annotations: [], + }, + ], + }, + ], + usage, + incomplete_details: null, + }, + }; + } + + it('should set the correct api version', async () => { + prepareJsonResponse(); + + await provider.responses('test-deployment').doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + expect( + server.calls[0].requestUrlSearchParams.get('api-version'), + ).toStrictEqual('2025-03-01-preview'); + }); + + it('should pass headers', async () => { + prepareJsonResponse(); + + const provider = createAzure({ + resourceName: 'test-resource', + apiKey: 'test-api-key', + headers: { + 'Custom-Provider-Header': 'provider-header-value', + }, + }); + + await provider.responses('test-deployment').doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + headers: { + 'Custom-Request-Header': 'request-header-value', + }, + }); + + expect(server.calls[0].requestHeaders).toStrictEqual({ + 'api-key': 'test-api-key', + 'content-type': 'application/json', + 'custom-provider-header': 'provider-header-value', + 'custom-request-header': 'request-header-value', + }); + }); + + it('should use the baseURL correctly', async () => { + prepareJsonResponse(); + + const provider = createAzure({ + baseURL: 'https://test-resource.openai.azure.com/openai', + apiKey: 'test-api-key', + }); + + await provider.responses('test-deployment').doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + expect(server.calls[0].requestUrl).toStrictEqual( + 'https://test-resource.openai.azure.com/openai/responses?api-version=2025-03-01-preview', + ); + }); + }); +}); diff --git a/packages/azure/src/azure-openai-provider.ts b/packages/azure/src/azure-openai-provider.ts index d5b082e910ac..f171115a046e 100644 --- a/packages/azure/src/azure-openai-provider.ts +++ b/packages/azure/src/azure-openai-provider.ts @@ -7,6 +7,7 @@ import { OpenAIEmbeddingSettings, OpenAIImageModel, OpenAIImageSettings, + OpenAIResponsesLanguageModel, } from '@ai-sdk/openai/internal'; import { EmbeddingModelV1, @@ -32,6 +33,11 @@ Creates an Azure OpenAI chat model for text generation. */ chat(deploymentId: string, settings?: OpenAIChatSettings): LanguageModelV2; + /** +Creates an Azure OpenAI responses API model for text generation. + */ + responses(deploymentId: string): LanguageModelV2; + /** Creates an Azure OpenAI completion model for text generation. */ @@ -140,11 +146,19 @@ export function createAzure( description: 'Azure OpenAI resource name', }); - const apiVersion = options.apiVersion ?? '2024-10-01-preview'; - const url = ({ path, modelId }: { path: string; modelId: string }) => - options.baseURL + const apiVersion = options.apiVersion ?? '2025-03-01-preview'; + const url = ({ path, modelId }: { path: string; modelId: string }) => { + if (path === '/responses') { + return options.baseURL + ? `${options.baseURL}${path}?api-version=${apiVersion}` + : `https://${getResourceName()}.openai.azure.com/openai/responses?api-version=${apiVersion}`; + } + + // Default URL format for other endpoints + return options.baseURL ? 
`${options.baseURL}/${modelId}${path}?api-version=${apiVersion}` : `https://${getResourceName()}.openai.azure.com/openai/deployments/${modelId}${path}?api-version=${apiVersion}`; + }; const createChatModel = ( deploymentName: string, @@ -181,6 +195,14 @@ export function createAzure( fetch: options.fetch, }); + const createResponsesModel = (modelId: string) => + new OpenAIResponsesLanguageModel(modelId, { + provider: 'azure-openai.responses', + url, + headers: getHeaders, + fetch: options.fetch, + }); + const createImageModel = ( modelId: string, settings: OpenAIImageSettings = {}, @@ -213,7 +235,7 @@ export function createAzure( provider.imageModel = createImageModel; provider.textEmbedding = createEmbeddingModel; provider.textEmbeddingModel = createEmbeddingModel; - + provider.responses = createResponsesModel; return provider; } From 339f3d15fc0c6021fd3527850ddfa48ec8d21d6d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 6 Apr 2025 10:33:15 +0200 Subject: [PATCH 0036/1307] Version Packages (canary) (#5569) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .changeset/pre.json | 4 ++ examples/ai-core/package.json | 12 ++-- examples/express/package.json | 2 +- examples/fastify/package.json | 2 +- examples/hono/package.json | 2 +- examples/mcp/package.json | 2 +- examples/nest/package.json | 2 +- examples/next-fastapi/package.json | 2 +- examples/next-google-vertex/package.json | 4 +- examples/next-langchain/package.json | 2 +- .../package.json | 2 +- examples/next-openai-pages/package.json | 2 +- .../next-openai-telemetry-sentry/package.json | 2 +- examples/next-openai-telemetry/package.json | 2 +- .../package.json | 2 +- examples/next-openai/package.json | 6 +- examples/node-http-server/package.json | 2 +- examples/nuxt-openai/package.json | 2 +- examples/sveltekit-openai/package.json | 2 +- packages/ai/CHANGELOG.md | 6 ++ packages/ai/package.json | 2 +- .../ai/tests/e2e/next-server/CHANGELOG.md | 7 +++ packages/azure/CHANGELOG.md | 6 ++ packages/azure/package.json | 2 +- packages/google-vertex/CHANGELOG.md | 7 +++ packages/google-vertex/package.json | 4 +- packages/google/CHANGELOG.md | 6 ++ packages/google/package.json | 2 +- packages/groq/CHANGELOG.md | 6 ++ packages/groq/package.json | 2 +- packages/valibot/CHANGELOG.md | 7 +++ packages/valibot/package.json | 4 +- pnpm-lock.yaml | 56 +++++++++---------- 33 files changed, 111 insertions(+), 62 deletions(-) diff --git a/.changeset/pre.json b/.changeset/pre.json index b6d9c9c49471..ef3762ec8d10 100644 --- a/.changeset/pre.json +++ b/.changeset/pre.json @@ -56,16 +56,20 @@ "generate-llms-txt": "0.0.0" }, "changesets": [ + "angry-poems-learn", "beige-ligers-kneel", "clean-numbers-cover", "cuddly-icons-kick", "eleven-lobsters-rescue", "flat-plums-bake", "green-deers-scream", + "huge-cloths-burn", "pink-deers-switch", "seven-pens-itch", "silent-nails-taste", "smooth-mirrors-kneel", + "tall-rice-flash", + "thin-numbers-shave", "twelve-kids-travel" ] } diff --git a/examples/ai-core/package.json b/examples/ai-core/package.json index 912951386dc8..bb6cef5fca72 100644 --- a/examples/ai-core/package.json +++ b/examples/ai-core/package.json @@ -5,16 +5,16 @@ "dependencies": { "@ai-sdk/amazon-bedrock": "3.0.0-canary.1", "@ai-sdk/anthropic": "2.0.0-canary.1", - "@ai-sdk/azure": "2.0.0-canary.1", + "@ai-sdk/azure": "2.0.0-canary.2", "@ai-sdk/cerebras": "1.0.0-canary.1", "@ai-sdk/cohere": "2.0.0-canary.1", "@ai-sdk/deepinfra": 
"1.0.0-canary.1", "@ai-sdk/deepseek": "1.0.0-canary.1", "@ai-sdk/fal": "1.0.0-canary.1", "@ai-sdk/fireworks": "1.0.0-canary.1", - "@ai-sdk/google": "2.0.0-canary.1", - "@ai-sdk/google-vertex": "3.0.0-canary.1", - "@ai-sdk/groq": "2.0.0-canary.1", + "@ai-sdk/google": "2.0.0-canary.2", + "@ai-sdk/google-vertex": "3.0.0-canary.2", + "@ai-sdk/groq": "2.0.0-canary.2", "@ai-sdk/luma": "1.0.0-canary.1", "@ai-sdk/mistral": "2.0.0-canary.1", "@ai-sdk/openai": "2.0.0-canary.1", @@ -24,12 +24,12 @@ "@ai-sdk/replicate": "1.0.0-canary.1", "@ai-sdk/togetherai": "1.0.0-canary.1", "@ai-sdk/xai": "2.0.0-canary.1", - "@ai-sdk/valibot": "1.0.0-canary.1", + "@ai-sdk/valibot": "1.0.0-canary.2", "@google/generative-ai": "0.21.0", "@opentelemetry/auto-instrumentations-node": "0.54.0", "@opentelemetry/sdk-node": "0.54.2", "@opentelemetry/sdk-trace-node": "1.28.0", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "dotenv": "16.4.5", "image-type": "^5.2.0", "mathjs": "14.0.0", diff --git a/examples/express/package.json b/examples/express/package.json index 5e9a2b130849..b1ed86bb9748 100644 --- a/examples/express/package.json +++ b/examples/express/package.json @@ -8,7 +8,7 @@ }, "dependencies": { "@ai-sdk/openai": "2.0.0-canary.1", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "dotenv": "16.4.5", "express": "5.0.1" }, diff --git a/examples/fastify/package.json b/examples/fastify/package.json index b6b36c68ff04..70756c5ffd93 100644 --- a/examples/fastify/package.json +++ b/examples/fastify/package.json @@ -4,7 +4,7 @@ "private": true, "dependencies": { "@ai-sdk/openai": "2.0.0-canary.1", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "dotenv": "16.4.5", "fastify": "5.1.0" }, diff --git a/examples/hono/package.json b/examples/hono/package.json index 3753abe22e2e..87778031b83f 100644 --- a/examples/hono/package.json +++ b/examples/hono/package.json @@ -5,7 +5,7 @@ "dependencies": { "@ai-sdk/openai": "2.0.0-canary.1", "@hono/node-server": "1.13.7", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "dotenv": "16.4.5", "hono": "4.6.9" }, diff --git a/examples/mcp/package.json b/examples/mcp/package.json index 1783ad4d963d..899eedbfe4e0 100644 --- a/examples/mcp/package.json +++ b/examples/mcp/package.json @@ -14,7 +14,7 @@ "dependencies": { "@ai-sdk/openai": "2.0.0-canary.1", "@modelcontextprotocol/sdk": "^1.7.0", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "dotenv": "16.4.5", "express": "5.0.1", "zod": "3.23.8" diff --git a/examples/nest/package.json b/examples/nest/package.json index b29dd73b9310..f044317f94f9 100644 --- a/examples/nest/package.json +++ b/examples/nest/package.json @@ -19,7 +19,7 @@ "@nestjs/common": "^10.4.15", "@nestjs/core": "^10.0.0", "@nestjs/platform-express": "^10.4.9", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "reflect-metadata": "^0.2.0", "rxjs": "^7.8.1" }, diff --git a/examples/next-fastapi/package.json b/examples/next-fastapi/package.json index f88d4d2abc25..924a3fd0c086 100644 --- a/examples/next-fastapi/package.json +++ b/examples/next-fastapi/package.json @@ -13,7 +13,7 @@ "dependencies": { "@ai-sdk/ui-utils": "2.0.0-canary.1", "@ai-sdk/react": "2.0.0-canary.1", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-google-vertex/package.json b/examples/next-google-vertex/package.json index 95c46e4cb8c8..3c88c306081e 100644 --- a/examples/next-google-vertex/package.json +++ b/examples/next-google-vertex/package.json @@ -9,8 +9,8 @@ "lint": "next lint" }, "dependencies": { - 
"@ai-sdk/google-vertex": "3.0.0-canary.1", - "ai": "5.0.0-canary.1", + "@ai-sdk/google-vertex": "3.0.0-canary.2", + "ai": "5.0.0-canary.2", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-langchain/package.json b/examples/next-langchain/package.json index 4ceb88db54d7..4cf0d2588437 100644 --- a/examples/next-langchain/package.json +++ b/examples/next-langchain/package.json @@ -12,7 +12,7 @@ "@ai-sdk/react": "2.0.0-canary.1", "@langchain/openai": "0.0.28", "@langchain/core": "0.1.63", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "langchain": "0.1.36", "next": "latest", "react": "^18", diff --git a/examples/next-openai-kasada-bot-protection/package.json b/examples/next-openai-kasada-bot-protection/package.json index dd4c889c7e4f..d8376abab978 100644 --- a/examples/next-openai-kasada-bot-protection/package.json +++ b/examples/next-openai-kasada-bot-protection/package.json @@ -12,7 +12,7 @@ "@ai-sdk/openai": "2.0.0-canary.1", "@ai-sdk/react": "2.0.0-canary.1", "@vercel/functions": "latest", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai-pages/package.json b/examples/next-openai-pages/package.json index 4195fa98e4d7..af940ba35f1d 100644 --- a/examples/next-openai-pages/package.json +++ b/examples/next-openai-pages/package.json @@ -11,7 +11,7 @@ "dependencies": { "@ai-sdk/openai": "2.0.0-canary.1", "@ai-sdk/react": "2.0.0-canary.1", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry-sentry/package.json b/examples/next-openai-telemetry-sentry/package.json index c63d6b9fd8bc..e9b73d2a14e6 100644 --- a/examples/next-openai-telemetry-sentry/package.json +++ b/examples/next-openai-telemetry-sentry/package.json @@ -17,7 +17,7 @@ "@sentry/nextjs": "^8.42.0", "@sentry/opentelemetry": "8.22.0", "@vercel/otel": "1.10.0", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry/package.json b/examples/next-openai-telemetry/package.json index 345b767efa62..a9437a3a3bab 100644 --- a/examples/next-openai-telemetry/package.json +++ b/examples/next-openai-telemetry/package.json @@ -15,7 +15,7 @@ "@opentelemetry/sdk-logs": "0.55.0", "@opentelemetry/instrumentation": "0.52.1", "@vercel/otel": "1.10.0", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-upstash-rate-limits/package.json b/examples/next-openai-upstash-rate-limits/package.json index a2cd2cda115e..a2fc895d5b7d 100644 --- a/examples/next-openai-upstash-rate-limits/package.json +++ b/examples/next-openai-upstash-rate-limits/package.json @@ -13,7 +13,7 @@ "@ai-sdk/react": "2.0.0-canary.1", "@upstash/ratelimit": "^0.4.3", "@vercel/kv": "^0.2.2", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json index 4c514bf1f999..98dbc49b0757 100644 --- a/examples/next-openai/package.json +++ b/examples/next-openai/package.json @@ -13,13 +13,13 @@ "@ai-sdk/deepseek": "1.0.0-canary.1", "@ai-sdk/fireworks": "1.0.0-canary.1", "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/google": "2.0.0-canary.1", - "@ai-sdk/google-vertex": "3.0.0-canary.1", + "@ai-sdk/google": "2.0.0-canary.2", + "@ai-sdk/google-vertex": "3.0.0-canary.2", 
"@ai-sdk/perplexity": "2.0.0-canary.1", "@ai-sdk/ui-utils": "2.0.0-canary.1", "@ai-sdk/react": "2.0.0-canary.1", "@vercel/blob": "^0.26.0", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/node-http-server/package.json b/examples/node-http-server/package.json index d4bbcd581cac..7b9a07bfa59d 100644 --- a/examples/node-http-server/package.json +++ b/examples/node-http-server/package.json @@ -4,7 +4,7 @@ "private": true, "dependencies": { "@ai-sdk/openai": "2.0.0-canary.1", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "dotenv": "16.4.5", "zod": "3.23.8", "zod-to-json-schema": "3.23.5" diff --git a/examples/nuxt-openai/package.json b/examples/nuxt-openai/package.json index 65a3b3449185..a6f3053d38d2 100644 --- a/examples/nuxt-openai/package.json +++ b/examples/nuxt-openai/package.json @@ -11,7 +11,7 @@ "dependencies": { "@ai-sdk/vue": "2.0.0-canary.1", "@ai-sdk/openai": "2.0.0-canary.1", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "zod": "3.23.8" }, "devDependencies": { diff --git a/examples/sveltekit-openai/package.json b/examples/sveltekit-openai/package.json index 6292ddeb1e68..29a1b2f76794 100644 --- a/examples/sveltekit-openai/package.json +++ b/examples/sveltekit-openai/package.json @@ -25,7 +25,7 @@ "@sveltejs/adapter-vercel": "^5.5.2", "@sveltejs/kit": "^2.16.0", "@sveltejs/vite-plugin-svelte": "^5.0.0", - "ai": "5.0.0-canary.1", + "ai": "5.0.0-canary.2", "autoprefixer": "^10.4.20", "bits-ui": "^1.3.9", "clsx": "^2.1.1", diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index cb9416ef3f18..970db033de09 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -1,5 +1,11 @@ # ai +## 5.0.0-canary.2 + +### Patch Changes + +- bd398e4: fix (core): improve error handling in streamText's consumeStream method + ## 5.0.0-canary.1 ### Minor Changes diff --git a/packages/ai/package.json b/packages/ai/package.json index 707c6b8da40e..721e82689b3f 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -1,6 +1,6 @@ { "name": "ai", - "version": "5.0.0-canary.1", + "version": "5.0.0-canary.2", "description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript", "license": "Apache-2.0", "sideEffects": false, diff --git a/packages/ai/tests/e2e/next-server/CHANGELOG.md b/packages/ai/tests/e2e/next-server/CHANGELOG.md index a589a799538a..5840ac753dd7 100644 --- a/packages/ai/tests/e2e/next-server/CHANGELOG.md +++ b/packages/ai/tests/e2e/next-server/CHANGELOG.md @@ -4,6 +4,13 @@ ### Patch Changes +- Updated dependencies [bd398e4] + - ai@5.0.0-canary.2 + +## 0.0.1-canary.0 + +### Patch Changes + - Updated dependencies [c22ad54] - Updated dependencies [b7eae2d] - Updated dependencies [a4f3007] diff --git a/packages/azure/CHANGELOG.md b/packages/azure/CHANGELOG.md index 12b1a3d4ed0f..961acf277e17 100644 --- a/packages/azure/CHANGELOG.md +++ b/packages/azure/CHANGELOG.md @@ -1,5 +1,11 @@ # @ai-sdk/azure +## 2.0.0-canary.2 + +### Patch Changes + +- c467b38: feat (provider/azure): add OpenAI responses API support + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/azure/package.json b/packages/azure/package.json index 92fc41b5fe02..1d81f0e57e23 100644 --- a/packages/azure/package.json +++ b/packages/azure/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/azure", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", diff --git a/packages/google-vertex/CHANGELOG.md 
b/packages/google-vertex/CHANGELOG.md index 9b91181f7660..8110b81b1afc 100644 --- a/packages/google-vertex/CHANGELOG.md +++ b/packages/google-vertex/CHANGELOG.md @@ -1,5 +1,12 @@ # @ai-sdk/google-vertex +## 3.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [5cf30ea] + - @ai-sdk/google@2.0.0-canary.2 + ## 3.0.0-canary.1 ### Patch Changes diff --git a/packages/google-vertex/package.json b/packages/google-vertex/package.json index 4f065070082b..c25e61a6f855 100644 --- a/packages/google-vertex/package.json +++ b/packages/google-vertex/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google-vertex", - "version": "3.0.0-canary.1", + "version": "3.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -50,7 +50,7 @@ }, "dependencies": { "@ai-sdk/anthropic": "2.0.0-canary.1", - "@ai-sdk/google": "2.0.0-canary.1", + "@ai-sdk/google": "2.0.0-canary.2", "@ai-sdk/provider": "2.0.0-canary.0", "@ai-sdk/provider-utils": "3.0.0-canary.1", "google-auth-library": "^9.15.0" diff --git a/packages/google/CHANGELOG.md b/packages/google/CHANGELOG.md index dcdfe76e8fae..d59bc3a5f162 100644 --- a/packages/google/CHANGELOG.md +++ b/packages/google/CHANGELOG.md @@ -1,5 +1,11 @@ # @ai-sdk/google +## 2.0.0-canary.2 + +### Patch Changes + +- 5cf30ea: fix (provider/google): allow "OFF" for Google HarmBlockThreshold + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/google/package.json b/packages/google/package.json index 6a04682cb5c3..b0f09d0d8b59 100644 --- a/packages/google/package.json +++ b/packages/google/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", diff --git a/packages/groq/CHANGELOG.md b/packages/groq/CHANGELOG.md index 648ca563ce8c..72d9de98e85f 100644 --- a/packages/groq/CHANGELOG.md +++ b/packages/groq/CHANGELOG.md @@ -1,5 +1,11 @@ # @ai-sdk/groq +## 2.0.0-canary.2 + +### Patch Changes + +- 67b9604: feat (provider/groq): add llama 4 model + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/groq/package.json b/packages/groq/package.json index 1af250006c95..676fd01a0248 100644 --- a/packages/groq/package.json +++ b/packages/groq/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/groq", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", diff --git a/packages/valibot/CHANGELOG.md b/packages/valibot/CHANGELOG.md index 3ddbd465638d..fa868e583556 100644 --- a/packages/valibot/CHANGELOG.md +++ b/packages/valibot/CHANGELOG.md @@ -1,5 +1,12 @@ # @ai-sdk/valibot +## 1.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [bd398e4] + - ai@5.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/valibot/package.json b/packages/valibot/package.json index 954187cd2731..a08a71bfdd1b 100644 --- a/packages/valibot/package.json +++ b/packages/valibot/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/valibot", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -27,7 +27,7 @@ } }, "dependencies": { - "ai": "5.0.0-canary.1" + "ai": "5.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index fa601e6e694d..dcc0685ee686 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -66,7 +66,7 @@ importers: specifier: 2.0.0-canary.1 version: link:../../packages/anthropic '@ai-sdk/azure': - 
specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/azure '@ai-sdk/cerebras': specifier: 1.0.0-canary.1 @@ -87,13 +87,13 @@ importers: specifier: 1.0.0-canary.1 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../../packages/google-vertex '@ai-sdk/groq': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/groq '@ai-sdk/luma': specifier: 1.0.0-canary.1 @@ -120,7 +120,7 @@ importers: specifier: 1.0.0-canary.1 version: link:../../packages/togetherai '@ai-sdk/valibot': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/valibot '@ai-sdk/xai': specifier: 2.0.0-canary.1 @@ -138,7 +138,7 @@ importers: specifier: 1.28.0 version: 1.28.0(@opentelemetry/api@1.9.0) ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -181,7 +181,7 @@ importers: specifier: 2.0.0-canary.1 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -209,7 +209,7 @@ importers: specifier: 2.0.0-canary.1 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -237,7 +237,7 @@ importers: specifier: 1.13.7 version: 1.13.7(hono@4.6.9) ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -265,7 +265,7 @@ importers: specifier: ^1.7.0 version: 1.7.0 ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -305,7 +305,7 @@ importers: specifier: ^10.4.9 version: 10.4.9(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2) ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai reflect-metadata: specifier: ^0.2.0 @@ -387,7 +387,7 @@ importers: specifier: 2.0.0-canary.1 version: link:../../packages/ui-utils ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -436,10 +436,10 @@ importers: examples/next-google-vertex: dependencies: '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../../packages/google-vertex ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -488,7 +488,7 @@ importers: specifier: 0.0.28 version: 0.0.28 ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai langchain: specifier: 0.1.36 @@ -543,10 +543,10 @@ importers: specifier: 1.0.0-canary.1 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../../packages/google-vertex '@ai-sdk/openai': specifier: 2.0.0-canary.1 @@ -564,7 +564,7 @@ importers: specifier: ^0.26.0 version: 0.26.0 ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai next: specifier: latest @@ -625,7 +625,7 @@ importers: specifier: latest version: 
2.0.0(@aws-sdk/credential-provider-web-identity@3.662.0(@aws-sdk/client-sts@3.662.0)) ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai next: specifier: latest @@ -677,7 +677,7 @@ importers: specifier: 2.0.0-canary.1 version: link:../../packages/react ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai next: specifier: latest @@ -744,7 +744,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.29.0(@opentelemetry/api@1.9.0)) ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai next: specifier: latest @@ -817,7 +817,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.28.0(@opentelemetry/api@1.9.0)) ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai next: specifier: latest @@ -878,7 +878,7 @@ importers: specifier: ^0.2.2 version: 0.2.4 ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai next: specifier: latest @@ -927,7 +927,7 @@ importers: specifier: 2.0.0-canary.1 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -958,7 +958,7 @@ importers: specifier: 2.0.0-canary.1 version: link:../../packages/vue ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai zod: specifier: 3.23.8 @@ -1034,7 +1034,7 @@ importers: specifier: ^5.0.0 version: 5.0.3(svelte@5.22.4)(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../../packages/ai autoprefixer: specifier: ^10.4.20 @@ -1493,7 +1493,7 @@ importers: specifier: 2.0.0-canary.1 version: link:../anthropic '@ai-sdk/google': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../google '@ai-sdk/provider': specifier: 2.0.0-canary.0 @@ -1942,7 +1942,7 @@ importers: specifier: ^1.0.0-rc.0 || ^1.0.0 version: 1.0.0-rc.0(valibot@1.0.0-rc.0(typescript@5.6.3)) ai: - specifier: 5.0.0-canary.1 + specifier: 5.0.0-canary.2 version: link:../ai devDependencies: '@types/node': From 7677477b776db8eb21b5b41b9217d21ea603be20 Mon Sep 17 00:00:00 2001 From: Walter Korman Date: Sun, 6 Apr 2025 10:19:06 -0700 Subject: [PATCH 0037/1307] feat (providers/deepinfra): add llama 4 models (#5572) --- .changeset/beige-penguins-greet.md | 5 + .../01-ai-sdk-providers/11-deepinfra.mdx | 50 +++++----- .../providers/01-ai-sdk-providers/index.mdx | 92 ++++++++++--------- examples/ai-core/src/e2e/deepinfra.test.ts | 2 + .../deepinfra/src/deepinfra-chat-settings.ts | 2 + 5 files changed, 82 insertions(+), 69 deletions(-) create mode 100644 .changeset/beige-penguins-greet.md diff --git a/.changeset/beige-penguins-greet.md b/.changeset/beige-penguins-greet.md new file mode 100644 index 
000000000000..22e6868c97d1 --- /dev/null +++ b/.changeset/beige-penguins-greet.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/deepinfra': patch +--- + +feat (providers/deepinfra): add llama 4 models diff --git a/content/providers/01-ai-sdk-providers/11-deepinfra.mdx b/content/providers/01-ai-sdk-providers/11-deepinfra.mdx index b451445fd84c..23296f0e2b2e 100644 --- a/content/providers/01-ai-sdk-providers/11-deepinfra.mdx +++ b/content/providers/01-ai-sdk-providers/11-deepinfra.mdx @@ -82,30 +82,32 @@ DeepInfra language models can also be used in the `streamText` function (see [AI ## Model Capabilities -| Model | Image Input | Object Generation | Tool Usage | Tool Streaming | -| ---------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `meta-llama/Llama-3.3-70B-Instruct-Turbo` | | | | | -| `meta-llama/Llama-3.3-70B-Instruct` | | | | | -| `meta-llama/Meta-Llama-3.1-405B-Instruct` | | | | | -| `meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo` | | | | | -| `meta-llama/Meta-Llama-3.1-70B-Instruct` | | | | | -| `meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo` | | | | | -| `meta-llama/Meta-Llama-3.1-8B-Instruct` | | | | | -| `meta-llama/Llama-3.2-11B-Vision-Instruct` | | | | | -| `meta-llama/Llama-3.2-90B-Vision-Instruct` | | | | | -| `mistralai/Mixtral-8x7B-Instruct-v0.1` | | | | | -| `deepseek-ai/DeepSeek-V3` | | | | | -| `deepseek-ai/DeepSeek-R1` | | | | | -| `deepseek-ai/DeepSeek-R1-Distill-Llama-70B` | | | | | -| `deepseek-ai/DeepSeek-R1-Turbo` | | | | | -| `nvidia/Llama-3.1-Nemotron-70B-Instruct` | | | | | -| `Qwen/Qwen2-7B-Instruct` | | | | | -| `Qwen/Qwen2.5-72B-Instruct` | | | | | -| `Qwen/Qwen2.5-Coder-32B-Instruct` | | | | | -| `Qwen/QwQ-32B-Preview` | | | | | -| `google/codegemma-7b-it` | | | | | -| `google/gemma-2-9b-it` | | | | | -| `microsoft/WizardLM-2-8x22B` | | | | | +| Model | Image Input | Object Generation | Tool Usage | Tool Streaming | +| --------------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| `meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8` | | | | | +| `meta-llama/Llama-4-Scout-17B-16E-Instruct` | | | | | +| `meta-llama/Llama-3.3-70B-Instruct-Turbo` | | | | | +| `meta-llama/Llama-3.3-70B-Instruct` | | | | | +| `meta-llama/Meta-Llama-3.1-405B-Instruct` | | | | | +| `meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo` | | | | | +| `meta-llama/Meta-Llama-3.1-70B-Instruct` | | | | | +| `meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo` | | | | | +| `meta-llama/Meta-Llama-3.1-8B-Instruct` | | | | | +| `meta-llama/Llama-3.2-11B-Vision-Instruct` | | | | | +| `meta-llama/Llama-3.2-90B-Vision-Instruct` | | | | | +| `mistralai/Mixtral-8x7B-Instruct-v0.1` | | | | | +| `deepseek-ai/DeepSeek-V3` | | | | | +| `deepseek-ai/DeepSeek-R1` | | | | | +| `deepseek-ai/DeepSeek-R1-Distill-Llama-70B` | | | | | +| `deepseek-ai/DeepSeek-R1-Turbo` | | | | | +| `nvidia/Llama-3.1-Nemotron-70B-Instruct` | | | | | +| `Qwen/Qwen2-7B-Instruct` | | | | | +| `Qwen/Qwen2.5-72B-Instruct` | | | | | +| `Qwen/Qwen2.5-Coder-32B-Instruct` | | | | | +| `Qwen/QwQ-32B-Preview` | | | | | +| `google/codegemma-7b-it` | | | | | +| `google/gemma-2-9b-it` | | | | | +| `microsoft/WizardLM-2-8x22B` | | | | | The table above lists popular models. 
Please see the [DeepInfra diff --git a/content/providers/01-ai-sdk-providers/index.mdx b/content/providers/01-ai-sdk-providers/index.mdx index 503a3119e3de..dd0b183414b0 100644 --- a/content/providers/01-ai-sdk-providers/index.mdx +++ b/content/providers/01-ai-sdk-providers/index.mdx @@ -17,51 +17,53 @@ There are also [community providers](./community-providers) that have been creat Not all providers support all AI SDK features. Here's a quick comparison of the capabilities of popular models: -| Provider | Model | Image Input | Object Generation | Tool Usage | Tool Streaming | -| ------------------------------------------------------------------------ | --------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-1212` | | | | | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-vision-1212` | | | | | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-beta` | | | | | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-vision-beta` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4-turbo` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-mini` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-preview` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-7-sonnet-20250219` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20241022` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20240620` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-haiku-20241022` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `meta-llama/llama-4-scout-17b-16e-instruct` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `deepseek-r1-distill-llama-70b` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.3-70b-versatile` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.1-8b-instant` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `mistral-saba-24b` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `qwen-qwq-32b` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `mixtral-8x7b-32768` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `gemma2-9b-it` | | | | | -| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo` | | | | | -| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `meta-llama/Llama-3.3-70B-Instruct` | | | | | -| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `deepseek-ai/DeepSeek-V3` | | | | | -| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `deepseek-ai/DeepSeek-R1` | | | | | -| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `deepseek-ai/DeepSeek-R1-Distill-Llama-70B` | | | | | -| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `deepseek-ai/DeepSeek-R1-Turbo` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-large-latest` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-large-latest` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-small-latest` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-12b-2409` | | | | 
| -| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-2.0-flash-exp` | | | | | -| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-flash` | | | | | -| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-pro` | | | | | -| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-2.0-flash-exp` | | | | | -| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-flash` | | | | | -| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-pro` | | | | | -| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-chat` | | | | | -| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-reasoner` | | | | | -| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-8b` | | | | | -| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.3-70b` | | | | | +| Provider | Model | Image Input | Object Generation | Tool Usage | Tool Streaming | +| ------------------------------------------------------------------------ | --------------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-1212` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-vision-1212` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-beta` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-vision-beta` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4-turbo` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-preview` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-7-sonnet-20250219` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20241022` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20240620` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-haiku-20241022` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `meta-llama/llama-4-scout-17b-16e-instruct` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `deepseek-r1-distill-llama-70b` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.3-70b-versatile` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.1-8b-instant` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `mistral-saba-24b` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `qwen-qwq-32b` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `mixtral-8x7b-32768` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `gemma2-9b-it` | | | | | +| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8` | | | | | +| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `meta-llama/Llama-4-Scout-17B-16E-Instruct` | | | | | +| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo` | | | | | +| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `meta-llama/Llama-3.3-70B-Instruct` | | | | | +| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | 
`deepseek-ai/DeepSeek-V3` | | | | | +| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `deepseek-ai/DeepSeek-R1` | | | | | +| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `deepseek-ai/DeepSeek-R1-Distill-Llama-70B` | | | | | +| [DeepInfra](/providers/ai-sdk-providers/deepinfra) | `deepseek-ai/DeepSeek-R1-Turbo` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-large-latest` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-large-latest` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-small-latest` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-12b-2409` | | | | | +| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-2.0-flash-exp` | | | | | +| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-flash` | | | | | +| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-pro` | | | | | +| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-2.0-flash-exp` | | | | | +| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-flash` | | | | | +| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-pro` | | | | | +| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-chat` | | | | | +| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-reasoner` | | | | | +| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-8b` | | | | | +| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.3-70b` | | | | | This table is not exhaustive. Additional models can be found in the provider diff --git a/examples/ai-core/src/e2e/deepinfra.test.ts b/examples/ai-core/src/e2e/deepinfra.test.ts index fc549841dcc3..bc1943a7d61e 100644 --- a/examples/ai-core/src/e2e/deepinfra.test.ts +++ b/examples/ai-core/src/e2e/deepinfra.test.ts @@ -13,6 +13,8 @@ createFeatureTestSuite({ name: 'DeepInfra', models: { languageModels: [ + createChatModel('meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8'), + createChatModel('meta-llama/Llama-4-Scout-17B-16E-Instruct'), createChatModel('deepseek-ai/DeepSeek-V3'), createChatModel('deepseek-ai/DeepSeek-R1'), createChatModel('deepseek-ai/DeepSeek-R1-Distill-Llama-70B'), diff --git a/packages/deepinfra/src/deepinfra-chat-settings.ts b/packages/deepinfra/src/deepinfra-chat-settings.ts index 8694d195e1e3..6c45c9f7dd0d 100644 --- a/packages/deepinfra/src/deepinfra-chat-settings.ts +++ b/packages/deepinfra/src/deepinfra-chat-settings.ts @@ -2,6 +2,8 @@ import { OpenAICompatibleChatSettings } from '@ai-sdk/openai-compatible'; // https://deepinfra.com/models/text-generation export type DeepInfraChatModelId = + | 'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8' + | 'meta-llama/Llama-4-Scout-17B-16E-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct' From 33f4a6a4d9d4b549c56a4a87a95fe19fd81a02ad Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Mon, 7 Apr 2025 10:55:53 +0200 Subject: [PATCH 0038/1307] chore (provider): rename providerMetadata inputs to providerOptions (#5579) --- .changeset/wild-candles-judge.md | 5 ++ content/docs/03-ai-sdk-core/40-middleware.mdx | 3 +- .../openai-log-metadata-middleware.ts | 2 +- .../middleware/default-settings-example.ts | 2 +- .../generate-object/generate-object.test.ts | 38 +++++------ .../core/generate-object/generate-object.ts | 4 +- .../generate-object/stream-object.test.ts | 36 +++++----- 
.../ai/core/generate-object/stream-object.ts | 4 +- .../core/generate-text/generate-text.test.ts | 66 +++++++++--------- .../ai/core/generate-text/generate-text.ts | 2 +- .../ai/core/generate-text/stream-text.test.ts | 68 +++++++++---------- packages/ai/core/generate-text/stream-text.ts | 2 +- .../default-settings-middleware.test.ts | 16 ++--- .../middleware/default-settings-middleware.ts | 10 +-- .../convert-to-language-model-prompt.test.ts | 20 +++--- .../convert-to-language-model-prompt.ts | 30 ++++---- packages/ai/rsc/stream-ui/stream-ui.tsx | 2 +- .../ai/rsc/stream-ui/stream-ui.ui.test.tsx | 4 +- .../src/bedrock-chat-language-model.test.ts | 14 ++-- .../src/bedrock-chat-language-model.ts | 6 +- .../convert-to-bedrock-chat-messages.test.ts | 6 +- .../src/convert-to-bedrock-chat-messages.ts | 8 +-- .../anthropic-messages-language-model.test.ts | 4 +- .../src/anthropic-messages-language-model.ts | 2 +- ...nvert-to-anthropic-messages-prompt.test.ts | 16 ++--- .../convert-to-anthropic-messages-prompt.ts | 16 ++--- ...oogle-generative-ai-language-model.test.ts | 6 +- .../google-generative-ai-language-model.ts | 4 +- .../groq/src/groq-chat-language-model.test.ts | 2 +- packages/groq/src/groq-chat-language-model.ts | 4 +- .../src/mistral-chat-language-model.ts | 6 +- ...to-openai-compatible-chat-messages.test.ts | 42 ++++++------ ...vert-to-openai-compatible-chat-messages.ts | 4 +- ...nai-compatible-chat-language-model.test.ts | 8 +-- .../openai-compatible-chat-language-model.ts | 4 +- ...mpatible-completion-language-model.test.ts | 8 +-- ...ai-compatible-completion-language-model.ts | 4 +- .../convert-to-openai-chat-messages.test.ts | 2 +- .../src/convert-to-openai-chat-messages.ts | 2 +- .../src/openai-chat-language-model.test.ts | 16 ++--- .../openai/src/openai-chat-language-model.ts | 12 ++-- ...nvert-to-openai-responses-messages.test.ts | 2 +- .../convert-to-openai-responses-messages.ts | 2 +- .../openai-responses-language-model.test.ts | 16 ++--- .../openai-responses-language-model.ts | 4 +- .../src/perplexity-language-model.test.ts | 2 +- .../src/perplexity-language-model.ts | 6 +- .../provider/src/language-model/v2/index.ts | 3 +- .../v2/language-model-v2-call-options.ts | 10 +-- .../v2/language-model-v2-prompt.ts | 42 ++++++------ .../v2/language-model-v2-provider-metadata.ts | 9 +-- .../v2/language-model-v2-provider-options.ts | 27 ++++++++ 52 files changed, 333 insertions(+), 300 deletions(-) create mode 100644 .changeset/wild-candles-judge.md create mode 100644 packages/provider/src/language-model/v2/language-model-v2-provider-options.ts diff --git a/.changeset/wild-candles-judge.md b/.changeset/wild-candles-judge.md new file mode 100644 index 000000000000..df33fc279eb2 --- /dev/null +++ b/.changeset/wild-candles-judge.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore (provider): rename providerMetadata inputs to providerOptions diff --git a/content/docs/03-ai-sdk-core/40-middleware.mdx b/content/docs/03-ai-sdk-core/40-middleware.mdx index 58a6c05ee3bb..aa6f95ecadd3 100644 --- a/content/docs/03-ai-sdk-core/40-middleware.mdx +++ b/content/docs/03-ai-sdk-core/40-middleware.mdx @@ -108,8 +108,7 @@ const model = wrapLanguageModel({ settings: { temperature: 0.5, maxTokens: 800, - // note: use providerMetadata instead of providerOptions here: - providerMetadata: { openai: { store: false } }, + providerOptions: { openai: { store: false } }, }, }), }); diff --git a/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts 
b/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts index 6507c2a12ce6..ea261d32e834 100644 --- a/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts +++ b/examples/ai-core/src/generate-text/openai-log-metadata-middleware.ts @@ -6,7 +6,7 @@ import 'dotenv/config'; const logProviderMetadataMiddleware: LanguageModelV2Middleware = { transformParams: async ({ params }) => { console.log( - 'providerMetadata: ' + JSON.stringify(params.providerMetadata, null, 2), + 'providerOptions: ' + JSON.stringify(params.providerOptions, null, 2), ); return params; }, diff --git a/examples/ai-core/src/middleware/default-settings-example.ts b/examples/ai-core/src/middleware/default-settings-example.ts index 034effe5e737..4ce405f9d930 100644 --- a/examples/ai-core/src/middleware/default-settings-example.ts +++ b/examples/ai-core/src/middleware/default-settings-example.ts @@ -9,7 +9,7 @@ async function main() { middleware: defaultSettingsMiddleware({ settings: { temperature: 0.5, - providerMetadata: { + providerOptions: { openai: { store: false, }, diff --git a/packages/ai/core/generate-object/generate-object.test.ts b/packages/ai/core/generate-object/generate-object.test.ts index c341cb36f3e1..9c746f4b1d8c 100644 --- a/packages/ai/core/generate-object/generate-object.test.ts +++ b/packages/ai/core/generate-object/generate-object.test.ts @@ -45,7 +45,7 @@ describe('output = "object"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -85,7 +85,7 @@ describe('output = "object"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -125,7 +125,7 @@ describe('output = "object"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -169,7 +169,7 @@ describe('output = "object"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -217,7 +217,7 @@ describe('output = "object"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -403,7 +403,7 @@ describe('output = "object"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -450,7 +450,7 @@ describe('output = "object"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -501,7 +501,7 @@ describe('output = "object"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -725,8 +725,8 @@ describe('output = "object"', () => { it('should pass provider options to model in json mode', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ providerMetadata }) => { - expect(providerMetadata).toStrictEqual({ + doGenerate: async ({ providerOptions }) => { + expect(providerOptions).toStrictEqual({ aProvider: { someKey: 'someValue' }, }); @@ -752,8 +752,8 @@ describe('output = "object"', () => { it('should pass provider options to model in tool mode', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ providerMetadata }) => { - 
expect(providerMetadata).toStrictEqual({ + doGenerate: async ({ providerOptions }) => { + expect(providerOptions).toStrictEqual({ aProvider: { someKey: 'someValue' }, }); @@ -1016,7 +1016,7 @@ describe('output = "array"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -1114,7 +1114,7 @@ describe('output = "no-schema"', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -1326,7 +1326,7 @@ describe('options.messages', () => { type: 'text', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'user', }, { @@ -1335,13 +1335,13 @@ describe('options.messages', () => { args: { value: 'test-value', }, - providerMetadata: undefined, + providerOptions: undefined, toolCallId: 'call-1', toolName: 'test-tool', type: 'tool-call', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'assistant', }, { @@ -1349,14 +1349,14 @@ describe('options.messages', () => { { content: undefined, isError: undefined, - providerMetadata: undefined, + providerOptions: undefined, result: 'test result', toolCallId: 'call-1', toolName: 'test-tool', type: 'tool-result', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'tool', }, ]); diff --git a/packages/ai/core/generate-object/generate-object.ts b/packages/ai/core/generate-object/generate-object.ts index 1a2e279d3726..1539849bb2f5 100644 --- a/packages/ai/core/generate-object/generate-object.ts +++ b/packages/ai/core/generate-object/generate-object.ts @@ -539,7 +539,7 @@ export async function generateObject({ ...prepareCallSettings(settings), inputFormat: standardizedPrompt.type, prompt: promptMessages, - providerMetadata: providerOptions, + providerOptions, abortSignal, headers, }); @@ -664,7 +664,7 @@ export async function generateObject({ ...prepareCallSettings(settings), inputFormat, prompt: promptMessages, - providerMetadata: providerOptions, + providerOptions, abortSignal, headers, }); diff --git a/packages/ai/core/generate-object/stream-object.test.ts b/packages/ai/core/generate-object/stream-object.test.ts index 7f3c7129a24a..bc0241d72d1c 100644 --- a/packages/ai/core/generate-object/stream-object.test.ts +++ b/packages/ai/core/generate-object/stream-object.test.ts @@ -48,7 +48,7 @@ describe('streamObject', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -108,7 +108,7 @@ describe('streamObject', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); return { @@ -167,7 +167,7 @@ describe('streamObject', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -230,7 +230,7 @@ describe('streamObject', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -327,7 +327,7 @@ describe('streamObject', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -1118,8 +1118,8 @@ describe('streamObject', () => { it('should pass provider options to model in json mode', async () => { const result = streamObject({ model: new MockLanguageModelV2({ - doStream: async ({ providerMetadata }) => { - 
expect(providerMetadata).toStrictEqual({ + doStream: async ({ providerOptions }) => { + expect(providerOptions).toStrictEqual({ aProvider: { someKey: 'someValue' }, }); @@ -1155,8 +1155,8 @@ describe('streamObject', () => { it('should pass provider options to model in tool mode', async () => { const result = streamObject({ model: new MockLanguageModelV2({ - doStream: async ({ providerMetadata }) => { - expect(providerMetadata).toStrictEqual({ + doStream: async ({ providerOptions }) => { + expect(providerOptions).toStrictEqual({ aProvider: { someKey: 'someValue' }, }); @@ -1221,7 +1221,7 @@ describe('streamObject', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -1593,7 +1593,7 @@ describe('streamObject', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -1748,7 +1748,7 @@ describe('streamObject', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -1840,7 +1840,7 @@ describe('streamObject', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -2220,7 +2220,7 @@ describe('streamObject', () => { type: 'text', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'user', }, { @@ -2229,13 +2229,13 @@ describe('streamObject', () => { args: { value: 'test-value', }, - providerMetadata: undefined, + providerOptions: undefined, toolCallId: 'call-1', toolName: 'test-tool', type: 'tool-call', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'assistant', }, { @@ -2243,14 +2243,14 @@ describe('streamObject', () => { { content: undefined, isError: undefined, - providerMetadata: undefined, + providerOptions: undefined, result: 'test result', toolCallId: 'call-1', toolName: 'test-tool', type: 'tool-result', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'tool', }, ]); diff --git a/packages/ai/core/generate-object/stream-object.ts b/packages/ai/core/generate-object/stream-object.ts index e166d21e9c10..acf34e345752 100644 --- a/packages/ai/core/generate-object/stream-object.ts +++ b/packages/ai/core/generate-object/stream-object.ts @@ -616,7 +616,7 @@ class DefaultStreamObjectResult modelSupportsImageUrls: model.supportsImageUrls, modelSupportsUrl: model.supportsUrl?.bind(model), // support 'this' context }), - providerMetadata: providerOptions, + providerOptions, abortSignal, headers, }; @@ -663,7 +663,7 @@ class DefaultStreamObjectResult modelSupportsImageUrls: model.supportsImageUrls, modelSupportsUrl: model.supportsUrl?.bind(model), // support 'this' context, }), - providerMetadata: providerOptions, + providerOptions, abortSignal, headers, }; diff --git a/packages/ai/core/generate-text/generate-text.test.ts b/packages/ai/core/generate-text/generate-text.test.ts index 26635d5a9884..98b8880bd62d 100644 --- a/packages/ai/core/generate-text/generate-text.test.ts +++ b/packages/ai/core/generate-text/generate-text.test.ts @@ -89,7 +89,7 @@ describe('result.text', () => { { role: 'user', content: [{ type: 'text', text: 'prompt' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -227,7 +227,7 @@ describe('result.toolCalls', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ 
-301,7 +301,7 @@ describe('result.toolResults', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -530,7 +530,7 @@ describe('options.maxSteps', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -584,7 +584,7 @@ describe('options.maxSteps', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'assistant', @@ -594,10 +594,10 @@ describe('options.maxSteps', () => { toolCallId: 'call-1', toolName: 'tool1', args: { value: 'value' }, - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'tool', @@ -609,10 +609,10 @@ describe('options.maxSteps', () => { result: 'result1', content: undefined, isError: undefined, - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, ]); return { @@ -711,7 +711,7 @@ describe('options.maxSteps', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -739,7 +739,7 @@ describe('options.maxSteps', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'assistant', @@ -747,10 +747,10 @@ describe('options.maxSteps', () => { { type: 'text', text: 'part 1 \n ', - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -799,7 +799,7 @@ describe('options.maxSteps', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'assistant', @@ -807,15 +807,15 @@ describe('options.maxSteps', () => { { type: 'text', text: 'part 1 \n ', - providerMetadata: undefined, + providerOptions: undefined, }, { type: 'text', text: 'no-whitespace', - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -858,7 +858,7 @@ describe('options.maxSteps', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'assistant', @@ -866,20 +866,20 @@ describe('options.maxSteps', () => { { type: 'text', text: 'part 1 \n ', - providerMetadata: undefined, + providerOptions: undefined, }, { type: 'text', text: 'no-whitespace', - providerMetadata: undefined, + providerOptions: undefined, }, { type: 'text', text: 'immediatefollow ', - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -1005,8 +1005,8 @@ describe('options.providerOptions', () => { it('should pass provider options to model', async () => { const result = await generateText({ model: new MockLanguageModelV2({ - doGenerate: async ({ providerMetadata }) => { - expect(providerMetadata).toStrictEqual({ + doGenerate: async ({ providerOptions }) => { + expect(providerOptions).toStrictEqual({ aProvider: { someKey: 'someValue' }, }); @@ -1239,7 +1239,7 @@ describe('tools with custom schema', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ 
-1312,7 +1312,7 @@ describe('options.messages', () => { type: 'text', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'user', }, { @@ -1321,13 +1321,13 @@ describe('options.messages', () => { args: { value: 'test-value', }, - providerMetadata: undefined, + providerOptions: undefined, toolCallId: 'call-1', toolName: 'test-tool', type: 'tool-call', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'assistant', }, { @@ -1335,14 +1335,14 @@ describe('options.messages', () => { { content: undefined, isError: undefined, - providerMetadata: undefined, + providerOptions: undefined, result: 'test result', toolCallId: 'call-1', toolName: 'test-tool', type: 'tool-result', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'tool', }, ]); @@ -1474,7 +1474,7 @@ describe('options.output', () => { prompt: [ { content: [{ text: 'prompt', type: 'text' }], - providerMetadata: undefined, + providerOptions: undefined, role: 'user', }, ], @@ -1537,7 +1537,7 @@ describe('options.output', () => { }, { content: [{ text: 'prompt', type: 'text' }], - providerMetadata: undefined, + providerOptions: undefined, role: 'user', }, ], @@ -1601,7 +1601,7 @@ describe('options.output', () => { prompt: [ { content: [{ text: 'prompt', type: 'text' }], - providerMetadata: undefined, + providerOptions: undefined, role: 'user', }, ], diff --git a/packages/ai/core/generate-text/generate-text.ts b/packages/ai/core/generate-text/generate-text.ts index 3f4293114b3f..25913751d83b 100644 --- a/packages/ai/core/generate-text/generate-text.ts +++ b/packages/ai/core/generate-text/generate-text.ts @@ -354,7 +354,7 @@ A function that attempts to repair a tool call that failed to parse. inputFormat: promptFormat, responseFormat: output?.responseFormat({ model }), prompt: promptMessages, - providerMetadata: providerOptions, + providerOptions, abortSignal, headers, }); diff --git a/packages/ai/core/generate-text/stream-text.test.ts b/packages/ai/core/generate-text/stream-text.test.ts index 52f621010968..f05eccb7ca35 100644 --- a/packages/ai/core/generate-text/stream-text.test.ts +++ b/packages/ai/core/generate-text/stream-text.test.ts @@ -181,7 +181,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -278,7 +278,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -360,7 +360,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -420,7 +420,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -492,7 +492,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -2357,7 +2357,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -2415,7 +2415,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'assistant', @@ -2423,7 +2423,7 @@ describe('streamText', () => { { 
type: 'reasoning', text: 'thinking', - providerMetadata: undefined, + providerOptions: undefined, signature: undefined, }, { @@ -2431,10 +2431,10 @@ describe('streamText', () => { toolCallId: 'call-1', toolName: 'tool1', args: { value: 'value' }, - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'tool', @@ -2446,10 +2446,10 @@ describe('streamText', () => { result: 'result1', content: undefined, isError: undefined, - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -2581,7 +2581,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -2620,7 +2620,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'assistant', @@ -2628,10 +2628,10 @@ describe('streamText', () => { { type: 'text', text: 'part 1 \n ', - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -2677,7 +2677,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'assistant', @@ -2685,15 +2685,15 @@ describe('streamText', () => { { type: 'text', text: 'part 1 \n ', - providerMetadata: undefined, + providerOptions: undefined, }, { type: 'text', text: 'no-whitespace', - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -2749,7 +2749,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, { role: 'assistant', @@ -2757,20 +2757,20 @@ describe('streamText', () => { { type: 'text', text: 'part 1 \n ', - providerMetadata: undefined, + providerOptions: undefined, }, { type: 'text', text: 'no-whitespace', - providerMetadata: undefined, + providerOptions: undefined, }, { type: 'text', text: 'immediatefollow ', - providerMetadata: undefined, + providerOptions: undefined, }, ], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -2954,8 +2954,8 @@ describe('streamText', () => { it('should pass provider metadata to model', async () => { const result = streamText({ model: new MockLanguageModelV2({ - doStream: async ({ providerMetadata }) => { - expect(providerMetadata).toStrictEqual({ + doStream: async ({ providerOptions }) => { + expect(providerOptions).toStrictEqual({ aProvider: { someKey: 'someValue' }, }); @@ -3200,7 +3200,7 @@ describe('streamText', () => { { role: 'user', content: [{ type: 'text', text: 'test-input' }], - providerMetadata: undefined, + providerOptions: undefined, }, ]); @@ -3267,7 +3267,7 @@ describe('streamText', () => { type: 'text', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'user', }, { @@ -3276,13 +3276,13 @@ describe('streamText', () => { args: { value: 'test-value', }, - providerMetadata: undefined, + providerOptions: undefined, toolCallId: 'call-1', toolName: 'test-tool', type: 'tool-call', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'assistant', }, { @@ -3290,14 +3290,14 @@ describe('streamText', () => { { content: undefined, isError: 
undefined, - providerMetadata: undefined, + providerOptions: undefined, result: 'test result', toolCallId: 'call-1', toolName: 'test-tool', type: 'tool-result', }, ], - providerMetadata: undefined, + providerOptions: undefined, role: 'tool', }, ]); @@ -4476,7 +4476,7 @@ describe('streamText', () => { }, { content: [{ text: 'prompt', type: 'text' }], - providerMetadata: undefined, + providerOptions: undefined, role: 'user', }, ], diff --git a/packages/ai/core/generate-text/stream-text.ts b/packages/ai/core/generate-text/stream-text.ts index d918e7a1bc88..7c7221c4a5a3 100644 --- a/packages/ai/core/generate-text/stream-text.ts +++ b/packages/ai/core/generate-text/stream-text.ts @@ -1034,7 +1034,7 @@ class DefaultStreamTextResult inputFormat: promptFormat, responseFormat: output?.responseFormat({ model }), prompt: promptMessages, - providerMetadata: providerOptions, + providerOptions, abortSignal, headers, }), diff --git a/packages/ai/core/middleware/default-settings-middleware.test.ts b/packages/ai/core/middleware/default-settings-middleware.test.ts index 49e84568508c..4b16e5bb5ec9 100644 --- a/packages/ai/core/middleware/default-settings-middleware.test.ts +++ b/packages/ai/core/middleware/default-settings-middleware.test.ts @@ -46,7 +46,7 @@ describe('defaultSettingsMiddleware', () => { const middleware = defaultSettingsMiddleware({ settings: { temperature: 0.7, - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, }, @@ -62,7 +62,7 @@ describe('defaultSettingsMiddleware', () => { }); expect(result.temperature).toBe(0.7); - expect(result.providerMetadata).toEqual({ + expect(result.providerOptions).toEqual({ anthropic: { cacheControl: { type: 'ephemeral' }, }, @@ -72,7 +72,7 @@ describe('defaultSettingsMiddleware', () => { it('should merge complex provider metadata objects', async () => { const middleware = defaultSettingsMiddleware({ settings: { - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, feature: { enabled: true }, @@ -88,7 +88,7 @@ describe('defaultSettingsMiddleware', () => { type: 'generate', params: { ...BASE_PARAMS, - providerMetadata: { + providerOptions: { anthropic: { feature: { enabled: false }, otherSetting: 'value', @@ -97,7 +97,7 @@ describe('defaultSettingsMiddleware', () => { }, }); - expect(result.providerMetadata).toEqual({ + expect(result.providerOptions).toEqual({ anthropic: { cacheControl: { type: 'ephemeral' }, feature: { enabled: false }, @@ -112,7 +112,7 @@ describe('defaultSettingsMiddleware', () => { it('should handle nested provider metadata objects correctly', async () => { const middleware = defaultSettingsMiddleware({ settings: { - providerMetadata: { + providerOptions: { anthropic: { tools: { retrieval: { enabled: true }, @@ -127,7 +127,7 @@ describe('defaultSettingsMiddleware', () => { type: 'generate', params: { ...BASE_PARAMS, - providerMetadata: { + providerOptions: { anthropic: { tools: { retrieval: { enabled: false }, @@ -138,7 +138,7 @@ describe('defaultSettingsMiddleware', () => { }, }); - expect(result.providerMetadata).toEqual({ + expect(result.providerOptions).toEqual({ anthropic: { tools: { retrieval: { enabled: false }, diff --git a/packages/ai/core/middleware/default-settings-middleware.ts b/packages/ai/core/middleware/default-settings-middleware.ts index dbbb446f00e4..aa1dc589ce22 100644 --- a/packages/ai/core/middleware/default-settings-middleware.ts +++ b/packages/ai/core/middleware/default-settings-middleware.ts @@ -1,7 +1,7 @@ import { 
LanguageModelV2CallOptions, LanguageModelV2Middleware, - LanguageModelV2ProviderMetadata, + LanguageModelV2ProviderOptions, } from '@ai-sdk/provider'; import { mergeObjects } from '../util/merge-objects'; @@ -13,7 +13,7 @@ export function defaultSettingsMiddleware({ }: { settings: Partial< LanguageModelV2CallOptions & { - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; } >; }): LanguageModelV2Middleware { @@ -23,9 +23,9 @@ export function defaultSettingsMiddleware({ return { ...settings, ...params, - providerMetadata: mergeObjects( - settings.providerMetadata, - params.providerMetadata, + providerOptions: mergeObjects( + settings.providerOptions, + params.providerOptions, ), // special case for temperature 0 diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts index d24601b07e61..6516c9ba841f 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts @@ -527,8 +527,8 @@ describe('convertToLanguageModelPrompt', () => { }); }); - describe('provider metadata', async () => { - it('should add provider metadata to messages', async () => { + describe('provider options', async () => { + it('should add provider options to messages', async () => { const result = await convertToLanguageModelPrompt({ prompt: { type: 'messages', @@ -564,7 +564,7 @@ describe('convertToLanguageModelPrompt', () => { providerMetadata: undefined, }, ], - providerMetadata: { + providerOptions: { 'test-provider': { 'key-a': 'test-value-1', 'key-b': 'test-value-2', @@ -782,7 +782,7 @@ describe('convertToLanguageModelMessage', () => { }); describe('reasoning parts', () => { - it('should pass through provider metadata', () => { + it('should pass through provider options', () => { const result = convertToLanguageModelMessage( { role: 'assistant', @@ -808,7 +808,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'reasoning', text: 'hello, world!', - providerMetadata: { + providerOptions: { 'test-provider': { 'key-a': 'test-value-1', 'key-b': 'test-value-2', @@ -870,7 +870,7 @@ describe('convertToLanguageModelMessage', () => { }); describe('tool call parts', () => { - it('should pass through provider metadata', () => { + it('should pass through provider options', () => { const result = convertToLanguageModelMessage( { role: 'assistant', @@ -900,7 +900,7 @@ describe('convertToLanguageModelMessage', () => { args: {}, toolCallId: 'toolCallId', toolName: 'toolName', - providerMetadata: { + providerOptions: { 'test-provider': { 'key-a': 'test-value-1', 'key-b': 'test-value-2', @@ -969,7 +969,7 @@ describe('convertToLanguageModelMessage', () => { }); }); - it('should handle provider metadata', async () => { + it('should handle provider options', async () => { const result = convertToLanguageModelMessage( { role: 'assistant', @@ -997,7 +997,7 @@ describe('convertToLanguageModelMessage', () => { type: 'file', data: 'dGVzdA==', mimeType: 'application/pdf', - providerMetadata: { + providerOptions: { 'test-provider': { 'key-a': 'test-value-1', 'key-b': 'test-value-2', @@ -1070,7 +1070,7 @@ describe('convertToLanguageModelMessage', () => { result: { some: 'result' }, toolCallId: 'toolCallId', toolName: 'toolName', - providerMetadata: { + providerOptions: { 'test-provider': { 'key-a': 'test-value-1', 'key-b': 'test-value-2', diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.ts 
b/packages/ai/core/prompt/convert-to-language-model-prompt.ts index d6ebece4b1a0..eeac30a672be 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.ts @@ -66,7 +66,7 @@ export function convertToLanguageModelMessage( return { role: 'system', content: message.content, - providerMetadata: + providerOptions: message.providerOptions ?? message.experimental_providerMetadata, }; } @@ -76,7 +76,7 @@ export function convertToLanguageModelMessage( return { role: 'user', content: [{ type: 'text', text: message.content }], - providerMetadata: + providerOptions: message.providerOptions ?? message.experimental_providerMetadata, }; } @@ -87,7 +87,7 @@ export function convertToLanguageModelMessage( .map(part => convertPartToLanguageModelPart(part, downloadedAssets)) // remove empty text parts: .filter(part => part.type !== 'text' || part.text !== ''), - providerMetadata: + providerOptions: message.providerOptions ?? message.experimental_providerMetadata, }; } @@ -97,7 +97,7 @@ export function convertToLanguageModelMessage( return { role: 'assistant', content: [{ type: 'text', text: message.content }], - providerMetadata: + providerOptions: message.providerOptions ?? message.experimental_providerMetadata, }; } @@ -123,7 +123,7 @@ export function convertToLanguageModelMessage( : convertDataContentToBase64String(part.data), filename: part.filename, mimeType: part.mimeType, - providerMetadata: providerOptions, + providerOptions, }; } case 'reasoning': { @@ -131,21 +131,21 @@ export function convertToLanguageModelMessage( type: 'reasoning', text: part.text, signature: part.signature, - providerMetadata: providerOptions, + providerOptions, }; } case 'redacted-reasoning': { return { type: 'redacted-reasoning', data: part.data, - providerMetadata: providerOptions, + providerOptions, }; } case 'text': { return { type: 'text' as const, text: part.text, - providerMetadata: providerOptions, + providerOptions, }; } case 'tool-call': { @@ -154,12 +154,12 @@ export function convertToLanguageModelMessage( toolCallId: part.toolCallId, toolName: part.toolName, args: part.args, - providerMetadata: providerOptions, + providerOptions, }; } } }), - providerMetadata: + providerOptions: message.providerOptions ?? message.experimental_providerMetadata, }; } @@ -174,10 +174,10 @@ export function convertToLanguageModelMessage( result: part.result, content: part.experimental_content, isError: part.isError, - providerMetadata: + providerOptions: part.providerOptions ?? part.experimental_providerMetadata, })), - providerMetadata: + providerOptions: message.providerOptions ?? message.experimental_providerMetadata, }; } @@ -266,7 +266,7 @@ function convertPartToLanguageModelPart( return { type: 'text', text: part.text, - providerMetadata: + providerOptions: part.providerOptions ?? part.experimental_providerMetadata, }; } @@ -347,7 +347,7 @@ function convertPartToLanguageModelPart( type: 'image', image: normalizedData, mimeType, - providerMetadata: + providerOptions: part.providerOptions ?? part.experimental_providerMetadata, }; } @@ -366,7 +366,7 @@ function convertPartToLanguageModelPart( : normalizedData, filename: part.filename, mimeType, - providerMetadata: + providerOptions: part.providerOptions ?? 
part.experimental_providerMetadata, }; } diff --git a/packages/ai/rsc/stream-ui/stream-ui.tsx b/packages/ai/rsc/stream-ui/stream-ui.tsx index ff25c9f11669..f7b59e2097d3 100644 --- a/packages/ai/rsc/stream-ui/stream-ui.tsx +++ b/packages/ai/rsc/stream-ui/stream-ui.tsx @@ -281,7 +281,7 @@ functionality that can be fully encapsulated in the provider. modelSupportsImageUrls: model.supportsImageUrls, modelSupportsUrl: model.supportsUrl?.bind(model), // support 'this' context }), - providerMetadata: providerOptions, + providerOptions, abortSignal, headers, }), diff --git a/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx b/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx index 8f98466ee9e5..be65786d7f3f 100644 --- a/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx +++ b/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx @@ -269,8 +269,8 @@ describe('options.providerMetadata', () => { it('should pass provider metadata to model', async () => { const result = await streamUI({ model: new MockLanguageModelV2({ - doStream: async ({ providerMetadata }) => { - expect(providerMetadata).toStrictEqual({ + doStream: async ({ providerOptions }) => { + expect(providerOptions).toStrictEqual({ aProvider: { someKey: 'someValue' }, }); diff --git a/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts b/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts index 8b8b45bb9bd9..5dee90d16099 100644 --- a/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts +++ b/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts @@ -639,7 +639,7 @@ describe('doStream', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { bedrock: { guardrailConfig: { guardrailIdentifier: '-1', @@ -863,7 +863,7 @@ describe('doStream', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { bedrock: { foo: 'bar', }, @@ -955,7 +955,7 @@ describe('doStream', () => { { role: 'system', content: 'System Prompt', - providerMetadata: { bedrock: { cachePoint: { type: 'default' } } }, + providerOptions: { bedrock: { cachePoint: { type: 'default' } } }, }, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, ], @@ -1279,7 +1279,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { bedrock: { guardrailConfig: { guardrailIdentifier: '-1', @@ -1485,7 +1485,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { bedrock: { foo: 'bar', }, @@ -1539,7 +1539,7 @@ describe('doGenerate', () => { { role: 'system', content: 'System Prompt', - providerMetadata: { bedrock: { cachePoint: { type: 'default' } } }, + providerOptions: { bedrock: { cachePoint: { type: 'default' } } }, }, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, ], @@ -1562,7 +1562,7 @@ describe('doGenerate', () => { reasoningContent: { reasoningText: { text: reasoningText, - signature: signature, + signature, }, }, }, diff --git a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts index 3bbab5140304..4954f039d89f 100644 --- a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts +++ b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts @@ -65,7 +65,7 @@ export class BedrockChatLanguageModel implements LanguageModelV2 { stopSequences, responseFormat, 
seed, - providerMetadata, + providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]): { command: BedrockConverseInput; warnings: LanguageModelV2CallWarning[]; } { @@ -115,7 +115,7 @@ export class BedrockChatLanguageModel implements LanguageModelV2 { // Parse thinking options from provider metadata const reasoningConfigOptions = BedrockReasoningConfigOptionsSchema.safeParse( - providerMetadata?.bedrock?.reasoning_config, + providerOptions?.bedrock?.reasoning_config, ); if (!reasoningConfigOptions.success) { @@ -183,7 +183,7 @@ export class BedrockChatLanguageModel implements LanguageModelV2 { inferenceConfig, }), messages, - ...providerMetadata?.bedrock, + ...providerOptions?.bedrock, }; switch (type) { diff --git a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts index c559f9d7506b..2f79a06698bf 100644 --- a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts +++ b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts @@ -24,7 +24,7 @@ describe('system messages', () => { { role: 'system', content: 'Hello', - providerMetadata: { bedrock: { cachePoint: { type: 'default' } } }, + providerOptions: { bedrock: { cachePoint: { type: 'default' } } }, }, ]); @@ -99,7 +99,7 @@ describe('user messages', () => { { role: 'user', content: [{ type: 'text', text: 'Hello' }], - providerMetadata: { bedrock: { cachePoint: { type: 'default' } } }, + providerOptions: { bedrock: { cachePoint: { type: 'default' } } }, }, ]); @@ -233,7 +233,7 @@ describe('assistant messages', () => { { role: 'assistant', content: [{ type: 'text', text: 'Hello' }], - providerMetadata: { bedrock: { cachePoint: { type: 'default' } } }, + providerOptions: { bedrock: { cachePoint: { type: 'default' } } }, }, ]); diff --git a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts index 8e840812150e..d957638a38e6 100644 --- a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts +++ b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts @@ -53,7 +53,7 @@ export function convertToBedrockChatMessages(prompt: LanguageModelV2Prompt): { for (const message of block.messages) { system.push({ text: message.content }); - if (getCachePoint(message.providerMetadata)) { + if (getCachePoint(message.providerOptions)) { system.push(BEDROCK_CACHE_POINT); } } @@ -65,7 +65,7 @@ export function convertToBedrockChatMessages(prompt: LanguageModelV2Prompt): { const bedrockContent: BedrockUserMessage['content'] = []; for (const message of block.messages) { - const { role, content, providerMetadata } = message; + const { role, content, providerOptions } = message; switch (role) { case 'user': { for (let j = 0; j < content.length; j++) { @@ -179,7 +179,7 @@ export function convertToBedrockChatMessages(prompt: LanguageModelV2Prompt): { } } - if (getCachePoint(providerMetadata)) { + if (getCachePoint(providerOptions)) { bedrockContent.push(BEDROCK_CACHE_POINT); } } @@ -262,7 +262,7 @@ export function convertToBedrockChatMessages(prompt: LanguageModelV2Prompt): { } } } - if (getCachePoint(message.providerMetadata)) { + if (getCachePoint(message.providerOptions)) { bedrockContent.push(BEDROCK_CACHE_POINT); } } diff --git a/packages/anthropic/src/anthropic-messages-language-model.test.ts b/packages/anthropic/src/anthropic-messages-language-model.test.ts index 504649a5468d..dd1f5b6b343d 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.test.ts +++
b/packages/anthropic/src/anthropic-messages-language-model.test.ts @@ -75,7 +75,7 @@ describe('AnthropicMessagesLanguageModel', () => { temperature: 0.5, topP: 0.7, topK: 0.1, - providerMetadata: { + providerOptions: { anthropic: { thinking: { type: 'enabled', budgetTokens: 1000 }, } satisfies AnthropicProviderOptions, @@ -476,7 +476,7 @@ describe('AnthropicMessagesLanguageModel', () => { { role: 'user', content: [{ type: 'text', text: 'Hello' }], - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, }, diff --git a/packages/anthropic/src/anthropic-messages-language-model.ts b/packages/anthropic/src/anthropic-messages-language-model.ts index 1e21fa8b5469..c3e99278bc23 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.ts +++ b/packages/anthropic/src/anthropic-messages-language-model.ts @@ -77,7 +77,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, - providerMetadata: providerOptions, + providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]) { const type = mode.type; diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts index 75b154ac2c58..b6fb61813ba2 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts @@ -746,7 +746,7 @@ describe('cache control', () => { { role: 'system', content: 'system message', - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' } }, }, }, @@ -781,7 +781,7 @@ describe('cache control', () => { { type: 'text', text: 'test', - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, }, @@ -822,7 +822,7 @@ describe('cache control', () => { { type: 'text', text: 'part1' }, { type: 'text', text: 'part2' }, ], - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, }, @@ -869,7 +869,7 @@ describe('cache control', () => { { type: 'text', text: 'test', - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, }, @@ -914,7 +914,7 @@ describe('cache control', () => { toolCallId: 'test-id', toolName: 'test-tool', args: { some: 'arg' }, - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, }, @@ -959,7 +959,7 @@ describe('cache control', () => { { type: 'text', text: 'part1' }, { type: 'text', text: 'part2' }, ], - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, }, @@ -1008,7 +1008,7 @@ describe('cache control', () => { toolName: 'test', toolCallId: 'test', result: { test: 'test' }, - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, }, @@ -1061,7 +1061,7 @@ describe('cache control', () => { result: { test: 'part2' }, }, ], - providerMetadata: { + providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' }, }, diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts index 72fa374c9dc8..b3efcb695c93 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts @@ -59,10 +59,10 @@ export function convertToAnthropicMessagesPrompt({ }); } - system = block.messages.map(({ content, providerMetadata }) => ({ + system = block.messages.map(({ content,
providerOptions }) => ({ type: 'text', text: content, - cache_control: getCacheControl(providerMetadata), + cache_control: getCacheControl(providerOptions), })); break; @@ -85,9 +85,9 @@ export function convertToAnthropicMessagesPrompt({ const isLastPart = j === content.length - 1; const cacheControl = - getCacheControl(part.providerMetadata) ?? + getCacheControl(part.providerOptions) ?? (isLastPart - ? getCacheControl(message.providerMetadata) + ? getCacheControl(message.providerOptions) : undefined); switch (part.type) { @@ -163,9 +163,9 @@ export function convertToAnthropicMessagesPrompt({ const isLastPart = i === content.length - 1; const cacheControl = - getCacheControl(part.providerMetadata) ?? + getCacheControl(part.providerOptions) ?? (isLastPart - ? getCacheControl(message.providerMetadata) + ? getCacheControl(message.providerOptions) : undefined); const toolResultContent = @@ -232,9 +232,9 @@ export function convertToAnthropicMessagesPrompt({ // for the last part of a message, // check also if the message has cache control. const cacheControl = - getCacheControl(part.providerMetadata) ?? + getCacheControl(part.providerOptions) ?? (isLastContentPart - ? getCacheControl(message.providerMetadata) + ? getCacheControl(message.providerOptions) : undefined); switch (part.type) { diff --git a/packages/google/src/google-generative-ai-language-model.test.ts b/packages/google/src/google-generative-ai-language-model.test.ts index 75dcc2661b8e..4fdfad776f4c 100644 --- a/packages/google/src/google-generative-ai-language-model.test.ts +++ b/packages/google/src/google-generative-ai-language-model.test.ts @@ -426,7 +426,7 @@ describe('doGenerate', () => { ], seed: 123, temperature: 0.5, - providerMetadata: { + providerOptions: { google: { foo: 'bar', responseModalities: ['TEXT', 'IMAGE'] }, }, }); @@ -1230,7 +1230,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { google: { responseModalities: ['TEXT', 'IMAGE'], }, @@ -1910,7 +1910,7 @@ describe('doStream', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { google: { foo: 'bar', responseModalities: ['TEXT', 'IMAGE'] }, }, }); diff --git a/packages/google/src/google-generative-ai-language-model.ts b/packages/google/src/google-generative-ai-language-model.ts index 23685bac9afe..8ce4ef73ce54 100644 --- a/packages/google/src/google-generative-ai-language-model.ts +++ b/packages/google/src/google-generative-ai-language-model.ts @@ -79,7 +79,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, - providerMetadata, + providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]) { const type = mode.type; @@ -87,7 +87,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV2 { const googleOptions = parseProviderOptions({ provider: 'google', - providerOptions: providerMetadata, + providerOptions, schema: googleGenerativeAIProviderOptionsSchema, }); diff --git a/packages/groq/src/groq-chat-language-model.test.ts b/packages/groq/src/groq-chat-language-model.test.ts index d998efa0af19..da088ee9cec6 100644 --- a/packages/groq/src/groq-chat-language-model.test.ts +++ b/packages/groq/src/groq-chat-language-model.test.ts @@ -269,7 +269,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { groq: { reasoningFormat: 'hidden' }, }, }); diff --git
a/packages/groq/src/groq-chat-language-model.ts b/packages/groq/src/groq-chat-language-model.ts index 9236323b815e..d690e515d814 100644 --- a/packages/groq/src/groq-chat-language-model.ts +++ b/packages/groq/src/groq-chat-language-model.ts @@ -75,7 +75,7 @@ export class GroqChatLanguageModel implements LanguageModelV2 { responseFormat, seed, stream, - providerMetadata, + providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0] & { stream: boolean; }) { @@ -104,7 +104,7 @@ export class GroqChatLanguageModel implements LanguageModelV2 { const groqOptions = parseProviderOptions({ provider: 'groq', - providerOptions: providerMetadata, + providerOptions, schema: z.object({ reasoningFormat: z.enum(['parsed', 'raw', 'hidden']).nullish(), }), diff --git a/packages/mistral/src/mistral-chat-language-model.ts b/packages/mistral/src/mistral-chat-language-model.ts index 9acd8a729fed..3ebd59b80f5c 100644 --- a/packages/mistral/src/mistral-chat-language-model.ts +++ b/packages/mistral/src/mistral-chat-language-model.ts @@ -70,7 +70,7 @@ export class MistralChatLanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, - providerMetadata, + providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]) { const type = mode.type; @@ -134,8 +134,8 @@ export class MistralChatLanguageModel implements LanguageModelV2 { responseFormat?.type === 'json' ? { type: 'json_object' } : undefined, // mistral-specific provider options: - document_image_limit: providerMetadata?.mistral?.documentImageLimit, - document_page_limit: providerMetadata?.mistral?.documentPageLimit, + document_image_limit: providerOptions?.mistral?.documentImageLimit, + document_page_limit: providerOptions?.mistral?.documentPageLimit, // messages: messages: convertToMistralChatMessages(prompt), diff --git a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts index b5956b46f5df..27d6ca3e9447 100644 --- a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts +++ b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts @@ -126,7 +126,7 @@ describe('provider-specific metadata merging', () =>
{ { role: 'system', content: 'Hello', - providerMetadata: { + providerOptions: { someOtherProvider: { shouldBeIgnored: true, }, @@ -304,7 +304,7 @@ describe('provider-specific metadata merging', () => { { type: 'text', text: 'Hello from part 1', - providerMetadata: { + providerOptions: { openaiCompatible: { sentiment: 'positive' }, leftoverKey: { foo: 'some leftover data' }, }, @@ -313,12 +313,12 @@ describe('provider-specific metadata merging', () => { type: 'image', image: new Uint8Array([0, 1, 2, 3]), mimeType: 'image/png', - providerMetadata: { + providerOptions: { openaiCompatible: { alt_text: 'A sample image' }, }, }, ], - providerMetadata: { + providerOptions: { openaiCompatible: { priority: 'high' }, }, }, @@ -327,7 +327,7 @@ describe('provider-specific metadata merging', () => { expect(result).toEqual([ { role: 'user', - priority: 'high', // hoisted from message-level providerMetadata + priority: 'high', // hoisted from message-level providerOptions content: [ { type: 'text', @@ -380,7 +380,7 @@ describe('provider-specific metadata merging', () => { toolCallId: 'call1', toolName: 'searchTool', args: { query: 'Weather' }, - providerMetadata: { + providerOptions: { openaiCompatible: { function_call_reason: 'user request' }, }, }, @@ -426,7 +426,7 @@ describe('provider-specific metadata merging', () => { const result = convertToOpenAICompatibleChatMessages([ { role: 'tool', - providerMetadata: { + providerOptions: { // this just gets omitted as we prioritize content-level metadata openaiCompatible: { responseTier: 'detailed' }, }, @@ -441,7 +441,7 @@ describe('provider-specific metadata merging', () => { type: 'tool-result', toolCallId: 'call123', toolName: 'calculator', - providerMetadata: { + providerOptions: { openaiCompatible: { partial: true }, }, result: { stepTwo: 'data chunk 2' }, @@ -469,7 +469,7 @@ describe('provider-specific metadata merging', () => { const result = convertToOpenAICompatibleChatMessages([ { role: 'user', - providerMetadata: { + providerOptions: { openaiCompatible: { messageLevel: 'global-metadata' }, leftoverForMessage: { x: 123 }, }, @@ -477,7 +477,7 @@ describe('provider-specific metadata merging', () => { { type: 'text', text: 'Part A', - providerMetadata: { + providerOptions: { openaiCompatible: { textPartLevel: 'localized' }, leftoverForText: { info: 'text leftover' }, }, @@ -486,7 +486,7 @@ describe('provider-specific metadata merging', () => { type: 'image', image: new Uint8Array([9, 8, 7, 6]), mimeType: 'image/png', - providerMetadata: { + providerOptions: { openaiCompatible: { imagePartLevel: 'image-data' }, }, }, @@ -520,7 +520,7 @@ describe('provider-specific metadata merging', () => { const result = convertToOpenAICompatibleChatMessages([ { role: 'assistant', - providerMetadata: { + providerOptions: { openaiCompatible: { globalPriority: 'high' }, }, content: [ @@ -530,7 +530,7 @@ describe('provider-specific metadata merging', () => { toolCallId: 'callXYZ', toolName: 'awesomeTool', args: { param: 'someValue' }, - providerMetadata: { + providerOptions: { openaiCompatible: { toolPriority: 'critical', }, @@ -564,7 +564,7 @@ describe('provider-specific metadata merging', () => { const result = convertToOpenAICompatibleChatMessages([ { role: 'assistant', - providerMetadata: { + providerOptions: { openaiCompatible: { cacheControl: { type: 'default' }, sharedKey: 'assistantLevel', @@ -576,7 +576,7 @@ describe('provider-specific metadata merging', () => { toolCallId: 'collisionToolCall', toolName: 'collider', args: { num: 42 }, - providerMetadata: { + 
providerOptions: { openaiCompatible: { cacheControl: { type: 'ephemeral' }, // overwrites top-level sharedKey: 'toolLevel', diff --git a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts index 85c271e41cd0..e428e9423c74 100644 --- a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts +++ b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts @@ -7,9 +7,9 @@ import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { OpenAICompatibleChatPrompt } from './openai-compatible-api-types'; function getOpenAIMetadata(message: { - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderMetadata; }) { - return message?.providerMetadata?.openaiCompatible ?? {}; + return message?.providerOptions?.openaiCompatible ?? {}; } export function convertToOpenAICompatibleChatMessages( diff --git a/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts b/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts index b8597a3cee65..717eb4fb660f 100644 --- a/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts +++ b/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts @@ -336,7 +336,7 @@ describe('doGenerate', () => { await provider('grok-beta').doGenerate({ inputFormat: 'prompt', mode: { type: 'regular' }, - providerMetadata: { + providerOptions: { 'test-provider': { someCustomOption: 'test-value', }, @@ -357,7 +357,7 @@ describe('doGenerate', () => { await provider('grok-beta').doGenerate({ inputFormat: 'prompt', mode: { type: 'regular' }, - providerMetadata: { + providerOptions: { notThisProviderName: { someCustomOption: 'test-value', }, @@ -1683,7 +1683,7 @@ describe('doStream', () => { await provider('grok-beta').doStream({ inputFormat: 'prompt', mode: { type: 'regular' }, - providerMetadata: { + providerOptions: { 'test-provider': { someCustomOption: 'test-value', }, @@ -1705,7 +1705,7 @@ describe('doStream', () => { await provider('grok-beta').doStream({ inputFormat: 'prompt', mode: { type: 'regular' }, - providerMetadata: { + providerOptions: { notThisProviderName: { someCustomOption: 'test-value', }, diff --git a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts index caef59f1d6aa..e637be09c7fc 100644 --- a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts +++ b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts @@ -109,7 +109,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 { topK, frequencyPenalty, presencePenalty, - providerMetadata, + providerOptions, stopSequences, responseFormat, seed, @@ -168,7 +168,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 { stop: stopSequences, seed, - ...providerMetadata?.[this.providerOptionsName], + ...providerOptions?.[this.providerOptionsName], // messages: messages: convertToOpenAICompatibleChatMessages(prompt), diff --git a/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts b/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts index accef4e986a4..908851f527a3 100644 --- a/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts +++ 
b/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts @@ -284,7 +284,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { 'test-provider': { someCustomOption: 'test-value', }, @@ -305,7 +305,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { notThisProviderName: { someCustomOption: 'test-value', }, @@ -552,7 +552,7 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', mode: { type: 'regular' }, - providerMetadata: { + providerOptions: { 'test-provider': { someCustomOption: 'test-value', }, @@ -574,7 +574,7 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', mode: { type: 'regular' }, - providerMetadata: { + providerOptions: { notThisProviderName: { someCustomOption: 'test-value', }, diff --git a/packages/openai-compatible/src/openai-compatible-completion-language-model.ts b/packages/openai-compatible/src/openai-compatible-completion-language-model.ts index 530c8fd7c720..ecca2544bdc0 100644 --- a/packages/openai-compatible/src/openai-compatible-completion-language-model.ts +++ b/packages/openai-compatible/src/openai-compatible-completion-language-model.ts @@ -89,7 +89,7 @@ export class OpenAICompatibleCompletionLanguageModel stopSequences: userStopSequences, responseFormat, seed, - providerMetadata, + providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]) { const type = mode.type; @@ -132,7 +132,7 @@ export class OpenAICompatibleCompletionLanguageModel frequency_penalty: frequencyPenalty, presence_penalty: presencePenalty, seed, - ...providerMetadata?.[this.providerOptionsName], + ...providerOptions?.[this.providerOptionsName], // prompt: prompt: completionPrompt, diff --git a/packages/openai/src/convert-to-openai-chat-messages.test.ts b/packages/openai/src/convert-to-openai-chat-messages.test.ts index 8df86d657bcc..856f3f8c3a92 100644 --- a/packages/openai/src/convert-to-openai-chat-messages.test.ts +++ b/packages/openai/src/convert-to-openai-chat-messages.test.ts @@ -87,7 +87,7 @@ describe('user messages', () => { type: 'image', image: new Uint8Array([0, 1, 2, 3]), mimeType: 'image/png', - providerMetadata: { + providerOptions: { openai: { imageDetail: 'low', }, diff --git a/packages/openai/src/convert-to-openai-chat-messages.ts b/packages/openai/src/convert-to-openai-chat-messages.ts index 10c29145c062..1bd4c37fd5dc 100644 --- a/packages/openai/src/convert-to-openai-chat-messages.ts +++ b/packages/openai/src/convert-to-openai-chat-messages.ts @@ -75,7 +75,7 @@ export function convertToOpenAIChatMessages({ };base64,${convertUint8ArrayToBase64(part.image)}`, // OpenAI specific extension: image detail - detail: part.providerMetadata?.openai?.imageDetail, + detail: part.providerOptions?.openai?.imageDetail, }, }; } diff --git a/packages/openai/src/openai-chat-language-model.test.ts b/packages/openai/src/openai-chat-language-model.test.ts index e69ae35bf5b0..8e9d960d5842 100644 --- a/packages/openai/src/openai-chat-language-model.test.ts +++ b/packages/openai/src/openai-chat-language-model.test.ts @@ -423,7 +423,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { reasoningEffort: 'low' }, }, }); @@ -462,7 +462,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata:
{ + providerOptions: { openai: { reasoningEffort: 'low' }, }, }); @@ -1271,7 +1271,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { maxCompletionTokens: 255, }, @@ -1292,7 +1292,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { prediction: { type: 'content', @@ -1319,7 +1319,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { store: true, }, @@ -1340,7 +1340,7 @@ describe('doGenerate', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { metadata: { custom: 'value', @@ -2253,7 +2253,7 @@ describe('doStream', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { store: true, }, @@ -2276,7 +2276,7 @@ describe('doStream', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { metadata: { custom: 'value', diff --git a/packages/openai/src/openai-chat-language-model.ts b/packages/openai/src/openai-chat-language-model.ts index 7b25ab229643..9d2a9a7afc0d 100644 --- a/packages/openai/src/openai-chat-language-model.ts +++ b/packages/openai/src/openai-chat-language-model.ts @@ -93,7 +93,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, - providerMetadata, + providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]) { const type = mode.type; @@ -190,12 +190,12 @@ export class OpenAIChatLanguageModel implements LanguageModelV2 { // openai specific settings: // TODO remove in next major version; we auto-map maxTokens now - max_completion_tokens: providerMetadata?.openai?.maxCompletionTokens, - store: providerMetadata?.openai?.store, - metadata: providerMetadata?.openai?.metadata, - prediction: providerMetadata?.openai?.prediction, + max_completion_tokens: providerOptions?.openai?.maxCompletionTokens, + store: providerOptions?.openai?.store, + metadata: providerOptions?.openai?.metadata, + prediction: providerOptions?.openai?.prediction, reasoning_effort: - providerMetadata?.openai?.reasoningEffort ?? + providerOptions?.openai?.reasoningEffort ??
this.settings.reasoningEffort, // messages: diff --git a/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts b/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts index 4b63ff4d0f88..30026149fd3e 100644 --- a/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts +++ b/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts @@ -149,7 +149,7 @@ describe('convertToOpenAIResponsesMessages', () => { type: 'image', image: new Uint8Array([0, 1, 2, 3]), mimeType: 'image/png', - providerMetadata: { + providerOptions: { openai: { imageDetail: 'low', }, diff --git a/packages/openai/src/responses/convert-to-openai-responses-messages.ts b/packages/openai/src/responses/convert-to-openai-responses-messages.ts index 3f0dc99cbe3a..f662cb6d96c6 100644 --- a/packages/openai/src/responses/convert-to-openai-responses-messages.ts +++ b/packages/openai/src/responses/convert-to-openai-responses-messages.ts @@ -67,7 +67,7 @@ export function convertToOpenAIResponsesMessages({ };base64,${convertUint8ArrayToBase64(part.image)}`, // OpenAI specific extension: image detail - detail: part.providerMetadata?.openai?.imageDetail, + detail: part.providerOptions?.openai?.imageDetail, }; } case 'file': { diff --git a/packages/openai/src/responses/openai-responses-language-model.test.ts b/packages/openai/src/responses/openai-responses-language-model.test.ts index ce1e5b35638c..c0c7f50d558d 100644 --- a/packages/openai/src/responses/openai-responses-language-model.test.ts +++ b/packages/openai/src/responses/openai-responses-language-model.test.ts @@ -332,7 +332,7 @@ describe('OpenAIResponsesLanguageModel', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { parallelToolCalls: false, }, @@ -355,7 +355,7 @@ describe('OpenAIResponsesLanguageModel', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { store: false, }, @@ -378,7 +378,7 @@ describe('OpenAIResponsesLanguageModel', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { store: false, }, @@ -401,7 +401,7 @@ describe('OpenAIResponsesLanguageModel', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { previousResponseId: 'resp_123', }, @@ -424,7 +424,7 @@ describe('OpenAIResponsesLanguageModel', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { user: 'user_123', }, @@ -447,7 +447,7 @@ describe('OpenAIResponsesLanguageModel', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { reasoningEffort: 'low', }, @@ -472,7 +472,7 @@ describe('OpenAIResponsesLanguageModel', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { instructions: 'You are a friendly assistant.', }, @@ -617,7 +617,7 @@ describe('OpenAIResponsesLanguageModel', () => { }, }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { openai: { strictSchemas: false, }, diff --git a/packages/openai/src/responses/openai-responses-language-model.ts b/packages/openai/src/responses/openai-responses-language-model.ts index 41ae402013a1..c6531c8f769f 100644 --- 
a/packages/openai/src/responses/openai-responses-language-model.ts +++ b/packages/openai/src/responses/openai-responses-language-model.ts @@ -49,7 +49,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 { frequencyPenalty, seed, prompt, - providerMetadata, + providerOptions, responseFormat, }: Parameters<LanguageModelV2['doGenerate']>[0]) { const warnings: LanguageModelV2CallWarning[] = []; @@ -101,7 +101,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 { const openaiOptions = parseProviderOptions({ provider: 'openai', - providerOptions: providerMetadata, + providerOptions, schema: openaiResponsesProviderOptionsSchema, }); diff --git a/packages/perplexity/src/perplexity-language-model.test.ts b/packages/perplexity/src/perplexity-language-model.test.ts index 39972430d3d6..ecee90ff6385 100644 --- a/packages/perplexity/src/perplexity-language-model.test.ts +++ b/packages/perplexity/src/perplexity-language-model.test.ts @@ -130,7 +130,7 @@ describe('PerplexityLanguageModel', () => { inputFormat: 'prompt', mode: { type: 'regular' }, prompt: TEST_PROMPT, - providerMetadata: { + providerOptions: { perplexity: { search_recency_filter: 'month', return_images: true, diff --git a/packages/perplexity/src/perplexity-language-model.ts b/packages/perplexity/src/perplexity-language-model.ts index 62e66d7952a7..3f948c001c81 100644 --- a/packages/perplexity/src/perplexity-language-model.ts +++ b/packages/perplexity/src/perplexity-language-model.ts @@ -15,9 +15,9 @@ import { postJsonToApi, } from '@ai-sdk/provider-utils'; import { z } from 'zod'; -import { PerplexityLanguageModelId } from './perplexity-language-model-settings'; import { convertToPerplexityMessages } from './convert-to-perplexity-messages'; import { mapPerplexityFinishReason } from './map-perplexity-finish-reason'; +import { PerplexityLanguageModelId } from './perplexity-language-model-settings'; type PerplexityChatConfig = { baseURL: string; @@ -57,7 +57,7 @@ export class PerplexityLanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, - providerMetadata, + providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]) { const type = mode.type; @@ -106,7 +106,7 @@ : undefined, // provider extensions - ...(providerMetadata?.perplexity ?? {}), + ...(providerOptions?.perplexity ??
{}), // messages: messages: convertToPerplexityMessages(prompt), diff --git a/packages/provider/src/language-model/v2/index.ts b/packages/provider/src/language-model/v2/index.ts index 673a8d0f53cf..93c4f47e40b0 100644 --- a/packages/provider/src/language-model/v2/index.ts +++ b/packages/provider/src/language-model/v2/index.ts @@ -1,4 +1,3 @@ -export * from './language-model-v2-source'; export * from './language-model-v2'; export * from './language-model-v2-call-options'; export * from './language-model-v2-call-warning'; @@ -9,4 +8,6 @@ export * from './language-model-v2-logprobs'; export * from './language-model-v2-prompt'; export * from './language-model-v2-provider-defined-tool'; export * from './language-model-v2-provider-metadata'; +export * from './language-model-v2-provider-options'; +export * from './language-model-v2-source'; export * from './language-model-v2-tool-choice'; diff --git a/packages/provider/src/language-model/v2/language-model-v2-call-options.ts b/packages/provider/src/language-model/v2/language-model-v2-call-options.ts index 23aa48e9d2f0..490f10baab18 100644 --- a/packages/provider/src/language-model/v2/language-model-v2-call-options.ts +++ b/packages/provider/src/language-model/v2/language-model-v2-call-options.ts @@ -3,7 +3,7 @@ import { LanguageModelV2CallSettings } from './language-model-v2-call-settings'; import { LanguageModelV2FunctionTool } from './language-model-v2-function-tool'; import { LanguageModelV2Prompt } from './language-model-v2-prompt'; import { LanguageModelV2ProviderDefinedTool } from './language-model-v2-provider-defined-tool'; -import { LanguageModelV2ProviderMetadata } from './language-model-v2-provider-metadata'; +import { LanguageModelV2ProviderOptions } from './language-model-v2-provider-options'; import { LanguageModelV2ToolChoice } from './language-model-v2-tool-choice'; export type LanguageModelV2CallOptions = LanguageModelV2CallSettings & { @@ -82,9 +82,9 @@ the language model interface. prompt: LanguageModelV2Prompt; /** -Additional provider-specific metadata. -The metadata is passed through to the provider from the AI SDK and enables -provider-specific functionality that can be fully encapsulated in the provider. + * Additional provider-specific options. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. */ - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; }; diff --git a/packages/provider/src/language-model/v2/language-model-v2-prompt.ts b/packages/provider/src/language-model/v2/language-model-v2-prompt.ts index bdd145646412..f7beaa4c2133 100644 --- a/packages/provider/src/language-model/v2/language-model-v2-prompt.ts +++ b/packages/provider/src/language-model/v2/language-model-v2-prompt.ts @@ -1,4 +1,4 @@ -import { LanguageModelV2ProviderMetadata } from './language-model-v2-provider-metadata'; +import { LanguageModelV2ProviderOptions } from './language-model-v2-provider-options'; /** A prompt is a list of messages. @@ -44,11 +44,11 @@ export type LanguageModelV2Message = } ) & { /** - * Additional provider-specific metadata. They are passed through + * Additional provider-specific options. They are passed through * to the provider from the AI SDK and enable provider-specific * functionality that can be fully encapsulated in the provider. 
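To make the direction of the rename concrete, here is a minimal caller-side sketch, assuming the call shape used by the tests in this patch (the `openai` namespace and `reasoningEffort` value are taken from the test diffs above; the object itself is illustrative and not part of the patch):

```ts
import { LanguageModelV2CallOptions } from '@ai-sdk/provider';

// Caller side: provider-specific input is namespaced by provider name and
// passed through the renamed `providerOptions` field (formerly
// `providerMetadata`).
const options: LanguageModelV2CallOptions = {
  inputFormat: 'prompt',
  mode: { type: 'regular' }, // still required here; removed in the next patch
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
  providerOptions: {
    openai: { reasoningEffort: 'low' },
  },
};
```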
*/ - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; }; /** @@ -63,11 +63,11 @@ The text content. text: string; /** - * Additional provider-specific metadata. They are passed through + * Additional provider-specific options. They are passed through * to the provider from the AI SDK and enable provider-specific * functionality that can be fully encapsulated in the provider. */ - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; } /** @@ -87,11 +87,11 @@ An optional signature for verifying that the reasoning originated from the model signature?: string; /** -Additional provider-specific metadata. They are passed through -to the provider from the AI SDK and enable provider-specific -functionality that can be fully encapsulated in the provider. + * Additional provider-specific options. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. */ - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; } /** @@ -106,11 +106,11 @@ Redacted reasoning data. data: string; /** -Additional provider-specific metadata. They are passed through -to the provider from the AI SDK and enable provider-specific -functionality that can be fully encapsulated in the provider. + * Additional provider-specific options. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. */ - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; } /** @@ -131,11 +131,11 @@ Optional mime type of the image. mimeType?: string; /** - * Additional provider-specific metadata. They are passed through + * Additional provider-specific options. They are passed through * to the provider from the AI SDK and enable provider-specific * functionality that can be fully encapsulated in the provider. */ - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; } /** @@ -162,11 +162,11 @@ Mime type of the file. mimeType: string; /** - * Additional provider-specific metadata. They are passed through + * Additional provider-specific options. They are passed through * to the provider from the AI SDK and enable provider-specific * functionality that can be fully encapsulated in the provider. */ - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; } /** @@ -191,11 +191,11 @@ Arguments of the tool call. This is a JSON-serializable object that matches the args: unknown; /** - * Additional provider-specific metadata. They are passed through + * Additional provider-specific options. They are passed through * to the provider from the AI SDK and enable provider-specific * functionality that can be fully encapsulated in the provider. */ - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; } /** @@ -253,9 +253,9 @@ Mime type of the image. >; /** - * Additional provider-specific metadata. They are passed through + * Additional provider-specific options. They are passed through * to the provider from the AI SDK and enable provider-specific * functionality that can be fully encapsulated in the provider. 
*/ - providerMetadata?: LanguageModelV2ProviderMetadata; + providerOptions?: LanguageModelV2ProviderOptions; } diff --git a/packages/provider/src/language-model/v2/language-model-v2-provider-metadata.ts b/packages/provider/src/language-model/v2/language-model-v2-provider-metadata.ts index 47979b00d677..37df05f77e06 100644 --- a/packages/provider/src/language-model/v2/language-model-v2-provider-metadata.ts +++ b/packages/provider/src/language-model/v2/language-model-v2-provider-metadata.ts @@ -1,9 +1,11 @@ import { JSONValue } from '../../json-value/json-value'; /** - * Additional provider-specific metadata. They are passed through - * to the provider from the AI SDK and enable provider-specific - * functionality that can be fully encapsulated in the provider. + * Additional provider-specific metadata. + * Metadata are additional outputs from the provider. + * They are passed through to the provider from the AI SDK + * and enable provider-specific functionality + * that can be fully encapsulated in the provider. * * This enables us to quickly ship provider-specific functionality * without affecting the core AI SDK. @@ -19,7 +21,6 @@ import { JSONValue } from '../../json-value/json-value'; * } * ``` */ -// TODO language model v2 separate provider metadata (output) from provider options (input) export type LanguageModelV2ProviderMetadata = Record< string, Record<string, JSONValue> diff --git a/packages/provider/src/language-model/v2/language-model-v2-provider-options.ts b/packages/provider/src/language-model/v2/language-model-v2-provider-options.ts new file mode 100644 index 000000000000..0e9731abf7b9 --- /dev/null +++ b/packages/provider/src/language-model/v2/language-model-v2-provider-options.ts @@ -0,0 +1,27 @@ +import { JSONValue } from '../../json-value/json-value'; + +/** + * Additional provider-specific options. + * Options are additional input to the provider. + * They are passed through to the provider from the AI SDK + * and enable provider-specific functionality + * that can be fully encapsulated in the provider. + * + * This enables us to quickly ship provider-specific functionality + * without affecting the core AI SDK. + * + * The outer record is keyed by the provider name, and the inner + * record is keyed by the provider-specific options key.
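The consuming side of this two-level record is a namespaced lookup. Below is a minimal provider-side sketch that mirrors the `parseProviderOptions` call in the OpenAI responses model earlier in this patch; the `anthropic` provider name and the zod schema are illustrative assumptions, not code from this patch:

```ts
import { LanguageModelV2ProviderOptions } from '@ai-sdk/provider';
import { parseProviderOptions } from '@ai-sdk/provider-utils';
import { z } from 'zod';

// A provider only reads the record keyed by its own name and validates it
// against its own schema; options addressed to other providers are ignored.
function readAnthropicOptions(providerOptions?: LanguageModelV2ProviderOptions) {
  return parseProviderOptions({
    provider: 'anthropic',
    providerOptions,
    schema: z.object({
      cacheControl: z.object({ type: z.string() }).optional(),
    }),
  });
}
```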
+ * + * ```ts + * { + * "anthropic": { + * "cacheControl": { "type": "ephemeral" } + * } + * } + * ``` + */ +export type LanguageModelV2ProviderOptions = Record< + string, + Record<string, JSONValue> +>; From c57e248934fb380799bb7eebc23d2d630c148863 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Mon, 7 Apr 2025 17:27:55 +0200 Subject: [PATCH 0039/1307] chore (provider): remove mode (#5580) --- .changeset/thick-chairs-remain.md | 5 + examples/ai-core/src/test/response-format.ts | 1 - .../generate-object/generate-object.test.ts | 75 ++-- .../core/generate-object/generate-object.ts | 12 +- .../generate-object/stream-object.test.ts | 63 +-- .../ai/core/generate-object/stream-object.ts | 12 +- .../core/generate-text/generate-text.test.ts | 243 +++++------- .../ai/core/generate-text/generate-text.ts | 12 +- .../ai/core/generate-text/stream-text.test.ts | 214 ++++------- packages/ai/core/generate-text/stream-text.ts | 14 +- .../default-settings-middleware.test.ts | 1 - .../middleware/wrap-language-model.test.ts | 8 - packages/ai/rsc/stream-ui/stream-ui.tsx | 13 +- .../src/bedrock-chat-language-model.test.ts | 194 +++------- .../src/bedrock-chat-language-model.ts | 75 +--- .../src/bedrock-prepare-tools.ts | 18 +- .../anthropic-messages-language-model.test.ts | 164 ++------ .../src/anthropic-messages-language-model.ts | 60 +-- .../src/anthropic-prepare-tools.test.ts | 25 +- .../anthropic/src/anthropic-prepare-tools.ts | 32 +- .../azure/src/azure-openai-provider.test.ts | 9 - .../src/cohere-chat-language-model.test.ts | 284 +++----------- .../cohere/src/cohere-chat-language-model.ts | 118 ++---- packages/cohere/src/cohere-chat-prompt.ts | 2 + .../cohere/src/cohere-prepare-tools.test.ts | 7 - packages/cohere/src/cohere-prepare-tools.ts | 23 +- ...oogle-generative-ai-language-model.test.ts | 152 +++----- .../google-generative-ai-language-model.ts | 162 +++----- packages/google/src/google-prepare-tools.ts | 29 +- .../groq/src/groq-chat-language-model.test.ts | 196 ++++------ packages/groq/src/groq-chat-language-model.ts | 130 +++---- packages/groq/src/groq-prepare-tools.ts | 25 +- .../src/mistral-chat-language-model.test.ts | 75 ++-- .../src/mistral-chat-language-model.ts | 56 +-- packages/mistral/src/mistral-chat-prompt.ts | 6 + packages/mistral/src/mistral-prepare-tools.ts | 37 +- ...nai-compatible-chat-language-model.test.ts | 331 +++++----------- .../openai-compatible-chat-language-model.ts | 152 +++----- ...mpatible-completion-language-model.test.ts | 20 - ...ai-compatible-completion-language-model.ts | 105 ++--- .../src/openai-compatible-prepare-tools.ts | 31 +- .../src/openai-chat-language-model.test.ts | 361 +++++++----------- .../openai/src/openai-chat-language-model.ts | 112 ++---- .../openai-completion-language-model.test.ts | 17 - .../src/openai-completion-language-model.ts | 119 +++--- packages/openai/src/openai-prepare-tools.ts | 24 +- .../openai-responses-language-model.test.ts | 152 ++------ .../openai-responses-language-model.ts | 97 ++--- .../openai-responses-prepare-tools.ts | 42 +- .../src/perplexity-language-model.test.ts | 26 -- .../src/perplexity-language-model.ts | 88 ++--- .../v2/language-model-v2-call-options.ts | 129 +++++-- .../v2/language-model-v2-call-settings.ts | 92 ----- .../v2/language-model-v2-call-warning.ts | 4 +- 54 files changed, 1492 insertions(+), 2962 deletions(-) create mode 100644 .changeset/thick-chairs-remain.md delete mode 100644 packages/provider/src/language-model/v2/language-model-v2-call-settings.ts diff --git a/.changeset/thick-chairs-remain.md
b/.changeset/thick-chairs-remain.md new file mode 100644 index 000000000000..9e0bc14bbcaf --- /dev/null +++ b/.changeset/thick-chairs-remain.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore (provider): remove mode diff --git a/examples/ai-core/src/test/response-format.ts b/examples/ai-core/src/test/response-format.ts index 27af3011deed..a6fb4c972ff5 100644 --- a/examples/ai-core/src/test/response-format.ts +++ b/examples/ai-core/src/test/response-format.ts @@ -3,7 +3,6 @@ import 'dotenv/config'; async function main() { const result = await openai('gpt-4-turbo').doStream({ - mode: { type: 'regular' }, inputFormat: 'prompt', responseFormat: { type: 'json', diff --git a/packages/ai/core/generate-object/generate-object.test.ts b/packages/ai/core/generate-object/generate-object.test.ts index 9c746f4b1d8c..47c38592b12d 100644 --- a/packages/ai/core/generate-object/generate-object.test.ts +++ b/packages/ai/core/generate-object/generate-object.test.ts @@ -20,9 +20,9 @@ describe('output = "object"', () => { it('should generate object with json mode', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doGenerate: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -67,9 +67,9 @@ describe('output = "object"', () => { const result = await generateObject({ model: new MockLanguageModelV2({ supportsStructuredOutputs: true, - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doGenerate: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -107,9 +107,9 @@ describe('output = "object"', () => { const result = await generateObject({ model: new MockLanguageModelV2({ supportsStructuredOutputs: true, - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doGenerate: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: 'test-name', description: 'test description', schema: { @@ -148,10 +148,9 @@ describe('output = "object"', () => { it('should generate object with tool mode', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-tool', - tool: { + doGenerate: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { type: 'function', name: 'json', description: 'Respond with a JSON object.', @@ -163,7 +162,8 @@ describe('output = "object"', () => { type: 'object', }, }, - }); + ]); + expect(toolChoice).toStrictEqual({ type: 'required' }); expect(prompt).toStrictEqual([ { @@ -197,10 +197,9 @@ describe('output = "object"', () => { it('should use name and description with tool mode', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-tool', - tool: { + doGenerate: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { type: 'function', name: 'test-name', description: 'test description', @@ -212,7 +211,9 @@ describe('output = "object"', () => { type: 'object', }, }, - }); + ]); + expect(toolChoice).toStrictEqual({ type: 'required' }); + 
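These updated expectations pin down the two call shapes that replace the removed `mode` parameter: JSON output is now requested via `responseFormat`, and tool-based object generation via an ordinary function tool plus a required tool choice. A condensed sketch under those assumptions follows (`model`, `prompt`, and `schema` are caller-supplied placeholders, and the `JSONSchema7` re-export from `@ai-sdk/provider` is assumed):

```ts
import {
  JSONSchema7,
  LanguageModelV2,
  LanguageModelV2Prompt,
} from '@ai-sdk/provider';

// Formerly mode: { type: 'object-json' }:
async function viaResponseFormat(
  model: LanguageModelV2,
  prompt: LanguageModelV2Prompt,
  schema: JSONSchema7,
) {
  return model.doGenerate({
    inputFormat: 'prompt',
    prompt,
    responseFormat: { type: 'json', schema },
  });
}

// Formerly mode: { type: 'object-tool', tool: { ... } }:
async function viaRequiredTool(
  model: LanguageModelV2,
  prompt: LanguageModelV2Prompt,
  schema: JSONSchema7,
) {
  return model.doGenerate({
    inputFormat: 'prompt',
    prompt,
    tools: [
      {
        type: 'function',
        name: 'json',
        description: 'Respond with a JSON object.',
        parameters: schema,
      },
    ],
    toolChoice: { type: 'required' },
  });
}
```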
expect(prompt).toStrictEqual([ { role: 'user', @@ -379,9 +380,9 @@ describe('output = "object"', () => { it('should generate object when using zod transform', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doGenerate: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -426,9 +427,9 @@ describe('output = "object"', () => { it('should generate object with tool mode when using zod prePreprocess', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doGenerate: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -478,9 +479,9 @@ describe('output = "object"', () => { it('should generate object with json mode', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doGenerate: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -980,9 +981,9 @@ describe('output = "array"', () => { it('should generate an array with 3 elements', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doGenerate: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -1050,9 +1051,9 @@ describe('output = "enum"', () => { it('should generate an enum value', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - expect(mode).toEqual({ - type: 'object-json', + doGenerate: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -1101,9 +1102,9 @@ describe('output = "no-schema"', () => { it('should generate object', async () => { const result = await generateObject({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doGenerate: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: undefined, diff --git a/packages/ai/core/generate-object/generate-object.ts b/packages/ai/core/generate-object/generate-object.ts index 1539849bb2f5..3b6c3822272e 100644 --- a/packages/ai/core/generate-object/generate-object.ts +++ b/packages/ai/core/generate-object/generate-object.ts @@ -530,8 +530,8 @@ export async function generateObject({ tracer, fn: async span => { const result = await model.doGenerate({ - mode: { - type: 'object-json', + responseFormat: { + type: 'json', schema: outputStrategy.jsonSchema, name: schemaName, description: schemaDescription, @@ -651,16 +651,16 @@ export async function generateObject({ tracer, fn: async span => { const result = await model.doGenerate({ - mode: { - type: 'object-tool', - tool: { + tools: [ + { type: 'function', name: schemaName ?? 
'json', description: schemaDescription ?? 'Respond with a JSON object.', parameters: outputStrategy.jsonSchema!, }, - }, + ], + toolChoice: { type: 'required' }, ...prepareCallSettings(settings), inputFormat, prompt: promptMessages, diff --git a/packages/ai/core/generate-object/stream-object.test.ts b/packages/ai/core/generate-object/stream-object.test.ts index bc0241d72d1c..5e6b8f1a9fa1 100644 --- a/packages/ai/core/generate-object/stream-object.test.ts +++ b/packages/ai/core/generate-object/stream-object.test.ts @@ -23,9 +23,9 @@ describe('streamObject', () => { it('should send object deltas with json mode', async () => { const result = streamObject({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - expect(mode).toStrictEqual({ - type: 'object-json', + doStream: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -90,9 +90,9 @@ describe('streamObject', () => { const result = streamObject({ model: new MockLanguageModelV2({ supportsStructuredOutputs: true, - doStream: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doStream: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -149,9 +149,9 @@ describe('streamObject', () => { const result = streamObject({ model: new MockLanguageModelV2({ supportsStructuredOutputs: true, - doStream: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doStream: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: 'test-name', description: 'test description', schema: { @@ -210,10 +210,9 @@ describe('streamObject', () => { it('should send object deltas with tool mode', async () => { const result = streamObject({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-tool', - tool: { + doStream: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { type: 'function', name: 'json', description: 'Respond with a JSON object.', @@ -225,7 +224,8 @@ describe('streamObject', () => { type: 'object', }, }, - }); + ]); + expect(toolChoice).toStrictEqual({ type: 'required' }); expect(prompt).toStrictEqual([ { role: 'user', @@ -307,10 +307,9 @@ describe('streamObject', () => { it('should use name and description with tool mode', async () => { const result = streamObject({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-tool', - tool: { + doStream: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { type: 'function', name: 'test-name', description: 'test description', @@ -322,7 +321,9 @@ describe('streamObject', () => { type: 'object', }, }, - }); + ]); + expect(toolChoice).toStrictEqual({ type: 'required' }); + expect(prompt).toStrictEqual([ { role: 'user', @@ -1197,9 +1198,9 @@ describe('streamObject', () => { it('should send object deltas with json mode', async () => { const result = streamObject({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doStream: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: jsonSchema({ @@ -1557,9 +1558,9 @@ 
describe('streamObject', () => { beforeEach(async () => { result = streamObject({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doStream: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -1712,9 +1713,9 @@ describe('streamObject', () => { beforeEach(async () => { result = streamObject({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doStream: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: { @@ -1824,9 +1825,9 @@ describe('streamObject', () => { it('should send object deltas with json mode', async () => { const result = streamObject({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'object-json', + doStream: async ({ prompt, responseFormat }) => { + expect(responseFormat).toStrictEqual({ + type: 'json', name: undefined, description: undefined, schema: undefined, diff --git a/packages/ai/core/generate-object/stream-object.ts b/packages/ai/core/generate-object/stream-object.ts index acf34e345752..883add9ce324 100644 --- a/packages/ai/core/generate-object/stream-object.ts +++ b/packages/ai/core/generate-object/stream-object.ts @@ -603,8 +603,8 @@ class DefaultStreamObjectResult }); callOptions = { - mode: { - type: 'object-json', + responseFormat: { + type: 'json', schema: outputStrategy.jsonSchema, name: schemaName, description: schemaDescription, @@ -646,16 +646,16 @@ class DefaultStreamObjectResult }); callOptions = { - mode: { - type: 'object-tool', - tool: { + tools: [ + { type: 'function', name: schemaName ?? 'json', description: schemaDescription ?? 
'Respond with a JSON object.', parameters: outputStrategy.jsonSchema!, }, - }, + ], + toolChoice: { type: 'required' }, ...prepareCallSettings(settings), inputFormat: standardizedPrompt.type, prompt: await convertToLanguageModelPrompt({ diff --git a/packages/ai/core/generate-text/generate-text.test.ts b/packages/ai/core/generate-text/generate-text.test.ts index 98b8880bd62d..e7df0eb97238 100644 --- a/packages/ai/core/generate-text/generate-text.test.ts +++ b/packages/ai/core/generate-text/generate-text.test.ts @@ -78,13 +78,7 @@ describe('result.text', () => { it('should generate text', async () => { const result = await generateText({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - expect(mode).toStrictEqual({ - type: 'regular', - tools: undefined, - toolChoice: undefined, - }); - + doGenerate: async ({ prompt }) => { expect(prompt).toStrictEqual([ { role: 'user', @@ -191,37 +185,35 @@ describe('result.toolCalls', () => { it('should contain tool calls', async () => { const result = await generateText({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'regular', - toolChoice: { type: 'required' }, - tools: [ - { - type: 'function', - name: 'tool1', - description: undefined, - parameters: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, + doGenerate: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { + type: 'function', + name: 'tool1', + description: undefined, + parameters: { + $schema: 'http://json-schema.org/draft-07/schema#', + additionalProperties: false, + properties: { value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - { - type: 'function', - name: 'tool2', - description: undefined, - parameters: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { somethingElse: { type: 'string' } }, - required: ['somethingElse'], - type: 'object', - }, + }, + { + type: 'function', + name: 'tool2', + description: undefined, + parameters: { + $schema: 'http://json-schema.org/draft-07/schema#', + additionalProperties: false, + properties: { somethingElse: { type: 'string' } }, + required: ['somethingElse'], + type: 'object', }, - ], - }); + }, + ]); + + expect(toolChoice).toStrictEqual({ type: 'required' }); expect(prompt).toStrictEqual([ { @@ -277,25 +269,23 @@ describe('result.toolResults', () => { it('should contain tool results', async () => { const result = await generateText({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: { type: 'auto' }, - tools: [ - { - type: 'function', - name: 'tool1', - description: undefined, - parameters: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, + doGenerate: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { + type: 'function', + name: 'tool1', + description: undefined, + parameters: { + $schema: 'http://json-schema.org/draft-07/schema#', + additionalProperties: false, + properties: { value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - ], - }); + }, + ]); + + expect(toolChoice).toStrictEqual({ type: 'auto' }); expect(prompt).toStrictEqual([ { @@ -504,27 +494,25 @@ 
describe('options.maxSteps', () => { let responseCount = 0; result = await generateText({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { + doGenerate: async ({ prompt, tools, toolChoice }) => { switch (responseCount++) { case 0: - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: { type: 'auto' }, - tools: [ - { - type: 'function', - name: 'tool1', - description: undefined, - parameters: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, + expect(tools).toStrictEqual([ + { + type: 'function', + name: 'tool1', + description: undefined, + parameters: { + $schema: 'http://json-schema.org/draft-07/schema#', + additionalProperties: false, + properties: { value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - ], - }); + }, + ]); + + expect(toolChoice).toStrictEqual({ type: 'auto' }); expect(prompt).toStrictEqual([ { @@ -561,24 +549,22 @@ describe('options.maxSteps', () => { }, }; case 1: - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: { type: 'auto' }, - tools: [ - { - type: 'function', - name: 'tool1', - description: undefined, - parameters: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, + expect(tools).toStrictEqual([ + { + type: 'function', + name: 'tool1', + description: undefined, + parameters: { + $schema: 'http://json-schema.org/draft-07/schema#', + additionalProperties: false, + properties: { value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - ], - }); + }, + ]); + + expect(toolChoice).toStrictEqual({ type: 'auto' }); expect(prompt).toStrictEqual([ { @@ -698,15 +684,9 @@ describe('options.maxSteps', () => { let responseCount = 0; result = await generateText({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { + doGenerate: async ({ prompt }) => { switch (responseCount++) { case 0: { - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: undefined, - tools: undefined, - }); - expect(prompt).toStrictEqual([ { role: 'user', @@ -729,12 +709,6 @@ describe('options.maxSteps', () => { }; } case 1: { - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: undefined, - tools: undefined, - }); - expect(prompt).toStrictEqual([ { role: 'user', @@ -790,11 +764,6 @@ describe('options.maxSteps', () => { }; } case 2: { - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: undefined, - tools: undefined, - }); expect(prompt).toStrictEqual([ { role: 'user', @@ -849,11 +818,6 @@ describe('options.maxSteps', () => { }; } case 3: { - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: undefined, - tools: undefined, - }); expect(prompt).toStrictEqual([ { role: 'user', @@ -1205,35 +1169,33 @@ describe('tools with custom schema', () => { it('should contain tool calls', async () => { const result = await generateText({ model: new MockLanguageModelV2({ - doGenerate: async ({ prompt, mode }) => { - assert.deepStrictEqual(mode, { - type: 'regular', - toolChoice: { type: 'required' }, - tools: [ - { - type: 'function', - name: 'tool1', - description: undefined, - parameters: { - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, + doGenerate: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { + type: 
'function', + name: 'tool1', + description: undefined, + parameters: { + additionalProperties: false, + properties: { value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - { - type: 'function', - name: 'tool2', - description: undefined, - parameters: { - additionalProperties: false, - properties: { somethingElse: { type: 'string' } }, - required: ['somethingElse'], - type: 'object', - }, + }, + { + type: 'function', + name: 'tool2', + description: undefined, + parameters: { + additionalProperties: false, + properties: { somethingElse: { type: 'string' } }, + required: ['somethingElse'], + type: 'object', }, - ], - }); + }, + ]); + + expect(toolChoice).toStrictEqual({ type: 'required' }); expect(prompt).toStrictEqual([ { @@ -1468,7 +1430,6 @@ describe('options.output', () => { expect(callOptions!).toEqual({ temperature: 0, - mode: { type: 'regular' }, responseFormat: { type: 'text' }, inputFormat: 'prompt', prompt: [ @@ -1524,7 +1485,6 @@ describe('options.output', () => { expect(callOptions!).toEqual({ temperature: 0, - mode: { type: 'regular' }, inputFormat: 'prompt', responseFormat: { type: 'json', schema: undefined }, prompt: [ @@ -1586,7 +1546,6 @@ describe('options.output', () => { expect(callOptions!).toEqual({ temperature: 0, - mode: { type: 'regular' }, inputFormat: 'prompt', responseFormat: { type: 'json', diff --git a/packages/ai/core/generate-text/generate-text.ts b/packages/ai/core/generate-text/generate-text.ts index 25913751d83b..c206a2dcc103 100644 --- a/packages/ai/core/generate-text/generate-text.ts +++ b/packages/ai/core/generate-text/generate-text.ts @@ -263,8 +263,7 @@ A function that attempts to repair a tool call that failed to parse. }), tracer, fn: async span => { - const mode = { - type: 'regular' as const, + const toolsAndToolChoice = { ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools }), }; @@ -325,12 +324,13 @@ A function that attempts to repair a tool call that failed to parse. }, 'ai.prompt.tools': { // convert the language model level tools: - input: () => mode.tools?.map(tool => JSON.stringify(tool)), + input: () => + toolsAndToolChoice.tools?.map(tool => JSON.stringify(tool)), }, 'ai.prompt.toolChoice': { input: () => - mode.toolChoice != null - ? JSON.stringify(mode.toolChoice) + toolsAndToolChoice.toolChoice != null + ? JSON.stringify(toolsAndToolChoice.toolChoice) : undefined, }, @@ -349,8 +349,8 @@ A function that attempts to repair a tool call that failed to parse. 
tracer, fn: async span => { const result = await model.doGenerate({ - mode, ...callSettings, + ...toolsAndToolChoice, inputFormat: promptFormat, responseFormat: output?.responseFormat({ model }), prompt: promptMessages, diff --git a/packages/ai/core/generate-text/stream-text.test.ts b/packages/ai/core/generate-text/stream-text.test.ts index f05eccb7ca35..7aa9565850b2 100644 --- a/packages/ai/core/generate-text/stream-text.test.ts +++ b/packages/ai/core/generate-text/stream-text.test.ts @@ -170,13 +170,7 @@ describe('streamText', () => { it('should send text deltas', async () => { const result = streamText({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - expect(mode).toStrictEqual({ - type: 'regular', - tools: undefined, - toolChoice: undefined, - }); - + doStream: async ({ prompt }) => { expect(prompt).toStrictEqual([ { role: 'user', @@ -267,13 +261,7 @@ describe('streamText', () => { it('should send text deltas', async () => { const result = streamText({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - expect(mode).toStrictEqual({ - type: 'regular', - tools: undefined, - toolChoice: undefined, - }); - + doStream: async ({ prompt }) => { expect(prompt).toStrictEqual([ { role: 'user', @@ -349,13 +337,7 @@ describe('streamText', () => { it('should use fallback response metadata when response metadata is not provided', async () => { const result = streamText({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - expect(mode).toStrictEqual({ - type: 'regular', - tools: undefined, - toolChoice: undefined, - }); - + doStream: async ({ prompt }) => { expect(prompt).toStrictEqual([ { role: 'user', @@ -396,25 +378,23 @@ describe('streamText', () => { it('should send tool calls', async () => { const result = streamText({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - expect(mode).toStrictEqual({ - type: 'regular', - tools: [ - { - type: 'function', - name: 'tool1', - description: undefined, - parameters: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, + doStream: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { + type: 'function', + name: 'tool1', + description: undefined, + parameters: { + $schema: 'http://json-schema.org/draft-07/schema#', + additionalProperties: false, + properties: { value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - ], - toolChoice: { type: 'required' }, - }); + }, + ]); + + expect(toolChoice).toStrictEqual({ type: 'required' }); expect(prompt).toStrictEqual([ { @@ -468,25 +448,23 @@ describe('streamText', () => { it('should not send tool call deltas when toolCallStreaming is disabled', async () => { const result = streamText({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - expect(mode).toStrictEqual({ - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - description: undefined, - parameters: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, + doStream: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { + type: 'function', + name: 'test-tool', + description: undefined, + parameters: { + $schema: 'http://json-schema.org/draft-07/schema#', + additionalProperties: false, + properties: 
{ value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - ], - toolChoice: { type: 'required' }, - }); + }, + ]); + + expect(toolChoice).toStrictEqual({ type: 'required' }); expect(prompt).toStrictEqual([ { @@ -2331,27 +2309,25 @@ describe('streamText', () => { let responseCount = 0; result = streamText({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { + doStream: async ({ prompt, tools, toolChoice }) => { switch (responseCount++) { case 0: { - expect(mode).toStrictEqual({ - type: 'regular', - tools: [ - { - type: 'function', - name: 'tool1', - description: undefined, - parameters: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, + expect(tools).toStrictEqual([ + { + type: 'function', + name: 'tool1', + description: undefined, + parameters: { + $schema: 'http://json-schema.org/draft-07/schema#', + additionalProperties: false, + properties: { value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - ], - toolChoice: { type: 'auto' }, - }); + }, + ]); + + expect(toolChoice).toStrictEqual({ type: 'auto' }); expect(prompt).toStrictEqual([ { @@ -2392,24 +2368,22 @@ describe('streamText', () => { }; } case 1: { - expect(mode).toStrictEqual({ - type: 'regular', - tools: [ - { - type: 'function', - name: 'tool1', - description: undefined, - parameters: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, + expect(tools).toStrictEqual([ + { + type: 'function', + name: 'tool1', + description: undefined, + parameters: { + $schema: 'http://json-schema.org/draft-07/schema#', + additionalProperties: false, + properties: { value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - ], - toolChoice: { type: 'auto' }, - }); + }, + ]); + + expect(toolChoice).toStrictEqual({ type: 'auto' }); expect(prompt).toStrictEqual([ { @@ -2569,14 +2543,9 @@ describe('streamText', () => { let responseCount = 0; result = streamText({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { + doStream: async ({ prompt }) => { switch (responseCount++) { case 0: { - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: undefined, - tools: undefined, - }); expect(prompt).toStrictEqual([ { role: 'user', @@ -2611,11 +2580,6 @@ describe('streamText', () => { }; } case 1: { - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: undefined, - tools: undefined, - }); expect(prompt).toStrictEqual([ { role: 'user', @@ -2667,12 +2631,6 @@ describe('streamText', () => { }; } case 2: { - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: undefined, - tools: undefined, - }); - expect(prompt).toStrictEqual([ { role: 'user', @@ -2739,12 +2697,6 @@ describe('streamText', () => { }; } case 3: { - expect(mode).toStrictEqual({ - type: 'regular', - toolChoice: undefined, - tools: undefined, - }); - expect(prompt).toStrictEqual([ { role: 'user', @@ -3177,24 +3129,21 @@ describe('streamText', () => { it('should send tool calls', async () => { const result = streamText({ model: new MockLanguageModelV2({ - doStream: async ({ prompt, mode }) => { - expect(mode).toStrictEqual({ - type: 'regular', - tools: [ - { - type: 'function', - name: 'tool1', - description: undefined, - parameters: { - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: 
['value'], - type: 'object', - }, + doStream: async ({ prompt, tools, toolChoice }) => { + expect(tools).toStrictEqual([ + { + type: 'function', + name: 'tool1', + description: undefined, + parameters: { + additionalProperties: false, + properties: { value: { type: 'string' } }, + required: ['value'], + type: 'object', }, - ], - toolChoice: { type: 'required' }, - }); + }, + ]); + expect(toolChoice).toStrictEqual({ type: 'required' }); expect(prompt).toStrictEqual([ { @@ -4463,7 +4412,6 @@ describe('streamText', () => { expect(callOptions).toEqual({ temperature: 0, - mode: { type: 'regular' }, inputFormat: 'prompt', responseFormat: { type: 'json', schema: undefined }, prompt: [ diff --git a/packages/ai/core/generate-text/stream-text.ts b/packages/ai/core/generate-text/stream-text.ts index 7c7221c4a5a3..2ab9cf41a06c 100644 --- a/packages/ai/core/generate-text/stream-text.ts +++ b/packages/ai/core/generate-text/stream-text.ts @@ -974,8 +974,7 @@ class DefaultStreamTextResult modelSupportsUrl: model.supportsUrl?.bind(model), // support 'this' context }); - const mode = { - type: 'regular' as const, + const toolsAndToolChoice = { ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools }), }; @@ -1002,12 +1001,15 @@ class DefaultStreamTextResult }, 'ai.prompt.tools': { // convert the language model level tools: - input: () => mode.tools?.map(tool => JSON.stringify(tool)), + input: () => + toolsAndToolChoice.tools?.map(tool => + JSON.stringify(tool), + ), }, 'ai.prompt.toolChoice': { input: () => - mode.toolChoice != null - ? JSON.stringify(mode.toolChoice) + toolsAndToolChoice.toolChoice != null + ? JSON.stringify(toolsAndToolChoice.toolChoice) : undefined, }, @@ -1029,8 +1031,8 @@ class DefaultStreamTextResult startTimestampMs: now(), // get before the call doStreamSpan, result: await model.doStream({ - mode, ...prepareCallSettings(settings), + ...toolsAndToolChoice, inputFormat: promptFormat, responseFormat: output?.responseFormat({ model }), prompt: promptMessages, diff --git a/packages/ai/core/middleware/default-settings-middleware.test.ts b/packages/ai/core/middleware/default-settings-middleware.test.ts index 4b16e5bb5ec9..07e1d80d6c00 100644 --- a/packages/ai/core/middleware/default-settings-middleware.test.ts +++ b/packages/ai/core/middleware/default-settings-middleware.test.ts @@ -2,7 +2,6 @@ import { LanguageModelV2CallOptions } from '@ai-sdk/provider'; import { defaultSettingsMiddleware } from './default-settings-middleware'; const BASE_PARAMS: LanguageModelV2CallOptions = { - mode: { type: 'regular' }, prompt: [ { role: 'user', content: [{ type: 'text', text: 'Hello, world!' 
}] }, ], diff --git a/packages/ai/core/middleware/wrap-language-model.test.ts b/packages/ai/core/middleware/wrap-language-model.test.ts index efb3a82ecdeb..b5cc6d12ba21 100644 --- a/packages/ai/core/middleware/wrap-language-model.test.ts +++ b/packages/ai/core/middleware/wrap-language-model.test.ts @@ -56,7 +56,6 @@ describe('wrapLanguageModel', () => { const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], - mode: { type: 'regular' }, }; await wrappedModel.doGenerate(params); @@ -91,7 +90,6 @@ describe('wrapLanguageModel', () => { const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], - mode: { type: 'regular' }, }; await wrappedModel.doGenerate(params); @@ -124,7 +122,6 @@ describe('wrapLanguageModel', () => { const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], - mode: { type: 'regular' }, }; await wrappedModel.doStream(params); @@ -156,7 +153,6 @@ describe('wrapLanguageModel', () => { const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], - mode: { type: 'regular' }, }; await wrappedModel.doStream(params); @@ -269,7 +265,6 @@ describe('wrapLanguageModel', () => { const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], - mode: { type: 'regular' }, }; await wrappedModel.doGenerate(params); @@ -323,7 +318,6 @@ describe('wrapLanguageModel', () => { const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], - mode: { type: 'regular' }, }; await wrappedModel.doStream(params); @@ -379,7 +373,6 @@ describe('wrapLanguageModel', () => { const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], - mode: { type: 'regular' }, }; const result = await wrappedModel.doGenerate(params); @@ -427,7 +420,6 @@ describe('wrapLanguageModel', () => { const params: LanguageModelV2CallOptions = { inputFormat: 'messages', prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }], - mode: { type: 'regular' }, }; const result = await wrappedModel.doStream(params); diff --git a/packages/ai/rsc/stream-ui/stream-ui.tsx b/packages/ai/rsc/stream-ui/stream-ui.tsx index f7b59e2097d3..b9dca3368743 100644 --- a/packages/ai/rsc/stream-ui/stream-ui.tsx +++ b/packages/ai/rsc/stream-ui/stream-ui.tsx @@ -266,15 +266,12 @@ functionality that can be fully encapsulated in the provider. 
}); const result = await retry(async () => model.doStream({ - mode: { - type: 'regular', - ...prepareToolsAndToolChoice({ - tools, - toolChoice, - activeTools: undefined, - }), - }, ...prepareCallSettings(settings), + ...prepareToolsAndToolChoice({ + tools, + toolChoice, + activeTools: undefined, + }), inputFormat: validatedPrompt.type, prompt: await convertToLanguageModelPrompt({ prompt: validatedPrompt, diff --git a/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts b/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts index 5dee90d16099..62daad6b2424 100644 --- a/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts +++ b/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts @@ -170,7 +170,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -224,23 +223,20 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { type: 'tool', toolName: 'test-tool' }, - }, + }, + ], + toolChoice: { type: 'tool', toolName: 'test-tool' }, prompt: TEST_PROMPT, }); @@ -335,34 +331,31 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool-1', - parameters: { - type: 'object', - properties: { value1: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool-1', + parameters: { + type: 'object', + properties: { value1: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - { - type: 'function', - name: 'test-tool-2', - parameters: { - type: 'object', - properties: { value2: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + }, + { + type: 'function', + name: 'test-tool-2', + parameters: { + type: 'object', + properties: { value2: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { type: 'tool', toolName: 'test-tool' }, - }, + }, + ], + toolChoice: { type: 'tool', toolName: 'test-tool' }, prompt: TEST_PROMPT, }); @@ -435,7 +428,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -479,7 +471,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -520,7 +511,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -561,7 +551,6 @@ describe('doStream', () => { const { stream } = await 
model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -592,7 +581,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); const result = await convertReadableStreamToArray(stream); @@ -618,7 +606,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -637,7 +624,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { bedrock: { @@ -691,7 +677,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -735,7 +720,6 @@ describe('doStream', () => { const response = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -798,7 +782,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: optionsHeaders, }); @@ -832,7 +815,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -861,7 +843,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { bedrock: { @@ -907,7 +888,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -950,7 +930,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: [ { role: 'system', @@ -1013,7 +992,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1062,7 +1040,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1140,7 +1117,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1154,7 +1130,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1169,7 +1144,6 @@ describe('doGenerate', () => { const { finishReason } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1181,7 +1155,6 @@ describe('doGenerate', () => { const { finishReason } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1193,7 +1166,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1208,7 +1180,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, maxTokens: 100, temperature: 0.5, @@ -1224,60 +1195,11 @@ describe('doGenerate', () => { }); }); - it('should pass tool specification in object-tool mode', async () => { - prepareJsonResponse({}); - - await model.doGenerate({ - inputFormat: 'prompt', - mode: { - type: 'object-tool', - tool: { - name: 'test-tool', - type: 'function', - parameters: { - type: 'object', - properties: { - property1: { type: 
'string' }, - property2: { type: 'number' }, - }, - required: ['property1', 'property2'], - additionalProperties: false, - }, - }, - }, - prompt: TEST_PROMPT, - }); - - expect(await server.calls[0].requestBody).toMatchObject({ - toolConfig: { - tools: [ - { - toolSpec: { - name: 'test-tool', - inputSchema: { - json: { - type: 'object', - properties: { - property1: { type: 'string' }, - property2: { type: 'number' }, - }, - required: ['property1', 'property2'], - additionalProperties: false, - }, - }, - }, - }, - ], - }, - }); - }); - it('should support guardrails', async () => { prepareJsonResponse({}); await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { bedrock: { @@ -1304,7 +1226,6 @@ describe('doGenerate', () => { const response = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1336,7 +1257,6 @@ describe('doGenerate', () => { const response = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1353,26 +1273,23 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool-1', - description: 'A test tool', - parameters: { - type: 'object', - properties: { - param1: { type: 'string' }, - param2: { type: 'number' }, - }, - required: ['param1'], - additionalProperties: false, + tools: [ + { + type: 'function', + name: 'test-tool-1', + description: 'A test tool', + parameters: { + type: 'object', + properties: { + param1: { type: 'string' }, + param2: { type: 'number' }, }, + required: ['param1'], + additionalProperties: false, }, - ], - toolChoice: { type: 'auto' }, - }, + }, + ], + toolChoice: { type: 'auto' }, prompt: TEST_PROMPT, }); @@ -1431,7 +1348,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: optionsHeaders, }); @@ -1466,7 +1382,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1483,7 +1398,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { bedrock: { @@ -1511,7 +1425,6 @@ describe('doGenerate', () => { const response = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1534,7 +1447,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: [ { role: 'system', @@ -1572,7 +1484,6 @@ describe('doGenerate', () => { const { reasoning, text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1604,7 +1515,6 @@ describe('doGenerate', () => { const { reasoning, text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1633,7 +1543,6 @@ describe('doGenerate', () => { const { reasoning, text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1670,7 +1579,6 @@ describe('doGenerate', () => { const { reasoning, text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); diff --git a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts 
index 4954f039d89f..a7370aeb8537 100644 --- a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts +++ b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts @@ -54,7 +54,6 @@ export class BedrockChatLanguageModel implements LanguageModelV2 { ) {} private getArgs({ - mode, prompt, maxTokens, temperature, @@ -65,13 +64,13 @@ export class BedrockChatLanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, + tools, + toolChoice, providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]): { command: BedrockConverseInput; warnings: LanguageModelV2CallWarning[]; } { - const type = mode.type; - const warnings: LanguageModelV2CallWarning[] = []; if (frequencyPenalty != null) { @@ -176,62 +175,22 @@ export class BedrockChatLanguageModel implements LanguageModelV2 { }); } - const baseArgs: BedrockConverseInput = { - system, - additionalModelRequestFields: this.settings.additionalModelRequestFields, - ...(Object.keys(inferenceConfig).length > 0 && { - inferenceConfig, - }), - messages, - ...providerOptions?.bedrock, - }; - - switch (type) { - case 'regular': { - const { toolConfig, toolWarnings } = prepareTools(mode); - return { - command: { - ...baseArgs, - ...(toolConfig.tools?.length ? { toolConfig } : {}), - }, - warnings: [...warnings, ...toolWarnings], - }; - } - - case 'object-json': { - throw new UnsupportedFunctionalityError({ - functionality: 'json-mode object generation', - }); - } - - case 'object-tool': { - return { - command: { - ...baseArgs, - toolConfig: { - tools: [ - { - toolSpec: { - name: mode.tool.name, - description: mode.tool.description, - inputSchema: { - json: mode.tool.parameters as JSONObject, - }, - }, - }, - ], - toolChoice: { tool: { name: mode.tool.name } }, - }, - }, - warnings, - }; - } + const { toolConfig, toolWarnings } = prepareTools({ tools, toolChoice }); - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported type: ${_exhaustiveCheck}`); - } - } + return { + command: { + system, + messages, + additionalModelRequestFields: + this.settings.additionalModelRequestFields, + ...(Object.keys(inferenceConfig).length > 0 && { + inferenceConfig, + }), + ...providerOptions?.bedrock, + ...(toolConfig.tools?.length ? { toolConfig } : {}), + }, + warnings: [...warnings, ...toolWarnings], + }; } async doGenerate( diff --git a/packages/amazon-bedrock/src/bedrock-prepare-tools.ts b/packages/amazon-bedrock/src/bedrock-prepare-tools.ts index c90b7a3dbe0b..5c7679f9a653 100644 --- a/packages/amazon-bedrock/src/bedrock-prepare-tools.ts +++ b/packages/amazon-bedrock/src/bedrock-prepare-tools.ts @@ -1,21 +1,23 @@ import { JSONObject, - LanguageModelV2, + LanguageModelV2CallOptions, LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { BedrockTool, BedrockToolConfiguration } from './bedrock-api-types'; -export function prepareTools( - mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & { - type: 'regular'; - }, -): { +export function prepareTools({ + tools, + toolChoice, +}: { + tools: LanguageModelV2CallOptions['tools']; + toolChoice?: LanguageModelV2CallOptions['toolChoice']; +}): { toolConfig: BedrockToolConfiguration; // note: do not rename, name required by Bedrock toolWarnings: LanguageModelV2CallWarning[]; } { // when the tools array is empty, change it to undefined to prevent errors: - const tools = mode.tools?.length ? mode.tools : undefined; + tools = tools?.length ? 
tools : undefined; if (tools == null) { return { @@ -43,8 +45,6 @@ export function prepareTools( } } - const toolChoice = mode.toolChoice; - if (toolChoice == null) { return { toolConfig: { tools: bedrockTools, toolChoice: undefined }, diff --git a/packages/anthropic/src/anthropic-messages-language-model.test.ts b/packages/anthropic/src/anthropic-messages-language-model.test.ts index dd1f5b6b343d..12f5c21c5642 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.test.ts +++ b/packages/anthropic/src/anthropic-messages-language-model.test.ts @@ -70,7 +70,6 @@ describe('AnthropicMessagesLanguageModel', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, temperature: 0.5, topP: 0.7, @@ -121,7 +120,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { text } = await provider('claude-3-haiku-20240307').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -142,7 +140,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { reasoning, text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -165,7 +162,6 @@ describe('AnthropicMessagesLanguageModel', () => { 'claude-3-haiku-20240307', ).doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -188,59 +184,10 @@ describe('AnthropicMessagesLanguageModel', () => { const { toolCalls, finishReason, text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - ], - }, - prompt: TEST_PROMPT, - }); - - expect(toolCalls).toStrictEqual([ - { - toolCallId: 'toolu_1', - toolCallType: 'function', - toolName: 'test-tool', - args: '{"value":"example value"}', - }, - ]); - expect(text).toStrictEqual('Some text\n\n'); - expect(finishReason).toStrictEqual('tool-calls'); - }); - - it('should support object-tool mode', async () => { - prepareJsonResponse({ - content: [ - { type: 'text', text: 'Some text\n\n' }, + tools: [ { - type: 'tool_use', - id: 'toolu_1', - name: 'json', - input: { value: 'example value' }, - }, - ], - stopReason: 'tool_use', - }); - - const { toolCalls, finishReason } = await model.doGenerate({ - inputFormat: 'prompt', - mode: { - type: 'object-tool', - tool: { type: 'function', - name: 'json', - description: 'Respond with a JSON object.', + name: 'test-tool', parameters: { type: 'object', properties: { value: { type: 'string' } }, @@ -249,7 +196,7 @@ describe('AnthropicMessagesLanguageModel', () => { $schema: 'http://json-schema.org/draft-07/schema#', }, }, - }, + ], prompt: TEST_PROMPT, }); @@ -257,37 +204,12 @@ describe('AnthropicMessagesLanguageModel', () => { { toolCallId: 'toolu_1', toolCallType: 'function', - toolName: 'json', + toolName: 'test-tool', args: '{"value":"example value"}', }, ]); + expect(text).toStrictEqual('Some text\n\n'); expect(finishReason).toStrictEqual('tool-calls'); - - // check request to Anthropic - expect(await server.calls[0].requestBody).toStrictEqual({ - max_tokens: 4096, - messages: [ - { - content: [{ text: 'Hello', type: 'text' }], - role: 'user', - }, - ], - model: 'claude-3-haiku-20240307', - tool_choice: { name: 'json', type: 'tool' }, - tools: [ - { - description: 'Respond with a JSON object.', - 
input_schema: { - $schema: 'http://json-schema.org/draft-07/schema#', - additionalProperties: false, - properties: { value: { type: 'string' } }, - required: ['value'], - type: 'object', - }, - name: 'json', - }, - ], - }); }); it('should extract usage', async () => { @@ -297,7 +219,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -315,7 +236,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { response } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -334,7 +254,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { rawResponse } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -353,7 +272,6 @@ describe('AnthropicMessagesLanguageModel', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, temperature: 0.5, maxTokens: 100, @@ -381,25 +299,22 @@ describe('AnthropicMessagesLanguageModel', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -441,7 +356,7 @@ describe('AnthropicMessagesLanguageModel', () => { await provider('claude-3-haiku-20240307').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, + prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -470,7 +385,6 @@ describe('AnthropicMessagesLanguageModel', () => { const model = provider('claude-3-haiku-20240307'); const result = await model.doGenerate({ - mode: { type: 'regular' }, inputFormat: 'messages', prompt: [ { @@ -515,7 +429,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { request } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -543,7 +456,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -593,7 +505,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -641,7 +552,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -686,7 +596,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -739,22 +648,19 @@ describe('AnthropicMessagesLanguageModel', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - 
properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -851,7 +757,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -881,7 +786,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { rawResponse } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -912,7 +816,6 @@ describe('AnthropicMessagesLanguageModel', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -949,7 +852,6 @@ describe('AnthropicMessagesLanguageModel', () => { await provider('claude-3-haiku-20240307').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -986,7 +888,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1028,7 +929,6 @@ describe('AnthropicMessagesLanguageModel', () => { const { request } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); diff --git a/packages/anthropic/src/anthropic-messages-language-model.ts b/packages/anthropic/src/anthropic-messages-language-model.ts index c3e99278bc23..e506f98e903b 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.ts +++ b/packages/anthropic/src/anthropic-messages-language-model.ts @@ -66,7 +66,6 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV2 { } private async getArgs({ - mode, prompt, maxTokens = 4096, // 4096: max model output tokens TODO update default in v5 temperature, @@ -77,10 +76,10 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, + tools, + toolChoice, providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]) { - const type = mode.type; - const warnings: LanguageModelV2CallWarning[] = []; if (frequencyPenalty != null) { @@ -187,47 +186,22 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV2 { baseArgs.max_tokens = maxTokens + thinkingBudget; } - switch (type) { - case 'regular': { - const { - tools, - tool_choice, - toolWarnings, - betas: toolsBetas, - } = prepareTools(mode); - - return { - args: { ...baseArgs, tools, tool_choice }, - warnings: [...warnings, ...toolWarnings], - betas: new Set([...messagesBetas, ...toolsBetas]), - }; - } - - case 'object-json': { - throw new UnsupportedFunctionalityError({ - functionality: 'json-mode object generation', - }); - } - - case 'object-tool': { - const { name, description, parameters } = mode.tool; - - return { - args: { - ...baseArgs, - tools: [{ name, description, input_schema: parameters }], - tool_choice: { type: 'tool', name }, - }, - warnings, - betas: messagesBetas, - }; - } + const { + tools: anthropicTools, + toolChoice: anthropicToolChoice, + toolWarnings, + betas: toolsBetas, + } = prepareTools({ tools, toolChoice }); - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported type: 
${_exhaustiveCheck}`); - } - } + return { + args: { + ...baseArgs, + tools: anthropicTools, + tool_choice: anthropicToolChoice, + }, + warnings: [...warnings, ...toolWarnings], + betas: new Set([...messagesBetas, ...toolsBetas]), + }; } private async getHeaders({ diff --git a/packages/anthropic/src/anthropic-prepare-tools.test.ts b/packages/anthropic/src/anthropic-prepare-tools.test.ts index 72da5d96da1d..d98ffde4d697 100644 --- a/packages/anthropic/src/anthropic-prepare-tools.test.ts +++ b/packages/anthropic/src/anthropic-prepare-tools.test.ts @@ -1,7 +1,7 @@ import { prepareTools } from './anthropic-prepare-tools'; it('should return undefined tools and tool_choice when tools are null', () => { - const result = prepareTools({ type: 'regular', tools: undefined }); + const result = prepareTools({ tools: undefined }); expect(result).toEqual({ tools: undefined, tool_choice: undefined, @@ -11,7 +11,7 @@ it('should return undefined tools and tool_choice when tools are null', () => { }); it('should return undefined tools and tool_choice when tools are empty', () => { - const result = prepareTools({ type: 'regular', tools: [] }); + const result = prepareTools({ tools: [] }); expect(result).toEqual({ tools: undefined, tool_choice: undefined, @@ -22,7 +22,6 @@ it('should return undefined tools and tool_choice when tools are empty', () => { it('should correctly prepare function tools', () => { const result = prepareTools({ - type: 'regular', tools: [ { type: 'function', @@ -39,13 +38,12 @@ it('should correctly prepare function tools', () => { input_schema: { type: 'object', properties: {} }, }, ]); - expect(result.tool_choice).toBeUndefined(); + expect(result.toolChoice).toBeUndefined(); expect(result.toolWarnings).toEqual([]); }); it('should correctly prepare provider-defined tools', () => { const result = prepareTools({ - type: 'regular', tools: [ { type: 'provider-defined', @@ -84,13 +82,12 @@ it('should correctly prepare provider-defined tools', () => { type: 'bash_20241022', }, ]); - expect(result.tool_choice).toBeUndefined(); + expect(result.toolChoice).toBeUndefined(); expect(result.toolWarnings).toEqual([]); }); it('should add warnings for unsupported tools', () => { const result = prepareTools({ - type: 'regular', tools: [ { type: 'provider-defined', @@ -101,7 +98,7 @@ it('should add warnings for unsupported tools', () => { ], }); expect(result.tools).toEqual([]); - expect(result.tool_choice).toBeUndefined(); + expect(result.toolChoice).toBeUndefined(); expect(result.toolWarnings).toEqual([ { type: 'unsupported-tool', @@ -117,7 +114,6 @@ it('should add warnings for unsupported tools', () => { it('should handle tool choice "auto"', () => { const result = prepareTools({ - type: 'regular', tools: [ { type: 'function', @@ -128,12 +124,11 @@ it('should handle tool choice "auto"', () => { ], toolChoice: { type: 'auto' }, }); - expect(result.tool_choice).toEqual({ type: 'auto' }); + expect(result.toolChoice).toEqual({ type: 'auto' }); }); it('should handle tool choice "required"', () => { const result = prepareTools({ - type: 'regular', tools: [ { type: 'function', @@ -144,12 +139,11 @@ it('should handle tool choice "required"', () => { ], toolChoice: { type: 'required' }, }); - expect(result.tool_choice).toEqual({ type: 'any' }); + expect(result.toolChoice).toEqual({ type: 'any' }); }); it('should handle tool choice "none"', () => { const result = prepareTools({ - type: 'regular', tools: [ { type: 'function', @@ -161,12 +155,11 @@ it('should handle tool choice "none"', () => { toolChoice: { type: 
'none' }, }); expect(result.tools).toBeUndefined(); - expect(result.tool_choice).toBeUndefined(); + expect(result.toolChoice).toBeUndefined(); }); it('should handle tool choice "tool"', () => { const result = prepareTools({ - type: 'regular', tools: [ { type: 'function', name: 'testFunction', description: 'A test function', parameters: { type: 'object', properties: {} }, }, ], toolChoice: { type: 'tool', toolName: 'testFunction' }, }); - expect(result.tool_choice).toEqual({ type: 'tool', name: 'testFunction' }); + expect(result.toolChoice).toEqual({ type: 'tool', name: 'testFunction' }); }); diff --git a/packages/anthropic/src/anthropic-prepare-tools.ts b/packages/anthropic/src/anthropic-prepare-tools.ts index be0a11d3539d..67261abbf59c 100644 --- a/packages/anthropic/src/anthropic-prepare-tools.ts +++ b/packages/anthropic/src/anthropic-prepare-tools.ts @@ -1,28 +1,30 @@ import { - LanguageModelV2, + LanguageModelV2CallOptions, LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { AnthropicTool, AnthropicToolChoice } from './anthropic-api-types'; -export function prepareTools( - mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & { - type: 'regular'; - }, -): { +export function prepareTools({ + tools, + toolChoice, +}: { + tools: LanguageModelV2CallOptions['tools']; + toolChoice?: LanguageModelV2CallOptions['toolChoice']; +}): { tools: Array<AnthropicTool> | undefined; - tool_choice: AnthropicToolChoice | undefined; + toolChoice: AnthropicToolChoice | undefined; toolWarnings: LanguageModelV2CallWarning[]; betas: Set<string>; } { // when the tools array is empty, change it to undefined to prevent errors: - const tools = mode.tools?.length ? mode.tools : undefined; + tools = tools?.length ? tools : undefined; const toolWarnings: LanguageModelV2CallWarning[] = []; const betas = new Set<string>(); if (tools == null) { - return { tools: undefined, tool_choice: undefined, toolWarnings, betas }; + return { tools: undefined, toolChoice: undefined, toolWarnings, betas }; } const anthropicTools: AnthropicTool[] = []; @@ -97,12 +99,10 @@ export function prepareTools( } } - const toolChoice = mode.toolChoice; - if (toolChoice == null) { return { tools: anthropicTools, - tool_choice: undefined, + toolChoice: undefined, toolWarnings, betas, }; @@ -114,24 +114,24 @@ export function prepareTools( case 'auto': return { tools: anthropicTools, - tool_choice: { type: 'auto' }, + toolChoice: { type: 'auto' }, toolWarnings, betas, }; case 'required': return { tools: anthropicTools, - tool_choice: { type: 'any' }, + toolChoice: { type: 'any' }, toolWarnings, betas, }; case 'none': // Anthropic does not support 'none' tool choice, so we remove the tools: - return { tools: undefined, tool_choice: undefined, toolWarnings, betas }; + return { tools: undefined, toolChoice: undefined, toolWarnings, betas }; case 'tool': return { tools: anthropicTools, - tool_choice: { type: 'tool', name: toolChoice.toolName }, + toolChoice: { type: 'tool', name: toolChoice.toolName }, toolWarnings, betas, }; diff --git a/packages/azure/src/azure-openai-provider.test.ts b/packages/azure/src/azure-openai-provider.test.ts index 3c65f7487fa7..ba55662bdca0 100644 --- a/packages/azure/src/azure-openai-provider.test.ts +++ b/packages/azure/src/azure-openai-provider.test.ts @@ -69,7 +69,6 @@ describe('chat', () => { await provider('test-deployment').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -83,7 +82,6 @@ describe('chat', () => { await providerApiVersionChanged('test-deployment').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' 
}, prompt: TEST_PROMPT, }); @@ -105,7 +103,6 @@ describe('chat', () => { await provider('test-deployment').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -130,7 +127,6 @@ describe('chat', () => { await provider('test-deployment').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); expect(server.calls[0].requestUrl).toStrictEqual( @@ -192,7 +188,6 @@ describe('completion', () => { await provider.completion('gpt-35-turbo-instruct').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); expect( @@ -213,7 +208,6 @@ describe('completion', () => { await provider.completion('gpt-35-turbo-instruct').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -510,7 +504,6 @@ describe('responses', () => { await provider.responses('test-deployment').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -532,7 +525,6 @@ describe('responses', () => { await provider.responses('test-deployment').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -557,7 +549,6 @@ describe('responses', () => { await provider.responses('test-deployment').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); diff --git a/packages/cohere/src/cohere-chat-language-model.test.ts b/packages/cohere/src/cohere-chat-language-model.test.ts index d8b8ce75fa2d..4b5b4c434247 100644 --- a/packages/cohere/src/cohere-chat-language-model.test.ts +++ b/packages/cohere/src/cohere-chat-language-model.test.ts @@ -69,15 +69,15 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); expect(text).toStrictEqual('Hello, World!'); }); - it('should extract tool plan', async () => { + it('should extract tool calls', async () => { prepareJsonResponse({ + text: 'Hello, World!', tool_calls: [ { id: 'test-id-1', @@ -92,71 +92,19 @@ describe('doGenerate', () => { const { text, toolCalls, finishReason } = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { - value: { type: 'string' }, - }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - ], - }, - prompt: TEST_PROMPT, - }); - - expect(toolCalls).toStrictEqual([ - { - toolCallId: 'test-id-1', - toolCallType: 'function', - toolName: 'test-tool', - args: '{"value":"example value"}', - }, - ]); - expect(finishReason).toStrictEqual('stop'); - }); - - it('should extract tool calls', async () => { - prepareJsonResponse({ - text: 'Hello, World!', - tool_calls: [ + tools: [ { - id: 'test-id-1', type: 'function', - function: { - name: 'test-tool', - arguments: '{"value":"example value"}', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, }, ], - }); - - const { text, toolCalls, finishReason } = await model.doGenerate({ - inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - 
parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - ], - }, prompt: TEST_PROMPT, }); @@ -179,7 +127,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -196,7 +143,6 @@ describe('doGenerate', () => { const { response } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -212,7 +158,6 @@ describe('doGenerate', () => { const { finishReason } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -226,7 +171,6 @@ describe('doGenerate', () => { const { rawResponse } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -245,7 +189,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -258,119 +201,28 @@ describe('doGenerate', () => { }); }); - it('should send correct request in object-tool mode', async () => { - prepareJsonResponse({}); - - await model.doGenerate({ - inputFormat: 'prompt', - mode: { - type: 'object-tool', - tool: { - type: 'function', - name: 'test-tool', - description: 'test description', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - }, - prompt: TEST_PROMPT, - }); + describe('should pass tools', async () => { + it('should support "none" tool choice', async () => { + prepareJsonResponse({}); - expect(await server.calls[0].requestBody).toStrictEqual({ - model: 'command-r-plus', - messages: [ - { role: 'system', content: 'you are a friendly bot!' }, - { role: 'user', content: 'Hello' }, - ], - tool_choice: 'REQUIRED', - tools: [ - { - type: 'function', - function: { + await model.doGenerate({ + inputFormat: 'prompt', + toolChoice: { type: 'none' }, + tools: [ + { + type: 'function', name: 'test-tool', - description: 'test description', parameters: { type: 'object', - properties: { value: { type: 'string' } }, + properties: { + value: { type: 'string' }, + }, required: ['value'], additionalProperties: false, $schema: 'http://json-schema.org/draft-07/schema#', }, }, - }, - ], - }); - }); - - it('should send correct request in object-json mode', async () => { - prepareJsonResponse({}); - - await model.doGenerate({ - inputFormat: 'prompt', - mode: { - type: 'object-json', - schema: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - prompt: TEST_PROMPT, - }); - - expect(await server.calls[0].requestBody).toStrictEqual({ - model: 'command-r-plus', - messages: [ - { role: 'system', content: 'you are a friendly bot!' 
}, - { role: 'user', content: 'Hello' }, - ], - response_format: { - type: 'json_object', - json_schema: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - }); - }); - - describe('should pass tools', async () => { - it('should support "none" tool choice', async () => { - prepareJsonResponse({}); - - await model.doGenerate({ - inputFormat: 'prompt', - mode: { - type: 'regular', - toolChoice: { - type: 'none', - }, - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { - value: { type: 'string' }, - }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - ], - }, + ], prompt: TEST_PROMPT, }); @@ -417,7 +269,6 @@ describe('doGenerate', () => { await provider('command-r-plus').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -437,7 +288,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json', @@ -475,7 +325,6 @@ describe('doGenerate', () => { const { request } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -500,22 +349,19 @@ describe('doGenerate', () => { const { toolCalls } = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'currentTime', - parameters: { - type: 'object', - properties: {}, - required: [], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'currentTime', + parameters: { + type: 'object', + properties: {}, + required: [], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: [ { role: 'user', @@ -582,7 +428,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -625,22 +470,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', prompt: TEST_PROMPT, - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], }); const responseArray = await convertReadableStreamToArray(stream); @@ -759,7 +601,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -784,7 +625,6 @@ describe('doStream', () => { const { rawResponse } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -804,7 +644,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -836,7 +675,6 @@ describe('doStream', () => { await 
provider('command-r-plus').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -856,7 +694,6 @@ describe('doStream', () => { const { request } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -880,22 +717,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', prompt: TEST_PROMPT, - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: {}, - required: [], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: {}, + required: [], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], }); expect(await convertReadableStreamToArray(stream)).toStrictEqual([ diff --git a/packages/cohere/src/cohere-chat-language-model.ts b/packages/cohere/src/cohere-chat-language-model.ts index 65ec0e1f3be7..e21c4fd1636e 100644 --- a/packages/cohere/src/cohere-chat-language-model.ts +++ b/packages/cohere/src/cohere-chat-language-model.ts @@ -2,7 +2,6 @@ import { LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2StreamPart, - UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { FetchFunction, @@ -53,7 +52,6 @@ export class CohereChatLanguageModel implements LanguageModelV2 { } private getArgs({ - mode, prompt, maxTokens, temperature, @@ -64,89 +62,47 @@ export class CohereChatLanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, + tools, + toolChoice, }: Parameters<LanguageModelV2['doGenerate']>[0]) { - const type = mode.type; - const chatPrompt = convertToCohereChatPrompt(prompt); - const baseArgs = { - // model id: - model: this.modelId, - - // standardized settings: - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - max_tokens: maxTokens, - temperature, - p: topP, - k: topK, - seed, - stop_sequences: stopSequences, - - // response format: - response_format: - responseFormat?.type === 'json' - ? { type: 'json_object', json_schema: responseFormat.schema } - : undefined, - - // messages: - messages: chatPrompt, - }; - - switch (type) { - case 'regular': { - const { tools, toolChoice, toolWarnings } = prepareTools(mode); + const { + tools: cohereTools, + toolChoice: cohereToolChoice, + toolWarnings, + } = prepareTools({ tools, toolChoice }); - return { - args: { - ...baseArgs, - tools, - tool_choice: toolChoice, - }, - warnings: toolWarnings, - }; - } - - case 'object-json': { - return { - args: { - ...baseArgs, - response_format: - mode.schema == null - ? { type: 'json_object' } - : { type: 'json_object', json_schema: mode.schema }, - }, - warnings: [], - }; - } - - case 'object-tool': { - return { - args: { - ...baseArgs, - tools: [ - { - type: 'function', - function: { - name: mode.tool.name, - description: mode.tool.description ?? 
'', - parameters: mode.tool.parameters, - }, - }, - ], - tool_choice: 'REQUIRED', - }, - warnings: [], - }; - } - - default: { - const _exhaustiveCheck: never = type; - throw new UnsupportedFunctionalityError({ - functionality: `Unsupported mode: ${_exhaustiveCheck}`, - }); - } - } + return { + args: { + // model id: + model: this.modelId, + + // standardized settings: + frequency_penalty: frequencyPenalty, + presence_penalty: presencePenalty, + max_tokens: maxTokens, + temperature, + p: topP, + k: topK, + seed, + stop_sequences: stopSequences, + + // response format: + response_format: + responseFormat?.type === 'json' + ? { type: 'json_object', json_schema: responseFormat.schema } + : undefined, + + // messages: + messages: chatPrompt, + + // tools: + tools: cohereTools, + tool_choice: cohereToolChoice, + }, + warnings: toolWarnings, + }; } async doGenerate( diff --git a/packages/cohere/src/cohere-chat-prompt.ts b/packages/cohere/src/cohere-chat-prompt.ts index 98130d7ed142..ee72c06d9bd5 100644 --- a/packages/cohere/src/cohere-chat-prompt.ts +++ b/packages/cohere/src/cohere-chat-prompt.ts @@ -37,3 +37,5 @@ export interface CohereToolMessage { content: string; tool_call_id: string; } + +export type CohereToolChoice = 'NONE' | 'REQUIRED' | undefined; diff --git a/packages/cohere/src/cohere-prepare-tools.test.ts b/packages/cohere/src/cohere-prepare-tools.test.ts index 5c6547db649b..6d136fd8f927 100644 --- a/packages/cohere/src/cohere-prepare-tools.test.ts +++ b/packages/cohere/src/cohere-prepare-tools.test.ts @@ -2,7 +2,6 @@ import { prepareTools } from './cohere-prepare-tools'; it('should return undefined tools when no tools are provided', () => { const result = prepareTools({ - type: 'regular', tools: [], }); @@ -22,7 +21,6 @@ it('should process function tools correctly', () => { }; const result = prepareTools({ - type: 'regular', tools: [functionTool], }); @@ -44,7 +42,6 @@ it('should process function tools correctly', () => { it('should add warnings for provider-defined tools', () => { const result = prepareTools({ - type: 'regular', tools: [ { type: 'provider-defined' as const, @@ -82,7 +79,6 @@ describe('tool choice handling', () => { it('should handle auto tool choice', () => { const result = prepareTools({ - type: 'regular', tools: [basicTool], toolChoice: { type: 'auto' }, }); @@ -92,7 +88,6 @@ describe('tool choice handling', () => { it('should handle none tool choice', () => { const result = prepareTools({ - type: 'regular', tools: [basicTool], toolChoice: { type: 'none' }, }); @@ -115,7 +110,6 @@ describe('tool choice handling', () => { it('should handle required tool choice', () => { const result = prepareTools({ - type: 'regular', tools: [basicTool], toolChoice: { type: 'required' }, }); @@ -138,7 +132,6 @@ describe('tool choice handling', () => { it('should handle tool type tool choice by filtering tools', () => { const result = prepareTools({ - type: 'regular', tools: [basicTool], toolChoice: { type: 'tool', toolName: 'testFunction' }, }); diff --git a/packages/cohere/src/cohere-prepare-tools.ts b/packages/cohere/src/cohere-prepare-tools.ts index e27a53c5f75f..b2ac25418440 100644 --- a/packages/cohere/src/cohere-prepare-tools.ts +++ b/packages/cohere/src/cohere-prepare-tools.ts @@ -1,14 +1,17 @@ import { - LanguageModelV2, + LanguageModelV2CallOptions, LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; +import { CohereToolChoice } from './cohere-chat-prompt'; -export function prepareTools( - mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & { - type: 
'regular'; - }, -): { +export function prepareTools({ + tools, + toolChoice, +}: { + tools: LanguageModelV2CallOptions['tools']; + toolChoice?: LanguageModelV2CallOptions['toolChoice']; +}): { tools: | Array<{ type: 'function'; @@ -19,10 +22,12 @@ export function prepareTools( }; }> | undefined; - toolChoice: 'NONE' | 'REQUIRED' | undefined; + toolChoice: CohereToolChoice; toolWarnings: LanguageModelV2CallWarning[]; } { - const tools = mode.tools?.length ? mode.tools : undefined; + // when the tools array is empty, change it to undefined to prevent errors: + tools = tools?.length ? tools : undefined; + const toolWarnings: LanguageModelV2CallWarning[] = []; if (tools == null) { @@ -53,8 +58,6 @@ export function prepareTools( } } - const toolChoice = mode.toolChoice; - if (toolChoice == null) { return { tools: cohereTools, toolChoice: undefined, toolWarnings }; } diff --git a/packages/google/src/google-generative-ai-language-model.test.ts b/packages/google/src/google-generative-ai-language-model.test.ts index 4fdfad776f4c..405948232c04 100644 --- a/packages/google/src/google-generative-ai-language-model.test.ts +++ b/packages/google/src/google-generative-ai-language-model.test.ts @@ -245,7 +245,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -263,7 +262,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -299,7 +297,6 @@ describe('doGenerate', () => { const { text, finishReason } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -335,22 +332,19 @@ describe('doGenerate', () => { const { toolCalls, finishReason, text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -371,7 +365,6 @@ describe('doGenerate', () => { const { rawResponse } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -390,7 +383,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: [ { role: 'system', content: 'test system instruction' }, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, @@ -419,7 +411,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: [ { role: 'system', content: 'test system instruction' }, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, @@ -452,25 +443,22 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 
'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -500,13 +488,13 @@ describe('doGenerate', () => { }); }); - it('should set response mime type in object-json mode', async () => { + it('should set response mime type with responseFormat', async () => { prepareJsonResponse({}); await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', schema: { type: 'object', properties: { location: { type: 'string' } }, @@ -536,13 +524,13 @@ describe('doGenerate', () => { }); }); - it('should pass specification in object-json mode with structuredOutputs = true (default)', async () => { + it('should pass specification with responseFormat and structuredOutputs = true (default)', async () => { prepareJsonResponse({}); await provider.languageModel('gemini-pro').doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', schema: { type: 'object', properties: { @@ -572,15 +560,15 @@ describe('doGenerate', () => { }); }); - it('should not pass specification in object-json mode with structuredOutputs = false', async () => { + it('should not pass specification with responseFormat and structuredOutputs = false', async () => { prepareJsonResponse({}); await provider .languageModel('gemini-pro', { structuredOutputs: false }) .doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', schema: { type: 'object', properties: { @@ -602,14 +590,13 @@ describe('doGenerate', () => { }); }); - it('should pass tool specification in object-tool mode', async () => { + it('should pass tools and toolChoice', async () => { prepareJsonResponse({}); await provider.languageModel('gemini-pro').doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-tool', - tool: { + tools: [ + { name: 'test-tool', type: 'function', parameters: { @@ -622,7 +609,8 @@ describe('doGenerate', () => { additionalProperties: false, }, }, - }, + ], + toolChoice: { type: 'required' }, prompt: TEST_PROMPT, }); @@ -661,7 +649,6 @@ describe('doGenerate', () => { await provider.chat('gemini-pro').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -683,7 +670,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json', @@ -717,7 +703,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -748,7 +733,6 @@ describe('doGenerate', () => { const { sources } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -804,7 +788,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'X-Sync-Request': 'sync-request-value', @@ -860,7 +843,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -888,7 +870,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, 
prompt: TEST_PROMPT, }); @@ -929,7 +910,6 @@ describe('doGenerate', () => { const { providerMetadata } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -980,7 +960,6 @@ describe('doGenerate', () => { const { providerMetadata } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1030,7 +1009,6 @@ describe('doGenerate', () => { }); await gemini2Pro.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1049,7 +1027,6 @@ describe('doGenerate', () => { }); await gemini2Flash.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1068,7 +1045,6 @@ describe('doGenerate', () => { }); await geminiPro.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1092,7 +1068,6 @@ describe('doGenerate', () => { await geminiPro.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1150,7 +1125,6 @@ describe('doGenerate', () => { const { text, files } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1206,7 +1180,6 @@ describe('doGenerate', () => { const { text, files } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1228,7 +1201,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { google: { @@ -1279,7 +1251,6 @@ describe('doGenerate', () => { const { text, files } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1400,7 +1371,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1445,7 +1415,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1492,7 +1461,6 @@ describe('doStream', () => { const { rawResponse } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1511,7 +1479,6 @@ describe('doStream', () => { prepareStreamResponse({ content: [''] }); await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1530,7 +1497,6 @@ describe('doStream', () => { prepareStreamResponse({ content: [''] }); await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1549,7 +1515,6 @@ describe('doStream', () => { await provider.chat('gemini-pro').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -1569,7 +1534,6 @@ describe('doStream', () => { const { request } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1595,7 +1559,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1645,7 +1608,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1684,7 +1646,6 @@ describe('doStream', () => { }); await gemini2Pro.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, 
}); @@ -1704,7 +1665,6 @@ describe('doStream', () => { }); await gemini2Flash.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1724,7 +1684,6 @@ describe('doStream', () => { }); await geminiPro.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1749,7 +1708,6 @@ describe('doStream', () => { await geminiPro.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1783,7 +1741,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1819,7 +1776,6 @@ describe('doStream', () => { }; const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1876,22 +1832,19 @@ describe('doStream', () => { }; const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1908,7 +1861,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { google: { foo: 'bar', responseModalities: ['TEXT', 'IMAGE'] }, diff --git a/packages/google/src/google-generative-ai-language-model.ts b/packages/google/src/google-generative-ai-language-model.ts index 8ce4ef73ce54..6e806e95be12 100644 --- a/packages/google/src/google-generative-ai-language-model.ts +++ b/packages/google/src/google-generative-ai-language-model.ts @@ -68,7 +68,6 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV2 { } private async getArgs({ - mode, prompt, maxTokens, temperature, @@ -79,10 +78,10 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, + tools, + toolChoice, providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0]) { - const type = mode.type; - const warnings: LanguageModelV2CallWarning[] = []; const googleOptions = parseProviderOptions({ provider: 'google', providerOptions, schema: googleGenerativeAIProviderOptionsSchema, }); - const generationConfig = { - // standardized settings: - maxOutputTokens: maxTokens, - temperature, - topK, - topP, - frequencyPenalty, - presencePenalty, - stopSequences, - seed, - - // response format: - responseMimeType: - responseFormat?.type === 'json' ? 'application/json' : undefined, - responseSchema: - responseFormat?.type === 'json' && - responseFormat.schema != null && - // Google GenAI does not support all OpenAPI Schema features, - // so this is needed as an escape hatch: - this.supportsStructuredOutputs - ? 
convertJSONSchemaToOpenAPISchema(responseFormat.schema) - : undefined, - ...(this.settings.audioTimestamp && { - audioTimestamp: this.settings.audioTimestamp, - }), - - // provider options: - responseModalities: googleOptions?.responseModalities, - }; - const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt); - switch (type) { - case 'regular': { - const { tools, toolConfig, toolWarnings } = prepareTools( - mode, - this.settings.useSearchGrounding ?? false, - this.settings.dynamicRetrievalConfig, - this.modelId, - ); - - return { - args: { - generationConfig, - contents, - systemInstruction, - safetySettings: this.settings.safetySettings, - tools, - toolConfig, - cachedContent: this.settings.cachedContent, - }, - warnings: [...warnings, ...toolWarnings], - }; - } - - case 'object-json': { - return { - args: { - generationConfig: { - ...generationConfig, - responseMimeType: 'application/json', - responseSchema: - mode.schema != null && - // Google GenAI does not support all OpenAPI Schema features, - // so this is needed as an escape hatch: - this.supportsStructuredOutputs - ? convertJSONSchemaToOpenAPISchema(mode.schema) - : undefined, - }, - contents, - systemInstruction, - safetySettings: this.settings.safetySettings, - cachedContent: this.settings.cachedContent, - }, - warnings, - }; - } - - case 'object-tool': { - return { - args: { - generationConfig, - contents, - tools: { - functionDeclarations: [ - { - name: mode.tool.name, - description: mode.tool.description ?? '', - parameters: convertJSONSchemaToOpenAPISchema( - mode.tool.parameters, - ), - }, - ], - }, - toolConfig: { functionCallingConfig: { mode: 'ANY' } }, - safetySettings: this.settings.safetySettings, - cachedContent: this.settings.cachedContent, - }, - warnings, - }; - } - - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported type: ${_exhaustiveCheck}`); - } - } + const { + tools: googleTools, + toolConfig: googleToolConfig, + toolWarnings, + } = prepareTools({ + tools, + toolChoice, + useSearchGrounding: this.settings.useSearchGrounding ?? false, + dynamicRetrievalConfig: this.settings.dynamicRetrievalConfig, + modelId: this.modelId, + }); + + return { + args: { + generationConfig: { + // standardized settings: + maxOutputTokens: maxTokens, + temperature, + topK, + topP, + frequencyPenalty, + presencePenalty, + stopSequences, + seed, + + // response format: + responseMimeType: + responseFormat?.type === 'json' ? 'application/json' : undefined, + responseSchema: + responseFormat?.type === 'json' && + responseFormat.schema != null && + // Google GenAI does not support all OpenAPI Schema features, + // so this is needed as an escape hatch: + this.supportsStructuredOutputs + ? 
convertJSONSchemaToOpenAPISchema(responseFormat.schema) + : undefined, + ...(this.settings.audioTimestamp && { + audioTimestamp: this.settings.audioTimestamp, + }), + + // provider options: + responseModalities: googleOptions?.responseModalities, + }, + contents, + systemInstruction, + safetySettings: this.settings.safetySettings, + tools: googleTools, + toolConfig: googleToolConfig, + cachedContent: this.settings.cachedContent, + }, + warnings: [...warnings, ...toolWarnings], + }; } supportsUrl(url: URL): boolean { diff --git a/packages/google/src/google-prepare-tools.ts b/packages/google/src/google-prepare-tools.ts index a50f23888527..25b61cca4eb1 100644 --- a/packages/google/src/google-prepare-tools.ts +++ b/packages/google/src/google-prepare-tools.ts @@ -1,5 +1,5 @@ import { - LanguageModelV2, + LanguageModelV2CallOptions, LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; @@ -9,14 +9,19 @@ import { GoogleGenerativeAIModelId, } from './google-generative-ai-settings'; -export function prepareTools( - mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & { - type: 'regular'; - }, - useSearchGrounding: boolean, - dynamicRetrievalConfig: DynamicRetrievalConfig | undefined, - modelId: GoogleGenerativeAIModelId, -): { +export function prepareTools({ + tools, + toolChoice, + useSearchGrounding, + dynamicRetrievalConfig, + modelId, +}: { + tools: LanguageModelV2CallOptions['tools']; + toolChoice?: LanguageModelV2CallOptions['toolChoice']; + useSearchGrounding: boolean; + dynamicRetrievalConfig: DynamicRetrievalConfig | undefined; + modelId: GoogleGenerativeAIModelId; +}): { tools: | undefined | { @@ -42,7 +47,9 @@ }; toolWarnings: LanguageModelV2CallWarning[]; } { - const tools = mode.tools?.length ? mode.tools : undefined; + // when the tools array is empty, change it to undefined to prevent errors: + tools = tools?.length ? 
tools : undefined; + const toolWarnings: LanguageModelV2CallWarning[] = []; const isGemini2 = modelId.includes('gemini-2'); @@ -81,8 +88,6 @@ export function prepareTools( } } - const toolChoice = mode.toolChoice; - if (toolChoice == null) { return { tools: { functionDeclarations }, diff --git a/packages/groq/src/groq-chat-language-model.test.ts b/packages/groq/src/groq-chat-language-model.test.ts index da088ee9cec6..5a325e0926c0 100644 --- a/packages/groq/src/groq-chat-language-model.test.ts +++ b/packages/groq/src/groq-chat-language-model.test.ts @@ -114,7 +114,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -128,7 +127,6 @@ describe('doGenerate', () => { const { reasoning } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -143,7 +141,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -162,7 +159,6 @@ describe('doGenerate', () => { const { response } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -181,7 +177,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -199,7 +194,6 @@ describe('doGenerate', () => { const response = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -214,7 +208,6 @@ describe('doGenerate', () => { const response = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -230,7 +223,6 @@ describe('doGenerate', () => { const { rawResponse } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -249,7 +241,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -267,7 +258,6 @@ describe('doGenerate', () => { user: 'test-user-id', }).doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { groq: { reasoningFormat: 'hidden' }, @@ -288,25 +278,22 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -348,7 +335,6 @@ describe('doGenerate', () => { await provider('gemma2-9b-it').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -379,25 +365,22 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - 
properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -412,15 +395,15 @@ describe('doGenerate', () => { ]); }); - it('should pass object-json mode', async () => { + it('should pass response format information', async () => { prepareJsonResponse({ content: '{"value":"Spark"}' }); const model = provider('gemma2-9b-it'); await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', name: 'test-name', description: 'test description', schema: { @@ -448,7 +431,6 @@ describe('doGenerate', () => { const { request } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -499,7 +481,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -546,7 +527,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -608,22 +588,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -737,22 +714,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -879,22 +853,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'searchGoogle', - parameters: { - type: 'object', - properties: { query: { type: 'string' } }, - required: ['query'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'searchGoogle', + parameters: { + type: 'object', + properties: { query: { type: 'string' } }, + required: ['query'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: 
TEST_PROMPT, }); @@ -974,22 +945,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1033,7 +1001,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1065,7 +1032,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1092,7 +1058,6 @@ describe('doStream', () => { const { rawResponse } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1112,7 +1077,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1135,7 +1099,6 @@ describe('doStream', () => { await provider('gemma2-9b-it').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -1155,7 +1118,6 @@ describe('doStream', () => { const { request } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, });
diff --git a/packages/groq/src/groq-chat-language-model.ts b/packages/groq/src/groq-chat-language-model.ts index d690e515d814..acf49b52b612 100644 --- a/packages/groq/src/groq-chat-language-model.ts +++ b/packages/groq/src/groq-chat-language-model.ts
@@ -63,7 +63,6 @@ export class GroqChatLanguageModel implements LanguageModelV2 { } private getArgs({ - mode, prompt, maxTokens, temperature, @@ -75,12 +74,12 @@ export class GroqChatLanguageModel implements LanguageModelV2 { responseFormat, seed, stream, + tools, + toolChoice, providerOptions, }: Parameters<LanguageModelV2['doGenerate']>[0] & { stream: boolean; }) { - const type = mode.type; - const warnings: LanguageModelV2CallWarning[] = []; if (topK != null) { @@ -110,90 +109,49 @@ }), }); - const baseArgs = { - // model id: - model: this.modelId, - - // model specific settings: - user: this.settings.user, - parallel_tool_calls: this.settings.parallelToolCalls, - - // standardized settings: - max_tokens: maxTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - stop: stopSequences, - seed, - - // response format: - response_format: - // json object response format is not supported for streaming: - stream === false && responseFormat?.type === 'json' ?
{ type: 'json_object' } - : undefined, - - // provider options: - reasoning_format: groqOptions?.reasoningFormat, - - // messages: - messages: convertToGroqChatMessages(prompt), - }; + const { + tools: groqTools, + toolChoice: groqToolChoice, + toolWarnings, + } = prepareTools({ tools, toolChoice }); - switch (type) { - case 'regular': { - const { tools, tool_choice, toolWarnings } = prepareTools({ mode }); - return { - args: { - ...baseArgs, - tools, - tool_choice, - }, - warnings: [...warnings, ...toolWarnings], - }; - } - - case 'object-json': { - return { - args: { - ...baseArgs, - response_format: - // json object response format is not supported for streaming: - stream === false ? { type: 'json_object' } : undefined, - }, - warnings, - }; - } - - case 'object-tool': { - return { - args: { - ...baseArgs, - tool_choice: { - type: 'function', - function: { name: mode.tool.name }, - }, - tools: [ - { - type: 'function', - function: { - name: mode.tool.name, - description: mode.tool.description, - parameters: mode.tool.parameters, - }, - }, - ], - }, - warnings, - }; - } - - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported type: ${_exhaustiveCheck}`); - } - } + return { + args: { + // model id: + model: this.modelId, + + // model specific settings: + user: this.settings.user, + parallel_tool_calls: this.settings.parallelToolCalls, + + // standardized settings: + max_tokens: maxTokens, + temperature, + top_p: topP, + frequency_penalty: frequencyPenalty, + presence_penalty: presencePenalty, + stop: stopSequences, + seed, + + // response format: + response_format: + // json object response format is not supported for streaming: + stream === false && responseFormat?.type === 'json' ? { type: 'json_object' } : undefined, + + // provider options: + reasoning_format: groqOptions?.reasoningFormat, + + // messages: + messages: convertToGroqChatMessages(prompt), + + // tools: + tools: groqTools, + tool_choice: groqToolChoice, + }, + warnings: [...warnings, ...toolWarnings], + }; } async doGenerate(
diff --git a/packages/groq/src/groq-prepare-tools.ts b/packages/groq/src/groq-prepare-tools.ts index 8e16a421f3b4..ef8a5ef8a8f4 100644 --- a/packages/groq/src/groq-prepare-tools.ts +++ b/packages/groq/src/groq-prepare-tools.ts
@@ -1,15 +1,15 @@ import { - LanguageModelV2, + LanguageModelV2CallOptions, LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; export function prepareTools({ - mode, + tools, + toolChoice, }: { - mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & { - type: 'regular'; - }; + tools: LanguageModelV2CallOptions['tools']; + toolChoice?: LanguageModelV2CallOptions['toolChoice']; }): { tools: | undefined | Array<{ type: 'function'; function: { name: string; description: string | undefined; parameters: unknown; }; }>; - tool_choice: + toolChoice: | { type: 'function'; function: { name: string } } | 'auto' | 'none' @@ -30,15 +30,14 @@ toolWarnings: LanguageModelV2CallWarning[]; } {
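Note: a minimal sketch of how the renamed camelCase result feeds the snake_case Groq wire format (requestBody below is illustrative only; the real mapping is the getArgs change above):

const {
  tools: groqTools,
  toolChoice: groqToolChoice,
  toolWarnings,
} = prepareTools({ tools, toolChoice });

const requestBody = {
  model: this.modelId,
  tools: groqTools, // OpenAI-style function tools
  tool_choice: groqToolChoice, // 'auto' | 'none' | 'required' | { type: 'function', function: { name } }
};

// when the tools array is empty, change it to undefined to prevent errors: - const tools = mode.tools?.length ? mode.tools : undefined; + tools = tools?.length ?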
tools : undefined; + const toolWarnings: LanguageModelV2CallWarning[] = []; if (tools == null) { - return { tools: undefined, tool_choice: undefined, toolWarnings }; + return { tools: undefined, toolChoice: undefined, toolWarnings }; } - const toolChoice = mode.toolChoice; - const groqTools: Array<{ type: 'function'; function: { @@ -64,7 +63,7 @@ export function prepareTools({ } if (toolChoice == null) { - return { tools: groqTools, tool_choice: undefined, toolWarnings }; + return { tools: groqTools, toolChoice: undefined, toolWarnings }; } const type = toolChoice.type; @@ -73,11 +72,11 @@ export function prepareTools({ case 'auto': case 'none': case 'required': - return { tools: groqTools, tool_choice: type, toolWarnings }; + return { tools: groqTools, toolChoice: type, toolWarnings }; case 'tool': return { tools: groqTools, - tool_choice: { + toolChoice: { type: 'function', function: { name: toolChoice.toolName, diff --git a/packages/mistral/src/mistral-chat-language-model.test.ts b/packages/mistral/src/mistral-chat-language-model.test.ts index db826d0a9aef..7e500f9a141c 100644 --- a/packages/mistral/src/mistral-chat-language-model.test.ts +++ b/packages/mistral/src/mistral-chat-language-model.test.ts @@ -70,7 +70,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -82,7 +81,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'messages', - mode: { type: 'regular' }, prompt: [ { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, { @@ -129,7 +127,6 @@ describe('doGenerate', () => { const { toolCalls } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -151,7 +148,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -170,7 +166,6 @@ describe('doGenerate', () => { const { response } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -188,7 +183,6 @@ describe('doGenerate', () => { const { rawResponse } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -207,7 +201,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -222,25 +215,22 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -279,7 +269,6 @@ describe('doGenerate', () => { await provider.chat('mistral-small-latest').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -301,7 +290,6 @@ describe('doGenerate', () => { const { request } = 
await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -341,7 +329,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -385,7 +372,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -414,7 +400,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'messages', - mode: { type: 'regular' }, prompt: [ { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, { @@ -463,22 +448,19 @@ describe('doStream', () => { .chat('mistral-large-latest') .doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -520,7 +502,6 @@ describe('doStream', () => { const { rawResponse } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -540,7 +521,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -563,7 +543,6 @@ describe('doStream', () => { await provider.chat('mistral-small-latest').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -583,7 +562,6 @@ describe('doStream', () => { const { request } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -607,7 +585,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, });
diff --git a/packages/mistral/src/mistral-chat-language-model.ts b/packages/mistral/src/mistral-chat-language-model.ts index 3ebd59b80f5c..104f544bae85 100644 --- a/packages/mistral/src/mistral-chat-language-model.ts +++ b/packages/mistral/src/mistral-chat-language-model.ts
@@ -59,7 +59,6 @@ export class MistralChatLanguageModel implements LanguageModelV2 { } private getArgs({ - mode, prompt, maxTokens, temperature, @@ -71,9 +70,9 @@ export class MistralChatLanguageModel implements LanguageModelV2 { responseFormat, seed, providerOptions, + tools, + toolChoice, }: Parameters<LanguageModelV2['doGenerate']>[0]) { - const type = mode.type; - const warnings: LanguageModelV2CallWarning[] = []; if (topK != null) { @@ -141,42 +140,23 @@ export class MistralChatLanguageModel implements LanguageModelV2 { messages: convertToMistralChatMessages(prompt), }; - switch (type) { - case 'regular': { - const { tools, tool_choice, toolWarnings } = prepareTools(mode); - - return { - args: { ...baseArgs, tools, tool_choice }, - warnings: [...warnings, ...toolWarnings], - }; - } - - case 'object-json': { - return { - args: { - ...baseArgs, - response_format: { type: 'json_object' }, - }, - warnings, - }; - } - - case 'object-tool': { - return { - args: { - ...baseArgs, - tool_choice: 'any', - tools: [{ type: 'function', function: mode.tool }], -
}, - warnings, - }; - } + const { + tools: mistralTools, + toolChoice: mistralToolChoice, + toolWarnings, + } = prepareTools({ + tools, + toolChoice, + }); - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported type: ${_exhaustiveCheck}`); - } - } + return { + args: { + ...baseArgs, + tools: mistralTools, + tool_choice: mistralToolChoice, + }, + warnings: [...warnings, ...toolWarnings], + }; } async doGenerate(
diff --git a/packages/mistral/src/mistral-chat-prompt.ts b/packages/mistral/src/mistral-chat-prompt.ts index 10f32b60dcc5..13f1dced55ac 100644 --- a/packages/mistral/src/mistral-chat-prompt.ts +++ b/packages/mistral/src/mistral-chat-prompt.ts
@@ -38,3 +38,9 @@ export interface MistralToolMessage { content: string; tool_call_id: string; } + +export type MistralToolChoice = + | { type: 'function'; function: { name: string } } + | 'auto' + | 'none' + | 'any';
diff --git a/packages/mistral/src/mistral-prepare-tools.ts b/packages/mistral/src/mistral-prepare-tools.ts index 669184b241c3..d6b17f1d9d1a 100644 --- a/packages/mistral/src/mistral-prepare-tools.ts +++ b/packages/mistral/src/mistral-prepare-tools.ts
@@ -1,14 +1,17 @@ import { - LanguageModelV2, + LanguageModelV2CallOptions, LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; +import { MistralToolChoice } from './mistral-chat-prompt'; -export function prepareTools( - mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & { - type: 'regular'; - }, -): { +export function prepareTools({ + tools, + toolChoice, +}: { + tools: LanguageModelV2CallOptions['tools']; + toolChoice?: LanguageModelV2CallOptions['toolChoice']; +}): { tools: | Array<{ type: 'function'; function: { name: string; description: string | undefined; parameters: unknown; }; }> | undefined; - tool_choice: - | { type: 'function'; function: { name: string } } - | 'auto' - | 'none' - | 'any' - | undefined; + toolChoice: MistralToolChoice | undefined; toolWarnings: LanguageModelV2CallWarning[]; } {
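Note: the new MistralToolChoice type makes the mapping from the standardized tool choice explicit. A hypothetical helper to illustrate it (the real branches appear further down in this diff, where 'required' is translated to Mistral's 'any' and 'tool' forces 'any' over a filtered tool list):

// hypothetical helper, for illustration only:
function toMistralToolChoice(
  choice: 'auto' | 'none' | 'required',
): MistralToolChoice {
  return choice === 'required' ? 'any' : choice;
}

// when the tools array is empty, change it to undefined to prevent errors: - const tools = mode.tools?.length ? mode.tools : undefined; + tools = tools?.length ?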
tools : undefined; + const toolWarnings: LanguageModelV2CallWarning[] = []; if (tools == null) { - return { tools: undefined, tool_choice: undefined, toolWarnings }; + return { tools: undefined, toolChoice: undefined, toolWarnings }; } const mistralTools: Array<{ @@ -59,10 +58,8 @@ export function prepareTools( } } - const toolChoice = mode.toolChoice; - if (toolChoice == null) { - return { tools: mistralTools, tool_choice: undefined, toolWarnings }; + return { tools: mistralTools, toolChoice: undefined, toolWarnings }; } const type = toolChoice.type; @@ -70,9 +67,9 @@ export function prepareTools( switch (type) { case 'auto': case 'none': - return { tools: mistralTools, tool_choice: type, toolWarnings }; + return { tools: mistralTools, toolChoice: type, toolWarnings }; case 'required': - return { tools: mistralTools, tool_choice: 'any', toolWarnings }; + return { tools: mistralTools, toolChoice: 'any', toolWarnings }; // mistral does not support tool mode directly, // so we filter the tools and force the tool choice through 'any' @@ -81,7 +78,7 @@ export function prepareTools( tools: mistralTools.filter( tool => tool.function.name === toolChoice.toolName, ), - tool_choice: 'any', + toolChoice: 'any', toolWarnings, }; default: { diff --git a/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts b/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts index 717eb4fb660f..74c8a0d38806 100644 --- a/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts +++ b/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts @@ -152,7 +152,6 @@ describe('doGenerate', () => { }); await modelWithUser.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); expect(await server.calls[0].requestBody).toMatchObject({ @@ -165,7 +164,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -180,7 +178,6 @@ describe('doGenerate', () => { const { text, reasoning } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -198,7 +195,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -217,7 +213,6 @@ describe('doGenerate', () => { const { response } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -236,7 +231,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -254,7 +248,6 @@ describe('doGenerate', () => { const response = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -269,7 +262,6 @@ describe('doGenerate', () => { const response = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -283,7 +275,6 @@ describe('doGenerate', () => { const { rawResponse } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -302,7 +293,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -319,7 +309,6 @@ describe('doGenerate', () => { user: 'test-user-id', }).doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ 
-335,7 +324,6 @@ describe('doGenerate', () => { await provider('grok-beta').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, providerOptions: { 'test-provider': { someCustomOption: 'test-value', @@ -356,7 +344,6 @@ describe('doGenerate', () => { await provider('grok-beta').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, providerOptions: { notThisProviderName: { someCustomOption: 'test-value', @@ -376,25 +363,22 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -438,7 +422,6 @@ describe('doGenerate', () => { await provider('grok-beta').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -469,25 +452,22 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -519,7 +499,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'text' }, }); @@ -537,7 +516,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json' }, }); @@ -565,7 +543,6 @@ describe('doGenerate', () => { const { warnings } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json', @@ -611,7 +588,6 @@ describe('doGenerate', () => { const { warnings } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json', @@ -646,7 +622,7 @@ describe('doGenerate', () => { expect(warnings).toEqual([]); }); - it('should use json_schema & strict in object-json mode when structuredOutputs are enabled', async () => { + it('should use json_schema & strict with responseFormat json when structuredOutputs are enabled', async () => { prepareJsonResponse({ content: '{"value":"Spark"}' }); const model = new OpenAICompatibleChatLanguageModel( @@ -662,8 +638,8 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + 
responseFormat: { + type: 'json', schema: { type: 'object', properties: { value: { type: 'string' } }, @@ -694,7 +670,7 @@ describe('doGenerate', () => { }); }); - it('should set name & description in object-json mode when structuredOutputs are enabled', async () => { + it('should set name & description with responseFormat json when structuredOutputs are enabled', async () => { prepareJsonResponse({ content: '{"value":"Spark"}' }); const model = new OpenAICompatibleChatLanguageModel( @@ -710,8 +686,8 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', name: 'test-name', description: 'test description', schema: { @@ -745,7 +721,7 @@ describe('doGenerate', () => { }); }); - it('should allow for undefined schema in object-json mode when structuredOutputs are enabled', async () => { + it('should allow for undefined schema with responseFormat json when structuredOutputs are enabled', async () => { prepareJsonResponse({ content: '{"value":"Spark"}' }); const model = new OpenAICompatibleChatLanguageModel( @@ -761,8 +737,8 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', name: 'test-name', description: 'test description', }, @@ -777,83 +753,6 @@ describe('doGenerate', () => { }, }); }); - - it('should set strict in object-tool mode when structuredOutputs are enabled', async () => { - prepareJsonResponse({ - tool_calls: [ - { - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - type: 'function', - function: { - name: 'test-tool', - arguments: '{"value":"Spark"}', - }, - }, - ], - }); - - const model = new OpenAICompatibleChatLanguageModel( - 'gpt-4o-2024-08-06', - {}, - { - provider: 'test-provider', - url: () => 'https://my.api.com/v1/chat/completions', - headers: () => ({}), - supportsStructuredOutputs: true, - }, - ); - - const result = await model.doGenerate({ - inputFormat: 'prompt', - mode: { - type: 'object-tool', - tool: { - type: 'function', - name: 'test-tool', - description: 'test description', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - }, - prompt: TEST_PROMPT, - }); - - expect(await server.calls[0].requestBody).toStrictEqual({ - model: 'gpt-4o-2024-08-06', - messages: [{ role: 'user', content: 'Hello' }], - tool_choice: { type: 'function', function: { name: 'test-tool' } }, - tools: [ - { - type: 'function', - function: { - name: 'test-tool', - description: 'test description', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - }, - ], - }); - - expect(result.toolCalls).toStrictEqual([ - { - args: '{"value":"Spark"}', - toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - toolCallType: 'function', - toolName: 'test-tool', - }, - ]); - }); }); it('should send request body', async () => { @@ -861,7 +760,6 @@ describe('doGenerate', () => { const { request } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -890,7 +788,6 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -914,7 +811,6 @@ describe('doGenerate', () => { const result = await 
model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -939,7 +835,6 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -992,7 +887,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1040,7 +934,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1117,22 +1010,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1249,22 +1139,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1394,22 +1281,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'searchGoogle', - parameters: { - type: 'object', - properties: { query: { type: 'string' } }, - required: ['query'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'searchGoogle', + parameters: { + type: 'object', + properties: { query: { type: 'string' } }, + required: ['query'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1495,22 +1379,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1557,7 +1438,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1589,7 +1469,6 @@ 
describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1617,7 +1496,6 @@ describe('doStream', () => { const { rawResponse } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1637,7 +1515,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1662,7 +1539,6 @@ describe('doStream', () => { await provider('grok-beta').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -1682,7 +1558,6 @@ describe('doStream', () => { await provider('grok-beta').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, providerOptions: { 'test-provider': { someCustomOption: 'test-value', @@ -1704,7 +1579,6 @@ describe('doStream', () => { await provider('grok-beta').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, providerOptions: { notThisProviderName: { someCustomOption: 'test-value', @@ -1725,7 +1599,6 @@ describe('doStream', () => { const { request } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1753,7 +1626,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1781,7 +1653,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1806,7 +1677,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1890,7 +1760,6 @@ describe('doStream simulated streaming', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1927,7 +1796,6 @@ describe('doStream simulated streaming', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1979,22 +1847,19 @@ describe('doStream simulated streaming', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -2104,7 +1969,6 @@ describe('metadata extraction', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -2143,7 +2007,6 @@ describe('metadata extraction', () => { const result = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); diff --git a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts index e637be09c7fc..edcea8c529d4 100644 --- a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts +++ 
b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts
@@ -101,7 +101,6 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 { } private getArgs({ - mode, prompt, maxTokens, temperature, @@ -113,16 +112,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 { stopSequences, responseFormat, seed, + toolChoice, + tools, }: Parameters<LanguageModelV2['doGenerate']>[0]) { - const type = mode.type; - const warnings: LanguageModelV2CallWarning[] = []; if (topK != null) { - warnings.push({ - type: 'unsupported-setting', - setting: 'topK', - }); + warnings.push({ type: 'unsupported-setting', setting: 'topK' }); } if ( @@ -138,103 +134,57 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 { }); } - const baseArgs = { - // model id: - model: this.modelId, - - // model specific settings: - user: this.settings.user, - - // standardized settings: - max_tokens: maxTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - response_format: - responseFormat?.type === 'json' - ? this.supportsStructuredOutputs === true && - responseFormat.schema != null - ? { - type: 'json_schema', - json_schema: { - schema: responseFormat.schema, - name: responseFormat.name ?? 'response', - description: responseFormat.description, - }, - } - : { type: 'json_object' } - : undefined, + const { + tools: openaiTools, + toolChoice: openaiToolChoice, + toolWarnings, + } = prepareTools({ + tools, + toolChoice, + }); - stop: stopSequences, - seed, - ...providerOptions?.[this.providerOptionsName], + return { + args: { + // model id: + model: this.modelId, + + // model specific settings: + user: this.settings.user, + + // standardized settings: + max_tokens: maxTokens, + temperature, + top_p: topP, + frequency_penalty: frequencyPenalty, + presence_penalty: presencePenalty, + response_format: + responseFormat?.type === 'json' + ? this.supportsStructuredOutputs === true && + responseFormat.schema != null + ? { + type: 'json_schema', + json_schema: { + schema: responseFormat.schema, + name: responseFormat.name ?? 'response', + description: responseFormat.description, + }, + } + : { type: 'json_object' } : undefined, - // messages: - messages: convertToOpenAICompatibleChatMessages(prompt), - }; + stop: stopSequences, + seed, + ...providerOptions?.[this.providerOptionsName], - switch (type) { - case 'regular': { - const { tools, tool_choice, toolWarnings } = prepareTools({ - mode, - structuredOutputs: this.supportsStructuredOutputs, - }); - - return { - args: { ...baseArgs, tools, tool_choice }, - warnings: [...warnings, ...toolWarnings], - }; - } - - case 'object-json': { - return { - args: { - ...baseArgs, - response_format: - this.supportsStructuredOutputs === true && mode.schema != null - ? { - type: 'json_schema', - json_schema: { - schema: mode.schema, - name: mode.name ??
'response', - description: mode.description, - }, - } - : { type: 'json_object' }, - }, - warnings, - }; - } - - case 'object-tool': { - return { - args: { - ...baseArgs, - tool_choice: { - type: 'function', - function: { name: mode.tool.name }, - }, - tools: [ - { - type: 'function', - function: { - name: mode.tool.name, - description: mode.tool.description, - parameters: mode.tool.parameters, - }, - }, - ], - }, - warnings, - }; - } - - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported type: ${_exhaustiveCheck}`); - } - } + // messages: + messages: convertToOpenAICompatibleChatMessages(prompt), + + // tools: + tools: openaiTools, + tool_choice: openaiToolChoice, + }, + warnings: [...warnings, ...toolWarnings], + }; } async doGenerate( diff --git a/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts b/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts index 908851f527a3..f63872720852 100644 --- a/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts +++ b/packages/openai-compatible/src/openai-compatible-completion-language-model.test.ts @@ -119,7 +119,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -134,7 +133,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -149,7 +147,6 @@ describe('doGenerate', () => { const { request } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -167,7 +164,6 @@ describe('doGenerate', () => { const { response } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -188,7 +184,6 @@ describe('doGenerate', () => { .completionModel('gpt-3.5-turbo-instruct') .doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -205,7 +200,6 @@ describe('doGenerate', () => { .completionModel('gpt-3.5-turbo-instruct') .doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -219,7 +213,6 @@ describe('doGenerate', () => { const { rawResponse } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -238,7 +231,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -262,7 +254,6 @@ describe('doGenerate', () => { await provider.completionModel('gpt-3.5-turbo-instruct').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -282,7 +273,6 @@ describe('doGenerate', () => { await provider.completionModel('gpt-3.5-turbo-instruct').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { 'test-provider': { @@ -303,7 +293,6 @@ describe('doGenerate', () => { await provider.completionModel('gpt-3.5-turbo-instruct').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { notThisProviderName: { @@ -373,7 +362,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -409,7 +397,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ 
inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -445,7 +432,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -468,7 +454,6 @@ describe('doStream', () => { const { request } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -485,7 +470,6 @@ describe('doStream', () => { const { rawResponse } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -505,7 +489,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -531,7 +514,6 @@ describe('doStream', () => { await provider.completionModel('gpt-3.5-turbo-instruct').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -551,7 +533,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, providerOptions: { 'test-provider': { someCustomOption: 'test-value', @@ -573,7 +554,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, providerOptions: { notThisProviderName: { someCustomOption: 'test-value',
diff --git a/packages/openai-compatible/src/openai-compatible-completion-language-model.ts b/packages/openai-compatible/src/openai-compatible-completion-language-model.ts index ecca2544bdc0..dad58ef33f29 100644 --- a/packages/openai-compatible/src/openai-compatible-completion-language-model.ts +++ b/packages/openai-compatible/src/openai-compatible-completion-language-model.ts
@@ -4,7 +4,6 @@ import { LanguageModelV2CallWarning, LanguageModelV2FinishReason, LanguageModelV2StreamPart, - UnsupportedFunctionalityError, } from '@ai-sdk/provider'; import { combineHeaders, @@ -77,7 +76,6 @@ export class OpenAICompatibleCompletionLanguageModel } private getArgs({ - mode, inputFormat, prompt, maxTokens, @@ -90,16 +88,21 @@ export class OpenAICompatibleCompletionLanguageModel responseFormat, seed, providerOptions, + tools, + toolChoice, }: Parameters<LanguageModelV2['doGenerate']>[0]) { - const type = mode.type; - const warnings: LanguageModelV2CallWarning[] = []; if (topK != null) { - warnings.push({ - type: 'unsupported-setting', - setting: 'topK', - }); + warnings.push({ type: 'unsupported-setting', setting: 'topK' }); + } + + if (tools?.length) { + warnings.push({ type: 'unsupported-setting', setting: 'tools' }); + } + + if (toolChoice != null) { + warnings.push({ type: 'unsupported-setting', setting: 'toolChoice' }); + }
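Note: completion-style models previously threw UnsupportedFunctionalityError when tools or a tool choice were supplied; they now degrade gracefully and report call warnings instead. A caller-side sketch (completionModel and someTool are assumed names for illustration):

const { warnings } = await completionModel.doGenerate({
  inputFormat: 'prompt',
  prompt: TEST_PROMPT,
  tools: [someTool], // completion endpoints cannot call tools
});
// before: threw new UnsupportedFunctionalityError({ functionality: 'tools' })
// after: warnings includes { type: 'unsupported-setting', setting: 'tools' }

if (responseFormat != null && responseFormat.type !== 'text') { @@ -115,66 +118,34 @@ const stop = [...(stopSequences ?? []), ...(userStopSequences ?? [])]; - const baseArgs = { - // model id: - model: this.modelId, - - // model specific settings: - echo: this.settings.echo, - logit_bias: this.settings.logitBias, - suffix: this.settings.suffix, - user: this.settings.user, - - // standardized settings: - max_tokens: maxTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - seed, - ...providerOptions?.[this.providerOptionsName], - - // prompt: - prompt: completionPrompt, - - // stop sequences: - stop: stop.length > 0 ?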
stop : undefined, + return { + args: { + // model id: + model: this.modelId, + + // model specific settings: + echo: this.settings.echo, + logit_bias: this.settings.logitBias, + suffix: this.settings.suffix, + user: this.settings.user, + + // standardized settings: + max_tokens: maxTokens, + temperature, + top_p: topP, + frequency_penalty: frequencyPenalty, + presence_penalty: presencePenalty, + seed, + ...providerOptions?.[this.providerOptionsName], + + // prompt: + prompt: completionPrompt, + + // stop sequences: + stop: stop.length > 0 ? stop : undefined, + }, + warnings, }; - - switch (type) { - case 'regular': { - if (mode.tools?.length) { - throw new UnsupportedFunctionalityError({ - functionality: 'tools', - }); - } - - if (mode.toolChoice) { - throw new UnsupportedFunctionalityError({ - functionality: 'toolChoice', - }); - } - - return { args: baseArgs, warnings }; - } - - case 'object-json': { - throw new UnsupportedFunctionalityError({ - functionality: 'object-json mode', - }); - } - - case 'object-tool': { - throw new UnsupportedFunctionalityError({ - functionality: 'object-tool mode', - }); - } - - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported type: ${_exhaustiveCheck}`); - } - } } async doGenerate(
diff --git a/packages/openai-compatible/src/openai-compatible-prepare-tools.ts b/packages/openai-compatible/src/openai-compatible-prepare-tools.ts index ad5d53cdcc6b..d9fe75aafec5 100644 --- a/packages/openai-compatible/src/openai-compatible-prepare-tools.ts +++ b/packages/openai-compatible/src/openai-compatible-prepare-tools.ts
@@ -1,17 +1,15 @@ import { - LanguageModelV2, + LanguageModelV2CallOptions, LanguageModelV2CallWarning, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; export function prepareTools({ - mode, - structuredOutputs, + tools, + toolChoice, }: { - mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & { - type: 'regular'; - }; - structuredOutputs: boolean; + tools: LanguageModelV2CallOptions['tools']; + toolChoice?: LanguageModelV2CallOptions['toolChoice']; }): { tools: | undefined | Array<{ type: 'function'; function: { name: string; description: string | undefined; parameters: unknown; }; }>; - tool_choice: + toolChoice: | { type: 'function'; function: { name: string } } | 'auto' | 'none' @@ -32,15 +30,14 @@ toolWarnings: LanguageModelV2CallWarning[]; } {
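Note: a small sketch of what the reworked function returns for a forced tool (illustrative input values; the 'tool' branch shown further down produces the function-style choice, which getArgs forwards as the snake_case tool_choice request field):

const { tools, toolChoice, toolWarnings } = prepareTools({
  tools: [
    {
      type: 'function',
      name: 'test-tool',
      parameters: { type: 'object', properties: {} },
    },
  ],
  toolChoice: { type: 'tool', toolName: 'test-tool' },
});
// toolChoice is { type: 'function', function: { name: 'test-tool' } }

// when the tools array is empty, change it to undefined to prevent errors: - const tools = mode.tools?.length ? mode.tools : undefined; + tools = tools?.length ?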
tools : undefined; + const toolWarnings: LanguageModelV2CallWarning[] = []; if (tools == null) { - return { tools: undefined, tool_choice: undefined, toolWarnings }; + return { tools: undefined, toolChoice: undefined, toolWarnings }; } - const toolChoice = mode.toolChoice; - const openaiCompatTools: Array<{ type: 'function'; function: { @@ -66,7 +63,7 @@ export function prepareTools({ } if (toolChoice == null) { - return { tools: openaiCompatTools, tool_choice: undefined, toolWarnings }; + return { tools: openaiCompatTools, toolChoice: undefined, toolWarnings }; } const type = toolChoice.type; @@ -75,15 +72,13 @@ export function prepareTools({ case 'auto': case 'none': case 'required': - return { tools: openaiCompatTools, tool_choice: type, toolWarnings }; + return { tools: openaiCompatTools, toolChoice: type, toolWarnings }; case 'tool': return { tools: openaiCompatTools, - tool_choice: { + toolChoice: { type: 'function', - function: { - name: toolChoice.toolName, - }, + function: { name: toolChoice.toolName }, }, toolWarnings, }; diff --git a/packages/openai/src/openai-chat-language-model.test.ts b/packages/openai/src/openai-chat-language-model.test.ts index 8e9d960d5842..999e42d26f9a 100644 --- a/packages/openai/src/openai-chat-language-model.test.ts +++ b/packages/openai/src/openai-chat-language-model.test.ts @@ -227,7 +227,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -242,7 +241,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -257,7 +255,6 @@ describe('doGenerate', () => { const { request } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -275,7 +272,6 @@ describe('doGenerate', () => { const { response } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -294,7 +290,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -313,7 +308,6 @@ describe('doGenerate', () => { .chat('gpt-3.5-turbo', { logprobs: 1 }) .doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); expect(response.logprobs).toStrictEqual( @@ -329,7 +323,6 @@ describe('doGenerate', () => { const response = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -344,7 +337,6 @@ describe('doGenerate', () => { const response = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -358,7 +350,6 @@ describe('doGenerate', () => { const { rawResponse } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -377,7 +368,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -399,7 +389,6 @@ describe('doGenerate', () => { }) .doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -421,7 +410,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { reasoningEffort: 'low' }, @@ -442,7 +430,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 
'regular' }, prompt: TEST_PROMPT, }); @@ -460,7 +447,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { reasoningEffort: 'low' }, @@ -479,25 +465,22 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -541,7 +524,6 @@ describe('doGenerate', () => { await provider.chat('gpt-3.5-turbo').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -574,25 +556,22 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -624,25 +603,22 @@ describe('doGenerate', () => { result = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -688,7 +664,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'text' }, }); @@ -706,7 +681,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json' }, }); @@ -727,7 +701,6 @@ describe('doGenerate', () => { const { warnings } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json', @@ -766,7 +739,6 @@ describe('doGenerate', () => { const { warnings } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 
'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json', @@ -802,7 +774,7 @@ describe('doGenerate', () => { expect(warnings).toEqual([]); }); - it('should use json_schema & strict in object-json mode when structuredOutputs are enabled', async () => { + it('should use json_schema & strict with responseFormat json when structuredOutputs are enabled', async () => { prepareJsonResponse({ content: '{"value":"Spark"}' }); const model = provider.chat('gpt-4o-2024-08-06', { @@ -811,8 +783,8 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', schema: { type: 'object', properties: { value: { type: 'string' } }, @@ -844,7 +816,7 @@ describe('doGenerate', () => { }); }); - it('should set name & description in object-json mode when structuredOutputs are enabled', async () => { + it('should set name & description with responseFormat json when structuredOutputs are enabled', async () => { prepareJsonResponse({ content: '{"value":"Spark"}' }); const model = provider.chat('gpt-4o-2024-08-06', { @@ -853,8 +825,8 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', name: 'test-name', description: 'test description', schema: { @@ -889,7 +861,7 @@ describe('doGenerate', () => { }); }); - it('should allow for undefined schema in object-json mode when structuredOutputs are enabled', async () => { + it('should allow for undefined schema with responseFormat json when structuredOutputs are enabled', async () => { prepareJsonResponse({ content: '{"value":"Spark"}' }); const model = provider.chat('gpt-4o-2024-08-06', { @@ -898,8 +870,8 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', name: 'test-name', description: 'test description', }, @@ -915,7 +887,7 @@ describe('doGenerate', () => { }); }); - it('should set strict in object-tool mode when structuredOutputs are enabled', async () => { + it('should set strict with tool calls when structuredOutputs are enabled', async () => { prepareJsonResponse({ tool_calls: [ { @@ -935,9 +907,8 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-tool', - tool: { + tools: [ + { type: 'function', name: 'test-tool', description: 'test description', @@ -949,14 +920,15 @@ describe('doGenerate', () => { $schema: 'http://json-schema.org/draft-07/schema#', }, }, - }, + ], + toolChoice: { type: 'required' }, prompt: TEST_PROMPT, }); expect(await server.calls[0].requestBody).toStrictEqual({ model: 'gpt-4o-2024-08-06', messages: [{ role: 'user', content: 'Hello' }], - tool_choice: { type: 'function', function: { name: 'test-tool' } }, + tool_choice: 'required', tools: [ { type: 'function', @@ -1007,25 +979,22 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - 
], - toolChoice: { - type: 'tool', - toolName: 'test-tool', }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', }, prompt: TEST_PROMPT, }); @@ -1078,7 +1047,6 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1106,7 +1074,6 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1126,7 +1093,6 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, temperature: 0.5, topP: 0.7, @@ -1170,7 +1136,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, maxTokens: 1000, }); @@ -1190,7 +1155,6 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: [ { role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, @@ -1217,7 +1181,6 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: [ { role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, @@ -1251,7 +1214,6 @@ describe('doGenerate', () => { const result = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1269,7 +1231,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -1290,7 +1251,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -1317,7 +1277,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -1338,7 +1297,6 @@ describe('doGenerate', () => { await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -1438,7 +1396,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -1503,22 +1460,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1634,22 +1588,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - 
}, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1778,22 +1729,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'searchGoogle', - parameters: { - type: 'object', - properties: { query: { type: 'string' } }, - required: ['query'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'searchGoogle', + parameters: { + type: 'object', + properties: { query: { type: 'string' } }, + required: ['query'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1878,22 +1826,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -1951,22 +1896,19 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'function', - name: 'test-tool', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -2020,7 +1962,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -2058,7 +1999,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -2083,7 +2023,6 @@ describe('doStream', () => { const { request } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -2099,7 +2038,6 @@ describe('doStream', () => { const { rawResponse } = await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -2119,7 +2057,6 @@ describe('doStream', () => { await model.doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -2145,7 +2082,6 @@ describe('doStream', () => { await provider.chat('gpt-3.5-turbo').doStream({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, headers: { 'Custom-Request-Header': 'request-header-value', @@ -2177,7 +2113,6 @@ describe('doStream', () => { const { stream } = await model.doStream({ inputFormat: 'prompt', - mode: { 
type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -2218,7 +2153,6 @@ describe('doStream', () => {
     const { stream } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -2251,7 +2185,6 @@ describe('doStream', () => {
     await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
       providerOptions: {
         openai: {
@@ -2274,7 +2207,6 @@ describe('doStream', () => {
     await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
       providerOptions: {
         openai: {
@@ -2307,7 +2239,6 @@ describe('doStream', () => {
     const { stream } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -2348,7 +2279,6 @@ describe('doStream', () => {
     const { stream } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -2465,7 +2395,6 @@ describe('doStream simulated streaming', () => {
     const { stream } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -2508,22 +2437,19 @@ describe('doStream simulated streaming', () => {
     const { stream } = await model.doStream({
       inputFormat: 'prompt',
-      mode: {
-        type: 'regular',
-        tools: [
-          {
-            type: 'function',
-            name: 'test-tool',
-            parameters: {
-              type: 'object',
-              properties: { value: { type: 'string' } },
-              required: ['value'],
-              additionalProperties: false,
-              $schema: 'http://json-schema.org/draft-07/schema#',
-            },
+      tools: [
+        {
+          type: 'function',
+          name: 'test-tool',
+          parameters: {
+            type: 'object',
+            properties: { value: { type: 'string' } },
+            required: ['value'],
+            additionalProperties: false,
+            $schema: 'http://json-schema.org/draft-07/schema#',
           },
-        ],
-      },
+        },
+      ],
       prompt: TEST_PROMPT,
     });
@@ -2578,7 +2504,6 @@ describe('doStream simulated streaming', () => {
     const { stream } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
diff --git a/packages/openai/src/openai-chat-language-model.ts b/packages/openai/src/openai-chat-language-model.ts
index 9d2a9a7afc0d..8626c311a99e 100644
--- a/packages/openai/src/openai-chat-language-model.ts
+++ b/packages/openai/src/openai-chat-language-model.ts
@@ -82,7 +82,6 @@ export class OpenAIChatLanguageModel implements LanguageModelV2 {
   }
 
   private getArgs({
-    mode,
     prompt,
     maxTokens,
     temperature,
@@ -93,10 +92,10 @@ export class OpenAIChatLanguageModel implements LanguageModelV2 {
     stopSequences,
     responseFormat,
     seed,
+    tools,
+    toolChoice,
     providerOptions,
   }: Parameters<LanguageModelV2['doGenerate']>[0]) {
-    const type = mode.type;
-
     const warnings: LanguageModelV2CallWarning[] = [];
 
     if (topK != null) {
@@ -171,6 +170,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV2 {
       top_p: topP,
       frequency_penalty: frequencyPenalty,
       presence_penalty: presencePenalty,
+      // TODO improve below:
       response_format:
         responseFormat?.type === 'json' ?
this.supportsStructuredOutputs && responseFormat.schema != null @@ -268,91 +268,29 @@ export class OpenAIChatLanguageModel implements LanguageModelV2 { } } - switch (type) { - case 'regular': { - const { tools, tool_choice, functions, function_call, toolWarnings } = - prepareTools({ - mode, - useLegacyFunctionCalling, - structuredOutputs: this.supportsStructuredOutputs, - }); - - return { - args: { - ...baseArgs, - tools, - tool_choice, - functions, - function_call, - }, - warnings: [...warnings, ...toolWarnings], - }; - } - - case 'object-json': { - return { - args: { - ...baseArgs, - response_format: - this.supportsStructuredOutputs && mode.schema != null - ? { - type: 'json_schema', - json_schema: { - schema: mode.schema, - strict: true, - name: mode.name ?? 'response', - description: mode.description, - }, - } - : { type: 'json_object' }, - }, - warnings, - }; - } - - case 'object-tool': { - return { - args: useLegacyFunctionCalling - ? { - ...baseArgs, - function_call: { - name: mode.tool.name, - }, - functions: [ - { - name: mode.tool.name, - description: mode.tool.description, - parameters: mode.tool.parameters, - }, - ], - } - : { - ...baseArgs, - tool_choice: { - type: 'function', - function: { name: mode.tool.name }, - }, - tools: [ - { - type: 'function', - function: { - name: mode.tool.name, - description: mode.tool.description, - parameters: mode.tool.parameters, - strict: this.supportsStructuredOutputs ? true : undefined, - }, - }, - ], - }, - warnings, - }; - } + const { + tools: openaiTools, + toolChoice: openaiToolChoice, + functions, + function_call, + toolWarnings, + } = prepareTools({ + tools, + toolChoice, + useLegacyFunctionCalling, + structuredOutputs: this.supportsStructuredOutputs, + }); - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported type: ${_exhaustiveCheck}`); - } - } + return { + args: { + ...baseArgs, + tools: openaiTools, + tool_choice: openaiToolChoice, + functions, + function_call, + }, + warnings: [...warnings, ...toolWarnings], + }; } async doGenerate( diff --git a/packages/openai/src/openai-completion-language-model.test.ts b/packages/openai/src/openai-completion-language-model.test.ts index e0dce07c98c7..d4f5af6d994f 100644 --- a/packages/openai/src/openai-completion-language-model.test.ts +++ b/packages/openai/src/openai-completion-language-model.test.ts @@ -106,7 +106,6 @@ describe('doGenerate', () => { const { text } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -121,7 +120,6 @@ describe('doGenerate', () => { const { usage } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -136,7 +134,6 @@ describe('doGenerate', () => { const { request } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -154,7 +151,6 @@ describe('doGenerate', () => { const { response } = await model.doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -174,7 +170,6 @@ describe('doGenerate', () => { .completion('gpt-3.5-turbo', { logprobs: 1 }) .doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); expect(response.logprobs).toStrictEqual( @@ -192,7 +187,6 @@ describe('doGenerate', () => { .completion('gpt-3.5-turbo-instruct') .doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, }); @@ -209,7 +203,6 @@ describe('doGenerate', () => { .completion('gpt-3.5-turbo-instruct') 
.doGenerate({
         inputFormat: 'prompt',
-        mode: { type: 'regular' },
         prompt: TEST_PROMPT,
       });
@@ -225,7 +218,6 @@ describe('doGenerate', () => {
     const { rawResponse } = await model.doGenerate({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -244,7 +236,6 @@ describe('doGenerate', () => {
     await model.doGenerate({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -268,7 +259,6 @@ describe('doGenerate', () => {
     await provider.completion('gpt-3.5-turbo-instruct').doGenerate({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
       headers: {
         'Custom-Request-Header': 'request-header-value',
@@ -349,7 +339,6 @@ describe('doStream', () => {
     const { stream } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -386,7 +375,6 @@ describe('doStream', () => {
     const { stream } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -423,7 +411,6 @@ describe('doStream', () => {
     const { stream } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -447,7 +434,6 @@ describe('doStream', () => {
     const { request } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -463,7 +449,6 @@ describe('doStream', () => {
     const { rawResponse } = await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -483,7 +468,6 @@ describe('doStream', () => {
     await model.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -509,7 +493,6 @@ describe('doStream', () => {
     await provider.completion('gpt-3.5-turbo-instruct').doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
       headers: {
         'Custom-Request-Header': 'request-header-value',
diff --git a/packages/openai/src/openai-completion-language-model.ts b/packages/openai/src/openai-completion-language-model.ts
index 43428e9cbd92..b2a7e65ab597 100644
--- a/packages/openai/src/openai-completion-language-model.ts
+++ b/packages/openai/src/openai-completion-language-model.ts
@@ -4,7 +4,6 @@ import {
   LanguageModelV2FinishReason,
   LanguageModelV2LogProbs,
   LanguageModelV2StreamPart,
-  UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -16,6 +15,7 @@ import {
 } from '@ai-sdk/provider-utils';
 import { z } from 'zod';
 import { convertToOpenAICompletionPrompt } from './convert-to-openai-completion-prompt';
+import { getResponseMetadata } from './get-response-metadata';
 import { mapOpenAICompletionLogProbs } from './map-openai-completion-logprobs';
 import { mapOpenAIFinishReason } from './map-openai-finish-reason';
 import {
@@ -26,7 +26,6 @@ import {
   openaiErrorDataSchema,
   openaiFailedResponseHandler,
 } from './openai-error';
-import { getResponseMetadata } from './get-response-metadata';
 
 type OpenAICompletionConfig = {
   provider: string;
@@ -60,7 +59,6 @@ export class OpenAICompletionLanguageModel implements LanguageModelV2 {
   }
 
   private getArgs({
-    mode,
     inputFormat,
     prompt,
     maxTokens,
@@ -71,17 +69,22 @@ export class OpenAICompletionLanguageModel implements LanguageModelV2 {
     presencePenalty,
     stopSequences: userStopSequences,
     responseFormat,
+    tools,
+    toolChoice,
     seed,
   }: Parameters<LanguageModelV2['doGenerate']>[0]) {
-    const type = mode.type;
-
     const warnings: LanguageModelV2CallWarning[] = [];
 
     if (topK != null) {
-      warnings.push({
-        type: 'unsupported-setting',
-        setting: 'topK',
-      });
+      warnings.push({
type: 'unsupported-setting', setting: 'topK' }); + } + + if (tools?.length) { + warnings.push({ type: 'unsupported-setting', setting: 'tools' }); + } + + if (toolChoice != null) { + warnings.push({ type: 'unsupported-setting', setting: 'toolChoice' }); } if (responseFormat != null && responseFormat.type !== 'text') { @@ -97,73 +100,41 @@ export class OpenAICompletionLanguageModel implements LanguageModelV2 { const stop = [...(stopSequences ?? []), ...(userStopSequences ?? [])]; - const baseArgs = { - // model id: - model: this.modelId, - - // model specific settings: - echo: this.settings.echo, - logit_bias: this.settings.logitBias, - logprobs: - typeof this.settings.logprobs === 'number' - ? this.settings.logprobs - : typeof this.settings.logprobs === 'boolean' + return { + args: { + // model id: + model: this.modelId, + + // model specific settings: + echo: this.settings.echo, + logit_bias: this.settings.logitBias, + logprobs: + typeof this.settings.logprobs === 'number' ? this.settings.logprobs - ? 0 - : undefined - : undefined, - suffix: this.settings.suffix, - user: this.settings.user, - - // standardized settings: - max_tokens: maxTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - seed, - - // prompt: - prompt: completionPrompt, - - // stop sequences: - stop: stop.length > 0 ? stop : undefined, + : typeof this.settings.logprobs === 'boolean' + ? this.settings.logprobs + ? 0 + : undefined + : undefined, + suffix: this.settings.suffix, + user: this.settings.user, + + // standardized settings: + max_tokens: maxTokens, + temperature, + top_p: topP, + frequency_penalty: frequencyPenalty, + presence_penalty: presencePenalty, + seed, + + // prompt: + prompt: completionPrompt, + + // stop sequences: + stop: stop.length > 0 ? 
stop : undefined,
+    },
+    warnings,
   };
-
-    switch (type) {
-      case 'regular': {
-        if (mode.tools?.length) {
-          throw new UnsupportedFunctionalityError({
-            functionality: 'tools',
-          });
-        }
-
-        if (mode.toolChoice) {
-          throw new UnsupportedFunctionalityError({
-            functionality: 'toolChoice',
-          });
-        }
-
-        return { args: baseArgs, warnings };
-      }
-
-      case 'object-json': {
-        throw new UnsupportedFunctionalityError({
-          functionality: 'object-json mode',
-        });
-      }
-
-      case 'object-tool': {
-        throw new UnsupportedFunctionalityError({
-          functionality: 'object-tool mode',
-        });
-      }
-
-      default: {
-        const _exhaustiveCheck: never = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
 
   async doGenerate(
diff --git a/packages/openai/src/openai-prepare-tools.ts b/packages/openai/src/openai-prepare-tools.ts
index 8ff7442f6476..47b02d8be868 100644
--- a/packages/openai/src/openai-prepare-tools.ts
+++ b/packages/openai/src/openai-prepare-tools.ts
@@ -1,18 +1,18 @@
 import {
   JSONSchema7,
-  LanguageModelV2,
+  LanguageModelV2CallOptions,
   LanguageModelV2CallWarning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 
 export function prepareTools({
-  mode,
+  tools,
+  toolChoice,
   useLegacyFunctionCalling = false,
   structuredOutputs,
 }: {
-  mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & {
-    type: 'regular';
-  };
+  tools: LanguageModelV2CallOptions['tools'];
+  toolChoice?: LanguageModelV2CallOptions['toolChoice'];
   useLegacyFunctionCalling: boolean | undefined;
   structuredOutputs: boolean;
 }): {
   tools?: {
     type: 'function';
     function: {
       name: string;
       description: string | undefined;
       parameters: JSONSchema7;
       strict?: boolean;
     };
   }[];
-  tool_choice?:
+  toolChoice?:
     | 'auto'
     | 'none'
     | 'required'
     | { type: 'function'; function: { name: string } };
   functions?: {
     name: string;
     description: string | undefined;
     parameters: JSONSchema7;
   }[];
   function_call?: { name: string };
   toolWarnings: Array<LanguageModelV2CallWarning>;
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
-  const tools = mode.tools?.length ? mode.tools : undefined;
+  tools = tools?.length ?
tools : undefined; const toolWarnings: LanguageModelV2CallWarning[] = []; if (tools == null) { - return { tools: undefined, tool_choice: undefined, toolWarnings }; + return { tools: undefined, toolChoice: undefined, toolWarnings }; } - const toolChoice = mode.toolChoice; - if (useLegacyFunctionCalling) { const openaiFunctions: Array<{ name: string; @@ -129,7 +127,7 @@ export function prepareTools({ } if (toolChoice == null) { - return { tools: openaiTools, tool_choice: undefined, toolWarnings }; + return { tools: openaiTools, toolChoice: undefined, toolWarnings }; } const type = toolChoice.type; @@ -138,11 +136,11 @@ export function prepareTools({ case 'auto': case 'none': case 'required': - return { tools: openaiTools, tool_choice: type, toolWarnings }; + return { tools: openaiTools, toolChoice: type, toolWarnings }; case 'tool': return { tools: openaiTools, - tool_choice: { + toolChoice: { type: 'function', function: { name: toolChoice.toolName, diff --git a/packages/openai/src/responses/openai-responses-language-model.test.ts b/packages/openai/src/responses/openai-responses-language-model.test.ts index c0c7f50d558d..4489e8e8fbb2 100644 --- a/packages/openai/src/responses/openai-responses-language-model.test.ts +++ b/packages/openai/src/responses/openai-responses-language-model.test.ts @@ -119,7 +119,6 @@ describe('OpenAIResponsesLanguageModel', () => { const result = await createModel('gpt-4o').doGenerate({ prompt: TEST_PROMPT, inputFormat: 'prompt', - mode: { type: 'regular' }, }); expect(result.text).toStrictEqual('answer text'); @@ -129,7 +128,6 @@ describe('OpenAIResponsesLanguageModel', () => { const result = await createModel('gpt-4o').doGenerate({ prompt: TEST_PROMPT, inputFormat: 'prompt', - mode: { type: 'regular' }, }); expect(result.usage).toStrictEqual({ @@ -150,7 +148,6 @@ describe('OpenAIResponsesLanguageModel', () => { const result = await createModel('gpt-4o').doGenerate({ prompt: TEST_PROMPT, inputFormat: 'prompt', - mode: { type: 'regular' }, }); expect(result.providerMetadata).toStrictEqual({ @@ -165,7 +162,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send model id, settings, and input', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: [ { role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, @@ -190,7 +186,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should remove unsupported settings for o1', async () => { const { warnings } = await createModel('o1-mini').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: [ { role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, @@ -227,7 +222,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should remove unsupported settings for o3', async () => { const { warnings } = await createModel('o3').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: [ { role: 'system', content: 'You are a helpful assistant.' 
}, { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, @@ -261,7 +255,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send response format json schema', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json', @@ -305,7 +298,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send response format json object', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, responseFormat: { type: 'json', @@ -330,7 +322,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send parallelToolCalls provider option', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -353,7 +344,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send store provider option', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -376,7 +366,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send user provider option', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -399,7 +388,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send previous response id provider option', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -422,7 +410,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send metadata provider option', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -445,7 +432,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send reasoningEffort provider option', async () => { const { warnings } = await createModel('o3').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -470,7 +456,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send instructions provider option', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, providerOptions: { openai: { @@ -490,57 +475,10 @@ describe('OpenAIResponsesLanguageModel', () => { expect(warnings).toStrictEqual([]); }); - it('should send object-tool format', async () => { + it('should send responseFormat json format', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-tool', - tool: { - type: 'function', - name: 'response', - description: 'A response', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - }, - prompt: TEST_PROMPT, - }); - - expect(await server.calls[0].requestBody).toStrictEqual({ - model: 'gpt-4o', - tool_choice: { type: 'function', name: 'response' }, - tools: [ - { - type: 'function', - strict: true, - 
name: 'response', - description: 'A response', - parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], - additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', - }, - }, - ], - input: [ - { role: 'user', content: [{ type: 'input_text', text: 'Hello' }] }, - ], - }); - - expect(warnings).toStrictEqual([]); - }); - - it('should send object-json json_object format', async () => { - const { warnings } = await createModel('gpt-4o').doGenerate({ - inputFormat: 'prompt', - mode: { type: 'object-json' }, + responseFormat: { type: 'json' }, prompt: TEST_PROMPT, }); @@ -555,11 +493,11 @@ describe('OpenAIResponsesLanguageModel', () => { expect(warnings).toStrictEqual([]); }); - it('should send object-json json_schema format', async () => { + it('should send responseFormat json_schema format', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', name: 'response', description: 'A response', schema: { @@ -601,11 +539,11 @@ describe('OpenAIResponsesLanguageModel', () => { expect(warnings).toStrictEqual([]); }); - it('should send object-json json_schema format with strictSchemas false', async () => { + it('should send responseFormat json_schema format with strictSchemas false', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { - type: 'object-json', + responseFormat: { + type: 'json', name: 'response', description: 'A response', schema: { @@ -655,23 +593,20 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send web_search tool', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - tools: [ - { - type: 'provider-defined', - id: 'openai.web_search_preview', - name: 'web_search_preview', - args: { - searchContextSize: 'high', - userLocation: { - type: 'approximate', - city: 'San Francisco', - }, + tools: [ + { + type: 'provider-defined', + id: 'openai.web_search_preview', + name: 'web_search_preview', + args: { + searchContextSize: 'high', + userLocation: { + type: 'approximate', + city: 'San Francisco', }, }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -695,27 +630,24 @@ describe('OpenAIResponsesLanguageModel', () => { it('should send web_search tool as tool_choice', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { - type: 'regular', - toolChoice: { - type: 'tool', - toolName: 'web_search_preview', - }, - tools: [ - { - type: 'provider-defined', - id: 'openai.web_search_preview', - name: 'web_search_preview', - args: { - searchContextSize: 'high', - userLocation: { - type: 'approximate', - city: 'San Francisco', - }, + toolChoice: { + type: 'tool', + toolName: 'web_search_preview', + }, + tools: [ + { + type: 'provider-defined', + id: 'openai.web_search_preview', + name: 'web_search_preview', + args: { + searchContextSize: 'high', + userLocation: { + type: 'approximate', + city: 'San Francisco', }, }, - ], - }, + }, + ], prompt: TEST_PROMPT, }); @@ -740,7 +672,6 @@ describe('OpenAIResponsesLanguageModel', () => { it('should warn about unsupported settings', async () => { const { warnings } = await createModel('gpt-4o').doGenerate({ inputFormat: 'prompt', - mode: { type: 'regular' }, prompt: TEST_PROMPT, stopSequences: ['\n\n'], topK: 0.1, @@ -861,7 +792,7 @@ describe('OpenAIResponsesLanguageModel', () => { const 
result = await createModel('gpt-4o').doGenerate({
       prompt: TEST_PROMPT,
       inputFormat: 'prompt',
-      mode: { type: 'regular', tools: TEST_TOOLS },
+      tools: TEST_TOOLS,
     });
 
     expect(result.toolCalls).toStrictEqual([
@@ -884,7 +815,7 @@ describe('OpenAIResponsesLanguageModel', () => {
     const result = await createModel('gpt-4o').doGenerate({
       prompt: TEST_PROMPT,
       inputFormat: 'prompt',
-      mode: { type: 'regular', tools: TEST_TOOLS },
+      tools: TEST_TOOLS,
     });
 
     expect(result.finishReason).toStrictEqual('tool-calls');
@@ -1017,7 +948,6 @@ describe('OpenAIResponsesLanguageModel', () => {
       const result = await createModel('gpt-4o').doGenerate({
         prompt: TEST_PROMPT,
         inputFormat: 'prompt',
-        mode: { type: 'regular' },
       });
 
       expect(result.text).toStrictEqual(outputText);
@@ -1027,7 +957,6 @@ describe('OpenAIResponsesLanguageModel', () => {
       const result = await createModel('gpt-4o').doGenerate({
         prompt: TEST_PROMPT,
         inputFormat: 'prompt',
-        mode: { type: 'regular' },
       });
 
       expect(result.sources).toStrictEqual([
@@ -1086,7 +1015,6 @@ describe('OpenAIResponsesLanguageModel', () => {
     const { stream } = await createModel('gpt-4o').doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -1135,7 +1063,6 @@ describe('OpenAIResponsesLanguageModel', () => {
     const { stream } = await createModel('gpt-4o').doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -1189,7 +1116,7 @@ describe('OpenAIResponsesLanguageModel', () => {
     const { stream } = await createModel('gpt-4o').doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular', tools: TEST_TOOLS },
+      tools: TEST_TOOLS,
       prompt: TEST_PROMPT,
     });
@@ -1317,7 +1244,6 @@ describe('OpenAIResponsesLanguageModel', () => {
     const { stream } = await createModel('gpt-4o-mini').doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
diff --git a/packages/openai/src/responses/openai-responses-language-model.ts b/packages/openai/src/responses/openai-responses-language-model.ts
index c6531c8f769f..b655ad5b11d8 100644
--- a/packages/openai/src/responses/openai-responses-language-model.ts
+++ b/packages/openai/src/responses/openai-responses-language-model.ts
@@ -39,7 +39,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
   }
 
   private getArgs({
-    mode,
     maxTokens,
     temperature,
     stopSequences,
@@ -50,24 +49,19 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     seed,
     prompt,
     providerOptions,
+    tools,
+    toolChoice,
     responseFormat,
   }: Parameters<LanguageModelV2['doGenerate']>[0]) {
     const warnings: LanguageModelV2CallWarning[] = [];
     const modelConfig = getResponsesModelConfig(this.modelId);
-    const type = mode.type;
 
     if (topK != null) {
-      warnings.push({
-        type: 'unsupported-setting',
-        setting: 'topK',
-      });
+      warnings.push({ type: 'unsupported-setting', setting: 'topK' });
     }
 
     if (seed != null) {
-      warnings.push({
-        type: 'unsupported-setting',
-        setting: 'seed',
-      });
+      warnings.push({ type: 'unsupported-setting', setting: 'seed' });
     }
 
     if (presencePenalty != null) {
@@ -85,10 +79,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     }
 
     if (stopSequences != null) {
-      warnings.push({
-        type: 'unsupported-setting',
-        setting: 'stopSequences',
-      });
+      warnings.push({ type: 'unsupported-setting', setting: 'stopSequences' });
     }
 
     const { messages, warnings: messageWarnings } =
@@ -169,68 +160,24 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
       }
     }
 
-    switch (type) {
-      case 'regular': {
-        const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
-          mode,
-          strict: isStrict, // TODO support provider options on tools
-        });
-
-        return {
-          args: {
-            ...baseArgs,
-            tools,
-            tool_choice,
-          },
-          warnings: [...warnings, ...toolWarnings],
-        };
-      }
-
-      case 'object-json': {
-        return {
-          args: {
-            ...baseArgs,
-            text: {
-              format:
-                mode.schema != null
-                  ? {
-                      type: 'json_schema',
-                      strict: isStrict,
-                      name: mode.name ?? 'response',
-                      description: mode.description,
-                      schema: mode.schema,
-                    }
-                  : { type: 'json_object' },
-            },
-          },
-          warnings,
-        };
-      }
-
-      case 'object-tool': {
-        return {
-          args: {
-            ...baseArgs,
-            tool_choice: { type: 'function', name: mode.tool.name },
-            tools: [
-              {
-                type: 'function',
-                name: mode.tool.name,
-                description: mode.tool.description,
-                parameters: mode.tool.parameters,
-                strict: isStrict,
-              },
-            ],
-          },
-          warnings,
-        };
-      }
+    const {
+      tools: openaiTools,
+      toolChoice: openaiToolChoice,
+      toolWarnings,
+    } = prepareResponsesTools({
+      tools,
+      toolChoice,
+      strict: isStrict,
+    });
-      default: {
-        const _exhaustiveCheck: never = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
+    return {
+      args: {
+        ...baseArgs,
+        tools: openaiTools,
+        tool_choice: openaiToolChoice,
+      },
+      warnings: [...warnings, ...toolWarnings],
+    };
   }
 
   async doGenerate(
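For orientation, the flattened call surface that this refactor migrates to can be sketched as follows. This is an illustrative sketch only, not part of the patch: `createModel` and `TEST_PROMPT` are the test fixtures shown above, the tool definition mirrors the `test-tool` fixture used throughout these tests, and the comment notes the v1 `mode` shape that the top-level `tools` and `toolChoice` options replace.

const result = await createModel('gpt-4o').doGenerate({
  inputFormat: 'prompt',
  prompt: TEST_PROMPT,
  // v1 equivalent: mode: { type: 'regular', tools, toolChoice }
  tools: [
    {
      type: 'function',
      name: 'test-tool',
      parameters: {
        type: 'object',
        properties: { value: { type: 'string' } },
        required: ['value'],
        additionalProperties: false,
      },
    },
  ],
  toolChoice: { type: 'tool', toolName: 'test-tool' },
});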
diff --git a/packages/openai/src/responses/openai-responses-prepare-tools.ts b/packages/openai/src/responses/openai-responses-prepare-tools.ts
index 60fbb5facf1e..a553b46309f8 100644
--- a/packages/openai/src/responses/openai-responses-prepare-tools.ts
+++ b/packages/openai/src/responses/openai-responses-prepare-tools.ts
@@ -1,21 +1,21 @@
 import {
-  LanguageModelV2,
+  LanguageModelV2CallOptions,
   LanguageModelV2CallWarning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import { OpenAIResponsesTool } from './openai-responses-api-types';
 
 export function prepareResponsesTools({
-  mode,
+  tools,
+  toolChoice,
   strict,
 }: {
-  mode: Parameters<LanguageModelV2['doGenerate']>[0]['mode'] & {
-    type: 'regular';
-  };
+  tools: LanguageModelV2CallOptions['tools'];
+  toolChoice?: LanguageModelV2CallOptions['toolChoice'];
   strict: boolean;
 }): {
   tools?: Array<OpenAIResponsesTool>;
-  tool_choice?:
+  toolChoice?:
     | 'auto'
     | 'none'
     | 'required'
     | { type: 'function'; name: string }
     | { type: 'web_search_preview' };
   toolWarnings: LanguageModelV2CallWarning[];
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
-  const tools = mode.tools?.length ? mode.tools : undefined;
+  tools = tools?.length ? tools : undefined;
 
   const toolWarnings: LanguageModelV2CallWarning[] = [];
 
   if (tools == null) {
-    return { tools: undefined, tool_choice: undefined, toolWarnings };
+    return { tools: undefined, toolChoice: undefined, toolWarnings };
   }
 
-  const toolChoice = mode.toolChoice;
-
   const openaiTools: Array<OpenAIResponsesTool> = [];
 
   for (const tool of tools) {
@@ -75,7 +73,7 @@ export function prepareResponsesTools({
   }
 
   if (toolChoice == null) {
-    return { tools: openaiTools, tool_choice: undefined, toolWarnings };
+    return { tools: openaiTools, toolChoice: undefined, toolWarnings };
   }
 
   const type = toolChoice.type;
 
@@ -84,26 +82,16 @@ export function prepareResponsesTools({
     case 'auto':
     case 'none':
     case 'required':
-      return { tools: openaiTools, tool_choice: type, toolWarnings };
-    case 'tool': {
-      if (toolChoice.toolName === 'web_search_preview') {
-        return {
-          tools: openaiTools,
-          tool_choice: {
-            type: 'web_search_preview',
-          },
-          toolWarnings,
-        };
-      }
+      return { tools: openaiTools, toolChoice: type, toolWarnings };
+    case 'tool':
       return {
         tools: openaiTools,
-        tool_choice: {
-          type: 'function',
-          name: toolChoice.toolName,
-        },
+        toolChoice:
+          toolChoice.toolName === 'web_search_preview'
+            ? { type: 'web_search_preview' }
+            : { type: 'function', name: toolChoice.toolName },
         toolWarnings,
       };
-    }
     default: {
       const _exhaustiveCheck: never = type;
       throw new UnsupportedFunctionalityError({
diff --git a/packages/perplexity/src/perplexity-language-model.test.ts b/packages/perplexity/src/perplexity-language-model.test.ts
index ecee90ff6385..e7e72254d8ba 100644
--- a/packages/perplexity/src/perplexity-language-model.test.ts
+++ b/packages/perplexity/src/perplexity-language-model.test.ts
@@ -94,7 +94,6 @@ describe('PerplexityLanguageModel', () => {
     const result = await perplexityLM.doGenerate({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -114,7 +113,6 @@ describe('PerplexityLanguageModel', () => {
       prepareJsonResponse({ content: '' });
       await perplexityLM.doGenerate({
         inputFormat: 'prompt',
-        mode: { type: 'regular' },
         prompt: TEST_PROMPT,
       });
       const requestBody = await jsonServer.calls[0].requestBody;
@@ -128,7 +126,6 @@ describe('PerplexityLanguageModel', () => {
       prepareJsonResponse({ content: '' });
       await perplexityLM.doGenerate({
         inputFormat: 'prompt',
-        mode: { type: 'regular' },
         prompt: TEST_PROMPT,
         providerOptions: {
           perplexity: {
@@ -154,7 +151,6 @@ describe('PerplexityLanguageModel', () => {
     const result = await perplexityLM.doGenerate({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -186,7 +182,6 @@ describe('PerplexityLanguageModel', () => {
     const result = await perplexityLM.doGenerate({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -220,7 +215,6 @@ describe('PerplexityLanguageModel', () => {
     const result = await perplexityLM.doGenerate({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -253,7 +247,6 @@ describe('PerplexityLanguageModel', () => {
     await lmWithCustomHeaders.doGenerate({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
       headers: { 'Custom-Request-Header': 'request-header-value' },
     });
@@ -265,19 +258,6 @@ describe('PerplexityLanguageModel', () => {
       'custom-request-header': 'request-header-value',
     });
   });
-
-  it('should throw error for unsupported mode: object-tool', async () => {
-    await expect(
-      perplexityLM.doGenerate({
-        inputFormat: 'prompt',
-        mode: {
-          type: 'object-tool',
-          tool: { type: 'function', name: 'test', parameters: {} },
-        },
-        prompt: TEST_PROMPT,
-      }),
-    ).rejects.toThrowError(UnsupportedFunctionalityError);
-  });
   });
 
   describe('doStream', () => {
@@ -366,7 +346,6 @@ describe('PerplexityLanguageModel', () => {
     const { stream } = await perplexityLM.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -416,7 +395,6 @@ describe('PerplexityLanguageModel', () => {
     const { stream } = await perplexityLM.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -479,7 +457,6 @@ describe('PerplexityLanguageModel', () => {
     await perplexityLM.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -505,7 +482,6 @@ describe('PerplexityLanguageModel', () => {
     });
     const { stream } = await perplexityLM.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -567,7 +543,6 @@ describe('PerplexityLanguageModel', () => {
     const { stream } = await perplexityLM.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
@@ -622,7 +597,6 @@ describe('PerplexityLanguageModel', () => {
     await lmWithCustomHeaders.doStream({
       inputFormat: 'prompt',
-      mode: { type: 'regular' },
       prompt: TEST_PROMPT,
       headers: { 'Custom-Request-Header': 'request-header-value' },
     });
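The same flattening applies to structured output. As an illustrative sketch (not part of the patch; `perplexityLM` and `TEST_PROMPT` are the test fixtures above, and the schema is hypothetical), what v1 expressed as `mode: { type: 'object-json', schema }` now travels through the standard `responseFormat` option, which the Perplexity mapping below turns into a `response_format: { type: 'json_schema', json_schema: { schema } }` request field:

const result = await perplexityLM.doGenerate({
  inputFormat: 'prompt',
  prompt: TEST_PROMPT,
  responseFormat: {
    type: 'json',
    schema: {
      type: 'object',
      properties: { answer: { type: 'string' } },
      required: ['answer'],
    },
  },
});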
diff --git a/packages/perplexity/src/perplexity-language-model.ts b/packages/perplexity/src/perplexity-language-model.ts
index 3f948c001c81..12c2617409a7 100644
--- a/packages/perplexity/src/perplexity-language-model.ts
+++ b/packages/perplexity/src/perplexity-language-model.ts
@@ -46,7 +46,6 @@ export class PerplexityLanguageModel implements LanguageModelV2 {
   }
 
   private getArgs({
-    mode,
     prompt,
     maxTokens,
     temperature,
@@ -59,8 +58,6 @@ export class PerplexityLanguageModel implements LanguageModelV2 {
     seed,
     providerOptions,
   }: Parameters<LanguageModelV2['doGenerate']>[0]) {
-    const type = mode.type;
-
     const warnings: LanguageModelV2CallWarning[] = [];
 
     if (topK != null) {
@@ -84,63 +81,36 @@ export class PerplexityLanguageModel implements LanguageModelV2 {
       });
     }
 
-    const baseArgs = {
-      // model id:
-      model: this.modelId,
-
-      // standardized settings:
-      frequency_penalty: frequencyPenalty,
-      max_tokens: maxTokens,
-      presence_penalty: presencePenalty,
-      temperature,
-      top_k: topK,
-      top_p: topP,
-
-      // response format:
-      response_format:
-        responseFormat?.type === 'json'
-          ? {
-              type: 'json_schema',
-              json_schema: { schema: responseFormat.schema },
-            }
-          : undefined,
-
-      // provider extensions
-      ...(providerOptions?.perplexity ?? {}),
-
-      // messages:
-      messages: convertToPerplexityMessages(prompt),
+    return {
+      args: {
+        // model id:
+        model: this.modelId,
+
+        // standardized settings:
+        frequency_penalty: frequencyPenalty,
+        max_tokens: maxTokens,
+        presence_penalty: presencePenalty,
+        temperature,
+        top_k: topK,
+        top_p: topP,
+
+        // response format:
+        response_format:
+          responseFormat?.type === 'json'
+            ? {
+                type: 'json_schema',
+                json_schema: { schema: responseFormat.schema },
+              }
+            : undefined,
+
+        // provider extensions
+        ...(providerOptions?.perplexity ?? {}),
+
+        // messages:
+        messages: convertToPerplexityMessages(prompt),
+      },
+      warnings,
     };
-
-    switch (type) {
-      case 'regular': {
-        return { args: baseArgs, warnings };
-      }
-
-      case 'object-json': {
-        return {
-          args: {
-            ...baseArgs,
-            response_format: {
-              type: 'json_schema',
-              json_schema: { schema: mode.schema },
-            },
-          },
-          warnings,
-        };
-      }
-
-      case 'object-tool': {
-        throw new UnsupportedFunctionalityError({
-          functionality: 'tool-mode object generation',
-        });
-      }
-
-      default: {
-        const _exhaustiveCheck: never = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
 
   async doGenerate(
diff --git a/packages/provider/src/language-model/v2/language-model-v2-call-options.ts b/packages/provider/src/language-model/v2/language-model-v2-call-options.ts
index 490f10baab18..a1fab83bce4e 100644
--- a/packages/provider/src/language-model/v2/language-model-v2-call-options.ts
+++ b/packages/provider/src/language-model/v2/language-model-v2-call-options.ts
@@ -1,12 +1,11 @@
 import { JSONSchema7 } from 'json-schema';
-import { LanguageModelV2CallSettings } from './language-model-v2-call-settings';
 import { LanguageModelV2FunctionTool } from './language-model-v2-function-tool';
 import { LanguageModelV2Prompt } from './language-model-v2-prompt';
 import { LanguageModelV2ProviderDefinedTool } from './language-model-v2-provider-defined-tool';
 import { LanguageModelV2ProviderOptions } from './language-model-v2-provider-options';
 import { LanguageModelV2ToolChoice } from './language-model-v2-tool-choice';
 
-export type LanguageModelV2CallOptions = LanguageModelV2CallSettings & {
+export type LanguageModelV2CallOptions = {
   /**
 Whether the user provided the input as messages or as a prompt.
 This can help guide non-chat models in the
@@ -16,39 +15,71 @@ chat/non-chat use cases.
   inputFormat: 'messages' | 'prompt';
 
   /**
-The mode affects the behavior of the language model. It is required to
-support provider-independent streaming and generation of structured objects.
-The model can take this information and e.g. configure json mode, the correct
-low level grammar, etc. It can also be used to optimize the efficiency of the
-streaming, e.g. tool-delta stream parts are only needed in the
-object-tool mode.
+A language model prompt is a standardized prompt type.
 
-@deprecated mode will be removed in v2.
-All necessary settings will be directly supported through the call settings,
-in particular responseFormat, toolChoice, and tools.
+Note: This is **not** the user-facing prompt. The AI SDK methods will map the
+user-facing prompt types such as chat or instruction prompts to this format.
+That approach allows us to evolve the user facing prompts without breaking
+the language model interface.
  */
-  mode:
-    | {
-        // stream text & complete tool calls
-        type: 'regular';
+  prompt: LanguageModelV2Prompt;
 
-        /**
-The tools that are available for the model.
- */
-        // TODO Spec V2: move to call settings
-        tools?: Array<
-          LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool
-        >;
+  /**
+Maximum number of tokens to generate.
+ */
+  // TODO rename to maxOutputTokens
+  maxTokens?: number;
 
-        /**
-Specifies how the tool should be selected. Defaults to 'auto'.
- */
-        // TODO Spec V2: move to call settings
-        toolChoice?: LanguageModelV2ToolChoice;
-      }
+  /**
+Temperature setting.
+
+It is recommended to set either `temperature` or `topP`, but not both.
+ */
+  temperature?: number;
+
+  /**
+Stop sequences.
+If set, the model will stop generating text when one of the stop sequences is generated.
+
+Providers may have limits on the number of stop sequences.
+ */
+  stopSequences?: string[];
+
+  /**
+Nucleus sampling.
+
+It is recommended to set either `temperature` or `topP`, but not both.
+ */
+  topP?: number;
+
+  /**
+Only sample from the top K options for each subsequent token.
+
+Used to remove "long tail" low probability responses.
+Recommended for advanced use cases only. You usually only need to use temperature.
+ */
+  topK?: number;
+
+  /**
+Presence penalty setting. It affects the likelihood of the model to
+repeat information that is already in the prompt.
+ */
+  presencePenalty?: number;
+
+  /**
+Frequency penalty setting. It affects the likelihood of the model
+to repeatedly use the same words or phrases.
+ */
+  frequencyPenalty?: number;
+
+  /**
+Response format. The output can either be text or JSON. Default is text.
+
+If JSON is selected, a schema can optionally be provided to guide the LLM.
+ */
+  responseFormat?:
+    | { type: 'text' }
     | {
-        // object generation with json mode enabled (streaming: text delta)
-        type: 'object-json';
+        type: 'json';
 
         /**
          * JSON schema that the generated output should conform to.
@@ -64,22 +95,36 @@ Specifies how the tool should be selected. Defaults to 'auto'.
          * Description of the output that should be generated. Used by some providers for additional LLM guidance.
          */
         description?: string;
-      }
-    | {
-        // object generation with tool mode enabled (streaming: tool call deltas)
-        type: 'object-tool';
-        tool: LanguageModelV2FunctionTool;
       };
 
   /**
-A language mode prompt is a standardized prompt type.
+The seed (integer) to use for random sampling. If set and supported
+by the model, calls will generate deterministic results.
+ */
+  seed?: number;
 
-Note: This is **not** the user-facing prompt. The AI SDK methods will map the
-user-facing prompt types such as chat or instruction prompts to this format.
-That approach allows us to evolve the user facing prompts without breaking
-the language model interface.
- */
-  prompt: LanguageModelV2Prompt;
+  /**
+The tools that are available for the model.
+ */
+  tools?: Array<
+    LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool
+  >;
+
+  /**
+Specifies how the tool should be selected. Defaults to 'auto'.
+*/
+  toolChoice?: LanguageModelV2ToolChoice;
+
+  /**
+Abort signal for cancelling the operation.
+ */
+  abortSignal?: AbortSignal;
+
+  /**
+Additional HTTP headers to be sent with the request.
+Only applicable for HTTP-based providers.
+ */
+  headers?: Record<string, string | undefined>;
 
   /**
    * Additional provider-specific options. They are passed through
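Taken together, a complete v2 call can be written against the flattened type roughly as follows (an illustrative sketch, not part of the patch; all values are hypothetical, while the field names are exactly those declared above):

const options: LanguageModelV2CallOptions = {
  inputFormat: 'messages',
  prompt: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
  ],
  maxTokens: 1000,
  temperature: 0.5,
  responseFormat: { type: 'json', name: 'response' },
  tools: [
    {
      type: 'function',
      name: 'test-tool',
      parameters: {
        type: 'object',
        properties: { value: { type: 'string' } },
        required: ['value'],
      },
    },
  ],
  toolChoice: { type: 'tool', toolName: 'test-tool' },
};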
diff --git a/packages/provider/src/language-model/v2/language-model-v2-call-settings.ts b/packages/provider/src/language-model/v2/language-model-v2-call-settings.ts
deleted file mode 100644
index 98a47b73ffdb..000000000000
--- a/packages/provider/src/language-model/v2/language-model-v2-call-settings.ts
+++ /dev/null
@@ -1,92 +0,0 @@
-import { JSONSchema7 } from 'json-schema';
-
-export type LanguageModelV2CallSettings = {
-  /**
-Maximum number of tokens to generate.
- */
-  maxTokens?: number;
-
-  /**
-Temperature setting.
-
-It is recommended to set either `temperature` or `topP`, but not both.
- */
-  temperature?: number;
-
-  /**
-Stop sequences.
-If set, the model will stop generating text when one of the stop sequences is generated.
-Providers may have limits on the number of stop sequences.
- */
-  stopSequences?: string[];
-
-  /**
-Nucleus sampling.
-
-It is recommended to set either `temperature` or `topP`, but not both.
- */
-  topP?: number;
-
-  /**
-Only sample from the top K options for each subsequent token.
-
-Used to remove "long tail" low probability responses.
-Recommended for advanced use cases only. You usually only need to use temperature.
- */
-  topK?: number;
-
-  /**
-Presence penalty setting. It affects the likelihood of the model to
-repeat information that is already in the prompt.
- */
-  presencePenalty?: number;
-
-  /**
-Frequency penalty setting. It affects the likelihood of the model
-to repeatedly use the same words or phrases.
- */
-  frequencyPenalty?: number;
-
-  /**
-Response format. The output can either be text or JSON. Default is text.
-
-If JSON is selected, a schema can optionally be provided to guide the LLM.
- */
-  responseFormat?:
-    | { type: 'text' }
-    | {
-        type: 'json';
-
-        /**
-         * JSON schema that the generated output should conform to.
-         */
-        schema?: JSONSchema7;
-
-        /**
-         * Name of output that should be generated. Used by some providers for additional LLM guidance.
-         */
-        name?: string;
-
-        /**
-         * Description of the output that should be generated. Used by some providers for additional LLM guidance.
-         */
-        description?: string;
-      };
-
-  /**
-The seed (integer) to use for random sampling. If set and supported
-by the model, calls will generate deterministic results.
- */
-  seed?: number;
-
-  /**
-Abort signal for cancelling the operation.
- */
-  abortSignal?: AbortSignal;
-
-  /**
-Additional HTTP headers to be sent with the request.
-Only applicable for HTTP-based providers.
- */
-  headers?: Record<string, string | undefined>;
-};
diff --git a/packages/provider/src/language-model/v2/language-model-v2-call-warning.ts b/packages/provider/src/language-model/v2/language-model-v2-call-warning.ts
index a442549ae5c4..e50882f8e75e 100644
--- a/packages/provider/src/language-model/v2/language-model-v2-call-warning.ts
+++ b/packages/provider/src/language-model/v2/language-model-v2-call-warning.ts
@@ -1,4 +1,4 @@
-import { LanguageModelV2CallSettings } from './language-model-v2-call-settings';
+import { LanguageModelV2CallOptions } from './language-model-v2-call-options';
 import { LanguageModelV2FunctionTool } from './language-model-v2-function-tool';
 import { LanguageModelV2ProviderDefinedTool } from './language-model-v2-provider-defined-tool';
 
@@ -9,7 +9,7 @@ some settings might not be supported, which can lead to suboptimal results.
 export type LanguageModelV2CallWarning =
   | {
       type: 'unsupported-setting';
-      setting: keyof LanguageModelV2CallSettings;
+      setting: Omit<keyof LanguageModelV2CallOptions, 'prompt'>;
       details?: string;
     }
   | {

From 78e6a51148551e2844cc4ad747fddcb842467557 Mon Sep 17 00:00:00 2001
From: Sam Denty
Date: Mon, 7 Apr 2025 16:38:40 +0100
Subject: [PATCH 0040/1307] chore: update typescript to 5.8.3 (#5582)

---
 examples/ai-core/package.json | 2 +-
 examples/ai-core/tsconfig.json | 2 +-
 examples/express/package.json | 2 +-
 examples/express/tsconfig.json | 2 +-
 examples/fastify/package.json | 2 +-
 examples/fastify/tsconfig.json | 2 +-
 examples/hono/package.json | 2 +-
 examples/hono/tsconfig.json | 2 +-
 examples/mcp/package.json | 2 +-
 examples/mcp/tsconfig.json | 2 +-
 examples/next-fastapi/package.json | 2 +-
 examples/next-fastapi/tsconfig.json | 2 +-
 examples/next-google-vertex/package.json | 2 +-
 examples/next-langchain/package.json | 2 +-
 examples/next-langchain/tsconfig.json | 2 +-
 .../package.json | 2 +-
 .../tsconfig.json | 2 +-
 examples/next-openai-pages/tsconfig.json | 2 +-
 .../next-openai-telemetry-sentry/package.json | 2 +-
 .../tsconfig.json | 2 +-
 examples/next-openai-telemetry/package.json | 2 +-
 examples/next-openai-telemetry/tsconfig.json | 2 +-
 .../package.json | 2 +-
 .../tsconfig.json | 2 +-
 examples/next-openai/package.json | 2 +-
 examples/next-openai/tsconfig.json | 2 +-
 examples/node-http-server/package.json | 2 +-
 examples/node-http-server/tsconfig.json | 2 +-
 package.json | 2 +-
 .../convert-to-language-model-prompt.ts | 2 +-
 packages/ai/package.json | 2 +-
 packages/amazon-bedrock/package.json | 2 +-
 packages/anthropic/package.json | 2 +-
 packages/azure/package.json | 2 +-
 packages/cerebras/package.json | 2 +-
 packages/codemod/package.json | 2 +-
 packages/cohere/package.json | 2 +-
 packages/deepinfra/package.json | 2 +-
 packages/deepseek/package.json | 2 +-
 packages/fal/package.json | 2 +-
 packages/fireworks/package.json | 2 +-
 packages/google-vertex/package.json | 2 +-
 packages/google/package.json | 2 +-
 packages/groq/package.json | 2 +-
 packages/luma/package.json | 2 +-
 packages/mistral/package.json | 2 +-
 packages/openai-compatible/package.json | 2 +-
 packages/openai/package.json | 2 +-
 packages/perplexity/package.json | 2 +-
 packages/provider-utils/package.json | 2 +-
 packages/provider/package.json | 2 +-
 packages/react/package.json | 2 +-
 packages/replicate/package.json | 2 +-
 packages/togetherai/package.json | 2 +-
 packages/ui-utils/package.json | 2 +-
 packages/valibot/package.json | 2 +-
 packages/vue/package.json | 2 +-
 packages/xai/package.json | 2 +-
 pnpm-lock.yaml | 633 +++++++++---------
 tools/analyze-downloads/package.json | 2 +-
 tools/generate-llms-txt/package.json | 2 +-
 tools/tsconfig/base.json | 2 +-
 62 files changed, 394 insertions(+), 361 deletions(-)

diff --git a/examples/ai-core/package.json b/examples/ai-core/package.json
index bb6cef5fca72..4c09a3f88a7f 100644
--- a/examples/ai-core/package.json
+++ b/examples/ai-core/package.json
@@ -47,6 +47,6 @@
   "devDependencies": {
     "@types/node": "20.17.24",
     "tsx": "4.19.2",
-    "typescript": "5.6.3"
+    "typescript": "5.8.3"
   }
 }
diff --git a/examples/ai-core/tsconfig.json b/examples/ai-core/tsconfig.json
index 67fe4a2689d3..f54bf8b0f489 100644
--- a/examples/ai-core/tsconfig.json
+++ b/examples/ai-core/tsconfig.json
@@ -9,7 +9,7 @@
     "types": ["node"],
     "esModuleInterop": true,
     "allowSyntheticDefaultImports": true,
-    "moduleResolution": "node",
+    "moduleResolution": "Bundler",
     "rootDir": "./src",
     "outDir": "./build",
     "skipLibCheck": true
diff --git
a/examples/express/package.json b/examples/express/package.json index b1ed86bb9748..b1e3f8be8848 100644 --- a/examples/express/package.json +++ b/examples/express/package.json @@ -16,6 +16,6 @@ "@types/express": "5.0.0", "@types/node": "20.17.24", "tsx": "4.19.2", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/express/tsconfig.json b/examples/express/tsconfig.json index 67fe4a2689d3..f54bf8b0f489 100644 --- a/examples/express/tsconfig.json +++ b/examples/express/tsconfig.json @@ -9,7 +9,7 @@ "types": ["node"], "esModuleInterop": true, "allowSyntheticDefaultImports": true, - "moduleResolution": "node", + "moduleResolution": "Bundler", "rootDir": "./src", "outDir": "./build", "skipLibCheck": true diff --git a/examples/fastify/package.json b/examples/fastify/package.json index 70756c5ffd93..ea633f91e348 100644 --- a/examples/fastify/package.json +++ b/examples/fastify/package.json @@ -15,6 +15,6 @@ "devDependencies": { "@types/node": "20.17.24", "tsx": "4.19.2", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/fastify/tsconfig.json b/examples/fastify/tsconfig.json index 67fe4a2689d3..f54bf8b0f489 100644 --- a/examples/fastify/tsconfig.json +++ b/examples/fastify/tsconfig.json @@ -9,7 +9,7 @@ "types": ["node"], "esModuleInterop": true, "allowSyntheticDefaultImports": true, - "moduleResolution": "node", + "moduleResolution": "Bundler", "rootDir": "./src", "outDir": "./build", "skipLibCheck": true diff --git a/examples/hono/package.json b/examples/hono/package.json index 87778031b83f..ceb7e5ace834 100644 --- a/examples/hono/package.json +++ b/examples/hono/package.json @@ -17,6 +17,6 @@ "devDependencies": { "@types/node": "20.17.24", "tsx": "4.19.2", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/hono/tsconfig.json b/examples/hono/tsconfig.json index 67fe4a2689d3..f54bf8b0f489 100644 --- a/examples/hono/tsconfig.json +++ b/examples/hono/tsconfig.json @@ -9,7 +9,7 @@ "types": ["node"], "esModuleInterop": true, "allowSyntheticDefaultImports": true, - "moduleResolution": "node", + "moduleResolution": "Bundler", "rootDir": "./src", "outDir": "./build", "skipLibCheck": true diff --git a/examples/mcp/package.json b/examples/mcp/package.json index 899eedbfe4e0..146c77a23819 100644 --- a/examples/mcp/package.json +++ b/examples/mcp/package.json @@ -23,6 +23,6 @@ "@types/express": "5.0.0", "@types/node": "20.17.24", "tsx": "4.19.2", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/mcp/tsconfig.json b/examples/mcp/tsconfig.json index 67fe4a2689d3..f54bf8b0f489 100644 --- a/examples/mcp/tsconfig.json +++ b/examples/mcp/tsconfig.json @@ -9,7 +9,7 @@ "types": ["node"], "esModuleInterop": true, "allowSyntheticDefaultImports": true, - "moduleResolution": "node", + "moduleResolution": "Bundler", "rootDir": "./src", "outDir": "./build", "skipLibCheck": true diff --git a/examples/next-fastapi/package.json b/examples/next-fastapi/package.json index 924a3fd0c086..0766f3f6548a 100644 --- a/examples/next-fastapi/package.json +++ b/examples/next-fastapi/package.json @@ -29,6 +29,6 @@ "eslint-config-next": "14.2.3", "postcss": "^8.4.49", "tailwindcss": "^3.4.15", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/next-fastapi/tsconfig.json b/examples/next-fastapi/tsconfig.json index fd2df4b6b16c..0d826251d797 100644 --- a/examples/next-fastapi/tsconfig.json +++ b/examples/next-fastapi/tsconfig.json @@ -9,7 +9,7 @@ "noEmit": true, "esModuleInterop": true, "module": "esnext", - 
"moduleResolution": "node", + "moduleResolution": "Bundler", "resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", diff --git a/examples/next-google-vertex/package.json b/examples/next-google-vertex/package.json index 3c88c306081e..03f8ee673903 100644 --- a/examples/next-google-vertex/package.json +++ b/examples/next-google-vertex/package.json @@ -23,6 +23,6 @@ "eslint-config-vercel-ai": "workspace:*", "postcss": "^8.4.49", "tailwindcss": "^3.4.15", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/next-langchain/package.json b/examples/next-langchain/package.json index 4cf0d2588437..f1a3eb05706c 100644 --- a/examples/next-langchain/package.json +++ b/examples/next-langchain/package.json @@ -27,6 +27,6 @@ "eslint-config-next": "14.2.3", "postcss": "^8.4.49", "tailwindcss": "^3.4.15", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/next-langchain/tsconfig.json b/examples/next-langchain/tsconfig.json index e06a4454ab06..eefe1a21d2f1 100644 --- a/examples/next-langchain/tsconfig.json +++ b/examples/next-langchain/tsconfig.json @@ -9,7 +9,7 @@ "noEmit": true, "esModuleInterop": true, "module": "esnext", - "moduleResolution": "node", + "moduleResolution": "Bundler", "resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", diff --git a/examples/next-openai-kasada-bot-protection/package.json b/examples/next-openai-kasada-bot-protection/package.json index d8376abab978..da7dd7974857 100644 --- a/examples/next-openai-kasada-bot-protection/package.json +++ b/examples/next-openai-kasada-bot-protection/package.json @@ -27,6 +27,6 @@ "eslint-config-next": "14.2.3", "postcss": "^8.4.49", "tailwindcss": "^3.4.15", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/next-openai-kasada-bot-protection/tsconfig.json b/examples/next-openai-kasada-bot-protection/tsconfig.json index 14d189328b91..cc6d2135b42f 100644 --- a/examples/next-openai-kasada-bot-protection/tsconfig.json +++ b/examples/next-openai-kasada-bot-protection/tsconfig.json @@ -9,7 +9,7 @@ "noEmit": true, "esModuleInterop": true, "module": "esnext", - "moduleResolution": "node", + "moduleResolution": "Bundler", "resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", diff --git a/examples/next-openai-pages/tsconfig.json b/examples/next-openai-pages/tsconfig.json index e06a4454ab06..eefe1a21d2f1 100644 --- a/examples/next-openai-pages/tsconfig.json +++ b/examples/next-openai-pages/tsconfig.json @@ -9,7 +9,7 @@ "noEmit": true, "esModuleInterop": true, "module": "esnext", - "moduleResolution": "node", + "moduleResolution": "Bundler", "resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", diff --git a/examples/next-openai-telemetry-sentry/package.json b/examples/next-openai-telemetry-sentry/package.json index e9b73d2a14e6..61937b94ebd1 100644 --- a/examples/next-openai-telemetry-sentry/package.json +++ b/examples/next-openai-telemetry-sentry/package.json @@ -33,6 +33,6 @@ "eslint-config-next": "14.2.3", "postcss": "^8.4.49", "tailwindcss": "^3.4.15", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/next-openai-telemetry-sentry/tsconfig.json b/examples/next-openai-telemetry-sentry/tsconfig.json index e06a4454ab06..eefe1a21d2f1 100644 --- a/examples/next-openai-telemetry-sentry/tsconfig.json +++ b/examples/next-openai-telemetry-sentry/tsconfig.json @@ -9,7 +9,7 @@ "noEmit": true, "esModuleInterop": true, "module": "esnext", - "moduleResolution": "node", + "moduleResolution": "Bundler", 
"resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", diff --git a/examples/next-openai-telemetry/package.json b/examples/next-openai-telemetry/package.json index a9437a3a3bab..96fee85584f5 100644 --- a/examples/next-openai-telemetry/package.json +++ b/examples/next-openai-telemetry/package.json @@ -31,6 +31,6 @@ "eslint-config-next": "14.2.3", "postcss": "^8.4.49", "tailwindcss": "^3.4.15", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/next-openai-telemetry/tsconfig.json b/examples/next-openai-telemetry/tsconfig.json index e06a4454ab06..eefe1a21d2f1 100644 --- a/examples/next-openai-telemetry/tsconfig.json +++ b/examples/next-openai-telemetry/tsconfig.json @@ -9,7 +9,7 @@ "noEmit": true, "esModuleInterop": true, "module": "esnext", - "moduleResolution": "node", + "moduleResolution": "Bundler", "resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", diff --git a/examples/next-openai-upstash-rate-limits/package.json b/examples/next-openai-upstash-rate-limits/package.json index a2fc895d5b7d..8e9799f0f750 100644 --- a/examples/next-openai-upstash-rate-limits/package.json +++ b/examples/next-openai-upstash-rate-limits/package.json @@ -28,6 +28,6 @@ "eslint-config-next": "14.2.3", "postcss": "^8.4.49", "tailwindcss": "^3.4.15", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/next-openai-upstash-rate-limits/tsconfig.json b/examples/next-openai-upstash-rate-limits/tsconfig.json index 83fba45906da..3f55fc95b820 100644 --- a/examples/next-openai-upstash-rate-limits/tsconfig.json +++ b/examples/next-openai-upstash-rate-limits/tsconfig.json @@ -9,7 +9,7 @@ "noEmit": true, "esModuleInterop": true, "module": "esnext", - "moduleResolution": "node", + "moduleResolution": "Bundler", "resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json index 98dbc49b0757..6268c42ada32 100644 --- a/examples/next-openai/package.json +++ b/examples/next-openai/package.json @@ -36,6 +36,6 @@ "eslint-config-next": "14.2.3", "postcss": "^8.4.49", "tailwindcss": "^3.4.15", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/next-openai/tsconfig.json b/examples/next-openai/tsconfig.json index 75b97d28fd6d..ea0521323c45 100644 --- a/examples/next-openai/tsconfig.json +++ b/examples/next-openai/tsconfig.json @@ -9,7 +9,7 @@ "noEmit": true, "esModuleInterop": true, "module": "esnext", - "moduleResolution": "node", + "moduleResolution": "Bundler", "resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", diff --git a/examples/node-http-server/package.json b/examples/node-http-server/package.json index 7b9a07bfa59d..9858efcbdd5f 100644 --- a/examples/node-http-server/package.json +++ b/examples/node-http-server/package.json @@ -16,6 +16,6 @@ "devDependencies": { "@types/node": "20.17.24", "tsx": "4.19.2", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/examples/node-http-server/tsconfig.json b/examples/node-http-server/tsconfig.json index 67fe4a2689d3..f54bf8b0f489 100644 --- a/examples/node-http-server/tsconfig.json +++ b/examples/node-http-server/tsconfig.json @@ -9,7 +9,7 @@ "types": ["node"], "esModuleInterop": true, "allowSyntheticDefaultImports": true, - "moduleResolution": "node", + "moduleResolution": "Bundler", "rootDir": "./src", "outDir": "./build", "skipLibCheck": true diff --git a/package.json b/package.json index c336659cec05..59cf129a2155 100644 --- a/package.json +++ b/package.json @@ 
-37,7 +37,7 @@ "react": "19.0.0-rc-cc1ec60d0d-20240607", "react-dom": "19.0.0-rc-cc1ec60d0d-20240607", "turbo": "2.4.4", - "typescript": "5.6.3", + "typescript": "5.8.3", "vitest": "2.1.4" }, "engines": { diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.ts index eeac30a672be..3bbb269d3d3a 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.ts @@ -273,7 +273,7 @@ function convertPartToLanguageModelPart( let mimeType: string | undefined = part.mimeType; let data: DataContent | URL; - let content: URL | ArrayBuffer | string; + let content: DataContent | URL | string; let normalizedData: Uint8Array | URL; const type = part.type; diff --git a/packages/ai/package.json b/packages/ai/package.json index 721e82689b3f..af9f4254f660 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -77,7 +77,7 @@ "react-dom": "^18", "react-server-dom-webpack": "18.3.0-canary-eb33bd747-20240312", "tsup": "^7.2.0", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/amazon-bedrock/package.json b/packages/amazon-bedrock/package.json index 90db80c87596..f46ccc9a1e56 100644 --- a/packages/amazon-bedrock/package.json +++ b/packages/amazon-bedrock/package.json @@ -40,7 +40,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8.3.0", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/anthropic/package.json b/packages/anthropic/package.json index ffe6ae31bb65..123ebfd34e6c 100644 --- a/packages/anthropic/package.json +++ b/packages/anthropic/package.json @@ -44,7 +44,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/azure/package.json b/packages/azure/package.json index 1d81f0e57e23..43ff8a277688 100644 --- a/packages/azure/package.json +++ b/packages/azure/package.json @@ -39,7 +39,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/cerebras/package.json b/packages/cerebras/package.json index af5418c4f904..4c60da85ad06 100644 --- a/packages/cerebras/package.json +++ b/packages/cerebras/package.json @@ -38,7 +38,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/codemod/package.json b/packages/codemod/package.json index 4be1da38d4a4..b3bdb2886a38 100644 --- a/packages/codemod/package.json +++ b/packages/codemod/package.json @@ -28,7 +28,7 @@ "msw": "2.6.4", "tsup": "^8", "tsx": "4.19.2", - "typescript": "5.6.3", + "typescript": "5.8.3", "vitest": "2.1.4" }, "bin": { diff --git a/packages/cohere/package.json b/packages/cohere/package.json index 709179bbf3a8..b77507e46430 100644 --- a/packages/cohere/package.json +++ b/packages/cohere/package.json @@ -38,7 +38,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/deepinfra/package.json b/packages/deepinfra/package.json index 729ccf146a10..40f02f504bdf 100644 --- 
a/packages/deepinfra/package.json +++ b/packages/deepinfra/package.json @@ -38,7 +38,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/deepseek/package.json b/packages/deepseek/package.json index a3934ecce999..3251e276e7ea 100644 --- a/packages/deepseek/package.json +++ b/packages/deepseek/package.json @@ -38,7 +38,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/fal/package.json b/packages/fal/package.json index 3f8ed292c82f..0281ddd6d78c 100644 --- a/packages/fal/package.json +++ b/packages/fal/package.json @@ -37,7 +37,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/fireworks/package.json b/packages/fireworks/package.json index 728e7b6673c4..3e41302b03e3 100644 --- a/packages/fireworks/package.json +++ b/packages/fireworks/package.json @@ -38,7 +38,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/google-vertex/package.json b/packages/google-vertex/package.json index c25e61a6f855..fe626f14cf43 100644 --- a/packages/google-vertex/package.json +++ b/packages/google-vertex/package.json @@ -59,7 +59,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/google/package.json b/packages/google/package.json index b0f09d0d8b59..eedf4290c7f0 100644 --- a/packages/google/package.json +++ b/packages/google/package.json @@ -45,7 +45,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/groq/package.json b/packages/groq/package.json index 676fd01a0248..045d1f1ecdd2 100644 --- a/packages/groq/package.json +++ b/packages/groq/package.json @@ -38,7 +38,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/luma/package.json b/packages/luma/package.json index 0f808711c89a..6568023edb2f 100644 --- a/packages/luma/package.json +++ b/packages/luma/package.json @@ -37,7 +37,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/mistral/package.json b/packages/mistral/package.json index ce0861769338..122df0696d0a 100644 --- a/packages/mistral/package.json +++ b/packages/mistral/package.json @@ -38,7 +38,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/openai-compatible/package.json b/packages/openai-compatible/package.json index e2869bf97843..f9b28eaf9ac2 100644 --- a/packages/openai-compatible/package.json +++ b/packages/openai-compatible/package.json @@ -45,7 +45,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": 
"workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/openai/package.json b/packages/openai/package.json index c710358479a8..40a805e0a592 100644 --- a/packages/openai/package.json +++ b/packages/openai/package.json @@ -45,7 +45,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/perplexity/package.json b/packages/perplexity/package.json index f5c8fbbe096a..294a4cf03479 100644 --- a/packages/perplexity/package.json +++ b/packages/perplexity/package.json @@ -37,7 +37,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/provider-utils/package.json b/packages/provider-utils/package.json index c8f603c576a9..f50ffde89dbf 100644 --- a/packages/provider-utils/package.json +++ b/packages/provider-utils/package.json @@ -45,7 +45,7 @@ "@vercel/ai-tsconfig": "workspace:*", "msw": "2.7.0", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/provider/package.json b/packages/provider/package.json index b7c60385b76b..32f2cad1da87 100644 --- a/packages/provider/package.json +++ b/packages/provider/package.json @@ -34,7 +34,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3" + "typescript": "5.8.3" }, "engines": { "node": ">=18" diff --git a/packages/react/package.json b/packages/react/package.json index 7011b36c5a42..1f7d2a86865c 100644 --- a/packages/react/package.json +++ b/packages/react/package.json @@ -48,7 +48,7 @@ "msw": "2.6.4", "react-dom": "^18", "tsup": "^7.2.0", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/replicate/package.json b/packages/replicate/package.json index 20063125d09d..fade66cc8ffc 100644 --- a/packages/replicate/package.json +++ b/packages/replicate/package.json @@ -37,7 +37,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/togetherai/package.json b/packages/togetherai/package.json index d8afb23fc753..5215bcce1886 100644 --- a/packages/togetherai/package.json +++ b/packages/togetherai/package.json @@ -38,7 +38,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/ui-utils/package.json b/packages/ui-utils/package.json index 965046b1fbf7..c32450d23eb5 100644 --- a/packages/ui-utils/package.json +++ b/packages/ui-utils/package.json @@ -41,7 +41,7 @@ "@types/react": "^18", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/valibot/package.json b/packages/valibot/package.json index a08a71bfdd1b..8e4ed037e045 100644 --- a/packages/valibot/package.json +++ b/packages/valibot/package.json @@ -33,7 +33,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "valibot": "^1.0.0-rc.0 || ^1.0.0" }, "peerDependencies": { diff --git 
a/packages/vue/package.json b/packages/vue/package.json index c4dbb5f2e1a4..e5c4f7efc7a3 100644 --- a/packages/vue/package.json +++ b/packages/vue/package.json @@ -45,7 +45,7 @@ "jsdom": "^24.0.0", "msw": "2.6.4", "tsup": "^7.2.0", - "typescript": "5.6.3", + "typescript": "5.8.3", "vitest": "2.1.4" }, "peerDependencies": { diff --git a/packages/xai/package.json b/packages/xai/package.json index 505eceec111a..3ac4e3f03d39 100644 --- a/packages/xai/package.json +++ b/packages/xai/package.json @@ -38,7 +38,7 @@ "@types/node": "20.17.24", "@vercel/ai-tsconfig": "workspace:*", "tsup": "^8", - "typescript": "5.6.3", + "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index dcc0685ee686..9cdde8c2e92c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -51,11 +51,11 @@ importers: specifier: 2.4.4 version: 2.4.4 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 vitest: specifier: 2.1.4 - version: 2.1.4(@edge-runtime/vm@5.0.0)(@types/node@22.7.4)(jsdom@26.0.0)(msw@2.7.0(@types/node@22.7.4)(typescript@5.6.3))(terser@5.31.3) + version: 2.1.4(@edge-runtime/vm@5.0.0)(@types/node@22.7.4)(jsdom@26.0.0)(msw@2.7.0(@types/node@22.7.4)(typescript@5.8.3))(terser@5.31.3) examples/ai-core: dependencies: @@ -157,7 +157,7 @@ importers: version: 2.0.0 valibot: specifier: ^1.0.0-rc.0 || ^1.0.0 - version: 1.0.0-rc.0(typescript@5.6.3) + version: 1.0.0-rc.0(typescript@5.8.3) zod: specifier: 3.23.8 version: 3.23.8 @@ -172,8 +172,8 @@ importers: specifier: 4.19.2 version: 4.19.2 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/express: dependencies: @@ -200,8 +200,8 @@ importers: specifier: 4.19.2 version: 4.19.2 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/fastify: dependencies: @@ -225,8 +225,8 @@ importers: specifier: 4.19.2 version: 4.19.2 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/hono: dependencies: @@ -253,8 +253,8 @@ importers: specifier: 4.19.2 version: 4.19.2 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/mcp: dependencies: @@ -287,8 +287,8 @@ importers: specifier: 4.19.2 version: 4.19.2 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/nest: dependencies: @@ -422,16 +422,16 @@ importers: version: 8.57.1 eslint-config-next: specifier: 14.2.3 - version: 14.2.3(eslint@8.57.1)(typescript@5.6.3) + version: 14.2.3(eslint@8.57.1)(typescript@5.8.3) postcss: specifier: ^8.4.49 version: 8.4.49 tailwindcss: specifier: ^3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/next-google-vertex: dependencies: @@ -471,10 +471,10 @@ importers: version: 8.4.49 tailwindcss: specifier: ^3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/next-langchain: dependencies: @@ -520,16 +520,16 @@ importers: version: 8.57.1 eslint-config-next: specifier: 14.2.3 - version: 14.2.3(eslint@8.57.1)(typescript@5.6.3) + version: 14.2.3(eslint@8.57.1)(typescript@5.8.3) postcss: specifier: ^8.4.49 version: 8.4.49 tailwindcss: specifier: ^3.4.15 - 
version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/next-openai: dependencies: @@ -602,16 +602,16 @@ importers: version: 8.57.1 eslint-config-next: specifier: 14.2.3 - version: 14.2.3(eslint@8.57.1)(typescript@5.6.3) + version: 14.2.3(eslint@8.57.1)(typescript@5.8.3) postcss: specifier: ^8.4.49 version: 8.4.49 tailwindcss: specifier: ^3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/next-openai-kasada-bot-protection: dependencies: @@ -657,16 +657,16 @@ importers: version: 8.57.1 eslint-config-next: specifier: 14.2.3 - version: 14.2.3(eslint@8.57.1)(typescript@5.6.3) + version: 14.2.3(eslint@8.57.1)(typescript@5.8.3) postcss: specifier: ^8.4.49 version: 8.4.49 tailwindcss: specifier: ^3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/next-openai-pages: dependencies: @@ -779,16 +779,16 @@ importers: version: 8.57.1 eslint-config-next: specifier: 14.2.3 - version: 14.2.3(eslint@8.57.1)(typescript@5.6.3) + version: 14.2.3(eslint@8.57.1)(typescript@5.8.3) postcss: specifier: ^8.4.49 version: 8.4.49 tailwindcss: specifier: ^3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/next-openai-telemetry-sentry: dependencies: @@ -852,16 +852,16 @@ importers: version: 8.57.1 eslint-config-next: specifier: 14.2.3 - version: 14.2.3(eslint@8.57.1)(typescript@5.6.3) + version: 14.2.3(eslint@8.57.1)(typescript@5.8.3) postcss: specifier: ^8.4.49 version: 8.4.49 tailwindcss: specifier: ^3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/next-openai-upstash-rate-limits: dependencies: @@ -910,16 +910,16 @@ importers: version: 8.57.1 eslint-config-next: specifier: 14.2.3 - version: 14.2.3(eslint@8.57.1)(typescript@5.6.3) + version: 14.2.3(eslint@8.57.1)(typescript@5.8.3) postcss: specifier: ^8.4.49 version: 8.4.49 tailwindcss: specifier: ^3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/node-http-server: dependencies: @@ -946,8 +946,8 @@ importers: specifier: 4.19.2 version: 4.19.2 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 examples/nuxt-openai: dependencies: @@ -966,13 +966,13 @@ importers: devDependencies: '@nuxt/devtools': specifier: 1.6.3 - version: 1.6.3(rollup@4.34.9)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3)) + version: 1.6.3(rollup@4.34.9)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.8.3)) '@nuxt/ui-templates': specifier: 1.3.4 version: 1.3.4 
'@nuxtjs/tailwindcss': specifier: 6.12.2 - version: 6.12.2(magicast@0.3.5)(rollup@4.34.9)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 6.12.2(magicast@0.3.5)(rollup@4.34.9)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) '@types/node': specifier: 20.17.24 version: 20.17.24 @@ -990,19 +990,19 @@ importers: version: 3.5.12 nuxt: specifier: 3.14.159 - version: 3.14.159(@parcel/watcher@2.4.1)(@types/node@20.17.24)(@upstash/redis@1.34.3)(eslint@9.21.0(jiti@2.4.0))(ioredis@5.4.1)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(webpack-sources@3.2.3) + version: 3.14.159(@parcel/watcher@2.4.1)(@types/node@20.17.24)(@upstash/redis@1.34.3)(eslint@9.21.0(jiti@2.4.0))(ioredis@5.4.1)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.8.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(webpack-sources@3.2.3) tailwindcss: specifier: 3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) unctx: specifier: 2.3.1 version: 2.3.1 vue: specifier: 3.5.13 - version: 3.5.13(typescript@5.6.3) + version: 3.5.13(typescript@5.8.3) vue-router: specifier: 4.5.0 - version: 4.5.0(vue@3.5.13(typescript@5.6.3)) + version: 4.5.0(vue@3.5.13(typescript@5.8.3)) examples/sveltekit-openai: devDependencies: @@ -1141,10 +1141,10 @@ importers: version: 18.3.0-canary-eb33bd747-20240312(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(webpack@5.96.1(esbuild@0.18.20)) tsup: specifier: ^7.2.0 - version: 7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1190,10 +1190,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8.3.0 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1215,10 +1215,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1243,10 +1243,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1271,10 +1271,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 
version: 3.23.8 @@ -1314,19 +1314,19 @@ importers: version: link:../../tools/tsconfig msw: specifier: 2.6.4 - version: 2.6.4(@types/node@20.17.24)(typescript@5.6.3) + version: 2.6.4(@types/node@20.17.24)(typescript@5.8.3) tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) tsx: specifier: 4.19.2 version: 4.19.2 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 vitest: specifier: 2.1.4 - version: 2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@26.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3))(terser@5.31.3) + version: 2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@26.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.8.3))(terser@5.31.3) packages/cohere: dependencies: @@ -1345,10 +1345,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1373,10 +1373,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1401,10 +1401,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1426,10 +1426,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1454,10 +1454,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1479,10 +1479,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1513,10 +1513,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 
8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1538,10 +1538,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1563,10 +1563,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1588,10 +1588,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1613,10 +1613,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.2.4(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.2.4(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1638,10 +1638,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1663,10 +1663,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1688,10 +1688,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 packages/provider-utils: dependencies: @@ -1710,13 +1710,13 @@ importers: version: link:../../tools/tsconfig msw: specifier: 2.7.0 - version: 2.7.0(@types/node@20.17.24)(typescript@5.6.3) + version: 2.7.0(@types/node@20.17.24)(typescript@5.8.3) tsup: specifier: ^8 - version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 
@@ -1771,16 +1771,16 @@ importers: version: 24.0.0 msw: specifier: 2.6.4 - version: 2.6.4(@types/node@20.17.24)(typescript@5.6.3) + version: 2.6.4(@types/node@20.17.24)(typescript@5.8.3) react-dom: specifier: ^18 version: 18.2.0(react@18.3.1) tsup: specifier: ^7.2.0 - version: 7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1802,10 +1802,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1894,10 +1894,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1928,10 +1928,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -1940,7 +1940,7 @@ importers: dependencies: '@valibot/to-json-schema': specifier: ^1.0.0-rc.0 || ^1.0.0 - version: 1.0.0-rc.0(valibot@1.0.0-rc.0(typescript@5.6.3)) + version: 1.0.0-rc.0(valibot@1.0.0-rc.0(typescript@5.8.3)) ai: specifier: 5.0.0-canary.2 version: link:../ai @@ -1953,13 +1953,13 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 valibot: specifier: ^1.0.0-rc.0 || ^1.0.0 - version: 1.0.0-rc.0(typescript@5.6.3) + version: 1.0.0-rc.0(typescript@5.8.3) packages/vue: dependencies: @@ -1971,10 +1971,10 @@ importers: version: link:../ui-utils swrv: specifier: ^1.0.4 - version: 1.0.4(vue@3.3.8(typescript@5.6.3)) + version: 1.0.4(vue@3.3.8(typescript@5.8.3)) vue: specifier: ^3.3.4 - version: 3.3.8(typescript@5.6.3) + version: 3.3.8(typescript@5.8.3) devDependencies: '@testing-library/jest-dom': specifier: ^6.6.3 @@ -1984,7 +1984,7 @@ importers: version: 14.5.2(@testing-library/dom@10.4.0) '@testing-library/vue': specifier: ^8.1.0 - version: 8.1.0(@vue/compiler-sfc@3.5.13)(@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.6.3)))(vue@3.3.8(typescript@5.6.3)) + version: 8.1.0(@vue/compiler-sfc@3.5.13)(@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.8.3)))(vue@3.3.8(typescript@5.8.3)) '@types/node': specifier: 20.17.24 version: 20.17.24 @@ -1993,7 +1993,7 @@ importers: version: link:../../tools/tsconfig '@vitejs/plugin-vue': specifier: 5.2.0 - version: 
5.2.0(vite@6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))(vue@3.3.8(typescript@5.6.3)) + version: 5.2.0(vite@6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))(vue@3.3.8(typescript@5.8.3)) eslint: specifier: 8.57.1 version: 8.57.1 @@ -2005,16 +2005,16 @@ importers: version: 24.0.0 msw: specifier: 2.6.4 - version: 2.6.4(@types/node@20.17.24)(typescript@5.6.3) + version: 2.6.4(@types/node@20.17.24)(typescript@5.8.3) tsup: specifier: ^7.2.0 - version: 7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3) + version: 7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 vitest: specifier: 2.1.4 - version: 2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@24.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3))(terser@5.31.3) + version: 2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@24.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.8.3))(terser@5.31.3) packages/xai: dependencies: @@ -2036,10 +2036,10 @@ importers: version: link:../../tools/tsconfig tsup: specifier: ^8 - version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0) typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 zod: specifier: 3.23.8 version: 3.23.8 @@ -2050,14 +2050,14 @@ importers: specifier: 4.19.2 version: 4.19.2 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 tools/eslint-config: dependencies: eslint-config-next: specifier: ^14.2.3 - version: 14.2.3(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3) + version: 14.2.3(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3) eslint-config-prettier: specifier: ^9.1.0 version: 9.1.0(eslint@9.21.0(jiti@2.4.0)) @@ -2074,8 +2074,8 @@ importers: specifier: 4.19.2 version: 4.19.2 typescript: - specifier: 5.6.3 - version: 5.6.3 + specifier: 5.8.3 + version: 5.8.3 tools/tsconfig: {} @@ -14029,6 +14029,11 @@ packages: engines: {node: '>=14.17'} hasBin: true + typescript@5.8.3: + resolution: {integrity: sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==} + engines: {node: '>=14.17'} + hasBin: true + ufo@1.5.4: resolution: {integrity: sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==} @@ -17849,13 +17854,13 @@ snapshots: rc9: 2.1.2 semver: 7.6.3 - '@nuxt/devtools@1.6.3(rollup@4.34.9)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3))': + '@nuxt/devtools@1.6.3(rollup@4.34.9)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.8.3))': dependencies: '@antfu/utils': 0.7.10 '@nuxt/devtools-kit': 1.6.3(magicast@0.3.5)(rollup@4.34.9)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) '@nuxt/devtools-wizard': 1.6.3 '@nuxt/kit': 3.14.1592(magicast@0.3.5)(rollup@4.34.9) - '@vue/devtools-core': 7.6.4(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3)) + '@vue/devtools-core': 7.6.4(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.8.3)) '@vue/devtools-kit': 7.6.4 birpc: 0.2.19 consola: 3.2.3 @@ -18017,12 +18022,12 @@ snapshots: '@nuxt/ui-templates@1.3.4': {} - 
'@nuxt/vite-builder@3.14.159(@types/node@20.17.24)(eslint@9.21.0(jiti@2.4.0))(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vue@3.5.13(typescript@5.6.3))(webpack-sources@3.2.3)': + '@nuxt/vite-builder@3.14.159(@types/node@20.17.24)(eslint@9.21.0(jiti@2.4.0))(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.8.3)(vue@3.5.13(typescript@5.8.3))(webpack-sources@3.2.3)': dependencies: '@nuxt/kit': 3.14.159(magicast@0.3.5)(rollup@4.34.9) '@rollup/plugin-replace': 6.0.1(rollup@4.34.9) - '@vitejs/plugin-vue': 5.2.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3)) - '@vitejs/plugin-vue-jsx': 4.1.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3)) + '@vitejs/plugin-vue': 5.2.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.8.3)) + '@vitejs/plugin-vue-jsx': 4.1.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.8.3)) autoprefixer: 10.4.20(postcss@8.4.49) clear: 0.1.0 consola: 3.2.3 @@ -18051,8 +18056,8 @@ snapshots: unplugin: 1.15.0(webpack-sources@3.2.3) vite: 5.4.11(@types/node@20.17.24)(terser@5.31.3) vite-node: 2.1.4(@types/node@20.17.24)(terser@5.31.3) - vite-plugin-checker: 0.8.0(eslint@9.21.0(jiti@2.4.0))(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) - vue: 3.5.13(typescript@5.6.3) + vite-plugin-checker: 0.8.0(eslint@9.21.0(jiti@2.4.0))(optionator@0.9.4)(typescript@5.8.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) + vue: 3.5.13(typescript@5.8.3) vue-bundle-renderer: 2.1.1 transitivePeerDependencies: - '@biomejs/biome' @@ -18085,7 +18090,7 @@ snapshots: transitivePeerDependencies: - encoding - '@nuxtjs/tailwindcss@6.12.2(magicast@0.3.5)(rollup@4.34.9)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))': + '@nuxtjs/tailwindcss@6.12.2(magicast@0.3.5)(rollup@4.34.9)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))': dependencies: '@nuxt/kit': 3.14.159(magicast@0.3.5)(rollup@4.34.9) autoprefixer: 10.4.20(postcss@8.4.49) @@ -18096,8 +18101,8 @@ snapshots: pathe: 1.1.2 postcss: 8.4.49 postcss-nesting: 13.0.1(postcss@8.4.49) - tailwind-config-viewer: 2.0.4(tailwindcss@3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))) - tailwindcss: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + tailwind-config-viewer: 2.0.4(tailwindcss@3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))) + tailwindcss: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) ufo: 1.5.4 unctx: 2.3.1 transitivePeerDependencies: @@ -20446,12 +20451,12 @@ snapshots: dependencies: '@testing-library/dom': 10.4.0 - '@testing-library/vue@8.1.0(@vue/compiler-sfc@3.5.13)(@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.6.3)))(vue@3.3.8(typescript@5.6.3))': + '@testing-library/vue@8.1.0(@vue/compiler-sfc@3.5.13)(@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.8.3)))(vue@3.3.8(typescript@5.8.3))': dependencies: '@babel/runtime': 7.25.7 '@testing-library/dom': 9.3.3 - '@vue/test-utils': 2.4.2(@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.6.3)))(vue@3.3.8(typescript@5.6.3)) - vue: 3.3.8(typescript@5.6.3) + '@vue/test-utils': 2.4.2(@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.8.3)))(vue@3.3.8(typescript@5.8.3)) + vue: 3.3.8(typescript@5.8.3) optionalDependencies: '@vue/compiler-sfc': 3.5.13 transitivePeerDependencies: @@ -20772,29 +20777,29 @@ snapshots: transitivePeerDependencies: - supports-color - 
'@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3)': + '@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3)': dependencies: '@typescript-eslint/scope-manager': 7.2.0 '@typescript-eslint/types': 7.2.0 - '@typescript-eslint/typescript-estree': 7.2.0(typescript@5.6.3) + '@typescript-eslint/typescript-estree': 7.2.0(typescript@5.8.3) '@typescript-eslint/visitor-keys': 7.2.0 debug: 4.4.0(supports-color@9.4.0) eslint: 8.57.1 optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3)': + '@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3)': dependencies: '@typescript-eslint/scope-manager': 7.2.0 '@typescript-eslint/types': 7.2.0 - '@typescript-eslint/typescript-estree': 7.2.0(typescript@5.6.3) + '@typescript-eslint/typescript-estree': 7.2.0(typescript@5.8.3) '@typescript-eslint/visitor-keys': 7.2.0 debug: 4.4.0(supports-color@9.4.0) eslint: 9.21.0(jiti@2.4.0) optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - supports-color @@ -20882,7 +20887,7 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/typescript-estree@7.2.0(typescript@5.6.3)': + '@typescript-eslint/typescript-estree@7.2.0(typescript@5.8.3)': dependencies: '@typescript-eslint/types': 7.2.0 '@typescript-eslint/visitor-keys': 7.2.0 @@ -20891,9 +20896,9 @@ snapshots: is-glob: 4.0.3 minimatch: 9.0.3 semver: 7.6.3 - ts-api-utils: 1.3.0(typescript@5.6.3) + ts-api-utils: 1.3.0(typescript@5.8.3) optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - supports-color @@ -20984,14 +20989,14 @@ snapshots: '@unhead/schema': 1.11.11 '@unhead/shared': 1.11.11 - '@unhead/vue@1.11.11(vue@3.5.13(typescript@5.6.3))': + '@unhead/vue@1.11.11(vue@3.5.13(typescript@5.8.3))': dependencies: '@unhead/schema': 1.11.11 '@unhead/shared': 1.11.11 defu: 6.1.4 hookable: 5.5.3 unhead: 1.11.11 - vue: 3.5.13(typescript@5.6.3) + vue: 3.5.13(typescript@5.8.3) '@upstash/core-analytics@0.0.6': dependencies: @@ -21009,9 +21014,9 @@ snapshots: dependencies: crypto-js: 4.2.0 - '@valibot/to-json-schema@1.0.0-rc.0(valibot@1.0.0-rc.0(typescript@5.6.3))': + '@valibot/to-json-schema@1.0.0-rc.0(valibot@1.0.0-rc.0(typescript@5.8.3))': dependencies: - valibot: 1.0.0-rc.0(typescript@5.6.3) + valibot: 1.0.0-rc.0(typescript@5.8.3) '@vercel/blob@0.26.0': dependencies: @@ -21098,25 +21103,25 @@ snapshots: transitivePeerDependencies: - supports-color - '@vitejs/plugin-vue-jsx@4.1.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3))': + '@vitejs/plugin-vue-jsx@4.1.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.8.3))': dependencies: '@babel/core': 7.26.0 '@babel/plugin-transform-typescript': 7.25.9(@babel/core@7.26.0) '@vue/babel-plugin-jsx': 1.2.5(@babel/core@7.26.0) vite: 5.4.11(@types/node@20.17.24)(terser@5.31.3) - vue: 3.5.13(typescript@5.6.3) + vue: 3.5.13(typescript@5.8.3) transitivePeerDependencies: - supports-color - '@vitejs/plugin-vue@5.2.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3))': + '@vitejs/plugin-vue@5.2.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.8.3))': dependencies: vite: 5.4.11(@types/node@20.17.24)(terser@5.31.3) - vue: 3.5.13(typescript@5.6.3) + vue: 3.5.13(typescript@5.8.3) - 
'@vitejs/plugin-vue@5.2.0(vite@6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))(vue@3.3.8(typescript@5.6.3))': + '@vitejs/plugin-vue@5.2.0(vite@6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))(vue@3.3.8(typescript@5.8.3))': dependencies: vite: 6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0) - vue: 3.3.8(typescript@5.6.3) + vue: 3.3.8(typescript@5.8.3) '@vitest/expect@2.1.4': dependencies: @@ -21132,22 +21137,22 @@ snapshots: chai: 5.2.0 tinyrainbow: 2.0.0 - '@vitest/mocker@2.1.4(msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3))(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))': + '@vitest/mocker@2.1.4(msw@2.6.4(@types/node@20.17.24)(typescript@5.8.3))(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))': dependencies: '@vitest/spy': 2.1.4 estree-walker: 3.0.3 magic-string: 0.30.17 optionalDependencies: - msw: 2.6.4(@types/node@20.17.24)(typescript@5.6.3) + msw: 2.6.4(@types/node@20.17.24)(typescript@5.8.3) vite: 5.4.11(@types/node@20.17.24)(terser@5.31.3) - '@vitest/mocker@2.1.4(msw@2.7.0(@types/node@22.7.4)(typescript@5.6.3))(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3))': + '@vitest/mocker@2.1.4(msw@2.7.0(@types/node@22.7.4)(typescript@5.8.3))(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3))': dependencies: '@vitest/spy': 2.1.4 estree-walker: 3.0.3 magic-string: 0.30.17 optionalDependencies: - msw: 2.7.0(@types/node@22.7.4)(typescript@5.6.3) + msw: 2.7.0(@types/node@22.7.4)(typescript@5.8.3) vite: 5.4.11(@types/node@22.7.4)(terser@5.31.3) '@vitest/mocker@3.0.7(msw@2.7.0(@types/node@22.7.4)(typescript@5.6.3))(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))': @@ -21209,7 +21214,7 @@ snapshots: loupe: 3.1.3 tinyrainbow: 2.0.0 - '@vue-macros/common@1.15.0(rollup@4.34.9)(vue@3.5.13(typescript@5.6.3))': + '@vue-macros/common@1.15.0(rollup@4.34.9)(vue@3.5.13(typescript@5.8.3))': dependencies: '@babel/types': 7.26.0 '@rollup/pluginutils': 5.1.3(rollup@4.34.9) @@ -21218,7 +21223,7 @@ snapshots: local-pkg: 0.5.1 magic-string-ast: 0.6.2 optionalDependencies: - vue: 3.5.13(typescript@5.6.3) + vue: 3.5.13(typescript@5.8.3) transitivePeerDependencies: - rollup @@ -21314,7 +21319,7 @@ snapshots: '@vue/devtools-api@6.6.4': {} - '@vue/devtools-core@7.6.4(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3))': + '@vue/devtools-core@7.6.4(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.8.3))': dependencies: '@vue/devtools-kit': 7.6.4 '@vue/devtools-shared': 7.6.4 @@ -21322,7 +21327,7 @@ snapshots: nanoid: 3.3.8 pathe: 1.1.2 vite-hot-client: 0.2.3(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) - vue: 3.5.13(typescript@5.6.3) + vue: 3.5.13(typescript@5.8.3) transitivePeerDependencies: - vite @@ -21388,24 +21393,24 @@ snapshots: '@vue/shared': 3.5.13 csstype: 3.1.3 - '@vue/server-renderer@3.3.8(vue@3.3.8(typescript@5.6.3))': + '@vue/server-renderer@3.3.8(vue@3.3.8(typescript@5.8.3))': dependencies: '@vue/compiler-ssr': 3.3.8 '@vue/shared': 3.3.8 - vue: 3.3.8(typescript@5.6.3) + vue: 3.3.8(typescript@5.8.3) - '@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.6.3))': + '@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.8.3))': dependencies: '@vue/compiler-ssr': 3.5.13 '@vue/shared': 3.5.13 - vue: 3.3.8(typescript@5.6.3) + vue: 3.3.8(typescript@5.8.3) optional: true - '@vue/server-renderer@3.5.13(vue@3.5.13(typescript@5.6.3))': + '@vue/server-renderer@3.5.13(vue@3.5.13(typescript@5.8.3))': dependencies: '@vue/compiler-ssr': 
3.5.13 '@vue/shared': 3.5.13 - vue: 3.5.13(typescript@5.6.3) + vue: 3.5.13(typescript@5.8.3) '@vue/shared@3.3.8': {} @@ -21413,13 +21418,13 @@ snapshots: '@vue/shared@3.5.13': {} - '@vue/test-utils@2.4.2(@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.6.3)))(vue@3.3.8(typescript@5.6.3))': + '@vue/test-utils@2.4.2(@vue/server-renderer@3.5.13(vue@3.3.8(typescript@5.8.3)))(vue@3.3.8(typescript@5.8.3))': dependencies: js-beautify: 1.14.11 - vue: 3.3.8(typescript@5.6.3) + vue: 3.3.8(typescript@5.8.3) vue-component-type-helpers: 1.8.22 optionalDependencies: - '@vue/server-renderer': 3.5.13(vue@3.3.8(typescript@5.6.3)) + '@vue/server-renderer': 3.5.13(vue@3.3.8(typescript@5.8.3)) '@webassemblyjs/ast@1.12.1': dependencies: @@ -23282,38 +23287,38 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-config-next@14.2.3(eslint@8.57.1)(typescript@5.6.3): + eslint-config-next@14.2.3(eslint@8.57.1)(typescript@5.8.3): dependencies: '@next/eslint-plugin-next': 14.2.3 '@rushstack/eslint-patch': 1.10.4 - '@typescript-eslint/parser': 7.2.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/parser': 7.2.0(eslint@8.57.1)(typescript@5.8.3) eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) eslint-plugin-jsx-a11y: 6.9.0(eslint@8.57.1) eslint-plugin-react: 7.35.0(eslint@8.57.1) eslint-plugin-react-hooks: 4.6.2(eslint@8.57.1) optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - eslint-import-resolver-webpack - supports-color - eslint-config-next@14.2.3(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3): + eslint-config-next@14.2.3(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3): dependencies: '@next/eslint-plugin-next': 14.2.3 '@rushstack/eslint-patch': 1.10.4 - '@typescript-eslint/parser': 7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3) + '@typescript-eslint/parser': 7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3) eslint: 9.21.0(jiti@2.4.0) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) + eslint-import-resolver-typescript: 
3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) eslint-plugin-jsx-a11y: 6.9.0(eslint@9.21.0(jiti@2.4.0)) eslint-plugin-react: 7.35.0(eslint@9.21.0(jiti@2.4.0)) eslint-plugin-react-hooks: 4.6.2(eslint@9.21.0(jiti@2.4.0)) optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - eslint-import-resolver-webpack - supports-color @@ -23360,13 +23365,13 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1): + eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1): dependencies: debug: 4.4.0(supports-color@9.4.0) enhanced-resolve: 5.17.1 eslint: 8.57.1 - eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) fast-glob: 3.3.2 get-tsconfig: 4.7.2 is-core-module: 2.13.1 @@ -23377,13 +23382,13 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)): + eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)): dependencies: debug: 4.4.0(supports-color@9.4.0) enhanced-resolve: 5.17.1 eslint: 9.21.0(jiti@2.4.0) - eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) - eslint-plugin-import: 
2.29.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) + eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) fast-glob: 3.3.2 get-tsconfig: 4.7.2 is-core-module: 2.13.1 @@ -23405,25 +23410,25 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): + eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 7.2.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/parser': 7.2.0(eslint@8.57.1)(typescript@5.8.3) eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1) + eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1) transitivePeerDependencies: - supports-color - eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)): + eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3) + '@typescript-eslint/parser': 7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3) eslint: 9.21.0(jiti@2.4.0) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)) + eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)) transitivePeerDependencies: - supports-color @@ -23438,25 +23443,25 @@ snapshots: transitivePeerDependencies: - supports-color - 
eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): + eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 7.2.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/parser': 7.2.0(eslint@8.57.1)(typescript@5.8.3) eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1) + eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1) transitivePeerDependencies: - supports-color - eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)): + eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3) + '@typescript-eslint/parser': 7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3) eslint: 9.21.0(jiti@2.4.0) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)) + eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)) transitivePeerDependencies: - supports-color @@ -23487,7 +23492,7 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-plugin-import@2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): + eslint-plugin-import@2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): dependencies: array-includes: 3.1.8 array.prototype.findlastindex: 1.2.5 @@ -23497,7 +23502,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 
2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) hasown: 2.0.2 is-core-module: 2.15.0 is-glob: 4.0.3 @@ -23508,13 +23513,13 @@ snapshots: semver: 6.3.1 tsconfig-paths: 3.15.0 optionalDependencies: - '@typescript-eslint/parser': 7.2.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/parser': 7.2.0(eslint@8.57.1)(typescript@5.8.3) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack - supports-color - eslint-plugin-import@2.29.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)): + eslint-plugin-import@2.29.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)): dependencies: array-includes: 3.1.8 array.prototype.findlastindex: 1.2.5 @@ -23524,7 +23529,7 @@ snapshots: doctrine: 2.1.0 eslint: 9.21.0(jiti@2.4.0) eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) hasown: 2.0.2 is-core-module: 2.15.0 is-glob: 4.0.3 @@ -23535,7 +23540,7 @@ snapshots: semver: 6.3.1 tsconfig-paths: 3.15.0 optionalDependencies: - '@typescript-eslint/parser': 7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3) + '@typescript-eslint/parser': 7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.8.3) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -26537,7 +26542,7 @@ snapshots: ms@2.1.3: {} - msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3): + msw@2.6.4(@types/node@20.17.24)(typescript@5.8.3): dependencies: '@bundled-es-modules/cookie': 2.0.1 '@bundled-es-modules/statuses': 1.0.1 @@ -26558,11 +26563,11 @@ snapshots: type-fest: 4.26.1 yargs: 17.7.2 optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - '@types/node' - msw@2.7.0(@types/node@20.17.24)(typescript@5.6.3): + msw@2.7.0(@types/node@20.17.24)(typescript@5.8.3): dependencies: '@bundled-es-modules/cookie': 2.0.1 '@bundled-es-modules/statuses': 1.0.1 @@ -26583,7 +26588,7 @@ snapshots: type-fest: 4.26.1 yargs: 17.7.2 optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - '@types/node' @@ -26613,6 +26618,32 @@ snapshots: - '@types/node' optional: true + msw@2.7.0(@types/node@22.7.4)(typescript@5.8.3): + dependencies: + '@bundled-es-modules/cookie': 2.0.1 + '@bundled-es-modules/statuses': 1.0.1 + '@bundled-es-modules/tough-cookie': 0.1.6 + '@inquirer/confirm': 5.0.2(@types/node@22.7.4) + '@mswjs/interceptors': 
0.37.5 + '@open-draft/deferred-promise': 2.2.0 + '@open-draft/until': 2.1.0 + '@types/cookie': 0.6.0 + '@types/statuses': 2.0.5 + graphql: 16.9.0 + headers-polyfill: 4.0.3 + is-node-process: 1.2.0 + outvariant: 1.4.3 + path-to-regexp: 6.3.0 + picocolors: 1.1.1 + strict-event-emitter: 0.5.1 + type-fest: 4.26.1 + yargs: 17.7.2 + optionalDependencies: + typescript: 5.8.3 + transitivePeerDependencies: + - '@types/node' + optional: true + multer@1.4.4-lts.1: dependencies: append-field: 1.0.0 @@ -26762,7 +26793,7 @@ snapshots: - '@babel/core' - babel-plugin-macros - nitropack@2.10.4(@upstash/redis@1.34.3)(typescript@5.6.3): + nitropack@2.10.4(@upstash/redis@1.34.3)(typescript@5.8.3): dependencies: '@cloudflare/kv-asset-handler': 0.3.4 '@netlify/functions': 2.8.2 @@ -26811,7 +26842,7 @@ snapshots: node-fetch-native: 1.6.4 ofetch: 1.4.1 ohash: 1.1.4 - openapi-typescript: 7.4.3(typescript@5.6.3) + openapi-typescript: 7.4.3(typescript@5.8.3) pathe: 1.1.2 perfect-debounce: 1.0.0 pkg-types: 1.2.1 @@ -26937,18 +26968,18 @@ snapshots: nuxi@3.15.0: {} - nuxt@3.14.159(@parcel/watcher@2.4.1)(@types/node@20.17.24)(@upstash/redis@1.34.3)(eslint@9.21.0(jiti@2.4.0))(ioredis@5.4.1)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(webpack-sources@3.2.3): + nuxt@3.14.159(@parcel/watcher@2.4.1)(@types/node@20.17.24)(@upstash/redis@1.34.3)(eslint@9.21.0(jiti@2.4.0))(ioredis@5.4.1)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.8.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(webpack-sources@3.2.3): dependencies: '@nuxt/devalue': 2.0.2 - '@nuxt/devtools': 1.6.3(rollup@4.34.9)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3)) + '@nuxt/devtools': 1.6.3(rollup@4.34.9)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.8.3)) '@nuxt/kit': 3.14.159(magicast@0.3.5)(rollup@4.34.9) '@nuxt/schema': 3.14.159(magicast@0.3.5)(rollup@4.34.9) '@nuxt/telemetry': 2.6.0(magicast@0.3.5)(rollup@4.34.9) - '@nuxt/vite-builder': 3.14.159(@types/node@20.17.24)(eslint@9.21.0(jiti@2.4.0))(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vue@3.5.13(typescript@5.6.3))(webpack-sources@3.2.3) + '@nuxt/vite-builder': 3.14.159(@types/node@20.17.24)(eslint@9.21.0(jiti@2.4.0))(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.8.3)(vue@3.5.13(typescript@5.8.3))(webpack-sources@3.2.3) '@unhead/dom': 1.11.11 '@unhead/shared': 1.11.11 '@unhead/ssr': 1.11.11 - '@unhead/vue': 1.11.11(vue@3.5.13(typescript@5.6.3)) + '@unhead/vue': 1.11.11(vue@3.5.13(typescript@5.8.3)) '@vue/shared': 3.5.12 acorn: 8.14.0 c12: 2.0.1(magicast@0.3.5) @@ -26974,7 +27005,7 @@ snapshots: magic-string: 0.30.12 mlly: 1.7.2 nanotar: 0.1.1 - nitropack: 2.10.4(@upstash/redis@1.34.3)(typescript@5.6.3) + nitropack: 2.10.4(@upstash/redis@1.34.3)(typescript@5.8.3) nuxi: 3.15.0 nypm: 0.3.12 ofetch: 1.4.1 @@ -26996,13 +27027,13 @@ snapshots: unhead: 1.11.11 unimport: 3.13.1(rollup@4.34.9)(webpack-sources@3.2.3) unplugin: 1.15.0(webpack-sources@3.2.3) - unplugin-vue-router: 0.10.8(rollup@4.34.9)(vue-router@4.5.0(vue@3.5.13(typescript@5.6.3)))(vue@3.5.13(typescript@5.6.3))(webpack-sources@3.2.3) + unplugin-vue-router: 0.10.8(rollup@4.34.9)(vue-router@4.5.0(vue@3.5.13(typescript@5.8.3)))(vue@3.5.13(typescript@5.8.3))(webpack-sources@3.2.3) unstorage: 1.13.1(@upstash/redis@1.34.3)(ioredis@5.4.1) untyped: 1.5.1 - vue: 3.5.13(typescript@5.6.3) + vue: 
3.5.13(typescript@5.8.3) vue-bundle-renderer: 2.1.1 vue-devtools-stub: 0.1.0 - vue-router: 4.5.0(vue@3.5.13(typescript@5.6.3)) + vue-router: 4.5.0(vue@3.5.13(typescript@5.8.3)) optionalDependencies: '@parcel/watcher': 2.4.1 '@types/node': 20.17.24 @@ -27211,14 +27242,14 @@ snapshots: openapi-types@12.1.3: {} - openapi-typescript@7.4.3(typescript@5.6.3): + openapi-typescript@7.4.3(typescript@5.8.3): dependencies: '@redocly/openapi-core': 1.25.11(supports-color@9.4.0) ansi-colors: 4.1.3 change-case: 5.4.4 parse-json: 8.1.0 supports-color: 9.4.0 - typescript: 5.6.3 + typescript: 5.8.3 yargs-parser: 21.1.1 transitivePeerDependencies: - encoding @@ -27607,13 +27638,13 @@ snapshots: postcss: 8.5.3 ts-node: 10.9.2(@types/node@22.7.4)(typescript@5.6.3) - postcss-load-config@4.0.1(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)): + postcss-load-config@4.0.1(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)): dependencies: lilconfig: 2.1.0 yaml: 2.4.5 optionalDependencies: postcss: 8.5.3 - ts-node: 10.9.2(@types/node@20.17.24)(typescript@5.6.3) + ts-node: 10.9.2(@types/node@20.17.24)(typescript@5.8.3) postcss-load-config@4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.4.5)): dependencies: @@ -27623,21 +27654,21 @@ snapshots: postcss: 8.4.49 ts-node: 10.9.2(@types/node@20.17.24)(typescript@5.4.5) - postcss-load-config@4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)): + postcss-load-config@4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)): dependencies: lilconfig: 3.1.3 yaml: 2.7.0 optionalDependencies: postcss: 8.4.49 - ts-node: 10.9.2(@types/node@20.17.24)(typescript@5.6.3) + ts-node: 10.9.2(@types/node@20.17.24)(typescript@5.8.3) - postcss-load-config@4.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)): + postcss-load-config@4.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)): dependencies: lilconfig: 3.1.3 yaml: 2.7.0 optionalDependencies: postcss: 8.5.3 - ts-node: 10.9.2(@types/node@20.17.24)(typescript@5.6.3) + ts-node: 10.9.2(@types/node@20.17.24)(typescript@5.8.3) postcss-load-config@4.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@22.7.4)(typescript@5.6.3)): dependencies: @@ -29037,9 +29068,9 @@ snapshots: react: 18.3.1 use-sync-external-store: 1.2.0(react@18.3.1) - swrv@1.0.4(vue@3.3.8(typescript@5.6.3)): + swrv@1.0.4(vue@3.3.8(typescript@5.8.3)): dependencies: - vue: 3.3.8(typescript@5.6.3) + vue: 3.3.8(typescript@5.8.3) symbol-observable@4.0.0: {} @@ -29054,7 +29085,7 @@ snapshots: tabbable@6.2.0: {} - tailwind-config-viewer@2.0.4(tailwindcss@3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))): + tailwind-config-viewer@2.0.4(tailwindcss@3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))): dependencies: '@koa/router': 12.0.1 commander: 6.2.1 @@ -29064,7 +29095,7 @@ snapshots: open: 7.4.2 portfinder: 1.0.32 replace-in-file: 6.3.5 - tailwindcss: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + tailwindcss: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) transitivePeerDependencies: - supports-color @@ -29106,7 +29137,7 @@ snapshots: transitivePeerDependencies: - ts-node - tailwindcss@3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)): + tailwindcss@3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)): dependencies: '@alloc/quick-lru': 5.2.0 arg: 5.0.2 @@ -29125,7 +29156,7 @@ snapshots: postcss: 8.4.49 postcss-import: 15.1.0(postcss@8.4.49) 
postcss-js: 4.0.1(postcss@8.4.49) - postcss-load-config: 4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + postcss-load-config: 4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) postcss-nested: 6.2.0(postcss@8.4.49) postcss-selector-parser: 6.1.2 resolve: 1.22.8 @@ -29357,9 +29388,9 @@ snapshots: dependencies: typescript: 5.5.4 - ts-api-utils@1.3.0(typescript@5.6.3): + ts-api-utils@1.3.0(typescript@5.8.3): dependencies: - typescript: 5.6.3 + typescript: 5.8.3 ts-api-utils@2.0.1(typescript@5.6.3): dependencies: @@ -29433,7 +29464,7 @@ snapshots: v8-compile-cache-lib: 3.0.1 yn: 3.1.1 - ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3): + ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3): dependencies: '@cspotcode/source-map-support': 0.8.1 '@tsconfig/node10': 1.0.11 @@ -29447,7 +29478,7 @@ snapshots: create-require: 1.1.1 diff: 4.0.2 make-error: 1.3.6 - typescript: 5.6.3 + typescript: 5.8.3 v8-compile-cache-lib: 3.0.1 yn: 3.1.1 optional: true @@ -29499,7 +29530,7 @@ snapshots: tsscmp@1.0.6: {} - tsup@7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3): + tsup@7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3): dependencies: bundle-require: 4.2.1(esbuild@0.18.20) cac: 6.7.14 @@ -29509,7 +29540,7 @@ snapshots: execa: 5.1.1 globby: 11.1.0 joycon: 3.1.1 - postcss-load-config: 4.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + postcss-load-config: 4.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) resolve-from: 5.0.0 rollup: 3.29.5 source-map: 0.8.0-beta.0 @@ -29517,12 +29548,12 @@ snapshots: tree-kill: 1.2.2 optionalDependencies: postcss: 8.5.3 - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - supports-color - ts-node - tsup@8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3))(typescript@5.6.3): + tsup@8.0.2(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3): dependencies: bundle-require: 4.0.2(esbuild@0.19.12) cac: 6.7.14 @@ -29532,7 +29563,7 @@ snapshots: execa: 5.1.1 globby: 11.1.0 joycon: 3.1.1 - postcss-load-config: 4.0.1(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) + postcss-load-config: 4.0.1(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3)) resolve-from: 5.0.0 rollup: 4.14.1 source-map: 0.8.0-beta.0 @@ -29540,12 +29571,12 @@ snapshots: tree-kill: 1.2.2 optionalDependencies: postcss: 8.5.3 - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - supports-color - ts-node - tsup@8.2.4(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0): + tsup@8.2.4(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0): dependencies: bundle-require: 5.0.0(esbuild@0.23.0) cac: 6.7.14 @@ -29565,14 +29596,14 @@ snapshots: tree-kill: 1.2.2 optionalDependencies: postcss: 8.5.3 - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - jiti - supports-color - tsx - yaml - tsup@8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0): + tsup@8.3.0(jiti@2.4.0)(postcss@8.5.3)(tsx@4.19.2)(typescript@5.8.3)(yaml@2.7.0): dependencies: bundle-require: 5.0.0(esbuild@0.23.1) cac: 6.7.14 @@ -29592,7 +29623,7 @@ snapshots: tree-kill: 1.2.2 optionalDependencies: postcss: 8.5.3 - typescript: 5.6.3 + typescript: 5.8.3 transitivePeerDependencies: - jiti - supports-color @@ -29712,6 +29743,8 @@ snapshots: typescript@5.6.3: 
{} + typescript@5.8.3: {} + ufo@1.5.4: {} uid@2.0.2: @@ -29840,11 +29873,11 @@ snapshots: unpipe@1.0.0: {} - unplugin-vue-router@0.10.8(rollup@4.34.9)(vue-router@4.5.0(vue@3.5.13(typescript@5.6.3)))(vue@3.5.13(typescript@5.6.3))(webpack-sources@3.2.3): + unplugin-vue-router@0.10.8(rollup@4.34.9)(vue-router@4.5.0(vue@3.5.13(typescript@5.8.3)))(vue@3.5.13(typescript@5.8.3))(webpack-sources@3.2.3): dependencies: '@babel/types': 7.26.0 '@rollup/pluginutils': 5.1.0(rollup@4.34.9) - '@vue-macros/common': 1.15.0(rollup@4.34.9)(vue@3.5.13(typescript@5.6.3)) + '@vue-macros/common': 1.15.0(rollup@4.34.9)(vue@3.5.13(typescript@5.8.3)) ast-walker-scope: 0.6.2 chokidar: 3.6.0 fast-glob: 3.3.2 @@ -29857,7 +29890,7 @@ snapshots: unplugin: 1.15.0(webpack-sources@3.2.3) yaml: 2.5.0 optionalDependencies: - vue-router: 4.5.0(vue@3.5.13(typescript@5.6.3)) + vue-router: 4.5.0(vue@3.5.13(typescript@5.8.3)) transitivePeerDependencies: - rollup - vue @@ -29993,9 +30026,9 @@ snapshots: '@types/istanbul-lib-coverage': 2.0.6 convert-source-map: 2.0.0 - valibot@1.0.0-rc.0(typescript@5.6.3): + valibot@1.0.0-rc.0(typescript@5.8.3): optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 vary@1.1.2: {} @@ -30068,7 +30101,7 @@ snapshots: - tsx - yaml - vite-plugin-checker@0.8.0(eslint@9.21.0(jiti@2.4.0))(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)): + vite-plugin-checker@0.8.0(eslint@9.21.0(jiti@2.4.0))(optionator@0.9.4)(typescript@5.8.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)): dependencies: '@babel/code-frame': 7.26.2 ansi-escapes: 4.3.2 @@ -30088,7 +30121,7 @@ snapshots: optionalDependencies: eslint: 9.21.0(jiti@2.4.0) optionator: 0.9.4 - typescript: 5.6.3 + typescript: 5.8.3 vite-plugin-inspect@0.8.9(@nuxt/kit@3.14.1592(magicast@0.3.5)(rollup@4.34.9))(rollup@4.34.9)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)): dependencies: @@ -30173,10 +30206,10 @@ snapshots: optionalDependencies: vite: 6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0) - vitest@2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@24.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3))(terser@5.31.3): + vitest@2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@24.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.8.3))(terser@5.31.3): dependencies: '@vitest/expect': 2.1.4 - '@vitest/mocker': 2.1.4(msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3))(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) + '@vitest/mocker': 2.1.4(msw@2.6.4(@types/node@20.17.24)(typescript@5.8.3))(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) '@vitest/pretty-format': 2.1.4 '@vitest/runner': 2.1.4 '@vitest/snapshot': 2.1.4 @@ -30210,10 +30243,10 @@ snapshots: - supports-color - terser - vitest@2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@26.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3))(terser@5.31.3): + vitest@2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@26.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.8.3))(terser@5.31.3): dependencies: '@vitest/expect': 2.1.4 - '@vitest/mocker': 2.1.4(msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3))(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) + '@vitest/mocker': 2.1.4(msw@2.6.4(@types/node@20.17.24)(typescript@5.8.3))(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) '@vitest/pretty-format': 2.1.4 '@vitest/runner': 2.1.4 '@vitest/snapshot': 2.1.4 @@ -30247,10 +30280,10 @@ snapshots: - supports-color - terser - 
vitest@2.1.4(@edge-runtime/vm@5.0.0)(@types/node@22.7.4)(jsdom@26.0.0)(msw@2.7.0(@types/node@22.7.4)(typescript@5.6.3))(terser@5.31.3): + vitest@2.1.4(@edge-runtime/vm@5.0.0)(@types/node@22.7.4)(jsdom@26.0.0)(msw@2.7.0(@types/node@22.7.4)(typescript@5.8.3))(terser@5.31.3): dependencies: '@vitest/expect': 2.1.4 - '@vitest/mocker': 2.1.4(msw@2.7.0(@types/node@22.7.4)(typescript@5.6.3))(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3)) + '@vitest/mocker': 2.1.4(msw@2.7.0(@types/node@22.7.4)(typescript@5.8.3))(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3)) '@vitest/pretty-format': 2.1.4 '@vitest/runner': 2.1.4 '@vitest/snapshot': 2.1.4 @@ -30356,30 +30389,30 @@ snapshots: vue-devtools-stub@0.1.0: {} - vue-router@4.5.0(vue@3.5.13(typescript@5.6.3)): + vue-router@4.5.0(vue@3.5.13(typescript@5.8.3)): dependencies: '@vue/devtools-api': 6.6.4 - vue: 3.5.13(typescript@5.6.3) + vue: 3.5.13(typescript@5.8.3) - vue@3.3.8(typescript@5.6.3): + vue@3.3.8(typescript@5.8.3): dependencies: '@vue/compiler-dom': 3.3.8 '@vue/compiler-sfc': 3.3.8 '@vue/runtime-dom': 3.3.8 - '@vue/server-renderer': 3.3.8(vue@3.3.8(typescript@5.6.3)) + '@vue/server-renderer': 3.3.8(vue@3.3.8(typescript@5.8.3)) '@vue/shared': 3.3.8 optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 - vue@3.5.13(typescript@5.6.3): + vue@3.5.13(typescript@5.8.3): dependencies: '@vue/compiler-dom': 3.5.13 '@vue/compiler-sfc': 3.5.13 '@vue/runtime-dom': 3.5.13 - '@vue/server-renderer': 3.5.13(vue@3.5.13(typescript@5.6.3)) + '@vue/server-renderer': 3.5.13(vue@3.5.13(typescript@5.8.3)) '@vue/shared': 3.5.13 optionalDependencies: - typescript: 5.6.3 + typescript: 5.8.3 w3c-xmlserializer@5.0.0: dependencies: diff --git a/tools/analyze-downloads/package.json b/tools/analyze-downloads/package.json index 7070b3b38d7e..45ea32f148a6 100644 --- a/tools/analyze-downloads/package.json +++ b/tools/analyze-downloads/package.json @@ -14,6 +14,6 @@ }, "devDependencies": { "tsx": "4.19.2", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/tools/generate-llms-txt/package.json b/tools/generate-llms-txt/package.json index a30a5d708c72..9e349ed9b866 100644 --- a/tools/generate-llms-txt/package.json +++ b/tools/generate-llms-txt/package.json @@ -12,6 +12,6 @@ }, "devDependencies": { "tsx": "4.19.2", - "typescript": "5.6.3" + "typescript": "5.8.3" } } diff --git a/tools/tsconfig/base.json b/tools/tsconfig/base.json index 582ce0ad3554..7f822833816f 100644 --- a/tools/tsconfig/base.json +++ b/tools/tsconfig/base.json @@ -9,7 +9,7 @@ "forceConsistentCasingInFileNames": true, "inlineSources": false, "isolatedModules": true, - "moduleResolution": "node", + "moduleResolution": "Bundler", "noUnusedLocals": false, "noUnusedParameters": false, "preserveWatchOutput": true, From f2dab6724065140529fe7c8aef6e5cbbcb1d9335 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 17:42:42 +0200 Subject: [PATCH 0041/1307] Version Packages (canary) (#5578) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .changeset/pre.json | 5 +- examples/ai-core/package.json | 46 +-- examples/express/package.json | 4 +- examples/fastify/package.json | 4 +- examples/hono/package.json | 4 +- examples/mcp/package.json | 4 +- examples/nest/package.json | 4 +- examples/next-fastapi/package.json | 6 +- examples/next-google-vertex/package.json | 4 +- examples/next-langchain/package.json | 4 +- .../package.json | 6 +- examples/next-openai-pages/package.json | 6 +- 
.../next-openai-telemetry-sentry/package.json | 6 +- examples/next-openai-telemetry/package.json | 6 +- .../package.json | 6 +- examples/next-openai/package.json | 20 +- examples/node-http-server/package.json | 4 +- examples/nuxt-openai/package.json | 6 +- examples/sveltekit-openai/package.json | 10 +- packages/ai/CHANGELOG.md | 10 + packages/ai/package.json | 8 +- .../ai/tests/e2e/next-server/CHANGELOG.md | 6 + packages/amazon-bedrock/CHANGELOG.md | 9 + packages/amazon-bedrock/package.json | 6 +- packages/anthropic/CHANGELOG.md | 9 + packages/anthropic/package.json | 6 +- packages/azure/CHANGELOG.md | 10 + packages/azure/package.json | 8 +- packages/cerebras/CHANGELOG.md | 10 + packages/cerebras/package.json | 8 +- packages/cohere/CHANGELOG.md | 9 + packages/cohere/package.json | 6 +- packages/deepinfra/CHANGELOG.md | 11 + packages/deepinfra/package.json | 8 +- packages/deepseek/CHANGELOG.md | 10 + packages/deepseek/package.json | 8 +- packages/fal/CHANGELOG.md | 9 + packages/fal/package.json | 6 +- packages/fireworks/CHANGELOG.md | 10 + packages/fireworks/package.json | 8 +- packages/google-vertex/CHANGELOG.md | 11 + packages/google-vertex/package.json | 10 +- packages/google/CHANGELOG.md | 9 + packages/google/package.json | 6 +- packages/groq/CHANGELOG.md | 9 + packages/groq/package.json | 6 +- packages/luma/CHANGELOG.md | 9 + packages/luma/package.json | 6 +- packages/mistral/CHANGELOG.md | 9 + packages/mistral/package.json | 6 +- packages/openai-compatible/CHANGELOG.md | 9 + packages/openai-compatible/package.json | 6 +- packages/openai/CHANGELOG.md | 9 + packages/openai/package.json | 6 +- packages/perplexity/CHANGELOG.md | 9 + packages/perplexity/package.json | 6 +- packages/provider-utils/CHANGELOG.md | 8 + packages/provider-utils/package.json | 4 +- packages/provider/CHANGELOG.md | 7 + packages/provider/package.json | 2 +- packages/react/CHANGELOG.md | 7 + packages/react/package.json | 6 +- packages/replicate/CHANGELOG.md | 9 + packages/replicate/package.json | 6 +- packages/svelte/CHANGELOG.md | 7 + packages/svelte/package.json | 6 +- packages/togetherai/CHANGELOG.md | 10 + packages/togetherai/package.json | 8 +- packages/ui-utils/CHANGELOG.md | 9 + packages/ui-utils/package.json | 6 +- packages/valibot/CHANGELOG.md | 6 + packages/valibot/package.json | 4 +- packages/vue/CHANGELOG.md | 7 + packages/vue/package.json | 6 +- packages/xai/CHANGELOG.md | 10 + packages/xai/package.json | 8 +- pnpm-lock.yaml | 274 +++++++++--------- 77 files changed, 563 insertions(+), 303 deletions(-) diff --git a/.changeset/pre.json b/.changeset/pre.json index ef3762ec8d10..9ab0a367ea93 100644 --- a/.changeset/pre.json +++ b/.changeset/pre.json @@ -58,6 +58,7 @@ "changesets": [ "angry-poems-learn", "beige-ligers-kneel", + "beige-penguins-greet", "clean-numbers-cover", "cuddly-icons-kick", "eleven-lobsters-rescue", @@ -69,7 +70,9 @@ "silent-nails-taste", "smooth-mirrors-kneel", "tall-rice-flash", + "thick-chairs-remain", "thin-numbers-shave", - "twelve-kids-travel" + "twelve-kids-travel", + "wild-candles-judge" ] } diff --git a/examples/ai-core/package.json b/examples/ai-core/package.json index 4c09a3f88a7f..f4c59a38ae8d 100644 --- a/examples/ai-core/package.json +++ b/examples/ai-core/package.json @@ -3,33 +3,33 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/amazon-bedrock": "3.0.0-canary.1", - "@ai-sdk/anthropic": "2.0.0-canary.1", - "@ai-sdk/azure": "2.0.0-canary.2", - "@ai-sdk/cerebras": "1.0.0-canary.1", - "@ai-sdk/cohere": "2.0.0-canary.1", - "@ai-sdk/deepinfra": 
"1.0.0-canary.1", - "@ai-sdk/deepseek": "1.0.0-canary.1", - "@ai-sdk/fal": "1.0.0-canary.1", - "@ai-sdk/fireworks": "1.0.0-canary.1", - "@ai-sdk/google": "2.0.0-canary.2", - "@ai-sdk/google-vertex": "3.0.0-canary.2", - "@ai-sdk/groq": "2.0.0-canary.2", - "@ai-sdk/luma": "1.0.0-canary.1", - "@ai-sdk/mistral": "2.0.0-canary.1", - "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/openai-compatible": "1.0.0-canary.1", - "@ai-sdk/perplexity": "2.0.0-canary.1", - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/replicate": "1.0.0-canary.1", - "@ai-sdk/togetherai": "1.0.0-canary.1", - "@ai-sdk/xai": "2.0.0-canary.1", - "@ai-sdk/valibot": "1.0.0-canary.2", + "@ai-sdk/amazon-bedrock": "3.0.0-canary.2", + "@ai-sdk/anthropic": "2.0.0-canary.2", + "@ai-sdk/azure": "2.0.0-canary.3", + "@ai-sdk/cerebras": "1.0.0-canary.2", + "@ai-sdk/cohere": "2.0.0-canary.2", + "@ai-sdk/deepinfra": "1.0.0-canary.2", + "@ai-sdk/deepseek": "1.0.0-canary.2", + "@ai-sdk/fal": "1.0.0-canary.2", + "@ai-sdk/fireworks": "1.0.0-canary.2", + "@ai-sdk/google": "2.0.0-canary.3", + "@ai-sdk/google-vertex": "3.0.0-canary.3", + "@ai-sdk/groq": "2.0.0-canary.3", + "@ai-sdk/luma": "1.0.0-canary.2", + "@ai-sdk/mistral": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/openai-compatible": "1.0.0-canary.2", + "@ai-sdk/perplexity": "2.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/replicate": "1.0.0-canary.2", + "@ai-sdk/togetherai": "1.0.0-canary.2", + "@ai-sdk/xai": "2.0.0-canary.2", + "@ai-sdk/valibot": "1.0.0-canary.3", "@google/generative-ai": "0.21.0", "@opentelemetry/auto-instrumentations-node": "0.54.0", "@opentelemetry/sdk-node": "0.54.2", "@opentelemetry/sdk-trace-node": "1.28.0", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "dotenv": "16.4.5", "image-type": "^5.2.0", "mathjs": "14.0.0", diff --git a/examples/express/package.json b/examples/express/package.json index b1e3f8be8848..48fef040055e 100644 --- a/examples/express/package.json +++ b/examples/express/package.json @@ -7,8 +7,8 @@ "type-check": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "ai": "5.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.2", + "ai": "5.0.0-canary.3", "dotenv": "16.4.5", "express": "5.0.1" }, diff --git a/examples/fastify/package.json b/examples/fastify/package.json index ea633f91e348..8f06ce8bcb8f 100644 --- a/examples/fastify/package.json +++ b/examples/fastify/package.json @@ -3,8 +3,8 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "ai": "5.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.2", + "ai": "5.0.0-canary.3", "dotenv": "16.4.5", "fastify": "5.1.0" }, diff --git a/examples/hono/package.json b/examples/hono/package.json index ceb7e5ace834..a1356a16e83d 100644 --- a/examples/hono/package.json +++ b/examples/hono/package.json @@ -3,9 +3,9 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.2", "@hono/node-server": "1.13.7", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "dotenv": "16.4.5", "hono": "4.6.9" }, diff --git a/examples/mcp/package.json b/examples/mcp/package.json index 146c77a23819..941219e543ed 100644 --- a/examples/mcp/package.json +++ b/examples/mcp/package.json @@ -12,9 +12,9 @@ "type-check": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.2", "@modelcontextprotocol/sdk": "^1.7.0", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "dotenv": "16.4.5", "express": 
"5.0.1", "zod": "3.23.8" diff --git a/examples/nest/package.json b/examples/nest/package.json index f044317f94f9..858178b94529 100644 --- a/examples/nest/package.json +++ b/examples/nest/package.json @@ -15,11 +15,11 @@ "lint": "eslint \"{src,apps,libs,test}/**/*.ts\" --fix" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.2", "@nestjs/common": "^10.4.15", "@nestjs/core": "^10.0.0", "@nestjs/platform-express": "^10.4.9", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "reflect-metadata": "^0.2.0", "rxjs": "^7.8.1" }, diff --git a/examples/next-fastapi/package.json b/examples/next-fastapi/package.json index 0766f3f6548a..fa74f66ab028 100644 --- a/examples/next-fastapi/package.json +++ b/examples/next-fastapi/package.json @@ -11,9 +11,9 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/ui-utils": "2.0.0-canary.1", - "@ai-sdk/react": "2.0.0-canary.1", - "ai": "5.0.0-canary.2", + "@ai-sdk/ui-utils": "2.0.0-canary.2", + "@ai-sdk/react": "2.0.0-canary.2", + "ai": "5.0.0-canary.3", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-google-vertex/package.json b/examples/next-google-vertex/package.json index 03f8ee673903..945c0c039c60 100644 --- a/examples/next-google-vertex/package.json +++ b/examples/next-google-vertex/package.json @@ -9,8 +9,8 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/google-vertex": "3.0.0-canary.2", - "ai": "5.0.0-canary.2", + "@ai-sdk/google-vertex": "3.0.0-canary.3", + "ai": "5.0.0-canary.3", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-langchain/package.json b/examples/next-langchain/package.json index f1a3eb05706c..9bb911adb485 100644 --- a/examples/next-langchain/package.json +++ b/examples/next-langchain/package.json @@ -9,10 +9,10 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/react": "2.0.0-canary.1", + "@ai-sdk/react": "2.0.0-canary.2", "@langchain/openai": "0.0.28", "@langchain/core": "0.1.63", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "langchain": "0.1.36", "next": "latest", "react": "^18", diff --git a/examples/next-openai-kasada-bot-protection/package.json b/examples/next-openai-kasada-bot-protection/package.json index da7dd7974857..35cfa435e1ff 100644 --- a/examples/next-openai-kasada-bot-protection/package.json +++ b/examples/next-openai-kasada-bot-protection/package.json @@ -9,10 +9,10 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/react": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/react": "2.0.0-canary.2", "@vercel/functions": "latest", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai-pages/package.json b/examples/next-openai-pages/package.json index af940ba35f1d..deb0b001b3eb 100644 --- a/examples/next-openai-pages/package.json +++ b/examples/next-openai-pages/package.json @@ -9,9 +9,9 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/react": "2.0.0-canary.1", - "ai": "5.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/react": "2.0.0-canary.2", + "ai": "5.0.0-canary.3", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry-sentry/package.json b/examples/next-openai-telemetry-sentry/package.json index 61937b94ebd1..e6f92690c929 100644 --- a/examples/next-openai-telemetry-sentry/package.json +++ b/examples/next-openai-telemetry-sentry/package.json @@ -9,15 
+9,15 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/react": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/react": "2.0.0-canary.2", "@opentelemetry/api-logs": "0.55.0", "@opentelemetry/instrumentation": "0.52.1", "@opentelemetry/sdk-logs": "0.55.0", "@sentry/nextjs": "^8.42.0", "@sentry/opentelemetry": "8.22.0", "@vercel/otel": "1.10.0", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry/package.json b/examples/next-openai-telemetry/package.json index 96fee85584f5..5a90d8d61553 100644 --- a/examples/next-openai-telemetry/package.json +++ b/examples/next-openai-telemetry/package.json @@ -9,13 +9,13 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/react": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/react": "2.0.0-canary.2", "@opentelemetry/api-logs": "0.55.0", "@opentelemetry/sdk-logs": "0.55.0", "@opentelemetry/instrumentation": "0.52.1", "@vercel/otel": "1.10.0", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-upstash-rate-limits/package.json b/examples/next-openai-upstash-rate-limits/package.json index 8e9799f0f750..4fbe2a3583f1 100644 --- a/examples/next-openai-upstash-rate-limits/package.json +++ b/examples/next-openai-upstash-rate-limits/package.json @@ -9,11 +9,11 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/react": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/react": "2.0.0-canary.2", "@upstash/ratelimit": "^0.4.3", "@vercel/kv": "^0.2.2", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json index 6268c42ada32..f2384ceb1142 100644 --- a/examples/next-openai/package.json +++ b/examples/next-openai/package.json @@ -9,17 +9,17 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/anthropic": "2.0.0-canary.1", - "@ai-sdk/deepseek": "1.0.0-canary.1", - "@ai-sdk/fireworks": "1.0.0-canary.1", - "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/google": "2.0.0-canary.2", - "@ai-sdk/google-vertex": "3.0.0-canary.2", - "@ai-sdk/perplexity": "2.0.0-canary.1", - "@ai-sdk/ui-utils": "2.0.0-canary.1", - "@ai-sdk/react": "2.0.0-canary.1", + "@ai-sdk/anthropic": "2.0.0-canary.2", + "@ai-sdk/deepseek": "1.0.0-canary.2", + "@ai-sdk/fireworks": "1.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/google": "2.0.0-canary.3", + "@ai-sdk/google-vertex": "3.0.0-canary.3", + "@ai-sdk/perplexity": "2.0.0-canary.2", + "@ai-sdk/ui-utils": "2.0.0-canary.2", + "@ai-sdk/react": "2.0.0-canary.2", "@vercel/blob": "^0.26.0", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/node-http-server/package.json b/examples/node-http-server/package.json index 9858efcbdd5f..dc1059c2b35b 100644 --- a/examples/node-http-server/package.json +++ b/examples/node-http-server/package.json @@ -3,8 +3,8 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "ai": "5.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.2", + "ai": "5.0.0-canary.3", "dotenv": "16.4.5", "zod": "3.23.8", "zod-to-json-schema": "3.23.5" diff --git a/examples/nuxt-openai/package.json 
b/examples/nuxt-openai/package.json index a6f3053d38d2..ebfca76cd68c 100644 --- a/examples/nuxt-openai/package.json +++ b/examples/nuxt-openai/package.json @@ -9,9 +9,9 @@ "postinstall": "nuxt prepare" }, "dependencies": { - "@ai-sdk/vue": "2.0.0-canary.1", - "@ai-sdk/openai": "2.0.0-canary.1", - "ai": "5.0.0-canary.2", + "@ai-sdk/vue": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.2", + "ai": "5.0.0-canary.3", "zod": "3.23.8" }, "devDependencies": { diff --git a/examples/sveltekit-openai/package.json b/examples/sveltekit-openai/package.json index 29a1b2f76794..ea52dc39a68a 100644 --- a/examples/sveltekit-openai/package.json +++ b/examples/sveltekit-openai/package.json @@ -16,16 +16,16 @@ }, "type": "module", "devDependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.1", - "@ai-sdk/svelte": "3.0.0-canary.1", - "@ai-sdk/ui-utils": "2.0.0-canary.1", + "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.2", + "@ai-sdk/svelte": "3.0.0-canary.2", + "@ai-sdk/ui-utils": "2.0.0-canary.2", "@eslint/compat": "^1.2.5", "@eslint/js": "^9.18.0", "@sveltejs/adapter-vercel": "^5.5.2", "@sveltejs/kit": "^2.16.0", "@sveltejs/vite-plugin-svelte": "^5.0.0", - "ai": "5.0.0-canary.2", + "ai": "5.0.0-canary.3", "autoprefixer": "^10.4.20", "bits-ui": "^1.3.9", "clsx": "^2.1.1", diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index 970db033de09..5a2b43e3eb6b 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -1,5 +1,15 @@ # ai +## 5.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + - @ai-sdk/ui-utils@2.0.0-canary.2 + ## 5.0.0-canary.2 ### Patch Changes diff --git a/packages/ai/package.json b/packages/ai/package.json index af9f4254f660..924f94cfe142 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -1,6 +1,6 @@ { "name": "ai", - "version": "5.0.0-canary.2", + "version": "5.0.0-canary.3", "description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript", "license": "Apache-2.0", "sideEffects": false, @@ -59,9 +59,9 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1", - "@ai-sdk/ui-utils": "2.0.0-canary.1", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2", + "@ai-sdk/ui-utils": "2.0.0-canary.2", "@opentelemetry/api": "1.9.0", "jsondiffpatch": "0.6.0" }, diff --git a/packages/ai/tests/e2e/next-server/CHANGELOG.md b/packages/ai/tests/e2e/next-server/CHANGELOG.md index 5840ac753dd7..66a4fc665081 100644 --- a/packages/ai/tests/e2e/next-server/CHANGELOG.md +++ b/packages/ai/tests/e2e/next-server/CHANGELOG.md @@ -4,6 +4,12 @@ ### Patch Changes +- ai@5.0.0-canary.3 + +## 0.0.1-canary.0 + +### Patch Changes + - Updated dependencies [bd398e4] - ai@5.0.0-canary.2 diff --git a/packages/amazon-bedrock/CHANGELOG.md b/packages/amazon-bedrock/CHANGELOG.md index 97f4f8cbd3c4..0c3189462f18 100644 --- a/packages/amazon-bedrock/CHANGELOG.md +++ b/packages/amazon-bedrock/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/amazon-bedrock +## 3.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 3.0.0-canary.1 ### Patch Changes diff --git a/packages/amazon-bedrock/package.json b/packages/amazon-bedrock/package.json index 
f46ccc9a1e56..a9b496e4fb56 100644 --- a/packages/amazon-bedrock/package.json +++ b/packages/amazon-bedrock/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/amazon-bedrock", - "version": "3.0.0-canary.1", + "version": "3.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2", "@smithy/eventstream-codec": "^4.0.1", "@smithy/util-utf8": "^4.0.0", "aws4fetch": "^1.0.20" diff --git a/packages/anthropic/CHANGELOG.md b/packages/anthropic/CHANGELOG.md index d8291776725b..2b1ed92d9c57 100644 --- a/packages/anthropic/CHANGELOG.md +++ b/packages/anthropic/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/anthropic +## 2.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/anthropic/package.json b/packages/anthropic/package.json index 123ebfd34e6c..199f541d4b3b 100644 --- a/packages/anthropic/package.json +++ b/packages/anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/anthropic", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,8 +37,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/azure/CHANGELOG.md b/packages/azure/CHANGELOG.md index 961acf277e17..cddca167afe6 100644 --- a/packages/azure/CHANGELOG.md +++ b/packages/azure/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/azure +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/openai@2.0.0-canary.2 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/azure/package.json b/packages/azure/package.json index 43ff8a277688..bcaaa7f5e6c1 100644 --- a/packages/azure/package.json +++ b/packages/azure/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/azure", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,9 +31,9 @@ } }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.1", - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/cerebras/CHANGELOG.md b/packages/cerebras/CHANGELOG.md index 9ad0d71029f7..9136348e4c5a 100644 --- a/packages/cerebras/CHANGELOG.md +++ b/packages/cerebras/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/cerebras +## 1.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.2 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/cerebras/package.json b/packages/cerebras/package.json index 4c60da85ad06..23d07aebae92 100644 --- a/packages/cerebras/package.json +++ 
b/packages/cerebras/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/cerebras", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.1", - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/openai-compatible": "1.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/cohere/CHANGELOG.md b/packages/cohere/CHANGELOG.md index 73c315a548fd..b95c3dcc4396 100644 --- a/packages/cohere/CHANGELOG.md +++ b/packages/cohere/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/cohere +## 2.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/cohere/package.json b/packages/cohere/package.json index b77507e46430..ddebede1e18c 100644 --- a/packages/cohere/package.json +++ b/packages/cohere/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/cohere", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/deepinfra/CHANGELOG.md b/packages/deepinfra/CHANGELOG.md index c6eb5426ebf2..27ad85010af8 100644 --- a/packages/deepinfra/CHANGELOG.md +++ b/packages/deepinfra/CHANGELOG.md @@ -1,5 +1,16 @@ # @ai-sdk/deepinfra +## 1.0.0-canary.2 + +### Patch Changes + +- 7677477: feat (providers/deepinfra): add llama 4 models +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.2 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/deepinfra/package.json b/packages/deepinfra/package.json index 40f02f504bdf..82c5562e5cd3 100644 --- a/packages/deepinfra/package.json +++ b/packages/deepinfra/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/deepinfra", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.1", - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/openai-compatible": "1.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/deepseek/CHANGELOG.md b/packages/deepseek/CHANGELOG.md index c288796d44a3..6ec3ad950894 100644 --- a/packages/deepseek/CHANGELOG.md +++ b/packages/deepseek/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/deepseek +## 1.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.2 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/deepseek/package.json b/packages/deepseek/package.json index 3251e276e7ea..284c9c8573b8 
100644 --- a/packages/deepseek/package.json +++ b/packages/deepseek/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/deepseek", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.1", - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/openai-compatible": "1.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/fal/CHANGELOG.md b/packages/fal/CHANGELOG.md index e20646e82b40..5631b24c2b15 100644 --- a/packages/fal/CHANGELOG.md +++ b/packages/fal/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/fal +## 1.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/fal/package.json b/packages/fal/package.json index 0281ddd6d78c..69f8f46b0fe2 100644 --- a/packages/fal/package.json +++ b/packages/fal/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/fal", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/fireworks/CHANGELOG.md b/packages/fireworks/CHANGELOG.md index 85cbb7aff9f4..a8528ebe09db 100644 --- a/packages/fireworks/CHANGELOG.md +++ b/packages/fireworks/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/fireworks +## 1.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.2 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/fireworks/package.json b/packages/fireworks/package.json index 3e41302b03e3..4ca5c76e47b8 100644 --- a/packages/fireworks/package.json +++ b/packages/fireworks/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/fireworks", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.1", - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/openai-compatible": "1.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/google-vertex/CHANGELOG.md b/packages/google-vertex/CHANGELOG.md index 8110b81b1afc..01a571f6834f 100644 --- a/packages/google-vertex/CHANGELOG.md +++ b/packages/google-vertex/CHANGELOG.md @@ -1,5 +1,16 @@ # @ai-sdk/google-vertex +## 3.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/anthropic@2.0.0-canary.2 + - @ai-sdk/google@2.0.0-canary.3 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 3.0.0-canary.2 ### Patch Changes diff --git a/packages/google-vertex/package.json b/packages/google-vertex/package.json index 
fe626f14cf43..268c001d7bbf 100644 --- a/packages/google-vertex/package.json +++ b/packages/google-vertex/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google-vertex", - "version": "3.0.0-canary.2", + "version": "3.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -49,10 +49,10 @@ } }, "dependencies": { - "@ai-sdk/anthropic": "2.0.0-canary.1", - "@ai-sdk/google": "2.0.0-canary.2", - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1", + "@ai-sdk/anthropic": "2.0.0-canary.2", + "@ai-sdk/google": "2.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2", "google-auth-library": "^9.15.0" }, "devDependencies": { diff --git a/packages/google/CHANGELOG.md b/packages/google/CHANGELOG.md index d59bc3a5f162..902bbf056b3b 100644 --- a/packages/google/CHANGELOG.md +++ b/packages/google/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/google +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/google/package.json b/packages/google/package.json index eedf4290c7f0..443f0eb97e5c 100644 --- a/packages/google/package.json +++ b/packages/google/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,8 +38,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/groq/CHANGELOG.md b/packages/groq/CHANGELOG.md index 72d9de98e85f..efc8dc247844 100644 --- a/packages/groq/CHANGELOG.md +++ b/packages/groq/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/groq +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/groq/package.json b/packages/groq/package.json index 045d1f1ecdd2..d8dc0148190d 100644 --- a/packages/groq/package.json +++ b/packages/groq/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/groq", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/luma/CHANGELOG.md b/packages/luma/CHANGELOG.md index 542fb0bad325..70a3f5c2df27 100644 --- a/packages/luma/CHANGELOG.md +++ b/packages/luma/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/luma +## 1.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/luma/package.json b/packages/luma/package.json index 6568023edb2f..7392b8209004 100644 --- a/packages/luma/package.json +++ b/packages/luma/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/luma", - "version": "1.0.0-canary.1", + "version": 
"1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/mistral/CHANGELOG.md b/packages/mistral/CHANGELOG.md index 345d6733e7c3..023782c0a67c 100644 --- a/packages/mistral/CHANGELOG.md +++ b/packages/mistral/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/mistral +## 2.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/mistral/package.json b/packages/mistral/package.json index 122df0696d0a..85a2726d6cf2 100644 --- a/packages/mistral/package.json +++ b/packages/mistral/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/mistral", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/openai-compatible/CHANGELOG.md b/packages/openai-compatible/CHANGELOG.md index bbb3eed81019..b911e6fc791b 100644 --- a/packages/openai-compatible/CHANGELOG.md +++ b/packages/openai-compatible/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/openai-compatible +## 1.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/openai-compatible/package.json b/packages/openai-compatible/package.json index f9b28eaf9ac2..8d412f348986 100644 --- a/packages/openai-compatible/package.json +++ b/packages/openai-compatible/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai-compatible", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,8 +38,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/openai/CHANGELOG.md b/packages/openai/CHANGELOG.md index afd5f37f537d..5954dacabeb0 100644 --- a/packages/openai/CHANGELOG.md +++ b/packages/openai/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/openai +## 2.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/openai/package.json b/packages/openai/package.json index 40a805e0a592..1b08d27f63a0 100644 --- a/packages/openai/package.json +++ b/packages/openai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,8 +38,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + 
"@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/perplexity/CHANGELOG.md b/packages/perplexity/CHANGELOG.md index 152560f423bd..d21a577dc0e6 100644 --- a/packages/perplexity/CHANGELOG.md +++ b/packages/perplexity/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/perplexity +## 2.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/perplexity/package.json b/packages/perplexity/package.json index 294a4cf03479..d19e58753996 100644 --- a/packages/perplexity/package.json +++ b/packages/perplexity/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/perplexity", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/provider-utils/CHANGELOG.md b/packages/provider-utils/CHANGELOG.md index 9a1b040baccf..4759f5e441ab 100644 --- a/packages/provider-utils/CHANGELOG.md +++ b/packages/provider-utils/CHANGELOG.md @@ -1,5 +1,13 @@ # @ai-sdk/provider-utils +## 3.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + ## 3.0.0-canary.1 ### Patch Changes diff --git a/packages/provider-utils/package.json b/packages/provider-utils/package.json index f50ffde89dbf..53fbd2dcb2c8 100644 --- a/packages/provider-utils/package.json +++ b/packages/provider-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/provider-utils", - "version": "3.0.0-canary.1", + "version": "3.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,7 +37,7 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", + "@ai-sdk/provider": "2.0.0-canary.1", "secure-json-parse": "^2.7.0" }, "devDependencies": { diff --git a/packages/provider/CHANGELOG.md b/packages/provider/CHANGELOG.md index d5305a386dd8..1644ce63e9ab 100644 --- a/packages/provider/CHANGELOG.md +++ b/packages/provider/CHANGELOG.md @@ -1,5 +1,12 @@ # @ai-sdk/provider +## 2.0.0-canary.1 + +### Major Changes + +- c57e248: chore (provider): remove mode +- 33f4a6a: chore (provider): rename providerMetadata inputs to providerOptions + ## 2.0.0-canary.0 ### Major Changes diff --git a/packages/provider/package.json b/packages/provider/package.json index 32f2cad1da87..e305411faca3 100644 --- a/packages/provider/package.json +++ b/packages/provider/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/provider", - "version": "2.0.0-canary.0", + "version": "2.0.0-canary.1", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", diff --git a/packages/react/CHANGELOG.md b/packages/react/CHANGELOG.md index 5231598e92ea..93beba822479 100644 --- a/packages/react/CHANGELOG.md +++ b/packages/react/CHANGELOG.md @@ -1,5 +1,12 @@ # @ai-sdk/react +## 2.0.0-canary.2 + +### Patch Changes + +- @ai-sdk/provider-utils@3.0.0-canary.2 +- @ai-sdk/ui-utils@2.0.0-canary.2 + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/react/package.json b/packages/react/package.json index 1f7d2a86865c..77e390f77e7d 100644 --- 
a/packages/react/package.json +++ b/packages/react/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/react", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "3.0.0-canary.1", - "@ai-sdk/ui-utils": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2", + "@ai-sdk/ui-utils": "2.0.0-canary.2", "swr": "^2.2.5", "throttleit": "2.1.0" }, diff --git a/packages/replicate/CHANGELOG.md b/packages/replicate/CHANGELOG.md index 460450d6c028..63661c7dbf22 100644 --- a/packages/replicate/CHANGELOG.md +++ b/packages/replicate/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/replicate +## 1.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/replicate/package.json b/packages/replicate/package.json index fade66cc8ffc..47af0b8ea9a5 100644 --- a/packages/replicate/package.json +++ b/packages/replicate/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/replicate", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/svelte/CHANGELOG.md b/packages/svelte/CHANGELOG.md index 615d33c3f250..6577d86f0cbf 100644 --- a/packages/svelte/CHANGELOG.md +++ b/packages/svelte/CHANGELOG.md @@ -1,5 +1,12 @@ # @ai-sdk/svelte +## 3.0.0-canary.2 + +### Patch Changes + +- @ai-sdk/provider-utils@3.0.0-canary.2 +- @ai-sdk/ui-utils@2.0.0-canary.2 + ## 3.0.0-canary.1 ### Patch Changes diff --git a/packages/svelte/package.json b/packages/svelte/package.json index 10b2a0d07db5..262cd6666678 100644 --- a/packages/svelte/package.json +++ b/packages/svelte/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/svelte", - "version": "3.0.0-canary.1", + "version": "3.0.0-canary.2", "license": "Apache-2.0", "scripts": { "build": "pnpm prepack", @@ -51,8 +51,8 @@ } }, "dependencies": { - "@ai-sdk/provider-utils": "3.0.0-canary.1", - "@ai-sdk/ui-utils": "2.0.0-canary.1" + "@ai-sdk/provider-utils": "3.0.0-canary.2", + "@ai-sdk/ui-utils": "2.0.0-canary.2" }, "devDependencies": { "@eslint/compat": "^1.2.5", diff --git a/packages/togetherai/CHANGELOG.md b/packages/togetherai/CHANGELOG.md index c163d5bce55e..741563541183 100644 --- a/packages/togetherai/CHANGELOG.md +++ b/packages/togetherai/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/togetherai +## 1.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.2 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 1.0.0-canary.1 ### Patch Changes diff --git a/packages/togetherai/package.json b/packages/togetherai/package.json index 5215bcce1886..ff3b81dc508b 100644 --- a/packages/togetherai/package.json +++ b/packages/togetherai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/togetherai", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - 
"@ai-sdk/openai-compatible": "1.0.0-canary.1", - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/openai-compatible": "1.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/ui-utils/CHANGELOG.md b/packages/ui-utils/CHANGELOG.md index c3b6a5de862b..050c9c4da5e5 100644 --- a/packages/ui-utils/CHANGELOG.md +++ b/packages/ui-utils/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/ui-utils +## 2.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - @ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/ui-utils/package.json b/packages/ui-utils/package.json index c32450d23eb5..9f3aa26e484e 100644 --- a/packages/ui-utils/package.json +++ b/packages/ui-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/ui-utils", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2", "zod-to-json-schema": "^3.24.1" }, "devDependencies": { diff --git a/packages/valibot/CHANGELOG.md b/packages/valibot/CHANGELOG.md index fa868e583556..ebf897a183e1 100644 --- a/packages/valibot/CHANGELOG.md +++ b/packages/valibot/CHANGELOG.md @@ -1,5 +1,11 @@ # @ai-sdk/valibot +## 1.0.0-canary.3 + +### Patch Changes + +- ai@5.0.0-canary.3 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/valibot/package.json b/packages/valibot/package.json index 8e4ed037e045..3ab7957a42b3 100644 --- a/packages/valibot/package.json +++ b/packages/valibot/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/valibot", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -27,7 +27,7 @@ } }, "dependencies": { - "ai": "5.0.0-canary.2" + "ai": "5.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/vue/CHANGELOG.md b/packages/vue/CHANGELOG.md index fa8bad444e0b..9b7adeaa39d9 100644 --- a/packages/vue/CHANGELOG.md +++ b/packages/vue/CHANGELOG.md @@ -1,5 +1,12 @@ # @ai-sdk/vue +## 2.0.0-canary.2 + +### Patch Changes + +- @ai-sdk/provider-utils@3.0.0-canary.2 +- @ai-sdk/ui-utils@2.0.0-canary.2 + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/vue/package.json b/packages/vue/package.json index e5c4f7efc7a3..44c9a57331e2 100644 --- a/packages/vue/package.json +++ b/packages/vue/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/vue", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "3.0.0-canary.1", - "@ai-sdk/ui-utils": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2", + "@ai-sdk/ui-utils": "2.0.0-canary.2", "swrv": "^1.0.4" }, "devDependencies": { diff --git a/packages/xai/CHANGELOG.md b/packages/xai/CHANGELOG.md index f2a9fec66b76..fca755a5d9cb 100644 --- a/packages/xai/CHANGELOG.md +++ b/packages/xai/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/xai +## 2.0.0-canary.2 + +### Patch Changes + +- Updated dependencies [c57e248] +- Updated dependencies [33f4a6a] + - 
@ai-sdk/provider@2.0.0-canary.1 + - @ai-sdk/openai-compatible@1.0.0-canary.2 + - @ai-sdk/provider-utils@3.0.0-canary.2 + ## 2.0.0-canary.1 ### Patch Changes diff --git a/packages/xai/package.json b/packages/xai/package.json index 3ac4e3f03d39..54968246730b 100644 --- a/packages/xai/package.json +++ b/packages/xai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/xai", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.1", - "@ai-sdk/provider": "2.0.0-canary.0", - "@ai-sdk/provider-utils": "3.0.0-canary.1" + "@ai-sdk/openai-compatible": "1.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9cdde8c2e92c..ac0d96d5e0ae 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -60,70 +60,70 @@ importers: examples/ai-core: dependencies: '@ai-sdk/amazon-bedrock': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../../packages/amazon-bedrock '@ai-sdk/anthropic': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/anthropic '@ai-sdk/azure': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/azure '@ai-sdk/cerebras': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/cerebras '@ai-sdk/cohere': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/cohere '@ai-sdk/deepinfra': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/deepinfra '@ai-sdk/deepseek': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/deepseek '@ai-sdk/fal': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/fal '@ai-sdk/fireworks': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../../packages/google-vertex '@ai-sdk/groq': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/groq '@ai-sdk/luma': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/luma '@ai-sdk/mistral': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/mistral '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/openai-compatible '@ai-sdk/perplexity': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/perplexity '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../../packages/provider '@ai-sdk/replicate': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/replicate '@ai-sdk/togetherai': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/togetherai '@ai-sdk/valibot': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/valibot '@ai-sdk/xai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: 
link:../../packages/xai '@google/generative-ai': specifier: 0.21.0 @@ -138,7 +138,7 @@ importers: specifier: 1.28.0 version: 1.28.0(@opentelemetry/api@1.9.0) ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -178,10 +178,10 @@ importers: examples/express: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -206,10 +206,10 @@ importers: examples/fastify: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -231,13 +231,13 @@ importers: examples/hono: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@hono/node-server': specifier: 1.13.7 version: 1.13.7(hono@4.6.9) ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -259,13 +259,13 @@ importers: examples/mcp: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@modelcontextprotocol/sdk': specifier: ^1.7.0 version: 1.7.0 ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -293,7 +293,7 @@ importers: examples/nest: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@nestjs/common': specifier: ^10.4.15 @@ -305,7 +305,7 @@ importers: specifier: ^10.4.9 version: 10.4.9(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2) ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai reflect-metadata: specifier: ^0.2.0 @@ -381,13 +381,13 @@ importers: examples/next-fastapi: dependencies: '@ai-sdk/react': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/react '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/ui-utils ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -436,10 +436,10 @@ importers: examples/next-google-vertex: dependencies: '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../../packages/google-vertex ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -479,7 +479,7 @@ importers: examples/next-langchain: dependencies: '@ai-sdk/react': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/react '@langchain/core': specifier: 0.1.63 @@ -488,7 +488,7 @@ importers: specifier: 0.0.28 version: 0.0.28 ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai langchain: specifier: 0.1.36 @@ -534,37 +534,37 @@ importers: examples/next-openai: dependencies: '@ai-sdk/anthropic': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/anthropic '@ai-sdk/deepseek': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/deepseek '@ai-sdk/fireworks': - specifier: 1.0.0-canary.1 + specifier: 
1.0.0-canary.2 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../../packages/google-vertex '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@ai-sdk/perplexity': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/perplexity '@ai-sdk/react': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/react '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/ui-utils '@vercel/blob': specifier: ^0.26.0 version: 0.26.0 ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai next: specifier: latest @@ -616,16 +616,16 @@ importers: examples/next-openai-kasada-bot-protection: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/react '@vercel/functions': specifier: latest version: 2.0.0(@aws-sdk/credential-provider-web-identity@3.662.0(@aws-sdk/client-sts@3.662.0)) ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai next: specifier: latest @@ -671,13 +671,13 @@ importers: examples/next-openai-pages: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/react ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai next: specifier: latest @@ -726,10 +726,10 @@ importers: examples/next-openai-telemetry: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/react '@opentelemetry/api-logs': specifier: 0.55.0 @@ -744,7 +744,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.29.0(@opentelemetry/api@1.9.0)) ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai next: specifier: latest @@ -793,10 +793,10 @@ importers: examples/next-openai-telemetry-sentry: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/react '@opentelemetry/api-logs': specifier: 0.55.0 @@ -817,7 +817,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.28.0(@opentelemetry/api@1.9.0)) ai: - 
specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai next: specifier: latest @@ -866,10 +866,10 @@ importers: examples/next-openai-upstash-rate-limits: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/react '@upstash/ratelimit': specifier: ^0.4.3 @@ -878,7 +878,7 @@ importers: specifier: ^0.2.2 version: 0.2.4 ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai next: specifier: latest @@ -924,10 +924,10 @@ importers: examples/node-http-server: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -952,13 +952,13 @@ importers: examples/nuxt-openai: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@ai-sdk/vue': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/vue ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai zod: specifier: 3.23.8 @@ -1007,16 +1007,16 @@ importers: examples/sveltekit-openai: devDependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/openai '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../../packages/provider-utils '@ai-sdk/svelte': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../../packages/svelte '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/ui-utils '@eslint/compat': specifier: ^1.2.5 @@ -1034,7 +1034,7 @@ importers: specifier: ^5.0.0 version: 5.0.3(svelte@5.22.4)(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../../packages/ai autoprefixer: specifier: ^10.4.20 @@ -1091,13 +1091,13 @@ importers: packages/ai: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../ui-utils '@opentelemetry/api': specifier: 1.9.0 @@ -1167,10 +1167,10 @@ importers: packages/amazon-bedrock: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils '@smithy/eventstream-codec': specifier: ^4.0.1 @@ -1201,10 +1201,10 @@ importers: packages/anthropic: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1226,13 +1226,13 @@ importers: packages/azure: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../openai '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider 
'@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1254,13 +1254,13 @@ importers: packages/cerebras: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1331,10 +1331,10 @@ importers: packages/cohere: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1356,13 +1356,13 @@ importers: packages/deepinfra: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1384,13 +1384,13 @@ importers: packages/deepseek: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1412,10 +1412,10 @@ importers: packages/fal: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1437,13 +1437,13 @@ importers: packages/fireworks: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1465,10 +1465,10 @@ importers: packages/google: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1490,16 +1490,16 @@ importers: packages/google-vertex: dependencies: '@ai-sdk/anthropic': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../anthropic '@ai-sdk/google': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../google '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils google-auth-library: specifier: ^9.15.0 @@ -1524,10 +1524,10 @@ importers: packages/groq: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': 
- specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1549,10 +1549,10 @@ importers: packages/luma: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1574,10 +1574,10 @@ importers: packages/mistral: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1599,10 +1599,10 @@ importers: packages/openai: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1624,10 +1624,10 @@ importers: packages/openai-compatible: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1649,10 +1649,10 @@ importers: packages/perplexity: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1696,7 +1696,7 @@ importers: packages/provider-utils: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider secure-json-parse: specifier: ^2.7.0 @@ -1724,10 +1724,10 @@ importers: packages/react: dependencies: '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../ui-utils react: specifier: ^18 || ^19 || ^19.0.0-rc @@ -1788,10 +1788,10 @@ importers: packages/replicate: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1813,10 +1813,10 @@ importers: packages/svelte: dependencies: '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../ui-utils devDependencies: '@eslint/compat': @@ -1877,13 +1877,13 @@ importers: packages/togetherai: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': @@ -1905,10 +1905,10 @@ importers: packages/ui-utils: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 
3.0.0-canary.2 version: link:../provider-utils zod-to-json-schema: specifier: ^3.24.1 @@ -1942,7 +1942,7 @@ importers: specifier: ^1.0.0-rc.0 || ^1.0.0 version: 1.0.0-rc.0(valibot@1.0.0-rc.0(typescript@5.8.3)) ai: - specifier: 5.0.0-canary.2 + specifier: 5.0.0-canary.3 version: link:../ai devDependencies: '@types/node': @@ -1964,10 +1964,10 @@ importers: packages/vue: dependencies: '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../ui-utils swrv: specifier: ^1.0.4 @@ -2019,13 +2019,13 @@ importers: packages/xai: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.0 + specifier: 2.0.0-canary.1 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.1 + specifier: 3.0.0-canary.2 version: link:../provider-utils devDependencies: '@types/node': From 225f087d180556b988de2bcc4a512f08b9d2c7f1 Mon Sep 17 00:00:00 2001 From: choi sung keun <86150470+cgoinglove@users.noreply.github.com> Date: Tue, 8 Apr 2025 02:04:41 +0900 Subject: [PATCH 0042/1307] fix (ai/mcp): prevent mutation of customEnv (#5583) --- .changeset/fix-env-mutation.md | 5 +++++ packages/ai/mcp-stdio/get-environment.test.ts | 13 +++++++++++++ packages/ai/mcp-stdio/get-environment.ts | 2 +- 3 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 .changeset/fix-env-mutation.md create mode 100644 packages/ai/mcp-stdio/get-environment.test.ts diff --git a/.changeset/fix-env-mutation.md b/.changeset/fix-env-mutation.md new file mode 100644 index 000000000000..92454ae660df --- /dev/null +++ b/.changeset/fix-env-mutation.md @@ -0,0 +1,5 @@ +--- +'@ai/core': patch +--- + +fix (ai/mcp): prevent mutation of customEnv diff --git a/packages/ai/mcp-stdio/get-environment.test.ts b/packages/ai/mcp-stdio/get-environment.test.ts new file mode 100644 index 000000000000..1ea0bb900766 --- /dev/null +++ b/packages/ai/mcp-stdio/get-environment.test.ts @@ -0,0 +1,13 @@ +import { describe, it, expect } from 'vitest'; +import { getEnvironment } from './get-environment'; + +describe('getEnvironment', () => { + it('should not mutate the original custom environment object', () => { + const customEnv = { CUSTOM_VAR: 'custom_value' }; + + const result = getEnvironment(customEnv); + + expect(customEnv).toStrictEqual({ CUSTOM_VAR: 'custom_value' }); + expect(result).not.toBe(customEnv); + }); +}); diff --git a/packages/ai/mcp-stdio/get-environment.ts b/packages/ai/mcp-stdio/get-environment.ts index 843d7edb41e9..d22b7f854caf 100644 --- a/packages/ai/mcp-stdio/get-environment.ts +++ b/packages/ai/mcp-stdio/get-environment.ts @@ -24,7 +24,7 @@ export function getEnvironment( ] : ['HOME', 'LOGNAME', 'PATH', 'SHELL', 'TERM', 'USER']; - const env: Record = customEnv ?? {}; + const env: Record = customEnv ? 
{ ...customEnv } : {}; for (const key of DEFAULT_INHERITED_ENV_VARS) { const value = globalThis.process.env[key]; From e907dfc2717621be36c85a9f343f2d27a4eb098d Mon Sep 17 00:00:00 2001 From: Matt <77928207+mattzcarey@users.noreply.github.com> Date: Mon, 7 Apr 2025 18:55:32 +0100 Subject: [PATCH 0043/1307] docs: add stackone toolset (#5585) --- content/docs/02-foundations/04-tools.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/content/docs/02-foundations/04-tools.mdx b/content/docs/02-foundations/04-tools.mdx index ac8a2acf3338..e56ef4745d5a 100644 --- a/content/docs/02-foundations/04-tools.mdx +++ b/content/docs/02-foundations/04-tools.mdx @@ -94,6 +94,7 @@ There are several providers that offer pre-built tools as **toolkits** that you - **[agentic](https://github.com/transitive-bullshit/agentic)** - A collection of 20+ tools. Most tools connect to access external APIs such as [Exa](https://exa.ai/) or [E2B](https://e2b.dev/). - **[browserbase](https://docs.browserbase.com/integrations/vercel-ai/introduction)** - Browser tool that runs a headless browser - **[Stripe agent tools](https://docs.stripe.com/agents)** - Tools for interacting with Stripe. +- **[StackOne ToolSet](https://docs.stackone.com/agents)** - Agentic integrations for hundreds of [enterprise SaaS](https://www.stackone.com/integrations) - **[Toolhouse](https://docs.toolhouse.ai/toolhouse/using-vercel-ai)** - AI function-calling in 3 lines of code for over 25 different actions. - **[Agent Tools](https://ai-sdk-agents.vercel.app/?item=introduction)** - A collection of tools for agents. - **[AI Tool Maker](https://github.com/nihaocami/ai-tool-maker)** - A CLI utility to generate AI SDK tools from OpenAPI specs. From 7027de7ed16548631da1bc4c4b03279c13f8e714 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Tue, 8 Apr 2025 07:33:59 +0200 Subject: [PATCH 0044/1307] fix: changeset --- .changeset/fix-env-mutation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changeset/fix-env-mutation.md b/.changeset/fix-env-mutation.md index 92454ae660df..86cd47b5e2d3 100644 --- a/.changeset/fix-env-mutation.md +++ b/.changeset/fix-env-mutation.md @@ -1,5 +1,5 @@ --- -'@ai/core': patch +'ai': patch --- fix (ai/mcp): prevent mutation of customEnv From 3a3f013b2548bfb6ee7646d395eeb406d23244a8 Mon Sep 17 00:00:00 2001 From: faiz-gear <70053309+faiz-gear@users.noreply.github.com> Date: Tue, 8 Apr 2025 14:37:58 +0800 Subject: [PATCH 0045/1307] fix(docs): correct Completions API example by using openai() instead of openai.responses() (#5589) --- content/docs/02-guides/19-openai-responses.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/docs/02-guides/19-openai-responses.mdx b/content/docs/02-guides/19-openai-responses.mdx index f53fdfe71e2e..928fc2ded153 100644 --- a/content/docs/02-guides/19-openai-responses.mdx +++ b/content/docs/02-guides/19-openai-responses.mdx @@ -180,7 +180,7 @@ import { openai } from '@ai-sdk/openai'; // Completions API const { text } = await generateText({ - model: openai.responses('gpt-4o', { parallelToolCalls: false }), + model: openai('gpt-4o', { parallelToolCalls: false }), prompt: 'Explain the concept of quantum entanglement.', }); From a166433b260b8aa0b4fdff34c2dce32b4800ce9e Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Tue, 8 Apr 2025 09:07:51 +0200 Subject: [PATCH 0046/1307] feat: add transcription with experimental_transcribe (#5593) Co-authored-by: Hayden Bleasel Co-authored-by: Nico Albanese <49612682+nicoalbanese@users.noreply.github.com> --- 
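A minimal usage sketch of the API this patch adds (assumes `OPENAI_API_KEY` is set and a local `audio.mp3` exists; the model id and file name are illustrative):

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'fs/promises';

// transcribe() resolves to a transcript object that exposes the text
// plus optional segments, language, and duration metadata.
const transcript = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('audio.mp3'),
});

console.log(transcript.text);
```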
.changeset/happy-kangaroos-roll.md | 8 + .../docs/03-ai-sdk-core/36-transcription.mdx | 153 +++++++ content/docs/03-ai-sdk-core/index.mdx | 5 + .../01-ai-sdk-core/11-transcribe.mdx | 138 ++++++ .../07-reference/01-ai-sdk-core/index.mdx | 5 + .../01-ai-sdk-providers/02-openai.mdx | 64 +++ .../ai-core/src/transcribe/openai-string.ts | 23 + examples/ai-core/src/transcribe/openai-url.ts | 23 + examples/ai-core/src/transcribe/openai.ts | 21 + .../ai/core/generate-image/generate-image.ts | 11 +- packages/ai/core/index.ts | 1 + .../convert-to-language-model-prompt.ts | 11 +- .../core/test/mock-transcription-model-v1.ts | 24 ++ packages/ai/core/transcribe/index.ts | 2 + .../ai/core/transcribe/transcribe-result.ts | 60 +++ .../ai/core/transcribe/transcribe.test.ts | 229 ++++++++++ packages/ai/core/transcribe/transcribe.ts | 150 +++++++ .../transcription-model-response-metadata.ts | 16 + packages/ai/core/types/transcription-model.ts | 15 + .../core/util/detect-image-mimetype.test.ts | 138 ------ .../ai/core/util/detect-image-mimetype.ts | 68 --- packages/ai/core/util/detect-mimetype.test.ts | 400 ++++++++++++++++++ packages/ai/core/util/detect-mimetype.ts | 105 +++++ .../errors/no-transcript-generated-error.ts | 20 + packages/openai/src/internal/index.ts | 2 + packages/openai/src/openai-provider.ts | 19 + .../src/openai-transcription-model.test.ts | 182 ++++++++ .../openai/src/openai-transcription-model.ts | 259 ++++++++++++ .../src/openai-transcription-settings.ts | 34 ++ packages/openai/src/transcript-test.mp3 | Bin 0 -> 40169 bytes packages/provider-utils/src/post-to-api.ts | 30 ++ .../provider-utils/src/test/test-server.ts | 16 + packages/provider/src/index.ts | 1 + .../provider/src/provider/v1/provider-v1.ts | 12 +- .../provider/src/transcription-model/index.ts | 1 + .../src/transcription-model/v1/index.ts | 3 + .../v1/transcription-model-v1-call-options.ts | 46 ++ .../v1/transcription-model-v1-call-warning.ts | 16 + .../v1/transcription-model-v1.ts | 116 +++++ 39 files changed, 2216 insertions(+), 211 deletions(-) create mode 100644 .changeset/happy-kangaroos-roll.md create mode 100644 content/docs/03-ai-sdk-core/36-transcription.mdx create mode 100644 content/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx create mode 100644 examples/ai-core/src/transcribe/openai-string.ts create mode 100644 examples/ai-core/src/transcribe/openai-url.ts create mode 100644 examples/ai-core/src/transcribe/openai.ts create mode 100644 packages/ai/core/test/mock-transcription-model-v1.ts create mode 100644 packages/ai/core/transcribe/index.ts create mode 100644 packages/ai/core/transcribe/transcribe-result.ts create mode 100644 packages/ai/core/transcribe/transcribe.test.ts create mode 100644 packages/ai/core/transcribe/transcribe.ts create mode 100644 packages/ai/core/types/transcription-model-response-metadata.ts create mode 100644 packages/ai/core/types/transcription-model.ts delete mode 100644 packages/ai/core/util/detect-image-mimetype.test.ts delete mode 100644 packages/ai/core/util/detect-image-mimetype.ts create mode 100644 packages/ai/core/util/detect-mimetype.test.ts create mode 100644 packages/ai/core/util/detect-mimetype.ts create mode 100644 packages/ai/errors/no-transcript-generated-error.ts create mode 100644 packages/openai/src/openai-transcription-model.test.ts create mode 100644 packages/openai/src/openai-transcription-model.ts create mode 100644 packages/openai/src/openai-transcription-settings.ts create mode 100644 packages/openai/src/transcript-test.mp3 create mode 100644 
packages/provider/src/transcription-model/index.ts
 create mode 100644 packages/provider/src/transcription-model/v1/index.ts
 create mode 100644 packages/provider/src/transcription-model/v1/transcription-model-v1-call-options.ts
 create mode 100644 packages/provider/src/transcription-model/v1/transcription-model-v1-call-warning.ts
 create mode 100644 packages/provider/src/transcription-model/v1/transcription-model-v1.ts

diff --git a/.changeset/happy-kangaroos-roll.md b/.changeset/happy-kangaroos-roll.md
new file mode 100644
index 000000000000..8b0e54911d9c
--- /dev/null
+++ b/.changeset/happy-kangaroos-roll.md
@@ -0,0 +1,8 @@
+---
+'@ai-sdk/provider-utils': patch
+'@ai-sdk/provider': patch
+'@ai-sdk/openai': patch
+'ai': patch
+---
+
+feat: add transcription with experimental_transcribe
diff --git a/content/docs/03-ai-sdk-core/36-transcription.mdx b/content/docs/03-ai-sdk-core/36-transcription.mdx
new file mode 100644
index 000000000000..24d3dca4f1d1
--- /dev/null
+++ b/content/docs/03-ai-sdk-core/36-transcription.mdx
@@ -0,0 +1,153 @@
+---
+title: Transcription
+description: Learn how to transcribe audio with the AI SDK.
+---
+
+# Transcription
+
+<Note type="warning">Transcription is an experimental feature.</Note>
+
+The AI SDK provides the [`transcribe`](/docs/reference/ai-sdk-core/transcribe)
+function to transcribe audio using a transcription model.
+
+```ts
+import { experimental_transcribe as transcribe } from 'ai';
+import { openai } from '@ai-sdk/openai';
+import { readFile } from 'fs/promises';
+
+const transcript = await transcribe({
+  model: openai.transcription('whisper-1'),
+  audio: await readFile('audio.mp3'),
+});
+```
+
+The `audio` property can be a `Uint8Array`, `ArrayBuffer`, `Buffer`, `string` (base64-encoded audio data), or a `URL`.
+
+To access the generated transcript:
+
+```ts
+const text = transcript.text; // transcript text e.g. "Hello, world!"
+const segments = transcript.segments; // array of segments with start and end times, if available
+const language = transcript.language; // language of the transcript e.g. "en", if available
+const durationInSeconds = transcript.durationInSeconds; // duration of the transcript in seconds, if available
+```
+
+## Settings
+
+### Provider-Specific Settings
+
+Transcription models often have provider- or model-specific settings which you can set using the `providerOptions` parameter.
+
+```ts highlight={"9"}
+import { experimental_transcribe as transcribe } from 'ai';
+import { openai } from '@ai-sdk/openai';
+import { readFile } from 'fs/promises';
+
+const transcript = await transcribe({
+  model: openai.transcription('whisper-1'),
+  audio: await readFile('audio.mp3'),
+  providerOptions: {
+    openai: {
+      timestampGranularities: ['word'],
+    },
+  },
+});
+```
+
+### Abort Signals and Timeouts
+
+`transcribe` accepts an optional `abortSignal` parameter of
+type [`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal)
+that you can use to abort the transcription process or set a timeout.
+
+```ts highlight={"7"}
+import { openai } from '@ai-sdk/openai';
+import { experimental_transcribe as transcribe } from 'ai';
+import { readFile } from 'fs/promises';
+
+const transcript = await transcribe({
+  model: openai.transcription('whisper-1'),
+  audio: await readFile('audio.mp3'),
+  abortSignal: AbortSignal.timeout(1000), // Abort after 1 second
+});
+```
+
+### Custom Headers
+
+`transcribe` accepts an optional `headers` parameter of type `Record<string, string>`
+that you can use to add custom headers to the transcription request.
+
+```ts highlight={"7"}
+import { openai } from '@ai-sdk/openai';
+import { experimental_transcribe as transcribe } from 'ai';
+import { readFile } from 'fs/promises';
+
+const transcript = await transcribe({
+  model: openai.transcription('whisper-1'),
+  audio: await readFile('audio.mp3'),
+  headers: { 'X-Custom-Header': 'custom-value' },
+});
+```
+
+### Warnings
+
+Warnings (e.g. unsupported parameters) are available on the `warnings` property.
+
+```ts
+import { openai } from '@ai-sdk/openai';
+import { experimental_transcribe as transcribe } from 'ai';
+import { readFile } from 'fs/promises';
+
+const transcript = await transcribe({
+  model: openai.transcription('whisper-1'),
+  audio: await readFile('audio.mp3'),
+});
+
+const warnings = transcript.warnings;
+```
+
+### Error Handling
+
+When `transcribe` cannot generate a valid transcript, it throws an [`AI_NoTranscriptGeneratedError`](/docs/reference/ai-sdk-errors/ai-no-transcript-generated-error).
+
+This error can arise for any of the following reasons:
+
+- The model failed to generate a response
+- The model generated a response that could not be parsed
+
+The error preserves the following information to help you log the issue:
+
+- `responses`: Metadata about the transcription model responses, including timestamp, model, and headers.
+- `cause`: The cause of the error. You can use this for more detailed error handling.
+
+```ts
+import {
+  experimental_transcribe as transcribe,
+  NoTranscriptGeneratedError,
+} from 'ai';
+import { openai } from '@ai-sdk/openai';
+import { readFile } from 'fs/promises';
+
+try {
+  await transcribe({
+    model: openai.transcription('whisper-1'),
+    audio: await readFile('audio.mp3'),
+  });
+} catch (error) {
+  if (NoTranscriptGeneratedError.isInstance(error)) {
+    console.log('NoTranscriptGeneratedError');
+    console.log('Cause:', error.cause);
+    console.log('Responses:', error.responses);
+  }
+}
+```
+
+## Transcription Models
+
+| Provider                                                          | Model                    |
+| ----------------------------------------------------------------- | ------------------------ |
+| [OpenAI](/providers/ai-sdk-providers/openai#transcription-models) | `whisper-1`              |
+| [OpenAI](/providers/ai-sdk-providers/openai#transcription-models) | `gpt-4o-transcribe`      |
+| [OpenAI](/providers/ai-sdk-providers/openai#transcription-models) | `gpt-4o-mini-transcribe` |
+
+The models above are a small subset of the transcription models supported by the AI SDK providers. For more, see the respective provider documentation.
diff --git a/content/docs/03-ai-sdk-core/index.mdx b/content/docs/03-ai-sdk-core/index.mdx
index 521994c46ec9..3a3c93a46f2a 100644
--- a/content/docs/03-ai-sdk-core/index.mdx
+++ b/content/docs/03-ai-sdk-core/index.mdx
@@ -49,6 +49,11 @@ description: Learn about AI SDK Core.
       description: 'Learn how to generate images with AI SDK Core.',
       href: '/docs/ai-sdk-core/image-generation',
     },
+    {
+      title: 'Transcription',
+      description: 'Learn how to transcribe audio with AI SDK Core.',
+      href: '/docs/ai-sdk-core/transcription',
+    },
     {
       title: 'Provider Management',
       description: 'Learn how to work with multiple providers.',
diff --git a/content/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx b/content/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx
new file mode 100644
index 000000000000..86f52973bda7
--- /dev/null
+++ b/content/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx
@@ -0,0 +1,138 @@
+---
+title: transcribe
+description: API Reference for transcribe.
+---
+
+# `transcribe()`
+
+<Note type="warning">`transcribe` is an experimental feature.</Note>
+
+Generates a transcript from an audio file.
+
+```ts
+import { experimental_transcribe as transcribe } from 'ai';
+import { openai } from '@ai-sdk/openai';
+import { readFile } from 'fs/promises';
+
+const transcript = await transcribe({
+  model: openai.transcription('whisper-1'),
+  audio: await readFile('audio.mp3'),
+});
+
+console.log(transcript.text);
+```
+
+## Import
+
+<Snippet text={`import { experimental_transcribe as transcribe } from "ai"`} prompt={false} />
+
+## API Signature
+
+### Parameters
+
+<PropertiesTable
+  content={[
+    {
+      name: 'model',
+      type: 'TranscriptionModelV1',
+      description: 'The transcription model to use.',
+    },
+    {
+      name: 'audio',
+      type: 'DataContent (string | Uint8Array | ArrayBuffer | Buffer) | URL',
+      description: 'The audio data to transcribe.',
+    },
+    {
+      name: 'providerOptions',
+      type: 'Record<string, Record<string, JSONValue>>',
+      isOptional: true,
+      description: 'Additional provider-specific options.',
+    },
+    {
+      name: 'maxRetries',
+      type: 'number',
+      isOptional: true,
+      description: 'Maximum number of retries. Default: 2.',
+    },
+    {
+      name: 'abortSignal',
+      type: 'AbortSignal',
+      isOptional: true,
+      description: 'An optional abort signal to cancel the call.',
+    },
+    {
+      name: 'headers',
+      type: 'Record<string, string>',
+      isOptional: true,
+      description: 'Additional HTTP headers for the request.',
+    },
+  ]}
+/>
+
+### Returns
+
+<PropertiesTable
+  content={[
+    {
+      name: 'text',
+      type: 'string',
+      description: 'The transcribed text.',
+    },
+    {
+      name: 'segments',
+      type: 'Array<{ text: string; startSecond: number; endSecond: number }>',
+      description:
+        'An array of transcript segments, each containing a portion of the transcribed text along with its start and end times in seconds.',
+    },
+    {
+      name: 'language',
+      type: 'string | undefined',
+      description:
+        'The language of the transcript in ISO-639-1 format e.g. "en" for English.',
+    },
+    {
+      name: 'durationInSeconds',
+      type: 'number | undefined',
+      description: 'The duration of the transcript in seconds.',
+    },
+    {
+      name: 'warnings',
+      type: 'TranscriptionWarning[]',
+      description:
+        'Warnings from the model provider (e.g. unsupported settings).',
+    },
+    {
+      name: 'responses',
+      type: 'Array<TranscriptionModelResponseMetadata>',
+      description:
+        'Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.',
+      properties: [
+        {
+          type: 'TranscriptionModelResponseMetadata',
+          parameters: [
+            {
+              name: 'timestamp',
+              type: 'Date',
+              description: 'Timestamp for the start of the generated response.',
+            },
+            {
+              name: 'modelId',
+              type: 'string',
+              description:
+                'The ID of the response model that was used to generate the response.',
+            },
+            {
+              name: 'headers',
+              type: 'Record<string, string>',
+              isOptional: true,
+              description: 'Response headers.',
+            },
+          ],
+        },
+      ],
+    },
+  ]}
+/>
diff --git a/content/docs/07-reference/01-ai-sdk-core/index.mdx b/content/docs/07-reference/01-ai-sdk-core/index.mdx
index 509e7a78b516..78c347f522f9 100644
--- a/content/docs/07-reference/01-ai-sdk-core/index.mdx
+++ b/content/docs/07-reference/01-ai-sdk-core/index.mdx
@@ -52,6 +52,11 @@ AI SDK Core contains the following main functions:
         'Generate images based on a given prompt using an image model.',
       href: '/docs/reference/ai-sdk-core/generate-image',
     },
+    {
+      title: 'transcribe()',
+      description: 'Generate a transcript from an audio file.',
+      href: '/docs/reference/ai-sdk-core/transcribe',
+    },
   ]}
 />
diff --git a/content/providers/01-ai-sdk-providers/02-openai.mdx b/content/providers/01-ai-sdk-providers/02-openai.mdx
index 1a008b049402..c86f18f0fc4c 100644
--- a/content/providers/01-ai-sdk-providers/02-openai.mdx
+++ b/content/providers/01-ai-sdk-providers/02-openai.mdx
@@ -804,3 +804,67 @@ const model = openai.image('dall-e-3');
 | ---------- | ------------------------------- |
 | `dall-e-3` | 1024x1024, 1792x1024, 1024x1792 |
 | `dall-e-2` | 256x256, 512x512, 1024x1024 |
+
+## Transcription Models
+
+You can create models that call the [OpenAI transcription API](https://platform.openai.com/docs/api-reference/audio/transcribe)
+using the `.transcription()` factory method.
+
+The first argument is the model id e.g. `whisper-1`.
+
+```ts
+const model = openai.transcription('whisper-1');
+```
+
+The `audio` argument of `transcribe` can be a `DataContent` (`string | Uint8Array | ArrayBuffer | Buffer`) or a `URL`:
+
+```ts
+const result = await transcribe({
+  model: openai.transcription('whisper-1'),
+  audio: new Uint8Array([1, 2, 3, 4]),
+});
+```
+
+You can also pass additional provider-specific options using the `providerOptions` argument. For example, supplying the input language in ISO-639-1 format (e.g. `en`) will improve accuracy and latency.
+
+```ts highlight="6"
+import { experimental_transcribe as transcribe } from 'ai';
+import { openai } from '@ai-sdk/openai';
+
+const result = await transcribe({
+  model: openai.transcription('whisper-1'),
+  audio: new Uint8Array([1, 2, 3, 4]),
+  providerOptions: { openai: { language: 'en' } },
+});
+```
+
+The following provider options are available:
+
+- **timestampGranularities** _string[]_
+  The granularity of the timestamps in the transcription (see the sketch after this list).
+  Defaults to `['segment']`.
+  Possible values are `['word']`, `['segment']`, and `['word', 'segment']`.
+  Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+
+- **language** _string_
+  The language of the input audio. Supplying the input language in ISO-639-1 format (e.g. 'en') will improve accuracy and latency.
+  Optional.
+
+- **prompt** _string_
+  An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
+  Optional.
+
+- **temperature** _number_
+  The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+  Defaults to 0.
+  Optional.
+
+- **include** _string[]_
+  Additional information to include in the transcription response.
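+For example, a minimal sketch of requesting word-level timestamps with `timestampGranularities` (the audio file name is illustrative):
+
+```ts
+import { experimental_transcribe as transcribe } from 'ai';
+import { openai } from '@ai-sdk/openai';
+import { readFile } from 'fs/promises';
+
+const result = await transcribe({
+  model: openai.transcription('whisper-1'),
+  audio: await readFile('audio.mp3'),
+  providerOptions: {
+    // word-level timestamps incur additional latency compared to segments
+    openai: { timestampGranularities: ['word'] },
+  },
+});
+
+console.log(result.segments); // start/end times at word granularity
+```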
+
+### Model Capabilities
+
+| Model                    | Transcription       | Duration            | Segments            | Language            |
+| ------------------------ | ------------------- | ------------------- | ------------------- | ------------------- |
+| `whisper-1`              | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
+| `gpt-4o-mini-transcribe` | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
+| `gpt-4o-transcribe`      | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
diff --git a/examples/ai-core/src/transcribe/openai-string.ts b/examples/ai-core/src/transcribe/openai-string.ts
new file mode 100644
index 000000000000..5376c79e799b
--- /dev/null
+++ b/examples/ai-core/src/transcribe/openai-string.ts
@@ -0,0 +1,23 @@
+import { openai } from '@ai-sdk/openai';
+import { experimental_transcribe as transcribe } from 'ai';
+import 'dotenv/config';
+import { readFile } from 'fs/promises';
+
+async function main() {
+  const result = await transcribe({
+    model: openai.transcription('whisper-1'),
+    audio: Buffer.from(
+      await readFile('examples/ai-core/data/galileo.mp3'),
+    ).toString('base64'),
+  });
+
+  console.log('Text:', result.text);
+  console.log('Duration:', result.durationInSeconds);
+  console.log('Language:', result.language);
+  console.log('Segments:', result.segments);
+  console.log('Warnings:', result.warnings);
+  console.log('Responses:', result.responses);
+  console.log('Provider Metadata:', result.providerMetadata);
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/transcribe/openai-url.ts b/examples/ai-core/src/transcribe/openai-url.ts
new file mode 100644
index 000000000000..7c64932cd102
--- /dev/null
+++ b/examples/ai-core/src/transcribe/openai-url.ts
@@ -0,0 +1,23 @@
+import { openai } from '@ai-sdk/openai';
+import { experimental_transcribe as transcribe } from 'ai';
+import 'dotenv/config';
+
+async function main() {
+  const result = await transcribe({
+    model: openai.transcription('whisper-1'),
+    audio: new URL(
+      '/vercel/ai/raw/refs/heads/main/examples/ai-core/data/galileo.mp3',
+      'https://github.com',
+    ),
+  });
+
+  console.log('Text:', result.text);
+  console.log('Duration:', result.durationInSeconds);
+  console.log('Language:', result.language);
+  console.log('Segments:', result.segments);
+  console.log('Warnings:', result.warnings);
+  console.log('Responses:', result.responses);
+  console.log('Provider Metadata:', result.providerMetadata);
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/transcribe/openai.ts b/examples/ai-core/src/transcribe/openai.ts
new file mode 100644
index 000000000000..fafa586c6498
--- /dev/null
+++ b/examples/ai-core/src/transcribe/openai.ts
@@ -0,0 +1,21 @@
+import { openai } from '@ai-sdk/openai';
+import { experimental_transcribe as transcribe } from 'ai';
+import 'dotenv/config';
+import { readFile } from 'fs/promises';
+
+async function main() {
+  const result = await transcribe({
+    model: openai.transcription('whisper-1'),
+    audio: await readFile('examples/ai-core/data/galileo.mp3'),
+  });
+
+  console.log('Text:', result.text);
+  console.log('Duration:', result.durationInSeconds);
+  console.log('Language:', result.language);
+  console.log('Segments:', result.segments);
+  console.log('Warnings:', result.warnings);
+  console.log('Responses:', result.responses);
+  console.log('Provider Metadata:', result.providerMetadata);
+}
+
+main().catch(console.error);
diff --git a/packages/ai/core/generate-image/generate-image.ts b/packages/ai/core/generate-image/generate-image.ts
index cf06d8f7d498..ae0a79a550ec 100644
--- a/packages/ai/core/generate-image/generate-image.ts
+++ b/packages/ai/core/generate-image/generate-image.ts
@@ -8,7 +8,10 @@ import { prepareRetries } from
'../prompt/prepare-retries'; import { ImageGenerationWarning } from '../types/image-model'; import { ImageModelResponseMetadata } from '../types/image-model-response-metadata'; import { GenerateImageResult } from './generate-image-result'; -import { detectImageMimeType } from '../util/detect-image-mimetype'; +import { + detectMimeType, + imageMimeTypeSignatures, +} from '../util/detect-mimetype'; /** Generates images using an image model. @@ -146,7 +149,11 @@ Only applicable for HTTP-based providers. image => new DefaultGeneratedFile({ data: image, - mimeType: detectImageMimeType(image) ?? 'image/png', + mimeType: + detectMimeType({ + data: image, + signatures: imageMimeTypeSignatures, + }) ?? 'image/png', }), ), ); diff --git a/packages/ai/core/index.ts b/packages/ai/core/index.ts index 2a55e5c1b3e7..e553b3df5b71 100644 --- a/packages/ai/core/index.ts +++ b/packages/ai/core/index.ts @@ -31,6 +31,7 @@ export * from './embed'; export * from './generate-image'; export * from './generate-object'; export * from './generate-text'; +export * from './transcribe'; export * from './middleware'; export * from './prompt'; export * from './registry'; diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.ts index 3bbb269d3d3a..a4a1d8da4de4 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.ts @@ -7,7 +7,10 @@ import { } from '@ai-sdk/provider'; import { download } from '../../util/download'; import { CoreMessage } from '../prompt/message'; -import { detectImageMimeType } from '../util/detect-image-mimetype'; +import { + detectMimeType, + imageMimeTypeSignatures, +} from '../util/detect-mimetype'; import { FilePart, ImagePart, TextPart } from './content-part'; import { convertDataContentToBase64String, @@ -341,7 +344,11 @@ function convertPartToLanguageModelPart( // When detection fails, use provided mimetype. if (normalizedData instanceof Uint8Array) { - mimeType = detectImageMimeType(normalizedData) ?? mimeType; + mimeType = + detectMimeType({ + data: normalizedData, + signatures: imageMimeTypeSignatures, + }) ?? 
mimeType;
     }
 
     return {
       type: 'image',
diff --git a/packages/ai/core/test/mock-transcription-model-v1.ts b/packages/ai/core/test/mock-transcription-model-v1.ts
new file mode 100644
index 000000000000..87d6007793bc
--- /dev/null
+++ b/packages/ai/core/test/mock-transcription-model-v1.ts
@@ -0,0 +1,24 @@
+import { TranscriptionModelV1 } from '@ai-sdk/provider';
+import { notImplemented } from './not-implemented';
+
+export class MockTranscriptionModelV1 implements TranscriptionModelV1 {
+  readonly specificationVersion = 'v1';
+  readonly provider: TranscriptionModelV1['provider'];
+  readonly modelId: TranscriptionModelV1['modelId'];
+
+  doGenerate: TranscriptionModelV1['doGenerate'];
+
+  constructor({
+    provider = 'mock-provider',
+    modelId = 'mock-model-id',
+    doGenerate = notImplemented,
+  }: {
+    provider?: TranscriptionModelV1['provider'];
+    modelId?: TranscriptionModelV1['modelId'];
+    doGenerate?: TranscriptionModelV1['doGenerate'];
+  } = {}) {
+    this.provider = provider;
+    this.modelId = modelId;
+    this.doGenerate = doGenerate;
+  }
+}
diff --git a/packages/ai/core/transcribe/index.ts b/packages/ai/core/transcribe/index.ts
new file mode 100644
index 000000000000..596a3d8b7af7
--- /dev/null
+++ b/packages/ai/core/transcribe/index.ts
@@ -0,0 +1,2 @@
+export { transcribe as experimental_transcribe } from './transcribe';
+export type { TranscriptionResult as Experimental_TranscriptionResult } from './transcribe-result';
diff --git a/packages/ai/core/transcribe/transcribe-result.ts b/packages/ai/core/transcribe/transcribe-result.ts
new file mode 100644
index 000000000000..ed5a34d2289f
--- /dev/null
+++ b/packages/ai/core/transcribe/transcribe-result.ts
@@ -0,0 +1,60 @@
+import { JSONValue } from '@ai-sdk/provider';
+import { TranscriptionWarning } from '../types/transcription-model';
+import { TranscriptionModelResponseMetadata } from '../types/transcription-model-response-metadata';
+
+/**
+The result of a `transcribe` call.
+It contains the transcript and additional information.
+ */
+export interface TranscriptionResult {
+  /**
+   * The complete transcribed text from the audio.
+   */
+  readonly text: string;
+
+  /**
+   * Array of transcript segments with timing information.
+   * Each segment represents a portion of the transcribed text with start and end times.
+   */
+  readonly segments: Array<{
+    /**
+     * The text content of this segment.
+     */
+    readonly text: string;
+    /**
+     * The start time of this segment in seconds.
+     */
+    readonly startSecond: number;
+    /**
+     * The end time of this segment in seconds.
+     */
+    readonly endSecond: number;
+  }>;
+
+  /**
+   * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
+   * May be undefined if the language couldn't be detected.
+   */
+  readonly language: string | undefined;
+
+  /**
+   * The total duration of the audio file in seconds.
+   * May be undefined if the duration couldn't be determined.
+   */
+  readonly durationInSeconds: number | undefined;
+
+  /**
+  Warnings for the call, e.g. unsupported settings.
+   */
+  readonly warnings: Array<TranscriptionWarning>;
+
+  /**
+  Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
+   */
+  readonly responses: Array<TranscriptionModelResponseMetadata>;
+
+  /**
+  Provider metadata from the provider.
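+  Keyed by provider name, e.g. the OpenAI model returns `{ openai: { transcript: ... } }`.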
+ */ + readonly providerMetadata: Record>; +} diff --git a/packages/ai/core/transcribe/transcribe.test.ts b/packages/ai/core/transcribe/transcribe.test.ts new file mode 100644 index 000000000000..72a555783a78 --- /dev/null +++ b/packages/ai/core/transcribe/transcribe.test.ts @@ -0,0 +1,229 @@ +import { + JSONValue, + TranscriptionModelV1, + TranscriptionModelV1CallWarning, +} from '@ai-sdk/provider'; +import { MockTranscriptionModelV1 } from '../test/mock-transcription-model-v1'; +import { transcribe } from './transcribe'; + +const audioData = new Uint8Array([1, 2, 3, 4]); // Sample audio data +const testDate = new Date(2024, 0, 1); + +const sampleTranscript = { + text: 'This is a sample transcript.', + segments: [ + { + startSecond: 0, + endSecond: 2.5, + text: 'This is a', + }, + { + startSecond: 2.5, + endSecond: 4.0, + text: 'sample transcript.', + }, + ], + language: 'en', + durationInSeconds: 4.0, +}; + +const createMockResponse = (options: { + text: string; + segments: Array<{ + text: string; + startSecond: number; + endSecond: number; + }>; + language?: string; + durationInSeconds?: number; + warnings?: TranscriptionModelV1CallWarning[]; + timestamp?: Date; + modelId?: string; + headers?: Record; + providerMetadata?: Record>; +}) => ({ + text: options.text, + segments: options.segments, + language: options.language, + durationInSeconds: options.durationInSeconds, + warnings: options.warnings ?? [], + response: { + timestamp: options.timestamp ?? new Date(), + modelId: options.modelId ?? 'test-model-id', + headers: options.headers ?? {}, + }, + providerMetadata: options.providerMetadata ?? {}, +}); + +describe('transcribe', () => { + it('should send args to doGenerate', async () => { + const abortController = new AbortController(); + const abortSignal = abortController.signal; + + let capturedArgs!: Parameters[0]; + + await transcribe({ + model: new MockTranscriptionModelV1({ + doGenerate: async args => { + capturedArgs = args; + return createMockResponse({ + ...sampleTranscript, + }); + }, + }), + audio: audioData, + headers: { 'custom-request-header': 'request-header-value' }, + abortSignal, + }); + + expect(capturedArgs).toStrictEqual({ + audio: audioData, + mimeType: 'audio/wav', + headers: { 'custom-request-header': 'request-header-value' }, + abortSignal, + providerOptions: {}, + }); + }); + + it('should return warnings', async () => { + const result = await transcribe({ + model: new MockTranscriptionModelV1({ + doGenerate: async () => + createMockResponse({ + ...sampleTranscript, + warnings: [ + { + type: 'other', + message: 'Setting is not supported', + }, + ], + providerMetadata: { + 'test-provider': { + 'test-key': 'test-value', + }, + }, + }), + }), + audio: audioData, + }); + + expect(result.warnings).toStrictEqual([ + { + type: 'other', + message: 'Setting is not supported', + }, + ]); + }); + + it('should return the transcript', async () => { + const result = await transcribe({ + model: new MockTranscriptionModelV1({ + doGenerate: async () => + createMockResponse({ + ...sampleTranscript, + }), + }), + audio: audioData, + }); + + expect(result).toEqual({ + ...sampleTranscript, + warnings: [], + responses: [ + { + timestamp: expect.any(Date), + modelId: 'test-model-id', + headers: {}, + }, + ], + providerMetadata: {}, + }); + }); + + describe('error handling', () => { + it('should throw NoTranscriptGeneratedError when no transcript is returned', async () => { + await expect( + transcribe({ + model: new MockTranscriptionModelV1({ + doGenerate: async () => + 
createMockResponse({ + text: '', + segments: [], + language: 'en', + durationInSeconds: 0, + timestamp: testDate, + }), + }), + audio: audioData, + }), + ).rejects.toMatchObject({ + name: 'AI_NoTranscriptGeneratedError', + message: 'No transcript generated.', + responses: [ + { + timestamp: testDate, + modelId: expect.any(String), + }, + ], + }); + }); + + it('should include response headers in error when no transcript generated', async () => { + await expect( + transcribe({ + model: new MockTranscriptionModelV1({ + doGenerate: async () => + createMockResponse({ + text: '', + segments: [], + language: 'en', + durationInSeconds: 0, + timestamp: testDate, + headers: { + 'custom-response-header': 'response-header-value', + }, + }), + }), + audio: audioData, + }), + ).rejects.toMatchObject({ + name: 'AI_NoTranscriptGeneratedError', + message: 'No transcript generated.', + responses: [ + { + timestamp: testDate, + modelId: expect.any(String), + headers: { + 'custom-response-header': 'response-header-value', + }, + }, + ], + }); + }); + }); + + it('should return response metadata', async () => { + const testHeaders = { 'x-test': 'value' }; + + const result = await transcribe({ + model: new MockTranscriptionModelV1({ + doGenerate: async () => + createMockResponse({ + ...sampleTranscript, + timestamp: testDate, + modelId: 'test-model', + headers: testHeaders, + }), + }), + audio: audioData, + }); + + expect(result.responses).toStrictEqual([ + { + timestamp: testDate, + modelId: 'test-model', + headers: testHeaders, + }, + ]); + }); +}); diff --git a/packages/ai/core/transcribe/transcribe.ts b/packages/ai/core/transcribe/transcribe.ts new file mode 100644 index 000000000000..2114522845bc --- /dev/null +++ b/packages/ai/core/transcribe/transcribe.ts @@ -0,0 +1,150 @@ +import { TranscriptionModelV1, JSONValue } from '@ai-sdk/provider'; +import { NoTranscriptGeneratedError } from '../../errors/no-transcript-generated-error'; +import { prepareRetries } from '../prompt/prepare-retries'; +import { TranscriptionWarning } from '../types/transcription-model'; +import { TranscriptionModelResponseMetadata } from '../types/transcription-model-response-metadata'; +import { TranscriptionResult } from './transcribe-result'; +import { DataContent } from '../prompt'; +import { convertDataContentToUint8Array } from '../prompt/data-content'; +import { download } from '../../util/download'; +import { + audioMimeTypeSignatures, + detectMimeType, +} from '../util/detect-mimetype'; +import { ProviderOptions } from '../types/provider-metadata'; + +/** +Generates transcripts using a transcription model. + +@param model - The transcription model to use. +@param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL. +@param providerOptions - Additional provider-specific options that are passed through to the provider +as body parameters. +@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2. +@param abortSignal - An optional abort signal that can be used to cancel the call. +@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers. + +@returns A result object that contains the generated transcript. + */ +export async function transcribe({ + model, + audio, + providerOptions = {}, + maxRetries: maxRetriesArg, + abortSignal, + headers, +}: { + /** +The transcription model to use. + */ + model: TranscriptionModelV1; + + /** +The audio data to transcribe. 
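+Accepts a base64 string, Uint8Array, ArrayBuffer, or Buffer; a URL is downloaded before transcription.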
+ */ + audio: DataContent | URL; + + /** +Additional provider-specific options that are passed through to the provider +as body parameters. + +The outer record is keyed by the provider name, and the inner +record is keyed by the provider-specific metadata key. +```ts +{ + "openai": { + "temperature": 0 + } +} +``` + */ + providerOptions?: ProviderOptions; + + /** +Maximum number of retries per transcript model call. Set to 0 to disable retries. + +@default 2 + */ + maxRetries?: number; + + /** +Abort signal. + */ + abortSignal?: AbortSignal; + + /** +Additional headers to include in the request. +Only applicable for HTTP-based providers. + */ + headers?: Record; +}): Promise { + const { retry } = prepareRetries({ maxRetries: maxRetriesArg }); + const audioData = + audio instanceof URL + ? new Uint8Array((await download({ url: audio })).data) + : convertDataContentToUint8Array(audio); + + const result = await retry(() => + model.doGenerate({ + audio: audioData, + abortSignal, + headers, + providerOptions, + mimeType: + detectMimeType({ + data: audioData, + signatures: audioMimeTypeSignatures, + }) ?? 'audio/wav', + }), + ); + + if (!result.text) { + throw new NoTranscriptGeneratedError({ responses: [result.response] }); + } + + return new DefaultTranscriptionResult({ + text: result.text, + segments: result.segments, + language: result.language, + durationInSeconds: result.durationInSeconds, + warnings: result.warnings, + responses: [result.response], + providerMetadata: result.providerMetadata, + }); +} + +class DefaultTranscriptionResult implements TranscriptionResult { + readonly text: string; + readonly segments: Array<{ + text: string; + startSecond: number; + endSecond: number; + }>; + readonly language: string | undefined; + readonly durationInSeconds: number | undefined; + readonly warnings: Array; + readonly responses: Array; + readonly providerMetadata: Record>; + + constructor(options: { + text: string; + segments: Array<{ + text: string; + startSecond: number; + endSecond: number; + }>; + language: string | undefined; + durationInSeconds: number | undefined; + warnings: Array; + responses: Array; + providerMetadata: Record> | undefined; + }) { + this.text = options.text; + this.segments = options.segments; + this.language = options.language; + this.durationInSeconds = options.durationInSeconds; + this.warnings = options.warnings; + this.responses = options.responses; + this.providerMetadata = options.providerMetadata ?? {}; + } +} diff --git a/packages/ai/core/types/transcription-model-response-metadata.ts b/packages/ai/core/types/transcription-model-response-metadata.ts new file mode 100644 index 000000000000..9fcf62f3fcd6 --- /dev/null +++ b/packages/ai/core/types/transcription-model-response-metadata.ts @@ -0,0 +1,16 @@ +export type TranscriptionModelResponseMetadata = { + /** +Timestamp for the start of the generated response. + */ + timestamp: Date; + + /** +The ID of the response model that was used to generate the response. + */ + modelId: string; + + /** +Response headers. + */ + headers?: Record; +}; diff --git a/packages/ai/core/types/transcription-model.ts b/packages/ai/core/types/transcription-model.ts new file mode 100644 index 000000000000..d2eba1244fd0 --- /dev/null +++ b/packages/ai/core/types/transcription-model.ts @@ -0,0 +1,15 @@ +import { + TranscriptionModelV1, + TranscriptionModelV1CallWarning, +} from '@ai-sdk/provider'; + +/** +Transcription model that is used by the AI SDK Core functions. 
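+Currently an alias for the V1 transcription model specification.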
+ */ +export type TranscriptionModel = TranscriptionModelV1; + +/** +Warning from the model provider for this call. The call will proceed, but e.g. +some settings might not be supported, which can lead to suboptimal results. + */ +export type TranscriptionWarning = TranscriptionModelV1CallWarning; diff --git a/packages/ai/core/util/detect-image-mimetype.test.ts b/packages/ai/core/util/detect-image-mimetype.test.ts deleted file mode 100644 index cf5cfa61988e..000000000000 --- a/packages/ai/core/util/detect-image-mimetype.test.ts +++ /dev/null @@ -1,138 +0,0 @@ -import { describe, it, expect } from 'vitest'; -import { detectImageMimeType } from './detect-image-mimetype'; - -describe('detectImageMimeType', () => { - describe('GIF', () => { - it('should detect GIF from bytes', () => { - const gifBytes = new Uint8Array([0x47, 0x49, 0x46, 0xff, 0xff]); - expect(detectImageMimeType(gifBytes)).toBe('image/gif'); - }); - - it('should detect GIF from base64', () => { - const gifBase64 = 'R0lGabc123'; // Base64 string starting with GIF signature - expect(detectImageMimeType(gifBase64)).toBe('image/gif'); - }); - }); - - describe('PNG', () => { - it('should detect PNG from bytes', () => { - const pngBytes = new Uint8Array([0x89, 0x50, 0x4e, 0x47, 0xff, 0xff]); - expect(detectImageMimeType(pngBytes)).toBe('image/png'); - }); - - it('should detect PNG from base64', () => { - const pngBase64 = 'iVBORwabc123'; // Base64 string starting with PNG signature - expect(detectImageMimeType(pngBase64)).toBe('image/png'); - }); - }); - - describe('JPEG', () => { - it('should detect JPEG from bytes', () => { - const jpegBytes = new Uint8Array([0xff, 0xd8, 0xff, 0xff]); - expect(detectImageMimeType(jpegBytes)).toBe('image/jpeg'); - }); - - it('should detect JPEG from base64', () => { - const jpegBase64 = '/9j/abc123'; // Base64 string starting with JPEG signature - expect(detectImageMimeType(jpegBase64)).toBe('image/jpeg'); - }); - }); - - describe('WebP', () => { - it('should detect WebP from bytes', () => { - const webpBytes = new Uint8Array([0x52, 0x49, 0x46, 0x46, 0xff, 0xff]); - expect(detectImageMimeType(webpBytes)).toBe('image/webp'); - }); - - it('should detect WebP from base64', () => { - const webpBase64 = 'UklGRgabc123'; // Base64 string starting with WebP signature - expect(detectImageMimeType(webpBase64)).toBe('image/webp'); - }); - }); - - describe('BMP', () => { - it('should detect BMP from bytes', () => { - const bmpBytes = new Uint8Array([0x42, 0x4d, 0xff, 0xff]); - expect(detectImageMimeType(bmpBytes)).toBe('image/bmp'); - }); - - it('should detect BMP from base64', () => { - const bmpBase64 = 'Qkabc123'; // Base64 string starting with BMP signature - expect(detectImageMimeType(bmpBase64)).toBe('image/bmp'); - }); - }); - - describe('TIFF', () => { - it('should detect TIFF (little endian) from bytes', () => { - const tiffLEBytes = new Uint8Array([0x49, 0x49, 0x2a, 0x00, 0xff]); - expect(detectImageMimeType(tiffLEBytes)).toBe('image/tiff'); - }); - - it('should detect TIFF (little endian) from base64', () => { - const tiffLEBase64 = 'SUkqAAabc123'; // Base64 string starting with TIFF LE signature - expect(detectImageMimeType(tiffLEBase64)).toBe('image/tiff'); - }); - - it('should detect TIFF (big endian) from bytes', () => { - const tiffBEBytes = new Uint8Array([0x4d, 0x4d, 0x00, 0x2a, 0xff]); - expect(detectImageMimeType(tiffBEBytes)).toBe('image/tiff'); - }); - - it('should detect TIFF (big endian) from base64', () => { - const tiffBEBase64 = 'TU0AKgabc123'; // Base64 string starting with TIFF BE 
signature - expect(detectImageMimeType(tiffBEBase64)).toBe('image/tiff'); - }); - }); - - describe('AVIF', () => { - it('should detect AVIF from bytes', () => { - const avifBytes = new Uint8Array([ - 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x61, 0x76, 0x69, 0x66, - 0xff, - ]); - expect(detectImageMimeType(avifBytes)).toBe('image/avif'); - }); - - it('should detect AVIF from base64', () => { - const avifBase64 = 'AAAAIGZ0eXBhdmlmabc123'; // Base64 string starting with AVIF signature - expect(detectImageMimeType(avifBase64)).toBe('image/avif'); - }); - }); - - describe('HEIC', () => { - it('should detect HEIC from bytes', () => { - const heicBytes = new Uint8Array([ - 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x68, 0x65, 0x69, 0x63, - 0xff, - ]); - expect(detectImageMimeType(heicBytes)).toBe('image/heic'); - }); - - it('should detect HEIC from base64', () => { - const heicBase64 = 'AAAAIGZ0eXBoZWljabc123'; // Base64 string starting with HEIC signature - expect(detectImageMimeType(heicBase64)).toBe('image/heic'); - }); - }); - - describe('error cases', () => { - it('should return undefined for unknown image formats', () => { - const unknownBytes = new Uint8Array([0x00, 0x01, 0x02, 0x03]); - expect(detectImageMimeType(unknownBytes)).toBeUndefined(); - }); - - it('should return undefined for empty arrays', () => { - const emptyBytes = new Uint8Array([]); - expect(detectImageMimeType(emptyBytes)).toBeUndefined(); - }); - - it('should return undefined for arrays shorter than signature length', () => { - const shortBytes = new Uint8Array([0x89, 0x50]); // Incomplete PNG signature - expect(detectImageMimeType(shortBytes)).toBeUndefined(); - }); - - it('should return undefined for invalid base64 strings', () => { - const invalidBase64 = 'invalid123'; - expect(detectImageMimeType(invalidBase64)).toBeUndefined(); - }); - }); -}); diff --git a/packages/ai/core/util/detect-image-mimetype.ts b/packages/ai/core/util/detect-image-mimetype.ts deleted file mode 100644 index cbb708fb50eb..000000000000 --- a/packages/ai/core/util/detect-image-mimetype.ts +++ /dev/null @@ -1,68 +0,0 @@ -const mimeTypeSignatures = [ - { - mimeType: 'image/gif' as const, - bytesPrefix: [0x47, 0x49, 0x46], - base64Prefix: 'R0lG', - }, - { - mimeType: 'image/png' as const, - bytesPrefix: [0x89, 0x50, 0x4e, 0x47], - base64Prefix: 'iVBORw', - }, - { - mimeType: 'image/jpeg' as const, - bytesPrefix: [0xff, 0xd8], - base64Prefix: '/9j/', - }, - { - mimeType: 'image/webp' as const, - bytesPrefix: [0x52, 0x49, 0x46, 0x46], - base64Prefix: 'UklGRg', - }, - { - mimeType: 'image/bmp' as const, - bytesPrefix: [0x42, 0x4d], - base64Prefix: 'Qk', - }, - { - mimeType: 'image/tiff' as const, - bytesPrefix: [0x49, 0x49, 0x2a, 0x00], - base64Prefix: 'SUkqAA', - }, - { - mimeType: 'image/tiff' as const, - bytesPrefix: [0x4d, 0x4d, 0x00, 0x2a], - base64Prefix: 'TU0AKg', - }, - { - mimeType: 'image/avif' as const, - bytesPrefix: [ - 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x61, 0x76, 0x69, 0x66, - ], - base64Prefix: 'AAAAIGZ0eXBhdmlm', - }, - { - mimeType: 'image/heic' as const, - bytesPrefix: [ - 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x68, 0x65, 0x69, 0x63, - ], - base64Prefix: 'AAAAIGZ0eXBoZWlj', - }, -] as const; - -export function detectImageMimeType( - image: Uint8Array | string, -): (typeof mimeTypeSignatures)[number]['mimeType'] | undefined { - for (const signature of mimeTypeSignatures) { - if ( - typeof image === 'string' - ? 
image.startsWith(signature.base64Prefix) - : image.length >= signature.bytesPrefix.length && - signature.bytesPrefix.every((byte, index) => image[index] === byte) - ) { - return signature.mimeType; - } - } - - return undefined; -} diff --git a/packages/ai/core/util/detect-mimetype.test.ts b/packages/ai/core/util/detect-mimetype.test.ts new file mode 100644 index 000000000000..d965c712d359 --- /dev/null +++ b/packages/ai/core/util/detect-mimetype.test.ts @@ -0,0 +1,400 @@ +import { describe, it, expect } from 'vitest'; +import { + detectMimeType, + imageMimeTypeSignatures, + audioMimeTypeSignatures, +} from './detect-mimetype'; + +describe('detectMimeType', () => { + describe('GIF', () => { + it('should detect GIF from bytes', () => { + const gifBytes = new Uint8Array([0x47, 0x49, 0x46, 0xff, 0xff]); + expect( + detectMimeType({ data: gifBytes, signatures: imageMimeTypeSignatures }), + ).toBe('image/gif'); + }); + + it('should detect GIF from base64', () => { + const gifBase64 = 'R0lGabc123'; // Base64 string starting with GIF signature + expect( + detectMimeType({ + data: gifBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/gif'); + }); + }); + + describe('PNG', () => { + it('should detect PNG from bytes', () => { + const pngBytes = new Uint8Array([0x89, 0x50, 0x4e, 0x47, 0xff, 0xff]); + expect( + detectMimeType({ data: pngBytes, signatures: imageMimeTypeSignatures }), + ).toBe('image/png'); + }); + + it('should detect PNG from base64', () => { + const pngBase64 = 'iVBORwabc123'; // Base64 string starting with PNG signature + expect( + detectMimeType({ + data: pngBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/png'); + }); + }); + + describe('JPEG', () => { + it('should detect JPEG from bytes', () => { + const jpegBytes = new Uint8Array([0xff, 0xd8, 0xff, 0xff]); + expect( + detectMimeType({ + data: jpegBytes, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/jpeg'); + }); + + it('should detect JPEG from base64', () => { + const jpegBase64 = '/9j/abc123'; // Base64 string starting with JPEG signature + expect( + detectMimeType({ + data: jpegBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/jpeg'); + }); + }); + + describe('WebP', () => { + it('should detect WebP from bytes', () => { + const webpBytes = new Uint8Array([0x52, 0x49, 0x46, 0x46, 0xff, 0xff]); + expect( + detectMimeType({ + data: webpBytes, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/webp'); + }); + + it('should detect WebP from base64', () => { + const webpBase64 = 'UklGRgabc123'; // Base64 string starting with WebP signature + expect( + detectMimeType({ + data: webpBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/webp'); + }); + }); + + describe('BMP', () => { + it('should detect BMP from bytes', () => { + const bmpBytes = new Uint8Array([0x42, 0x4d, 0xff, 0xff]); + expect( + detectMimeType({ data: bmpBytes, signatures: imageMimeTypeSignatures }), + ).toBe('image/bmp'); + }); + + it('should detect BMP from base64', () => { + const bmpBase64 = 'Qkabc123'; // Base64 string starting with BMP signature + expect( + detectMimeType({ + data: bmpBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/bmp'); + }); + }); + + describe('TIFF', () => { + it('should detect TIFF (little endian) from bytes', () => { + const tiffLEBytes = new Uint8Array([0x49, 0x49, 0x2a, 0x00, 0xff]); + expect( + detectMimeType({ + data: tiffLEBytes, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/tiff'); + }); + + it('should 
detect TIFF (little endian) from base64', () => { + const tiffLEBase64 = 'SUkqAAabc123'; // Base64 string starting with TIFF LE signature + expect( + detectMimeType({ + data: tiffLEBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/tiff'); + }); + + it('should detect TIFF (big endian) from bytes', () => { + const tiffBEBytes = new Uint8Array([0x4d, 0x4d, 0x00, 0x2a, 0xff]); + expect( + detectMimeType({ + data: tiffBEBytes, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/tiff'); + }); + + it('should detect TIFF (big endian) from base64', () => { + const tiffBEBase64 = 'TU0AKgabc123'; // Base64 string starting with TIFF BE signature + expect( + detectMimeType({ + data: tiffBEBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/tiff'); + }); + }); + + describe('AVIF', () => { + it('should detect AVIF from bytes', () => { + const avifBytes = new Uint8Array([ + 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x61, 0x76, 0x69, 0x66, + 0xff, + ]); + expect( + detectMimeType({ + data: avifBytes, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/avif'); + }); + + it('should detect AVIF from base64', () => { + const avifBase64 = 'AAAAIGZ0eXBhdmlmabc123'; // Base64 string starting with AVIF signature + expect( + detectMimeType({ + data: avifBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/avif'); + }); + }); + + describe('HEIC', () => { + it('should detect HEIC from bytes', () => { + const heicBytes = new Uint8Array([ + 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x68, 0x65, 0x69, 0x63, + 0xff, + ]); + expect( + detectMimeType({ + data: heicBytes, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/heic'); + }); + + it('should detect HEIC from base64', () => { + const heicBase64 = 'AAAAIGZ0eXBoZWljabc123'; // Base64 string starting with HEIC signature + expect( + detectMimeType({ + data: heicBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBe('image/heic'); + }); + }); + + describe('MP3', () => { + it('should detect MP3 from bytes', () => { + const mp3Bytes = new Uint8Array([0xff, 0xfb]); + expect( + detectMimeType({ data: mp3Bytes, signatures: audioMimeTypeSignatures }), + ).toBe('audio/mpeg'); + }); + + it('should detect MP3 from base64', () => { + const mp3Base64 = '//s='; // Base64 string starting with MP3 signature + expect( + detectMimeType({ + data: mp3Base64, + signatures: audioMimeTypeSignatures, + }), + ).toBe('audio/mpeg'); + }); + }); + + describe('WAV', () => { + it('should detect WAV from bytes', () => { + const wavBytes = new Uint8Array([0x52, 0x49, 0x46, 0x46]); + expect( + detectMimeType({ data: wavBytes, signatures: audioMimeTypeSignatures }), + ).toBe('audio/wav'); + }); + + it('should detect WAV from base64', () => { + const wavBase64 = 'UklGRiQ='; // Base64 string starting with WAV signature + expect( + detectMimeType({ + data: wavBase64, + signatures: audioMimeTypeSignatures, + }), + ).toBe('audio/wav'); + }); + }); + + describe('OGG', () => { + it('should detect OGG from bytes', () => { + const oggBytes = new Uint8Array([0x4f, 0x67, 0x67, 0x53]); + expect( + detectMimeType({ data: oggBytes, signatures: audioMimeTypeSignatures }), + ).toBe('audio/ogg'); + }); + + it('should detect OGG from base64', () => { + const oggBase64 = 'T2dnUw'; // Base64 string starting with OGG signature + expect( + detectMimeType({ + data: oggBase64, + signatures: audioMimeTypeSignatures, + }), + ).toBe('audio/ogg'); + }); + }); + + describe('FLAC', () => { + it('should detect FLAC from bytes', () => { 
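+      // 0x66 0x4c 0x61 0x43 is ASCII for 'fLaC', the FLAC stream marker.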
+ const flacBytes = new Uint8Array([0x66, 0x4c, 0x61, 0x43]); + expect( + detectMimeType({ + data: flacBytes, + signatures: audioMimeTypeSignatures, + }), + ).toBe('audio/flac'); + }); + + it('should detect FLAC from base64', () => { + const flacBase64 = 'ZkxhQw'; // Base64 string starting with FLAC signature + expect( + detectMimeType({ + data: flacBase64, + signatures: audioMimeTypeSignatures, + }), + ).toBe('audio/flac'); + }); + }); + + describe('AAC', () => { + it('should detect AAC from bytes', () => { + const aacBytes = new Uint8Array([0x40, 0x15, 0x00, 0x00]); + expect( + detectMimeType({ data: aacBytes, signatures: audioMimeTypeSignatures }), + ).toBe('audio/aac'); + }); + + it('should detect AAC from base64', () => { + const aacBase64 = 'QBUA'; // Base64 string starting with AAC signature + expect( + detectMimeType({ + data: aacBase64, + signatures: audioMimeTypeSignatures, + }), + ).toBe('audio/aac'); + }); + }); + + describe('MP4', () => { + it('should detect MP4 from bytes', () => { + const mp4Bytes = new Uint8Array([0x66, 0x74, 0x79, 0x70]); + expect( + detectMimeType({ data: mp4Bytes, signatures: audioMimeTypeSignatures }), + ).toBe('audio/mp4'); + }); + + it('should detect MP4 from base64', () => { + const mp4Base64 = 'ZnR5cA'; // Base64 string starting with MP4 signature + expect( + detectMimeType({ + data: mp4Base64, + signatures: audioMimeTypeSignatures, + }), + ).toBe('audio/mp4'); + }); + }); + + describe('error cases', () => { + it('should return undefined for unknown image formats', () => { + const unknownBytes = new Uint8Array([0x00, 0x01, 0x02, 0x03]); + expect( + detectMimeType({ + data: unknownBytes, + signatures: imageMimeTypeSignatures, + }), + ).toBeUndefined(); + }); + + it('should return undefined for unknown audio formats', () => { + const unknownBytes = new Uint8Array([0x00, 0x01, 0x02, 0x03]); + expect( + detectMimeType({ + data: unknownBytes, + signatures: audioMimeTypeSignatures, + }), + ).toBeUndefined(); + }); + + it('should return undefined for empty arrays for image', () => { + const emptyBytes = new Uint8Array([]); + expect( + detectMimeType({ + data: emptyBytes, + signatures: imageMimeTypeSignatures, + }), + ).toBeUndefined(); + }); + + it('should return undefined for empty arrays for audio', () => { + const emptyBytes = new Uint8Array([]); + expect( + detectMimeType({ + data: emptyBytes, + signatures: audioMimeTypeSignatures, + }), + ).toBeUndefined(); + }); + + it('should return undefined for arrays shorter than signature length for image', () => { + const shortBytes = new Uint8Array([0x89, 0x50]); // Incomplete PNG signature + expect( + detectMimeType({ + data: shortBytes, + signatures: imageMimeTypeSignatures, + }), + ).toBeUndefined(); + }); + + it('should return undefined for arrays shorter than signature length for audio', () => { + const shortBytes = new Uint8Array([0x4f, 0x67]); // Incomplete OGG signature + expect( + detectMimeType({ + data: shortBytes, + signatures: audioMimeTypeSignatures, + }), + ).toBeUndefined(); + }); + + it('should return undefined for invalid base64 strings for image', () => { + const invalidBase64 = 'invalid123'; + expect( + detectMimeType({ + data: invalidBase64, + signatures: imageMimeTypeSignatures, + }), + ).toBeUndefined(); + }); + + it('should return undefined for invalid base64 strings for audio', () => { + const invalidBase64 = 'invalid123'; + expect( + detectMimeType({ + data: invalidBase64, + signatures: audioMimeTypeSignatures, + }), + ).toBeUndefined(); + }); + }); +}); diff --git 
a/packages/ai/core/util/detect-mimetype.ts b/packages/ai/core/util/detect-mimetype.ts new file mode 100644 index 000000000000..1851e3217a03 --- /dev/null +++ b/packages/ai/core/util/detect-mimetype.ts @@ -0,0 +1,105 @@ +export const imageMimeTypeSignatures = [ + { + mimeType: 'image/gif' as const, + bytesPrefix: [0x47, 0x49, 0x46], + base64Prefix: 'R0lG', + }, + { + mimeType: 'image/png' as const, + bytesPrefix: [0x89, 0x50, 0x4e, 0x47], + base64Prefix: 'iVBORw', + }, + { + mimeType: 'image/jpeg' as const, + bytesPrefix: [0xff, 0xd8], + base64Prefix: '/9j/', + }, + { + mimeType: 'image/webp' as const, + bytesPrefix: [0x52, 0x49, 0x46, 0x46], + base64Prefix: 'UklGRg', + }, + { + mimeType: 'image/bmp' as const, + bytesPrefix: [0x42, 0x4d], + base64Prefix: 'Qk', + }, + { + mimeType: 'image/tiff' as const, + bytesPrefix: [0x49, 0x49, 0x2a, 0x00], + base64Prefix: 'SUkqAA', + }, + { + mimeType: 'image/tiff' as const, + bytesPrefix: [0x4d, 0x4d, 0x00, 0x2a], + base64Prefix: 'TU0AKg', + }, + { + mimeType: 'image/avif' as const, + bytesPrefix: [ + 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x61, 0x76, 0x69, 0x66, + ], + base64Prefix: 'AAAAIGZ0eXBhdmlm', + }, + { + mimeType: 'image/heic' as const, + bytesPrefix: [ + 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x68, 0x65, 0x69, 0x63, + ], + base64Prefix: 'AAAAIGZ0eXBoZWlj', + }, +] as const; + +export const audioMimeTypeSignatures = [ + { + mimeType: 'audio/mpeg' as const, + bytesPrefix: [0xff, 0xfb], + base64Prefix: '//s=', + }, + { + mimeType: 'audio/wav' as const, + bytesPrefix: [0x52, 0x49, 0x46, 0x46], + base64Prefix: 'UklGR', + }, + { + mimeType: 'audio/ogg' as const, + bytesPrefix: [0x4f, 0x67, 0x67, 0x53], + base64Prefix: 'T2dnUw', + }, + { + mimeType: 'audio/flac' as const, + bytesPrefix: [0x66, 0x4c, 0x61, 0x43], + base64Prefix: 'ZkxhQw', + }, + { + mimeType: 'audio/aac' as const, + bytesPrefix: [0x40, 0x15, 0x00, 0x00], + base64Prefix: 'QBUA', + }, + { + mimeType: 'audio/mp4' as const, + bytesPrefix: [0x66, 0x74, 0x79, 0x70], + base64Prefix: 'ZnR5cA', + }, +] as const; + +export function detectMimeType({ + data, + signatures, +}: { + data: Uint8Array | string; + signatures: typeof audioMimeTypeSignatures | typeof imageMimeTypeSignatures; +}): (typeof signatures)[number]['mimeType'] | undefined { + for (const signature of signatures) { + if ( + typeof data === 'string' + ? data.startsWith(signature.base64Prefix) + : data.length >= signature.bytesPrefix.length && + signature.bytesPrefix.every((byte, index) => data[index] === byte) + ) { + return signature.mimeType; + } + } + + return undefined; +} diff --git a/packages/ai/errors/no-transcript-generated-error.ts b/packages/ai/errors/no-transcript-generated-error.ts new file mode 100644 index 000000000000..9ed11aa1eeb7 --- /dev/null +++ b/packages/ai/errors/no-transcript-generated-error.ts @@ -0,0 +1,20 @@ +import { AISDKError } from '@ai-sdk/provider'; +import { TranscriptionModelResponseMetadata } from '../core/types/transcription-model-response-metadata'; + +/** +Error that is thrown when no transcript was generated. 
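+The provider responses are attached so callers can inspect response metadata.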
+ */ +export class NoTranscriptGeneratedError extends AISDKError { + readonly responses: Array; + + constructor(options: { + responses: Array; + }) { + super({ + name: 'AI_NoTranscriptGeneratedError', + message: 'No transcript generated.', + }); + + this.responses = options.responses; + } +} diff --git a/packages/openai/src/internal/index.ts b/packages/openai/src/internal/index.ts index b2fca011ffb8..c14303526866 100644 --- a/packages/openai/src/internal/index.ts +++ b/packages/openai/src/internal/index.ts @@ -6,4 +6,6 @@ export * from '../openai-embedding-model'; export * from '../openai-embedding-settings'; export * from '../openai-image-model'; export * from '../openai-image-settings'; +export * from '../openai-transcription-model'; +export * from '../openai-transcription-settings'; export * from '../responses/openai-responses-language-model'; diff --git a/packages/openai/src/openai-provider.ts b/packages/openai/src/openai-provider.ts index 8383bdb101f6..4fed89d43d7c 100644 --- a/packages/openai/src/openai-provider.ts +++ b/packages/openai/src/openai-provider.ts @@ -3,6 +3,7 @@ import { ImageModelV1, LanguageModelV2, ProviderV2, + TranscriptionModelV1, } from '@ai-sdk/provider'; import { FetchFunction, @@ -27,6 +28,8 @@ import { OpenAIImageSettings, } from './openai-image-settings'; import { openaiTools } from './openai-tools'; +import { OpenAITranscriptionModel } from './openai-transcription-model'; +import { OpenAITranscriptionModelId } from './openai-transcription-settings'; import { OpenAIResponsesLanguageModel } from './responses/openai-responses-language-model'; import { OpenAIResponsesModelId } from './responses/openai-responses-settings'; @@ -112,6 +115,11 @@ Creates a model for image generation. settings?: OpenAIImageSettings, ): ImageModelV1; + /** +Creates a model for transcription. + */ + transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV1; + /** OpenAI-specific tools. 
*/ @@ -234,6 +242,14 @@ export function createOpenAI( fetch: options.fetch, }); + const createTranscriptionModel = (modelId: OpenAITranscriptionModelId) => + new OpenAITranscriptionModel(modelId, { + provider: `${providerName}.transcription`, + url: ({ path }) => `${baseURL}${path}`, + headers: getHeaders, + fetch: options.fetch, + }); + const createLanguageModel = ( modelId: OpenAIChatModelId | OpenAICompletionModelId, settings?: OpenAIChatSettings | OpenAICompletionSettings, @@ -281,6 +297,9 @@ export function createOpenAI( provider.image = createImageModel; provider.imageModel = createImageModel; + provider.transcription = createTranscriptionModel; + provider.transcriptionModel = createTranscriptionModel; + provider.tools = openaiTools; return provider as OpenAIProvider; diff --git a/packages/openai/src/openai-transcription-model.test.ts b/packages/openai/src/openai-transcription-model.test.ts new file mode 100644 index 000000000000..cee508de293a --- /dev/null +++ b/packages/openai/src/openai-transcription-model.test.ts @@ -0,0 +1,182 @@ +import { createTestServer } from '@ai-sdk/provider-utils/test'; +import { OpenAITranscriptionModel } from './openai-transcription-model'; +import { createOpenAI } from './openai-provider'; +import { readFile } from 'node:fs/promises'; +import path from 'node:path'; + +const audioData = await readFile(path.join(__dirname, 'transcript-test.mp3')); +const provider = createOpenAI({ apiKey: 'test-api-key' }); +const model = provider.transcription('whisper-1'); + +const server = createTestServer({ + 'https://api.openai.com/v1/audio/transcriptions': {}, +}); + +describe('doGenerate', () => { + function prepareJsonResponse({ + headers, + }: { + headers?: Record; + } = {}) { + server.urls['https://api.openai.com/v1/audio/transcriptions'].response = { + type: 'json-value', + headers, + body: { + task: 'transcribe', + text: 'Hello from the Vercel AI SDK!', + words: [ + { + word: 'Hello', + start: 0, + end: 5, + }, + { + word: 'from', + start: 5, + end: 10, + }, + { + word: 'the', + start: 10, + end: 15, + }, + { + word: 'Vercel', + start: 15, + end: 20, + }, + { + word: 'AI', + start: 20, + end: 25, + }, + { + word: 'SDK', + start: 25, + end: 30, + }, + { + word: '!', + start: 30, + end: 35, + }, + ], + durationInSeconds: 35, + language: 'en', + _request_id: 'req_1234', + }, + }; + } + + it('should pass the model', async () => { + prepareJsonResponse(); + + await model.doGenerate({ + audio: audioData, + mimeType: 'audio/wav', + }); + + expect(await server.calls[0].requestBodyMultipart).toMatchObject({ + model: 'whisper-1', + }); + }); + + it('should pass headers', async () => { + prepareJsonResponse(); + + const provider = createOpenAI({ + apiKey: 'test-api-key', + organization: 'test-organization', + project: 'test-project', + headers: { + 'Custom-Provider-Header': 'provider-header-value', + }, + }); + + await provider.transcription('whisper-1').doGenerate({ + audio: audioData, + mimeType: 'audio/wav', + headers: { + 'Custom-Request-Header': 'request-header-value', + }, + }); + + expect(server.calls[0].requestHeaders).toMatchObject({ + authorization: 'Bearer test-api-key', + 'content-type': expect.stringMatching( + /^multipart\/form-data; boundary=----formdata-undici-\d+$/, + ), + 'custom-provider-header': 'provider-header-value', + 'custom-request-header': 'request-header-value', + 'openai-organization': 'test-organization', + 'openai-project': 'test-project', + }); + }); + + it('should extract the transcription text', async () => { + prepareJsonResponse(); + + 
const result = await model.doGenerate({ + audio: audioData, + mimeType: 'audio/wav', + }); + + expect(result.text).toBe('Hello from the Vercel AI SDK!'); + }); + + it('should include response data with timestamp, modelId and headers', async () => { + prepareJsonResponse({ + headers: { + 'x-request-id': 'test-request-id', + 'x-ratelimit-remaining': '123', + }, + }); + + const testDate = new Date(0); + const customModel = new OpenAITranscriptionModel('whisper-1', { + provider: 'test-provider', + url: () => 'https://api.openai.com/v1/audio/transcriptions', + headers: () => ({}), + _internal: { + currentDate: () => testDate, + }, + }); + + const result = await customModel.doGenerate({ + audio: audioData, + mimeType: 'audio/wav', + }); + + expect(result.response).toMatchObject({ + timestamp: testDate, + modelId: 'whisper-1', + headers: { + 'content-type': 'application/json', + 'x-request-id': 'test-request-id', + 'x-ratelimit-remaining': '123', + }, + }); + }); + + it('should use real date when no custom date provider is specified', async () => { + prepareJsonResponse(); + + const testDate = new Date(0); + const customModel = new OpenAITranscriptionModel('whisper-1', { + provider: 'test-provider', + url: () => 'https://api.openai.com/v1/audio/transcriptions', + headers: () => ({}), + _internal: { + currentDate: () => testDate, + }, + }); + + const result = await customModel.doGenerate({ + audio: audioData, + mimeType: 'audio/wav', + }); + + expect(result.response.timestamp.getTime()).toEqual(testDate.getTime()); + expect(result.response.modelId).toBe('whisper-1'); + }); +}); diff --git a/packages/openai/src/openai-transcription-model.ts b/packages/openai/src/openai-transcription-model.ts new file mode 100644 index 000000000000..5720b4932081 --- /dev/null +++ b/packages/openai/src/openai-transcription-model.ts @@ -0,0 +1,259 @@ +import { + TranscriptionModelV1, + TranscriptionModelV1CallOptions, + TranscriptionModelV1CallWarning, +} from '@ai-sdk/provider'; +import { + combineHeaders, + convertBase64ToUint8Array, + createJsonResponseHandler, + parseProviderOptions, + postFormDataToApi, +} from '@ai-sdk/provider-utils'; +import { z } from 'zod'; +import { OpenAIConfig } from './openai-config'; +import { openaiFailedResponseHandler } from './openai-error'; +import { + OpenAITranscriptionModelId, + OpenAITranscriptionModelOptions, +} from './openai-transcription-settings'; + +// https://platform.openai.com/docs/api-reference/audio/createTranscription +const OpenAIProviderOptionsSchema = z.object({ + include: z + .array(z.string()) + .optional() + .describe( + 'Additional information to include in the transcription response.', + ), + language: z + .string() + .optional() + .describe('The language of the input audio in ISO-639-1 format.'), + prompt: z + .string() + .optional() + .describe( + "An optional text to guide the model's style or continue a previous audio segment.", + ), + temperature: z + .number() + .min(0) + .max(1) + .optional() + .default(0) + .describe('The sampling temperature, between 0 and 1.'), + timestampGranularities: z + .array(z.enum(['word', 'segment'])) + .optional() + .default(['segment']) + .describe( + 'The timestamp granularities to populate for this transcription.', + ), +}); + +export type OpenAITranscriptionCallOptions = Omit< + TranscriptionModelV1CallOptions, + 'providerOptions' +> & { + providerOptions?: { + openai?: z.infer; + }; +}; + +interface OpenAITranscriptionModelConfig extends OpenAIConfig { + _internal?: { + currentDate?: () => Date; + }; +} + +// 
https://platform.openai.com/docs/guides/speech-to-text#supported-languages +const languageMap = { + afrikaans: 'af', + arabic: 'ar', + armenian: 'hy', + azerbaijani: 'az', + belarusian: 'be', + bosnian: 'bs', + bulgarian: 'bg', + catalan: 'ca', + chinese: 'zh', + croatian: 'hr', + czech: 'cs', + danish: 'da', + dutch: 'nl', + english: 'en', + estonian: 'et', + finnish: 'fi', + french: 'fr', + galician: 'gl', + german: 'de', + greek: 'el', + hebrew: 'he', + hindi: 'hi', + hungarian: 'hu', + icelandic: 'is', + indonesian: 'id', + italian: 'it', + japanese: 'ja', + kannada: 'kn', + kazakh: 'kk', + korean: 'ko', + latvian: 'lv', + lithuanian: 'lt', + macedonian: 'mk', + malay: 'ms', + marathi: 'mr', + maori: 'mi', + nepali: 'ne', + norwegian: 'no', + persian: 'fa', + polish: 'pl', + portuguese: 'pt', + romanian: 'ro', + russian: 'ru', + serbian: 'sr', + slovak: 'sk', + slovenian: 'sl', + spanish: 'es', + swahili: 'sw', + swedish: 'sv', + tagalog: 'tl', + tamil: 'ta', + thai: 'th', + turkish: 'tr', + ukrainian: 'uk', + urdu: 'ur', + vietnamese: 'vi', + welsh: 'cy', +}; + +export class OpenAITranscriptionModel implements TranscriptionModelV1 { + readonly specificationVersion = 'v1'; + + get provider(): string { + return this.config.provider; + } + + constructor( + readonly modelId: OpenAITranscriptionModelId, + private readonly config: OpenAITranscriptionModelConfig, + ) {} + + private getArgs({ + audio, + mimeType, + providerOptions, + }: OpenAITranscriptionCallOptions) { + const warnings: TranscriptionModelV1CallWarning[] = []; + + // Parse provider options + const openAIOptions = parseProviderOptions({ + provider: 'openai', + providerOptions, + schema: OpenAIProviderOptionsSchema, + }); + + // Create form data with base fields + const formData = new FormData(); + const blob = + audio instanceof Uint8Array + ? new Blob([audio]) + : new Blob([convertBase64ToUint8Array(audio)]); + + formData.append('model', this.modelId); + formData.append('file', new File([blob], 'audio', { type: mimeType })); + + // Add provider-specific options + if (openAIOptions) { + const transcriptionModelOptions: OpenAITranscriptionModelOptions = { + include: openAIOptions.include, + language: openAIOptions.language, + prompt: openAIOptions.prompt, + temperature: openAIOptions.temperature, + timestamp_granularities: openAIOptions.timestampGranularities, + }; + + for (const key in transcriptionModelOptions) { + const value = + transcriptionModelOptions[ + key as keyof OpenAITranscriptionModelOptions + ]; + if (value !== undefined) { + formData.append(key, value as string); + } + } + } + + return { + formData, + warnings, + }; + } + + async doGenerate( + options: OpenAITranscriptionCallOptions, + ): Promise>> { + const currentDate = this.config._internal?.currentDate?.() ?? 
new Date(); + const { formData, warnings } = this.getArgs(options); + + const { value: response, responseHeaders } = await postFormDataToApi({ + url: this.config.url({ + path: '/audio/transcriptions', + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + formData, + failedResponseHandler: openaiFailedResponseHandler, + successfulResponseHandler: createJsonResponseHandler( + openaiTranscriptionResponseSchema, + ), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + let language: string | undefined; + + if (response.language && response.language in languageMap) { + language = languageMap[response.language as keyof typeof languageMap]; + } + + return { + text: response.text, + segments: response.words.map(word => ({ + text: word.word, + startSecond: word.start, + endSecond: word.end, + })), + language, + durationInSeconds: response.duration, + warnings, + response: { + timestamp: currentDate, + modelId: this.modelId, + headers: responseHeaders, + body: response, + }, + + // When using format `verbose_json` on `whisper-1`, OpenAI includes the things like `task` and enhanced `segments` information. + providerMetadata: { + openai: { + transcript: response, + }, + }, + }; + } +} + +const openaiTranscriptionResponseSchema = z.object({ + text: z.string(), + language: z.string().optional(), + duration: z.number().optional(), + words: z.array( + z.object({ + word: z.string(), + start: z.number(), + end: z.number(), + }), + ), +}); diff --git a/packages/openai/src/openai-transcription-settings.ts b/packages/openai/src/openai-transcription-settings.ts new file mode 100644 index 000000000000..f289b603bc08 --- /dev/null +++ b/packages/openai/src/openai-transcription-settings.ts @@ -0,0 +1,34 @@ +export type OpenAITranscriptionModelId = + | 'whisper-1' + | 'gpt-4o-mini-transcribe' + | 'gpt-4o-transcribe' + | (string & {}); + +export type OpenAITranscriptionModelOptions = { + /** + * Additional information to include in the transcription response. + */ + include?: string[]; + + /** + * The language of the input audio in ISO-639-1 format. + */ + language?: string; + + /** + * An optional text to guide the model's style or continue a previous audio segment. + */ + prompt?: string; + + /** + * The sampling temperature, between 0 and 1. + * @default 0 + */ + temperature?: number; + + /** + * The timestamp granularities to populate for this transcription. 
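+   * Note: word-level timestamps add latency; segment-level timestamps do not.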
+ * @default ['segment'] + */ + timestamp_granularities?: Array<'word' | 'segment'>; +}; diff --git a/packages/openai/src/transcript-test.mp3 b/packages/openai/src/transcript-test.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..6a4cf7b67483c076cebf897b5456f64d0372df90 GIT binary patch literal 40169 zcmXt=bzGD0_y4z1qX!HaNDLS-($OJZ(u{5#-AD^qqq}o-H%Nzqba#q?N(+LD3W@=I z;r;pj&L8*v$L_}-=Q_{pT=(lb*V!~wMDPK(#$jY=sCsj!1OV`~Z3CUeqy&W}1cgwj z|33ZS<@#^RFaQYEaR+lZEyNu)l{!XO3IR0M=Ns{{S#-mq*&Rf%_6Sr1e0MbuO^Kyi z_$b{7NRi>on#EVjVdXB`hBwK8BV$xhUI{5?p>`v&GZO@H3~%iw3u4`l0)Cop3@Rn} zetElbio;<6_+WK1LJcw^F5Sxwnk9$~6Jg#~6Uox-(xQ3lwsN_SF6jO=-TjtM+^E^ z?cKwqJI|{~T)_a6aT$R06nG>`iH=$VkQM4SNFfZKs-ImBu*n8elQ!d(*{MQ_gNKAV zwObjiW2NnBI)qY>#E-g9lbh8kEU>J|%P|_J{y@!uyc8 zn7)>F%{cqeAA|(%HPWJHhH!V zP9XG+c$pUVbI&uK78KxVQZCJq)E(WzUbV=kG#RhSIz1WPF!ngEx8e_@IWa6TN+Xra z2&15aRI9OfR>AtZagb)gf_DFaV8>uI#*H|kJ?g=XK@vyTixdK5`3jhtf6EC;dGnWsb%8eFBYiit?+o3 zMU${8OLt3%6@UYVLZ)R?hA11@2BIssC=Jh82_1f@_X7BZdCDj#4OB`>mfsD&gl*yO z^h8dXGph?G@#pdgQkzguSXiG4H;^7ny;m@p0RNn6dwXwsHAkGI74JBwmB90^7%fgp z#D~X%aL(q?LVP!DF!FPun8QTUgWtZ#36>>2dQHbwF5E7}7e^}%g#`v#m$gR?tc=W= z_K8^!udk21K2~PA0{CDN*sazbiQZTIw%+6b2`rDAzy>TwI)_;6&6jq z7MU^KYMvSyDN+xWXjFg8RmdM_+Jl0#Vk3>s44|AUo{P}%Faj)_-()>INxLw=C#Iy$ zIg{8F0^7hF&k^im@MMXi$*A6^tR5Opo&O`+#saNJN;AsLN*UFN4nCN-TyxiXq zWLHM|YH)19ij~SkHtq%o6n%UNxSN8CT*l^vfeYY1+o>GPvEP`q6QSF+(Y&NS0;~>z zgr+V=_5TQk=8Sk!kjbVE@d??nMR7wos_SD;0~~^KqTQ2e9wRA%M#H>lZMiv{y|bM$F)|;nbr18Sxz_NnGH*U|8trGse7_U)d!|E zL4Rnwjmf2rEQ~IaQ};g^9c`mwp;Uk3Uw&&zt>Vbe!yTf8DA`1c)q& zY`ruF0t~$0`}%{^toObkY0Llsky=YUIF2flob(^IEfvfg(o;v4Ut8Qk-{Wg=-uKy- z>D~H#G-;NJ(#Z3TT6=@ok@T%{>v0EEiYrRkq(orNq5+)4r{%Bm+P~Jzh&g%8zW*pV zHL;Cf8`mTx%3)RtskKw6{v9R~DvrkbaEy}<7sr(PadQd!glZvBj`m|4d0cSZyyWCC zs2o&j$tI?ht#G#K8ErJVI7QZG?4+Jc$0PxDa&+EWC){}5nm?qHjFfy*R`l+#AwB>| zLh4N*+$!#99B68x!fV{Mlc?%dYDi{IVJ=BJ?@DGbCi4~FYj7)a=snoWCeRaZO+YnrR1YbW~xIY@IMyKDKD zwPnz!$Ji4yere5Ro@2P-)++(b9AzmH>VFjfI`v0_nwjhu#)GzwM7#^ha1E!~_S~(^mx*Vj{X98Gx)=6?Uaf&?2B~5(8&Td^v;l`j!wGmcdhltveq8`q#|CjXDKF$ufpSX~0+>zTm%iq5Me3Y`Q-s)aE{JbefKA zC|>#rA(yN;U!~#uf_b7kZzmq7H7a416hr9I)?l|@%U5@UEq03R$+yttb>7BOkCO!= z+oF_#I{zeCqKc#4V1+0Dga|cDj;W;YEb@>6d0yxU733~M-`o?4@Fh?2$|Hq#y1F92 zzc}Z!p6Z%?+uiUn>P}x2YZmk9YRDUrsjDmM3Dty?XAJiLR!)w0De5iDa|U@q1SmyN z%#HW~iVZ(gY3p~JL{_FP6Lx#=6b0oBwO#ZNonb&-?6FUbr7Gij-e?>Ijxt!x8hTG? 
diff --git a/packages/openai/src/transcript-test.mp3 b/packages/openai/src/transcript-test.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..6a4cf7b67483c076cebf897b5456f64d0372df90
GIT binary patch
literal 40169
[binary mp3 patch data omitted]

literal 0
HcmV?d00001
zp{x?Nc7DhVvLdkc)ry!W8+6xt;m+SwN#(dE0|yTdP<%j+f)6o6BX$qtILG%nIqWSIur5k)O6zHw-j9-pdfCf`7tAY z#1iVlvv7WRfy~;V=8-G^YO0K6l+AZNbykuhQB}Q$GhG}de!AYYz+$j2Zu6CP8`lV< zs)-p*4apIu$)VBJnR`3|41q4+3-!KzhGh`#4MB1xIqhIP-(>cN6=jH};pF$`Qa z!elCT0ji604%Z9ouL06#G_X#LFOIx8y1`%Jm@0Dg8CA zP_T0jOF4MDF8Fz1gk=KCBd^H*4l%fWmq@4MitT6u*1Hn-c+N2jALc)GsC}VK`B7Y;%}KdEBUV1;k@QLjd`@2N5+U_kWM=_T2e^qU-aS8n9EKEdq_is-zM2za6Q zhTd0Yxurs`ZK7o9-aofAbY((cUE6TMfXl0FsNP^xlp0U)IbxMr0_t;(Z(95uU$<0C z8ch?)iUh14%bn-gPCZ|ZPtyWZ-d}VeUd`mD2{b{qi52UFt+62qc($^T{d=4$a!))Y zVOq1o^^2-2He2bsgxPY0nXYJPP5*5WF^~oZaC(ZX@yq3Kn>JV1P4Q|8+TweehEfx1 zYZY+rK#0>5XPN24W9R|QXhgcqBmr$IgtVb^XKKOWF^~pV*0h>Mfmqc9p((s0pU!Z=t`1Ccj z&o676o#2*BWx9$9wPD@0CYOP?W8!bc`eH_xNl}+#bMNl|{=~}6j~b6>F=Bh=3 zysI%~MoX?0Z+z|jP*zM>y-XF{R@5O}{Qud!PPLdOZ>{ex?eqeo8guoyq!j zs=V^KKZHKVELwPSKkB_dct*8!EwL*6R0IZ%WjeH%vKAj`TlSKkhL{@0FvS4Rs9*G} z40?)IGzH21RP&Yn)XP;dBL~g)@{6>|-K^QnFwea;bJ%@*-^3Ib|VebzX9RIF>Jjq*{}SiUXk z>sF}L<#dZcH3@s~OOkCa|EH3Yxk}1ccGxPoN>73_+CrsTEoV#hTuAF}=d6q1efxl@ zO0?r8Q=@JVz4mBfx*@hr<8a<&_a)ryg2P8ndkh-4mZfR;8o|t{E@<@@>2mYS*?@Lm z5%16|djaZI3c!Z-^{*1ja^>6ogLCdRqg<>iZ54bS(^#IpcFP%I-mzMJ@TB1i?(P=- z!JX*78_Tbi8r0pfCPIRbH7L9w{194^*Ak+X;qPV;lbe!iGdh?od&m}e--h+e6OhB& z^O@#<=e9pHi+TsKb8Fs04k9iuCWs#CU@j6F@*qVe^=tvvth<_*9KZDaBDH{CTV=|b+ymQ~A`!EV;cJj+PhPfuOr_Jwt~W@SF0sv1 zSqHAyaABmx!^- zxpKJkXs7W<%lSwB7~ZZPt%jolI2c^#Wi!5-X7fz*P?OFjUArKgM(Ib{Dc)y}4Tm9s zv3*f%dZnIWl&)<`_5dVy+sfKw4s-E)Umm4)aBo>mp0lQ#Wsjc?4Nv{LXhk&jA>bco zUaUV3zKxlPv&)n)cOY`g@KYAdCg2$bUT-*B)Pg|&&~D^kFcf?vN#(FxxjPZRAXKnq}rNEs%sDmROet2ZBX6`LXs;7d2>>d0EVGhKCZLoE*?XS01t7 zgV@_UczE)0>iFcF>T9tiK6VJZPoNsa(u#@Sf`m#Mu+`EH&O)r;w}{H*k0)o?+sL4% zWs+*Io-c3@dkRu9-%5U^bJBh?l9!@p&!q~%$xH~AlKz*%{vQPC525t9Ij7hWJI!I# VY0mcaZ;SOm=>PwZ{$EGnzW{t`1;GFS literal 0 HcmV?d00001 diff --git a/packages/provider-utils/src/post-to-api.ts b/packages/provider-utils/src/post-to-api.ts index 0be7b65e27cd..bafc711eadfe 100644 --- a/packages/provider-utils/src/post-to-api.ts +++ b/packages/provider-utils/src/post-to-api.ts @@ -41,6 +41,36 @@ export const postJsonToApi = async ({ fetch, }); +export const postFormDataToApi = async ({ + url, + headers, + formData, + failedResponseHandler, + successfulResponseHandler, + abortSignal, + fetch, +}: { + url: string; + headers?: Record; + formData: FormData; + failedResponseHandler: ResponseHandler; + successfulResponseHandler: ResponseHandler; + abortSignal?: AbortSignal; + fetch?: FetchFunction; +}) => + postToApi({ + url, + headers, + body: { + content: formData, + values: Object.fromEntries((formData as any).entries()), + }, + failedResponseHandler, + successfulResponseHandler, + abortSignal, + fetch, + }); + export const postToApi = async ({ url, headers = {}, diff --git a/packages/provider-utils/src/test/test-server.ts b/packages/provider-utils/src/test/test-server.ts index b03798379c75..8857a745c717 100644 --- a/packages/provider-utils/src/test/test-server.ts +++ b/packages/provider-utils/src/test/test-server.ts @@ -58,10 +58,26 @@ export type UrlHandlers< class TestServerCall { constructor(private request: Request) {} + // TODO: rename to requestBodyJson get requestBody() { return this.request!.text().then(JSON.parse); } + get requestBodyMultipart() { + return this.request!.headers.get('content-type')?.startsWith( + 'multipart/form-data', + ) + ? 
diff --git a/packages/provider-utils/src/test/test-server.ts b/packages/provider-utils/src/test/test-server.ts
index b03798379c75..8857a745c717 100644
--- a/packages/provider-utils/src/test/test-server.ts
+++ b/packages/provider-utils/src/test/test-server.ts
@@ -58,10 +58,26 @@ export type UrlHandlers<
 class TestServerCall {
   constructor(private request: Request) {}

+  // TODO: rename to requestBodyJson
   get requestBody() {
     return this.request!.text().then(JSON.parse);
   }

+  get requestBodyMultipart() {
+    return this.request!.headers.get('content-type')?.startsWith(
+      'multipart/form-data',
+    )
+      ? // For multipart/form-data, return the form data entries as an object
+        this.request!.formData().then(formData => {
+          const entries: Record<string, any> = {};
+          formData.forEach((value, key) => {
+            entries[key] = value;
+          });
+          return entries;
+        })
+      : null;
+  }
+
   get requestCredentials() {
     return this.request!.credentials;
   }
diff --git a/packages/provider/src/index.ts b/packages/provider/src/index.ts
index 117003c809d8..de7797a6133b 100644
--- a/packages/provider/src/index.ts
+++ b/packages/provider/src/index.ts
@@ -5,5 +5,6 @@ export * from './json-value/index';
 export * from './language-model/index';
 export * from './language-model-middleware/index';
 export * from './provider/index';
+export * from './transcription-model/index';
 export type { JSONSchema7, JSONSchema7Definition } from 'json-schema';
diff --git a/packages/provider/src/provider/v1/provider-v1.ts b/packages/provider/src/provider/v1/provider-v1.ts
index 1e813d25497a..bf3383e1db33 100644
--- a/packages/provider/src/provider/v1/provider-v1.ts
+++ b/packages/provider/src/provider/v1/provider-v1.ts
@@ -1,7 +1,7 @@
 import { EmbeddingModelV1 } from '../../embedding-model/v1/embedding-model-v1';
 import { ImageModelV1 } from '../../image-model/v1/image-model-v1';
 import { LanguageModelV1 } from '../../language-model/v1/language-model-v1';
-
+import { TranscriptionModelV1 } from '../../transcription-model/v1/transcription-model-v1';
 /**
  * Provider for language, text embedding, and image generation models.
  */
@@ -39,4 +39,14 @@ The model id is then passed to the provider function to get the model.
 @returns {ImageModel} The image model associated with the id
 */
   readonly imageModel?: (modelId: string) => ImageModelV1;
+
+  /**
+Returns the transcription model with the given id.
+The model id is then passed to the provider function to get the model.
+
+@param {string} modelId - The id of the model to return.
+
+@returns {TranscriptionModel} The transcription model associated with the id
+*/
+  readonly transcriptionModel?: (modelId: string) => TranscriptionModelV1;
 }
diff --git a/packages/provider/src/transcription-model/index.ts b/packages/provider/src/transcription-model/index.ts
new file mode 100644
index 000000000000..e69fb44d6835
--- /dev/null
+++ b/packages/provider/src/transcription-model/index.ts
@@ -0,0 +1 @@
+export * from './v1/index';
diff --git a/packages/provider/src/transcription-model/v1/index.ts b/packages/provider/src/transcription-model/v1/index.ts
new file mode 100644
index 000000000000..ac8635de6935
--- /dev/null
+++ b/packages/provider/src/transcription-model/v1/index.ts
@@ -0,0 +1,3 @@
+export type { TranscriptionModelV1 } from './transcription-model-v1';
+export type { TranscriptionModelV1CallOptions } from './transcription-model-v1-call-options';
+export type { TranscriptionModelV1CallWarning } from './transcription-model-v1-call-warning';
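Since `transcriptionModel` is optional on `ProviderV1`, downstream code has to guard for providers that do not implement it. A minimal sketch of such a lookup, using only the types this patch introduces:

```ts
import type { ProviderV1, TranscriptionModelV1 } from '@ai-sdk/provider';

// Defensive lookup: the accessor may be absent on older or partial providers.
function getTranscriptionModel(
  provider: ProviderV1,
  modelId: string,
): TranscriptionModelV1 {
  if (provider.transcriptionModel == null) {
    throw new Error('This provider does not support transcription models.');
  }
  return provider.transcriptionModel(modelId);
}
```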
diff --git a/packages/provider/src/transcription-model/v1/transcription-model-v1-call-options.ts b/packages/provider/src/transcription-model/v1/transcription-model-v1-call-options.ts
new file mode 100644
index 000000000000..a2164a14bba6
--- /dev/null
+++ b/packages/provider/src/transcription-model/v1/transcription-model-v1-call-options.ts
@@ -0,0 +1,46 @@
+import { JSONValue } from '../../json-value/json-value';
+
+type TranscriptionModelV1ProviderOptions = Record<
+  string,
+  Record<string, JSONValue>
+>;
+
+export type TranscriptionModelV1CallOptions = {
+  /**
+Audio data to transcribe.
+Accepts a `Uint8Array` or `string`, where `string` is a base64 encoded audio file.
+   */
+  audio: Uint8Array | string;
+
+  /**
+   The MIME type of the audio data.
+   */
+  mimeType: string;
+
+  /**
+Additional provider-specific options that are passed through to the provider
+as body parameters.
+
+The outer record is keyed by the provider name, and the inner
+record is keyed by the provider-specific metadata key.
+```ts
+{
+"openai": {
+"timestampGranularities": ["word"]
+}
+}
+```
+   */
+  providerOptions?: TranscriptionModelV1ProviderOptions;
+
+  /**
+Abort signal for cancelling the operation.
+   */
+  abortSignal?: AbortSignal;
+
+  /**
+Additional HTTP headers to be sent with the request.
+Only applicable for HTTP-based providers.
+   */
+  headers?: Record<string, string | undefined>;
+};
diff --git a/packages/provider/src/transcription-model/v1/transcription-model-v1-call-warning.ts b/packages/provider/src/transcription-model/v1/transcription-model-v1-call-warning.ts
new file mode 100644
index 000000000000..4c4797309c25
--- /dev/null
+++ b/packages/provider/src/transcription-model/v1/transcription-model-v1-call-warning.ts
@@ -0,0 +1,16 @@
+import { TranscriptionModelV1CallOptions } from './transcription-model-v1-call-options';
+
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+ */
+export type TranscriptionModelV1CallWarning =
+  | {
+      type: 'unsupported-setting';
+      setting: keyof TranscriptionModelV1CallOptions;
+      details?: string;
+    }
+  | {
+      type: 'other';
+      message: string;
+    };
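To show the two new types working together, a hedged sketch of building call options and surfacing warnings; the audio payload and option values are placeholders:

```ts
import type {
  TranscriptionModelV1CallOptions,
  TranscriptionModelV1CallWarning,
} from '@ai-sdk/provider';

// Options for a single transcription call, using the shapes defined above.
const options: TranscriptionModelV1CallOptions = {
  audio: 'SGVsbG8=', // base64-encoded audio bytes (placeholder)
  mimeType: 'audio/mpeg',
  providerOptions: {
    openai: { timestampGranularities: ['word'] },
  },
};

// Surfacing warnings returned alongside a transcription result.
function logWarnings(warnings: Array<TranscriptionModelV1CallWarning>) {
  for (const warning of warnings) {
    if (warning.type === 'unsupported-setting') {
      console.warn(`Unsupported setting: ${String(warning.setting)}`, warning.details);
    } else {
      console.warn(warning.message);
    }
  }
}
```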
diff --git a/packages/provider/src/transcription-model/v1/transcription-model-v1.ts b/packages/provider/src/transcription-model/v1/transcription-model-v1.ts
new file mode 100644
index 000000000000..73f7811c24e4
--- /dev/null
+++ b/packages/provider/src/transcription-model/v1/transcription-model-v1.ts
@@ -0,0 +1,116 @@
+import { JSONValue } from '../../json-value';
+import { TranscriptionModelV1CallOptions } from './transcription-model-v1-call-options';
+import { TranscriptionModelV1CallWarning } from './transcription-model-v1-call-warning';
+
+/**
+Transcription model specification version 1.
+ */
+export type TranscriptionModelV1 = {
+  /**
+The transcription model must specify which transcription model interface
+version it implements. This will allow us to evolve the transcription
+model interface and retain backwards compatibility. The different
+implementation versions can be handled as a discriminated union
+on our side.
+ */
+  readonly specificationVersion: 'v1';
+
+  /**
+Name of the provider for logging purposes.
+ */
+  readonly provider: string;
+
+  /**
+Provider-specific model ID for logging purposes.
+ */
+  readonly modelId: string;
+
+  /**
+Generates a transcript.
+ */
+  doGenerate(options: TranscriptionModelV1CallOptions): PromiseLike<{
+    /**
+     * The complete transcribed text from the audio.
+     */
+    text: string;
+
+    /**
+     * Array of transcript segments with timing information.
+     * Each segment represents a portion of the transcribed text with start and end times.
+     */
+    segments: Array<{
+      /**
+       * The text content of this segment.
+       */
+      text: string;
+      /**
+       * The start time of this segment in seconds.
+       */
+      startSecond: number;
+      /**
+       * The end time of this segment in seconds.
+       */
+      endSecond: number;
+    }>;
+
+    /**
+     * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
+     * May be undefined if the language couldn't be detected.
+     */
+    language: string | undefined;
+
+    /**
+     * The total duration of the audio file in seconds.
+     * May be undefined if the duration couldn't be determined.
+     */
+    durationInSeconds: number | undefined;
+
+    /**
+Warnings for the call, e.g. unsupported settings.
+ */
+    warnings: Array<TranscriptionModelV1CallWarning>;
+
+    /**
+Optional request information for telemetry and debugging purposes.
+ */
+    request?: {
+      /**
+Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
+Non-HTTP(s) providers should not set this.
+ */
+      body?: string;
+    };
+
+    /**
+Response information for telemetry and debugging purposes.
+ */
+    response: {
+      /**
+Timestamp for the start of the generated response.
+ */
+      timestamp: Date;
+
+      /**
+The ID of the response model that was used to generate the response.
+ */
+      modelId: string;
+
+      /**
+Response headers.
+ */
+      headers: Record<string, string> | undefined;
+
+      /**
+Response body.
+ */
+      body?: unknown;
+    };
+
+    /**
+Additional provider-specific metadata. They are passed through
+from the provider to the AI SDK and enable provider-specific
+results that can be fully encapsulated in the provider.
+ */
+    providerMetadata?: Record<string, Record<string, JSONValue>>;
+  }>;
+};
From 6dc848c417835fa7867156c069e8650519ee4cf7 Mon Sep 17 00:00:00 2001
From: Lars Grammel
Date: Tue, 8 Apr 2025 12:01:33 +0200
Subject: [PATCH 0047/1307] chore (provider): remove image parts (#5596)

---
 .changeset/selfish-rice-own.md                |   5 +
 .../convert-to-language-model-prompt.test.ts  |  21 ++--
 .../convert-to-language-model-prompt.ts       |  22 ++--
 packages/ai/core/types/language-model.ts      |   2 +-
 .../convert-to-bedrock-chat-messages.test.ts  |  40 +++++--
 .../src/convert-to-bedrock-chat-messages.ts   |  77 ++++++-------
 ...nvert-to-anthropic-messages-prompt.test.ts |  35 ++----
 .../convert-to-anthropic-messages-prompt.ts   |  80 +++++++-------
 .../src/convert-to-cohere-chat-prompt.ts      |   4 +-
 ...t-to-google-generative-ai-messages.test.ts |   6 +-
 ...onvert-to-google-generative-ai-messages.ts |  39 ++-----
 .../src/convert-to-groq-chat-messages.test.ts |   4 +-
 .../groq/src/convert-to-groq-chat-messages.ts |  25 ++---
 .../convert-to-mistral-chat-messages.test.ts  |   4 +-
 .../src/convert-to-mistral-chat-messages.ts   |  49 ++++-----
 ...to-openai-compatible-chat-messages.test.ts |  24 ++---
 ...vert-to-openai-compatible-chat-messages.ts |  37 +++----
 ...-to-openai-compatible-completion-prompt.ts |   6 +-
 .../convert-to-openai-chat-messages.test.ts   |  30 +++---
 .../src/convert-to-openai-chat-messages.ts    | 102 ++++++++++--------
 .../convert-to-openai-completion-prompt.ts    |   6 +-
 ...nvert-to-openai-responses-messages.test.ts |  22 ++--
 .../convert-to-openai-responses-messages.ts   |  59 +++++-----
 .../convert-to-perplexity-messages.test.ts    |  50 ---------
 .../src/convert-to-perplexity-messages.ts     |  24 +---
 .../v2/language-model-v2-prompt.ts            |  36 ++-----
 .../language-model/v2/language-model-v2.ts    |   2 +-
 27 files changed, 337 insertions(+), 474 deletions(-)
 create mode 100644 .changeset/selfish-rice-own.md

diff --git a/.changeset/selfish-rice-own.md b/.changeset/selfish-rice-own.md
new file mode 100644
index 000000000000..44f827260d50
--- /dev/null
+++ b/.changeset/selfish-rice-own.md
@@ -0,0 +1,5 @@
+---
+'@ai-sdk/provider': major
+---
+
+chore (provider): remove image parts
diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts
index 6516c9ba841f..85bc5e35d05a 100644
--- a/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts
+++ b/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts
@@ -39,9 +39,9 @@ describe('convertToLanguageModelPrompt', () => {
         role: 'user',
content: [ { - type: 'image', + type: 'file', mimeType: 'image/png', - image: new Uint8Array([0, 1, 2, 3]), + data: 'AAECAw==', }, ], }, @@ -80,9 +80,9 @@ describe('convertToLanguageModelPrompt', () => { role: 'user', content: [ { - type: 'image', + type: 'file', mimeType: 'image/png', - image: new Uint8Array([0, 1, 2, 3]), + data: 'AAECAw==', }, ], }, @@ -627,8 +627,9 @@ describe('convertToLanguageModelMessage', () => { role: 'user', content: [ { - type: 'image', - image: new URL('https://example.com/image.jpg'), + type: 'file', + data: new URL('https://example.com/image.jpg'), + mimeType: 'image/*', // wildcard since we don't know the exact type }, ], }); @@ -652,8 +653,8 @@ describe('convertToLanguageModelMessage', () => { role: 'user', content: [ { - type: 'image', - image: new Uint8Array([255, 216, 255, 221]), + type: 'file', + data: '/9j/3Q==', mimeType: 'image/jpeg', }, ], @@ -679,8 +680,8 @@ describe('convertToLanguageModelMessage', () => { role: 'user', content: [ { - type: 'image', - image: new Uint8Array([255, 216, 255, 221]), + type: 'file', + data: '/9j/3Q==', mimeType: 'image/jpeg', }, ], diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.ts index a4a1d8da4de4..c4f806ac9308 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.ts @@ -1,10 +1,10 @@ import { LanguageModelV2FilePart, - LanguageModelV2ImagePart, LanguageModelV2Message, LanguageModelV2Prompt, LanguageModelV2TextPart, } from '@ai-sdk/provider'; +import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { download } from '../../util/download'; import { CoreMessage } from '../prompt/message'; import { @@ -261,10 +261,7 @@ function convertPartToLanguageModelPart( string, { mimeType: string | undefined; data: Uint8Array } >, -): - | LanguageModelV2TextPart - | LanguageModelV2ImagePart - | LanguageModelV2FilePart { +): LanguageModelV2TextPart | LanguageModelV2FilePart { if (part.type === 'text') { return { type: 'text', @@ -350,10 +347,15 @@ function convertPartToLanguageModelPart( signatures: imageMimeTypeSignatures, }) ?? mimeType; } + return { - type: 'image', - image: normalizedData, - mimeType, + type: 'file', + mimeType: mimeType ?? 'image/*', // any image + filename: undefined, + data: + normalizedData instanceof Uint8Array + ? convertUint8ArrayToBase64(normalizedData) // TODO prevent double conversion + : normalizedData, providerOptions: part.providerOptions ?? part.experimental_providerMetadata, }; @@ -367,12 +369,12 @@ function convertPartToLanguageModelPart( return { type: 'file', + mimeType, + filename: part.filename, data: normalizedData instanceof Uint8Array ? convertDataContentToBase64String(normalizedData) : normalizedData, - filename: part.filename, - mimeType, providerOptions: part.providerOptions ?? 
part.experimental_providerMetadata, }; diff --git a/packages/ai/core/types/language-model.ts b/packages/ai/core/types/language-model.ts index 808005b7d94d..a410bcc978dc 100644 --- a/packages/ai/core/types/language-model.ts +++ b/packages/ai/core/types/language-model.ts @@ -7,6 +7,7 @@ import { } from '@ai-sdk/provider'; // Re-export LanguageModelV2 types for the middleware: +// TODO remove in v5 export type { LanguageModelV2, LanguageModelV2CallOptions, @@ -14,7 +15,6 @@ export type { LanguageModelV2FilePart, LanguageModelV2FinishReason, LanguageModelV2FunctionToolCall, - LanguageModelV2ImagePart, LanguageModelV2Message, LanguageModelV2ObjectGenerationMode, LanguageModelV2Prompt, diff --git a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts index 2f79a06698bf..721e3337c4d7 100644 --- a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts +++ b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts @@ -36,23 +36,18 @@ describe('system messages', () => { }); describe('user messages', () => { - it('should convert messages with file, image, and text parts to multiple parts', async () => { - const fileData = new Uint8Array([0, 1, 2, 3]); + it('should convert messages with image parts', async () => { + const imageData = new Uint8Array([0, 1, 2, 3]); const { messages } = convertToBedrockChatMessages([ { role: 'user', content: [ { type: 'text', text: 'Hello' }, - { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), - mimeType: 'image/png', - }, { type: 'file', - data: Buffer.from(fileData).toString('base64'), - mimeType: 'application/pdf', + data: Buffer.from(imageData).toString('base64'), + mimeType: 'image/png', }, ], }, @@ -69,6 +64,33 @@ describe('user messages', () => { source: { bytes: 'AAECAw==' }, }, }, + ], + }, + ]); + }); + + it('should convert messages with document parts', async () => { + const fileData = new Uint8Array([0, 1, 2, 3]); + + const { messages } = convertToBedrockChatMessages([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'file', + data: Buffer.from(fileData).toString('base64'), + mimeType: 'application/pdf', + }, + ], + }, + ]); + + expect(messages).toEqual([ + { + role: 'user', + content: [ + { text: 'Hello' }, { document: { format: 'pdf', diff --git a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts index d957638a38e6..119e13450ad2 100644 --- a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts +++ b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts @@ -1,3 +1,11 @@ +import { + JSONObject, + LanguageModelV2Message, + LanguageModelV2Prompt, + LanguageModelV2ProviderMetadata, + UnsupportedFunctionalityError, +} from '@ai-sdk/provider'; +import { createIdGenerator } from '@ai-sdk/provider-utils'; import { BEDROCK_CACHE_POINT, BedrockAssistantMessage, @@ -8,17 +16,6 @@ import { BedrockSystemMessages, BedrockUserMessage, } from './bedrock-api-types'; -import { - JSONObject, - LanguageModelV2Message, - LanguageModelV2Prompt, - LanguageModelV2ProviderMetadata, - UnsupportedFunctionalityError, -} from '@ai-sdk/provider'; -import { - convertUint8ArrayToBase64, - createIdGenerator, -} from '@ai-sdk/provider-utils'; const generateFileId = createIdGenerator({ prefix: 'file', size: 16 }); @@ -78,48 +75,40 @@ export function convertToBedrockChatMessages(prompt: LanguageModelV2Prompt): { }); break; } - case 'image': { - 
if (part.image instanceof URL) { - // The AI SDK automatically downloads images for user image parts with URLs - throw new UnsupportedFunctionalityError({ - functionality: 'Image URLs in user messages', - }); - } - - bedrockContent.push({ - image: { - format: part.mimeType?.split( - '/', - )?.[1] as BedrockImageFormat, - source: { - bytes: convertUint8ArrayToBase64( - part.image ?? (part.image as Uint8Array), - ), - }, - }, - }); - break; - } case 'file': { if (part.data instanceof URL) { // The AI SDK automatically downloads files for user file parts with URLs throw new UnsupportedFunctionalityError({ - functionality: 'File URLs in user messages', + functionality: 'File URL data', }); } - bedrockContent.push({ - document: { - format: part.mimeType?.split( - '/', - )?.[1] as BedrockDocumentFormat, - name: generateFileId(), - source: { - bytes: part.data, + if (part.mimeType.startsWith('image/')) { + const bedrockImageFormat = + part.mimeType === 'image/*' + ? undefined + : part.mimeType?.split('/')?.[1]; + + bedrockContent.push({ + image: { + format: bedrockImageFormat as BedrockImageFormat, + source: { bytes: part.data }, }, - }, - }); + }); + } else { + bedrockContent.push({ + document: { + format: part.mimeType?.split( + '/', + )?.[1] as BedrockDocumentFormat, + name: generateFileId(), + source: { + bytes: part.data, + }, + }, + }); + } break; } diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts index b6fb61813ba2..9399c5271784 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts @@ -49,8 +49,8 @@ describe('user messages', () => { role: 'user', content: [ { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', + data: 'AAECAw==', mimeType: 'image/png', }, ], @@ -90,9 +90,9 @@ describe('user messages', () => { role: 'user', content: [ { - type: 'image', - image: new URL('https://example.com/image.png'), - mimeType: 'image/png', + type: 'file', + data: new URL('https://example.com/image.png'), + mimeType: 'image/*', }, ], }, @@ -182,28 +182,9 @@ describe('user messages', () => { sendReasoning: true, warnings: [], }), - ).toThrow('Non-PDF files in user messages'); - }); - - it('should throw error for URL-based file parts', async () => { - expect(() => - convertToAnthropicMessagesPrompt({ - prompt: [ - { - role: 'user', - content: [ - { - type: 'file', - data: 'base64data', - mimeType: 'text/plain', - }, - ], - }, - ], - sendReasoning: true, - warnings: [], - }), - ).toThrow('Non-PDF files in user messages'); + ).toThrow( + "'unsupported file content type: text/plain' functionality not supported.", + ); }); }); diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts index b3efcb695c93..e4ad87e54dcf 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts @@ -5,7 +5,6 @@ import { LanguageModelV2ProviderMetadata, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; -import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { AnthropicAssistantMessage, AnthropicCacheControl, @@ -100,52 +99,51 @@ export function convertToAnthropicMessagesPrompt({ break; } - case 'image': { - anthropicContent.push({ - type: 'image', - source: - part.image instanceof URL - ? 
{ - type: 'url', - url: part.image.toString(), - } - : { - type: 'base64', - media_type: part.mimeType ?? 'image/jpeg', - data: convertUint8ArrayToBase64(part.image), - }, - cache_control: cacheControl, - }); - - break; - } - case 'file': { - if (part.data instanceof URL) { - // The AI SDK automatically downloads files for user file parts with URLs - throw new UnsupportedFunctionalityError({ - functionality: 'Image URLs in user messages', + if (part.mimeType.startsWith('image/')) { + anthropicContent.push({ + type: 'image', + source: + part.data instanceof URL + ? { + type: 'url', + url: part.data.toString(), + } + : { + type: 'base64', + media_type: + part.mimeType === 'image/*' + ? 'image/jpeg' + : part.mimeType, + data: part.data, + }, + cache_control: cacheControl, }); - } - - if (part.mimeType !== 'application/pdf') { + } else if (part.mimeType === 'application/pdf') { + if (part.data instanceof URL) { + // The AI SDK automatically downloads files for user file parts with URLs + throw new UnsupportedFunctionalityError({ + functionality: 'PDF File URLs in user messages', + }); + } + + betas.add('pdfs-2024-09-25'); + + anthropicContent.push({ + type: 'document', + source: { + type: 'base64', + media_type: 'application/pdf', + data: part.data, + }, + cache_control: cacheControl, + }); + } else { throw new UnsupportedFunctionalityError({ - functionality: 'Non-PDF files in user messages', + functionality: `unsupported file content type: ${part.mimeType}`, }); } - betas.add('pdfs-2024-09-25'); - - anthropicContent.push({ - type: 'document', - source: { - type: 'base64', - media_type: 'application/pdf', - data: part.data, - }, - cache_control: cacheControl, - }); - break; } } diff --git a/packages/cohere/src/convert-to-cohere-chat-prompt.ts b/packages/cohere/src/convert-to-cohere-chat-prompt.ts index 80f9d8c5a1c4..5ff6bc1cdd44 100644 --- a/packages/cohere/src/convert-to-cohere-chat-prompt.ts +++ b/packages/cohere/src/convert-to-cohere-chat-prompt.ts @@ -25,9 +25,9 @@ export function convertToCohereChatPrompt( case 'text': { return part.text; } - case 'image': { + case 'file': { throw new UnsupportedFunctionalityError({ - functionality: 'image-part', + functionality: 'File URL data', }); } } diff --git a/packages/google/src/convert-to-google-generative-ai-messages.test.ts b/packages/google/src/convert-to-google-generative-ai-messages.test.ts index 8b42a186406c..6741e71c01e1 100644 --- a/packages/google/src/convert-to-google-generative-ai-messages.test.ts +++ b/packages/google/src/convert-to-google-generative-ai-messages.test.ts @@ -25,14 +25,14 @@ describe('system messages', () => { }); describe('user messages', () => { - it('should add image parts for UInt8Array images', async () => { + it('should add image parts', async () => { const result = convertToGoogleGenerativeAIMessages([ { role: 'user', content: [ { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', + data: 'AAECAw==', mimeType: 'image/png', }, ], diff --git a/packages/google/src/convert-to-google-generative-ai-messages.ts b/packages/google/src/convert-to-google-generative-ai-messages.ts index 5ded69ff3e67..48ad729ca1e1 100644 --- a/packages/google/src/convert-to-google-generative-ai-messages.ts +++ b/packages/google/src/convert-to-google-generative-ai-messages.ts @@ -2,7 +2,6 @@ import { LanguageModelV2Prompt, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; -import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { GoogleGenerativeAIContent, GoogleGenerativeAIContentPart, @@ -42,41 
+41,15 @@ export function convertToGoogleGenerativeAIMessages( break; } - case 'image': { - parts.push( - part.image instanceof URL - ? { - fileData: { - mimeType: part.mimeType ?? 'image/jpeg', - fileUri: part.image.toString(), - }, - } - : { - inlineData: { - mimeType: part.mimeType ?? 'image/jpeg', - data: convertUint8ArrayToBase64(part.image), - }, - }, - ); - - break; - } - case 'file': { + // default to image/jpeg for unknown image/* types + const mimeType = + part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + parts.push( part.data instanceof URL - ? { - fileData: { - mimeType: part.mimeType, - fileUri: part.data.toString(), - }, - } - : { - inlineData: { - mimeType: part.mimeType, - data: part.data, - }, - }, + ? { fileData: { mimeType, fileUri: part.data.toString() } } + : { inlineData: { mimeType, data: part.data } }, ); break; diff --git a/packages/groq/src/convert-to-groq-chat-messages.test.ts b/packages/groq/src/convert-to-groq-chat-messages.test.ts index 52bc7830848f..3d124c860401 100644 --- a/packages/groq/src/convert-to-groq-chat-messages.test.ts +++ b/packages/groq/src/convert-to-groq-chat-messages.test.ts @@ -8,8 +8,8 @@ describe('user messages', () => { content: [ { type: 'text', text: 'Hello' }, { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', + data: 'AAECAw==', mimeType: 'image/png', }, ], diff --git a/packages/groq/src/convert-to-groq-chat-messages.ts b/packages/groq/src/convert-to-groq-chat-messages.ts index ed23555c3733..81535300e54c 100644 --- a/packages/groq/src/convert-to-groq-chat-messages.ts +++ b/packages/groq/src/convert-to-groq-chat-messages.ts @@ -2,7 +2,6 @@ import { LanguageModelV2Prompt, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; -import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { GroqChatPrompt } from './groq-api-types'; export function convertToGroqChatMessages( @@ -30,24 +29,26 @@ export function convertToGroqChatMessages( case 'text': { return { type: 'text', text: part.text }; } - case 'image': { + case 'file': { + if (!part.mimeType.startsWith('image/')) { + throw new UnsupportedFunctionalityError({ + functionality: 'Non-image file content parts', + }); + } + + const mimeType = + part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + return { type: 'image_url', image_url: { url: - part.image instanceof URL - ? part.image.toString() - : `data:${ - part.mimeType ?? 'image/jpeg' - };base64,${convertUint8ArrayToBase64(part.image)}`, + part.data instanceof URL + ? 
part.data.toString() + : `data:${mimeType};base64,${part.data}`, }, }; } - case 'file': { - throw new UnsupportedFunctionalityError({ - functionality: 'File content parts in user messages', - }); - } } }), }); diff --git a/packages/mistral/src/convert-to-mistral-chat-messages.test.ts b/packages/mistral/src/convert-to-mistral-chat-messages.test.ts index af37c30b0c94..82808c3acef3 100644 --- a/packages/mistral/src/convert-to-mistral-chat-messages.test.ts +++ b/packages/mistral/src/convert-to-mistral-chat-messages.test.ts @@ -8,8 +8,8 @@ describe('user messages', () => { content: [ { type: 'text', text: 'Hello' }, { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', + data: 'AAECAw==', mimeType: 'image/png', }, ], diff --git a/packages/mistral/src/convert-to-mistral-chat-messages.ts b/packages/mistral/src/convert-to-mistral-chat-messages.ts index 1f2e80aa3892..4dc1ca6bb89a 100644 --- a/packages/mistral/src/convert-to-mistral-chat-messages.ts +++ b/packages/mistral/src/convert-to-mistral-chat-messages.ts @@ -2,7 +2,6 @@ import { LanguageModelV2Prompt, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; -import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { MistralPrompt } from './mistral-chat-prompt'; export function convertToMistralChatMessages( @@ -28,38 +27,30 @@ export function convertToMistralChatMessages( case 'text': { return { type: 'text', text: part.text }; } - case 'image': { - return { - type: 'image_url', - image_url: - part.image instanceof URL - ? part.image.toString() - : `data:${ - part.mimeType ?? 'image/jpeg' - };base64,${convertUint8ArrayToBase64(part.image)}`, - }; - } + case 'file': { - if (!(part.data instanceof URL)) { + if (part.mimeType.startsWith('image/')) { + const mimeType = + part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + + return { + type: 'image_url', + image_url: + part.data instanceof URL + ? 
part.data.toString() + : `data:${mimeType};base64,${part.data}`, + }; + } else if (part.mimeType === 'application/pdf') { + return { + type: 'document_url', + document_url: part.data.toString(), + }; + } else { throw new UnsupportedFunctionalityError({ - functionality: 'File content parts in user messages', + functionality: + 'Only images and PDF file parts are supported', }); } - - switch (part.mimeType) { - case 'application/pdf': { - return { - type: 'document_url', - document_url: part.data.toString(), - }; - } - default: { - throw new UnsupportedFunctionalityError({ - functionality: - 'Only PDF files are supported in user messages', - }); - } - } } } }), diff --git a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts index 27d6ca3e9447..3737dc26f2e5 100644 --- a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts +++ b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts @@ -19,8 +19,8 @@ describe('user messages', () => { content: [ { type: 'text', text: 'Hello' }, { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', + data: Buffer.from([0, 1, 2, 3]).toString('base64'), mimeType: 'image/png', }, ], @@ -47,9 +47,9 @@ describe('user messages', () => { role: 'user', content: [ { - type: 'image', - image: new URL('https://example.com/image.jpg'), - mimeType: 'image/jpeg', + type: 'file', + data: new URL('https://example.com/image.jpg'), + mimeType: 'image/*', }, ], }, @@ -248,9 +248,9 @@ describe('provider-specific metadata merging', () => { role: 'user', content: [ { - type: 'image', - image: imageUrl, - mimeType: 'image/jpeg', + type: 'file', + data: imageUrl, + mimeType: 'image/*', providerOptions: { openaiCompatible: { cacheControl: { type: 'ephemeral' }, @@ -310,8 +310,8 @@ describe('provider-specific metadata merging', () => { }, }, { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', + data: Buffer.from([0, 1, 2, 3]).toString('base64'), mimeType: 'image/png', providerOptions: { openaiCompatible: { alt_text: 'A sample image' }, @@ -483,8 +483,8 @@ describe('provider-specific metadata merging', () => { }, }, { - type: 'image', - image: new Uint8Array([9, 8, 7, 6]), + type: 'file', + data: Buffer.from([9, 8, 7, 6]).toString('base64'), mimeType: 'image/png', providerOptions: { openaiCompatible: { imagePartLevel: 'image-data' }, diff --git a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts index e428e9423c74..6ff96e3edc23 100644 --- a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts +++ b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts @@ -3,7 +3,6 @@ import { LanguageModelV2ProviderMetadata, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; -import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { OpenAICompatibleChatPrompt } from './openai-compatible-api-types'; function getOpenAIMetadata(message: { @@ -42,24 +41,26 @@ export function convertToOpenAICompatibleChatMessages( case 'text': { return { type: 'text', text: part.text, ...partMetadata }; } - case 'image': { - return { - type: 'image_url', - image_url: { - url: - part.image instanceof URL - ? part.image.toString() - : `data:${ - part.mimeType ?? 
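The Mistral branch follows the same convention as the other converters in this patch: raster images become data URLs (or plain URLs), PDFs become document URLs, and everything else is rejected. A rough standalone sketch of that branching, assuming the same base64-string `data` field, with a local error class standing in for `UnsupportedFunctionalityError`:

```ts
class UnsupportedError extends Error {} // stand-in for UnsupportedFunctionalityError

type MistralContent =
  | { type: 'image_url'; image_url: string }
  | { type: 'document_url'; document_url: string };

// Rough sketch of the branching, not the actual converter code.
function toMistralContent(
  data: string | URL,
  mimeType: string,
): MistralContent {
  if (mimeType.startsWith('image/')) {
    // resolve the 'image/*' wildcard to a concrete subtype
    const resolved = mimeType === 'image/*' ? 'image/jpeg' : mimeType;
    return {
      type: 'image_url',
      image_url:
        data instanceof URL
          ? data.toString()
          : `data:${resolved};base64,${data}`,
    };
  }

  if (mimeType === 'application/pdf') {
    return { type: 'document_url', document_url: data.toString() };
  }

  throw new UnsupportedError('Only images and PDF file parts are supported');
}

console.log(toMistralContent('AAECAw==', 'image/png'));
// -> { type: 'image_url', image_url: 'data:image/png;base64,AAECAw==' }
```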
'image/jpeg' - };base64,${convertUint8ArrayToBase64(part.image)}`, - }, - ...partMetadata, - }; - } case 'file': { - throw new UnsupportedFunctionalityError({ - functionality: 'File content parts in user messages', - }); + if (part.mimeType.startsWith('image/')) { + const mimeType = + part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + + return { + type: 'image_url', + image_url: { + url: + part.data instanceof URL + ? part.data.toString() + : `data:${mimeType};base64,${part.data}`, + }, + ...partMetadata, + }; + } else { + throw new UnsupportedFunctionalityError({ + functionality: `file part media type ${part.mimeType}`, + }); + } } } }), diff --git a/packages/openai-compatible/src/convert-to-openai-compatible-completion-prompt.ts b/packages/openai-compatible/src/convert-to-openai-compatible-completion-prompt.ts index fa473118510e..a3869236f6ce 100644 --- a/packages/openai-compatible/src/convert-to-openai-compatible-completion-prompt.ts +++ b/packages/openai-compatible/src/convert-to-openai-compatible-completion-prompt.ts @@ -54,13 +54,9 @@ export function convertToOpenAICompatibleCompletionPrompt({ case 'text': { return part.text; } - case 'image': { - throw new UnsupportedFunctionalityError({ - functionality: 'images', - }); - } } }) + .filter(Boolean) .join(''); text += `${user}:\n${userMessage}\n\n`; diff --git a/packages/openai/src/convert-to-openai-chat-messages.test.ts b/packages/openai/src/convert-to-openai-chat-messages.test.ts index 856f3f8c3a92..811f6a5bbea6 100644 --- a/packages/openai/src/convert-to-openai-chat-messages.test.ts +++ b/packages/openai/src/convert-to-openai-chat-messages.test.ts @@ -54,9 +54,9 @@ describe('user messages', () => { content: [ { type: 'text', text: 'Hello' }, { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', mimeType: 'image/png', + data: Buffer.from([0, 1, 2, 3]).toString('base64'), }, ], }, @@ -84,9 +84,9 @@ describe('user messages', () => { role: 'user', content: [ { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', mimeType: 'image/png', + data: Buffer.from([0, 1, 2, 3]).toString('base64'), providerOptions: { openai: { imageDetail: 'low', @@ -122,14 +122,16 @@ describe('user messages', () => { { role: 'user', content: [ - { type: 'file', data: 'AAECAw==', mimeType: 'image/png' }, + { + type: 'file', + data: 'AAECAw==', + mimeType: 'application/something', + }, ], }, ], }), - ).toThrow( - "'File content part type image/png in user messages' functionality not supported.", - ); + ).toThrow('file part media type application/something'); }); it('should throw for URL data', () => { @@ -148,9 +150,7 @@ describe('user messages', () => { }, ], }), - ).toThrow( - "'File content parts with URL data' functionality not supported.", - ); + ).toThrow('audio file parts with URLs'); }); it('should add audio content for audio/wav file parts', () => { @@ -330,9 +330,7 @@ describe('user messages', () => { ], systemMessageMode: 'system', }); - }).toThrow( - "'File content part type text/plain in user messages' functionality not supported.", - ); + }).toThrow('file part media type text/plain'); }); it('should throw error for file URLs', async () => { @@ -352,9 +350,7 @@ describe('user messages', () => { ], systemMessageMode: 'system', }); - }).toThrow( - "'File content parts with URL data' functionality not supported.", - ); + }).toThrow('PDF file parts with URLs'); }); }); }); diff --git a/packages/openai/src/convert-to-openai-chat-messages.ts b/packages/openai/src/convert-to-openai-chat-messages.ts index 
1bd4c37fd5dc..3320d303a036 100644 --- a/packages/openai/src/convert-to-openai-chat-messages.ts +++ b/packages/openai/src/convert-to-openai-chat-messages.ts @@ -3,7 +3,6 @@ import { LanguageModelV2Prompt, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; -import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { OpenAIChatPrompt } from './openai-chat-prompt'; export function convertToOpenAIChatMessages({ @@ -63,58 +62,69 @@ export function convertToOpenAIChatMessages({ case 'text': { return { type: 'text', text: part.text }; } - case 'image': { - return { - type: 'image_url', - image_url: { - url: - part.image instanceof URL - ? part.image.toString() - : `data:${ - part.mimeType ?? 'image/jpeg' - };base64,${convertUint8ArrayToBase64(part.image)}`, - - // OpenAI specific extension: image detail - detail: part.providerOptions?.openai?.imageDetail, - }, - }; - } case 'file': { - if (part.data instanceof URL) { - throw new UnsupportedFunctionalityError({ - functionality: - "'File content parts with URL data' functionality not supported.", - }); - } + if (part.mimeType.startsWith('image/')) { + const mimeType = + part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; - switch (part.mimeType) { - case 'audio/wav': { - return { - type: 'input_audio', - input_audio: { data: part.data, format: 'wav' }, - }; - } - case 'audio/mp3': - case 'audio/mpeg': { - return { - type: 'input_audio', - input_audio: { data: part.data, format: 'mp3' }, - }; + return { + type: 'image_url', + image_url: { + url: + part.data instanceof URL + ? part.data.toString() + : `data:${mimeType};base64,${part.data}`, + + // OpenAI specific extension: image detail + detail: part.providerOptions?.openai?.imageDetail, + }, + }; + } else if (part.mimeType.startsWith('audio/')) { + if (part.data instanceof URL) { + throw new UnsupportedFunctionalityError({ + functionality: 'audio file parts with URLs', + }); } - case 'application/pdf': { - return { - type: 'file', - file: { - filename: part.filename ?? `part-${index}.pdf`, - file_data: `data:application/pdf;base64,${part.data}`, - }, - }; + + switch (part.mimeType) { + case 'audio/wav': { + return { + type: 'input_audio', + input_audio: { data: part.data, format: 'wav' }, + }; + } + case 'audio/mp3': + case 'audio/mpeg': { + return { + type: 'input_audio', + input_audio: { data: part.data, format: 'mp3' }, + }; + } + + default: { + throw new UnsupportedFunctionalityError({ + functionality: `audio content parts with media type ${part.mimeType}`, + }); + } } - default: { + } else if (part.mimeType === 'application/pdf') { + if (part.data instanceof URL) { throw new UnsupportedFunctionalityError({ - functionality: `File content part type ${part.mimeType} in user messages`, + functionality: 'PDF file parts with URLs', }); } + + return { + type: 'file', + file: { + filename: part.filename ?? 
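End to end, the audio branch of this converter is what a user-level call exercises. A usage sketch mirroring the documentation example elsewhere in this series (model name and file path are illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
import fs from 'node:fs';

// Usage sketch: the audio file part below is forwarded to OpenAI as
// `input_audio` content with format 'mp3'.
async function main() {
  const result = await generateText({
    model: openai('gpt-4o-audio-preview'),
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'What is the audio saying?' },
          {
            type: 'file',
            mimeType: 'audio/mpeg',
            data: fs.readFileSync('./data/galileo.mp3'),
          },
        ],
      },
    ],
  });

  console.log(result.text);
}

main().catch(console.error);
```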
`part-${index}.pdf`, + file_data: `data:application/pdf;base64,${part.data}`, + }, + }; + } else { + throw new UnsupportedFunctionalityError({ + functionality: `file part media type ${part.mimeType}`, + }); } } } diff --git a/packages/openai/src/convert-to-openai-completion-prompt.ts b/packages/openai/src/convert-to-openai-completion-prompt.ts index fc41184022ad..185db92e7a1f 100644 --- a/packages/openai/src/convert-to-openai-completion-prompt.ts +++ b/packages/openai/src/convert-to-openai-completion-prompt.ts @@ -54,13 +54,9 @@ export function convertToOpenAICompletionPrompt({ case 'text': { return part.text; } - case 'image': { - throw new UnsupportedFunctionalityError({ - functionality: 'images', - }); - } } }) + .filter(Boolean) .join(''); text += `${user}:\n${userMessage}\n\n`; diff --git a/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts b/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts index 30026149fd3e..fc47b68c34f2 100644 --- a/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts +++ b/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts @@ -57,8 +57,9 @@ describe('convertToOpenAIResponsesMessages', () => { content: [ { type: 'text', text: 'Hello' }, { - type: 'image', - image: new URL('https://example.com/image.jpg'), + type: 'file', + mimeType: 'image/*', + data: new URL('https://example.com/image.jpg'), }, ], }, @@ -87,9 +88,9 @@ describe('convertToOpenAIResponsesMessages', () => { role: 'user', content: [ { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', mimeType: 'image/png', + data: Buffer.from([0, 1, 2, 3]).toString('base64'), }, ], }, @@ -117,8 +118,9 @@ describe('convertToOpenAIResponsesMessages', () => { role: 'user', content: [ { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', + mimeType: 'image/*', + data: Buffer.from([0, 1, 2, 3]).toString('base64'), }, ], }, @@ -146,9 +148,9 @@ describe('convertToOpenAIResponsesMessages', () => { role: 'user', content: [ { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), + type: 'file', mimeType: 'image/png', + data: Buffer.from([0, 1, 2, 3]).toString('base64'), providerOptions: { openai: { imageDetail: 'low', @@ -261,7 +263,7 @@ describe('convertToOpenAIResponsesMessages', () => { ], systemMessageMode: 'system', }); - }).toThrow('Only PDF files are supported in user messages'); + }).toThrow('file part media type text/plain'); }); it('should throw error for file URLs', async () => { @@ -281,7 +283,7 @@ describe('convertToOpenAIResponsesMessages', () => { ], systemMessageMode: 'system', }); - }).toThrow('File URLs in user messages'); + }).toThrow('PDF file parts with URLs'); }); }); diff --git a/packages/openai/src/responses/convert-to-openai-responses-messages.ts b/packages/openai/src/responses/convert-to-openai-responses-messages.ts index f662cb6d96c6..33b87266dd2d 100644 --- a/packages/openai/src/responses/convert-to-openai-responses-messages.ts +++ b/packages/openai/src/responses/convert-to-openai-responses-messages.ts @@ -3,7 +3,6 @@ import { LanguageModelV2Prompt, UnsupportedFunctionalityError, } from '@ai-sdk/provider'; -import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { OpenAIResponsesPrompt } from './openai-responses-api-types'; export function convertToOpenAIResponsesMessages({ @@ -56,42 +55,38 @@ export function convertToOpenAIResponsesMessages({ case 'text': { return { type: 'input_text', text: part.text }; } - case 'image': { - return { - type: 
'input_image', - image_url: - part.image instanceof URL - ? part.image.toString() - : `data:${ - part.mimeType ?? 'image/jpeg' - };base64,${convertUint8ArrayToBase64(part.image)}`, - - // OpenAI specific extension: image detail - detail: part.providerOptions?.openai?.imageDetail, - }; - } case 'file': { - if (part.data instanceof URL) { - // The AI SDK automatically downloads files for user file parts with URLs - throw new UnsupportedFunctionalityError({ - functionality: 'File URLs in user messages', - }); - } + if (part.mimeType.startsWith('image/')) { + const mimeType = + part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; - switch (part.mimeType) { - case 'application/pdf': { - return { - type: 'input_file', - filename: part.filename ?? `part-${index}.pdf`, - file_data: `data:application/pdf;base64,${part.data}`, - }; - } - default: { + return { + type: 'input_image', + image_url: + part.data instanceof URL + ? part.data.toString() + : `data:${mimeType};base64,${part.data}`, + + // OpenAI specific extension: image detail + detail: part.providerOptions?.openai?.imageDetail, + }; + } else if (part.mimeType === 'application/pdf') { + if (part.data instanceof URL) { + // The AI SDK automatically downloads files for user file parts with URLs throw new UnsupportedFunctionalityError({ - functionality: - 'Only PDF files are supported in user messages', + functionality: 'PDF file parts with URLs', }); } + + return { + type: 'input_file', + filename: part.filename ?? `part-${index}.pdf`, + file_data: `data:application/pdf;base64,${part.data}`, + }; + } else { + throw new UnsupportedFunctionalityError({ + functionality: `file part media type ${part.mimeType}`, + }); } } } diff --git a/packages/perplexity/src/convert-to-perplexity-messages.test.ts b/packages/perplexity/src/convert-to-perplexity-messages.test.ts index cef836f255a3..d9f6d5a9670e 100644 --- a/packages/perplexity/src/convert-to-perplexity-messages.test.ts +++ b/packages/perplexity/src/convert-to-perplexity-messages.test.ts @@ -29,38 +29,6 @@ describe('convertToPerplexityMessages', () => { ]), ).toMatchSnapshot(); }); - - it('should throw an error for user messages with image parts', () => { - expect(() => { - convertToPerplexityMessages([ - { - role: 'user', - content: [ - { type: 'text', text: 'Hello ' }, - { - type: 'image', - image: new Uint8Array([0, 1, 2, 3]), - mimeType: 'image/png', - }, - ], - }, - ]); - }).toThrow(UnsupportedFunctionalityError); - }); - - it('should throw an error for user messages with file parts', () => { - expect(() => { - convertToPerplexityMessages([ - { - role: 'user', - content: [ - { type: 'text', text: 'Document: ' }, - { type: 'file', data: 'dummy-data', mimeType: 'text/plain' }, - ], - }, - ]); - }).toThrow(UnsupportedFunctionalityError); - }); }); describe('assistant messages', () => { @@ -74,24 +42,6 @@ describe('convertToPerplexityMessages', () => { ]), ).toMatchSnapshot(); }); - - it('should throw an error for assistant messages with tool-call parts', () => { - expect(() => { - convertToPerplexityMessages([ - { - role: 'assistant', - content: [ - { - type: 'tool-call', - args: { key: 'value' }, - toolCallId: 'call-1', - toolName: 'example-tool', - }, - ], - }, - ]); - }).toThrow(UnsupportedFunctionalityError); - }); }); describe('tool messages', () => { diff --git a/packages/perplexity/src/convert-to-perplexity-messages.ts b/packages/perplexity/src/convert-to-perplexity-messages.ts index 14b41ebd1c16..6d233a3269fe 100644 --- a/packages/perplexity/src/convert-to-perplexity-messages.ts +++ 
b/packages/perplexity/src/convert-to-perplexity-messages.ts @@ -21,36 +21,14 @@ export function convertToPerplexityMessages( messages.push({ role, content: content - .filter( - part => - part.type !== 'reasoning' && part.type !== 'redacted-reasoning', - ) .map(part => { switch (part.type) { case 'text': { return part.text; } - case 'image': { - throw new UnsupportedFunctionalityError({ - functionality: 'Image content parts in user messages', - }); - } - case 'file': { - throw new UnsupportedFunctionalityError({ - functionality: 'File content parts in user messages', - }); - } - case 'tool-call': { - throw new UnsupportedFunctionalityError({ - functionality: 'Tool calls in assistant messages', - }); - } - default: { - const _exhaustiveCheck: never = part; - throw new Error(`Unsupported part: ${_exhaustiveCheck}`); - } } }) + .filter(Boolean) .join(''), }); break; diff --git a/packages/provider/src/language-model/v2/language-model-v2-prompt.ts b/packages/provider/src/language-model/v2/language-model-v2-prompt.ts index f7beaa4c2133..c233c7c777db 100644 --- a/packages/provider/src/language-model/v2/language-model-v2-prompt.ts +++ b/packages/provider/src/language-model/v2/language-model-v2-prompt.ts @@ -22,11 +22,7 @@ export type LanguageModelV2Message = } | { role: 'user'; - content: Array< - | LanguageModelV2TextPart - | LanguageModelV2ImagePart - | LanguageModelV2FilePart - >; + content: Array; } | { role: 'assistant'; @@ -113,31 +109,6 @@ Redacted reasoning data. providerOptions?: LanguageModelV2ProviderOptions; } -/** -Image content part of a prompt. It contains an image. - */ -// TODO merge into file part in language model v2 -export interface LanguageModelV2ImagePart { - type: 'image'; - - /** -Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL. - */ - image: Uint8Array | URL; - - /** -Optional mime type of the image. - */ - mimeType?: string; - - /** - * Additional provider-specific options. They are passed through - * to the provider from the AI SDK and enable provider-specific - * functionality that can be fully encapsulated in the provider. - */ - providerOptions?: LanguageModelV2ProviderOptions; -} - /** File content part of a prompt. It contains a file. */ @@ -154,11 +125,16 @@ File data as base64 encoded string or as a URL. */ // Note: base64-encoded strings are used to prevent // unnecessary conversions from string to buffer to string + // TODO support Uint8Array | string | URL data: string | URL; /** Mime type of the file. + +Can support wildcards, e.g. `image/*` (in which case the provider needs to take appropriate action). */ + // TODO rename to mediaType or contentType + // https://www.iana.org/assignments/media-types/media-types.xhtml mimeType: string; /** diff --git a/packages/provider/src/language-model/v2/language-model-v2.ts b/packages/provider/src/language-model/v2/language-model-v2.ts index 82f960971605..894108d80319 100644 --- a/packages/provider/src/language-model/v2/language-model-v2.ts +++ b/packages/provider/src/language-model/v2/language-model-v2.ts @@ -64,7 +64,7 @@ use further optimizations if this flag is set to `true`. Defaults to `false`. */ - // TODO v2: rename to supportsGrammarGuidedGeneration? + // TODO v2: rename to supportsGrammarGuidedGeneration? supports output schemas? 
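Concretely, after the image part removal every binary attachment in a `LanguageModelV2Prompt` is a `file` part with base64-string or URL data. A sketch with illustrative values:

```ts
import type { LanguageModelV2Prompt } from '@ai-sdk/provider';

// Sketch of a v2 prompt after the image-part removal.
const prompt: LanguageModelV2Prompt = [
  {
    role: 'user',
    content: [
      { type: 'text', text: 'Compare the image with the document.' },
      {
        type: 'file',
        data: new URL('https://example.com/photo.jpg'),
        mimeType: 'image/*', // wildcard: provider picks a concrete subtype
      },
      {
        type: 'file',
        data: 'JVBERi0xLjcK', // base64-encoded PDF bytes (example value)
        mimeType: 'application/pdf',
      },
    ],
  },
];
```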
readonly supportsStructuredOutputs?: boolean; /** From 0a87932a297ea225034a489664b0d53ef0c52333 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Tue, 8 Apr 2025 12:44:14 +0200 Subject: [PATCH 0048/1307] fix (provider/openai): increase transcription model resilience (#5600) --- .changeset/nasty-spiders-sparkle.md | 7 +++ .changeset/tender-buses-glow.md | 5 ++ .../ai-core/src/transcribe/openai-string.ts | 4 +- examples/ai-core/src/transcribe/openai-url.ts | 3 +- examples/ai-core/src/transcribe/openai.ts | 2 +- .../ai/core/transcribe/transcribe.test.ts | 2 +- packages/ai/core/transcribe/transcribe.ts | 16 ++--- .../src/openai-transcription-model.test.ts | 59 ++++++++++++++++-- .../openai/src/openai-transcription-model.ts | 61 +++++++++---------- .../v1/transcription-model-v1-call-options.ts | 6 +- 10 files changed, 112 insertions(+), 53 deletions(-) create mode 100644 .changeset/nasty-spiders-sparkle.md create mode 100644 .changeset/tender-buses-glow.md diff --git a/.changeset/nasty-spiders-sparkle.md b/.changeset/nasty-spiders-sparkle.md new file mode 100644 index 000000000000..7245e14bdfbf --- /dev/null +++ b/.changeset/nasty-spiders-sparkle.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/provider': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +core (ai): change transcription model mimeType to mediaType diff --git a/.changeset/tender-buses-glow.md b/.changeset/tender-buses-glow.md new file mode 100644 index 000000000000..adea7e261eac --- /dev/null +++ b/.changeset/tender-buses-glow.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +fix (provider/openai): increase transcription model resilience diff --git a/examples/ai-core/src/transcribe/openai-string.ts b/examples/ai-core/src/transcribe/openai-string.ts index 5376c79e799b..5c1f1ea2b72d 100644 --- a/examples/ai-core/src/transcribe/openai-string.ts +++ b/examples/ai-core/src/transcribe/openai-string.ts @@ -6,9 +6,7 @@ import { readFile } from 'fs/promises'; async function main() { const result = await transcribe({ model: openai.transcription('whisper-1'), - audio: Buffer.from( - await readFile('examples/ai-core/data/galileo.mp3'), - ).toString('base64'), + audio: Buffer.from(await readFile('./data/galileo.mp3')).toString('base64'), }); console.log('Text:', result.text); diff --git a/examples/ai-core/src/transcribe/openai-url.ts b/examples/ai-core/src/transcribe/openai-url.ts index 7c64932cd102..d8d7a5830d85 100644 --- a/examples/ai-core/src/transcribe/openai-url.ts +++ b/examples/ai-core/src/transcribe/openai-url.ts @@ -6,8 +6,7 @@ async function main() { const result = await transcribe({ model: openai.transcription('whisper-1'), audio: new URL( - '/vercel/ai/raw/refs/heads/main/examples/ai-core/data/galileo.mp3', - 'https://github.com', + 'https://github.com/vercel/ai/raw/refs/heads/main/examples/ai-core/data/galileo.mp3', ), }); diff --git a/examples/ai-core/src/transcribe/openai.ts b/examples/ai-core/src/transcribe/openai.ts index fafa586c6498..56ccca73330f 100644 --- a/examples/ai-core/src/transcribe/openai.ts +++ b/examples/ai-core/src/transcribe/openai.ts @@ -6,7 +6,7 @@ import { readFile } from 'fs/promises'; async function main() { const result = await transcribe({ model: openai.transcription('whisper-1'), - audio: await readFile('examples/ai-core/data/galileo.mp3'), + audio: await readFile('data/galileo.mp3'), }); console.log('Text:', result.text); diff --git a/packages/ai/core/transcribe/transcribe.test.ts b/packages/ai/core/transcribe/transcribe.test.ts index 72a555783a78..91d843178031 100644 --- 
a/packages/ai/core/transcribe/transcribe.test.ts +++ b/packages/ai/core/transcribe/transcribe.test.ts @@ -78,7 +78,7 @@ describe('transcribe', () => { expect(capturedArgs).toStrictEqual({ audio: audioData, - mimeType: 'audio/wav', + mediaType: 'audio/wav', headers: { 'custom-request-header': 'request-header-value' }, abortSignal, providerOptions: {}, diff --git a/packages/ai/core/transcribe/transcribe.ts b/packages/ai/core/transcribe/transcribe.ts index 2114522845bc..1c7029102fc2 100644 --- a/packages/ai/core/transcribe/transcribe.ts +++ b/packages/ai/core/transcribe/transcribe.ts @@ -1,17 +1,17 @@ -import { TranscriptionModelV1, JSONValue } from '@ai-sdk/provider'; +import { JSONValue, TranscriptionModelV1 } from '@ai-sdk/provider'; import { NoTranscriptGeneratedError } from '../../errors/no-transcript-generated-error'; +import { download } from '../../util/download'; +import { DataContent } from '../prompt'; +import { convertDataContentToUint8Array } from '../prompt/data-content'; import { prepareRetries } from '../prompt/prepare-retries'; +import { ProviderOptions } from '../types/provider-metadata'; import { TranscriptionWarning } from '../types/transcription-model'; import { TranscriptionModelResponseMetadata } from '../types/transcription-model-response-metadata'; -import { TranscriptionResult } from './transcribe-result'; -import { DataContent } from '../prompt'; -import { convertDataContentToUint8Array } from '../prompt/data-content'; -import { download } from '../../util/download'; import { audioMimeTypeSignatures, detectMimeType, } from '../util/detect-mimetype'; -import { ProviderOptions } from '../types/provider-metadata'; +import { TranscriptionResult } from './transcribe-result'; /** Generates transcripts using a transcription model. @@ -81,7 +81,7 @@ Only applicable for HTTP-based providers. const { retry } = prepareRetries({ maxRetries: maxRetriesArg }); const audioData = audio instanceof URL - ? new Uint8Array((await download({ url: audio })).data) + ? (await download({ url: audio })).data : convertDataContentToUint8Array(audio); const result = await retry(() => @@ -90,7 +90,7 @@ Only applicable for HTTP-based providers. 
abortSignal, headers, providerOptions, - mimeType: + mediaType: detectMimeType({ data: audioData, signatures: audioMimeTypeSignatures, diff --git a/packages/openai/src/openai-transcription-model.test.ts b/packages/openai/src/openai-transcription-model.test.ts index cee508de293a..61c2be77adb0 100644 --- a/packages/openai/src/openai-transcription-model.test.ts +++ b/packages/openai/src/openai-transcription-model.test.ts @@ -73,7 +73,7 @@ describe('doGenerate', () => { await model.doGenerate({ audio: audioData, - mimeType: 'audio/wav', + mediaType: 'audio/wav', }); expect(await server.calls[0].requestBodyMultipart).toMatchObject({ @@ -95,7 +95,7 @@ describe('doGenerate', () => { await provider.transcription('whisper-1').doGenerate({ audio: audioData, - mimeType: 'audio/wav', + mediaType: 'audio/wav', headers: { 'Custom-Request-Header': 'request-header-value', }, @@ -118,7 +118,7 @@ describe('doGenerate', () => { const result = await model.doGenerate({ audio: audioData, - mimeType: 'audio/wav', + mediaType: 'audio/wav', }); expect(result.text).toBe('Hello from the Vercel AI SDK!'); @@ -144,7 +144,7 @@ describe('doGenerate', () => { const result = await customModel.doGenerate({ audio: audioData, - mimeType: 'audio/wav', + mediaType: 'audio/wav', }); expect(result.response).toMatchObject({ @@ -173,10 +173,59 @@ describe('doGenerate', () => { const result = await customModel.doGenerate({ audio: audioData, - mimeType: 'audio/wav', + mediaType: 'audio/wav', }); expect(result.response.timestamp.getTime()).toEqual(testDate.getTime()); expect(result.response.modelId).toBe('whisper-1'); }); + + it('should work when no words, language, or duration are returned', async () => { + server.urls['https://api.openai.com/v1/audio/transcriptions'].response = { + type: 'json-value', + body: { + task: 'transcribe', + text: 'Hello from the Vercel AI SDK!', + _request_id: 'req_1234', + }, + }; + + const testDate = new Date(0); + const customModel = new OpenAITranscriptionModel('whisper-1', { + provider: 'test-provider', + url: () => 'https://api.openai.com/v1/audio/transcriptions', + headers: () => ({}), + _internal: { + currentDate: () => testDate, + }, + }); + + const result = await customModel.doGenerate({ + audio: audioData, + mediaType: 'audio/wav', + }); + + expect(result).toMatchInlineSnapshot(` + { + "durationInSeconds": undefined, + "language": undefined, + "response": { + "body": { + "_request_id": "req_1234", + "task": "transcribe", + "text": "Hello from the Vercel AI SDK!", + }, + "headers": { + "content-length": "85", + "content-type": "application/json", + }, + "modelId": "whisper-1", + "timestamp": 1970-01-01T00:00:00.000Z, + }, + "segments": [], + "text": "Hello from the Vercel AI SDK!", + "warnings": [], + } + `); + }); }); diff --git a/packages/openai/src/openai-transcription-model.ts b/packages/openai/src/openai-transcription-model.ts index 5720b4932081..8a1a91caf77e 100644 --- a/packages/openai/src/openai-transcription-model.ts +++ b/packages/openai/src/openai-transcription-model.ts @@ -142,7 +142,7 @@ export class OpenAITranscriptionModel implements TranscriptionModelV1 { private getArgs({ audio, - mimeType, + mediaType, providerOptions, }: OpenAITranscriptionCallOptions) { const warnings: TranscriptionModelV1CallWarning[] = []; @@ -162,7 +162,7 @@ export class OpenAITranscriptionModel implements TranscriptionModelV1 { : new Blob([convertBase64ToUint8Array(audio)]); formData.append('model', this.modelId); - formData.append('file', new File([blob], 'audio', { type: mimeType })); + 
formData.append('file', new File([blob], 'audio', { type: mediaType })); // Add provider-specific options if (openAIOptions) { @@ -197,7 +197,11 @@ export class OpenAITranscriptionModel implements TranscriptionModelV1 { const currentDate = this.config._internal?.currentDate?.() ?? new Date(); const { formData, warnings } = this.getArgs(options); - const { value: response, responseHeaders } = await postFormDataToApi({ + const { + value: response, + responseHeaders, + rawValue: rawResponse, + } = await postFormDataToApi({ url: this.config.url({ path: '/audio/transcriptions', modelId: this.modelId, @@ -212,34 +216,27 @@ export class OpenAITranscriptionModel implements TranscriptionModelV1 { fetch: this.config.fetch, }); - let language: string | undefined; - - if (response.language && response.language in languageMap) { - language = languageMap[response.language as keyof typeof languageMap]; - } + const language = + response.language != null && response.language in languageMap + ? languageMap[response.language as keyof typeof languageMap] + : undefined; return { text: response.text, - segments: response.words.map(word => ({ - text: word.word, - startSecond: word.start, - endSecond: word.end, - })), + segments: + response.words?.map(word => ({ + text: word.word, + startSecond: word.start, + endSecond: word.end, + })) ?? [], language, - durationInSeconds: response.duration, + durationInSeconds: response.duration ?? undefined, warnings, response: { timestamp: currentDate, modelId: this.modelId, headers: responseHeaders, - body: response, - }, - - // When using format `verbose_json` on `whisper-1`, OpenAI includes the things like `task` and enhanced `segments` information. - providerMetadata: { - openai: { - transcript: response, - }, + body: rawResponse, }, }; } @@ -247,13 +244,15 @@ export class OpenAITranscriptionModel implements TranscriptionModelV1 { const openaiTranscriptionResponseSchema = z.object({ text: z.string(), - language: z.string().optional(), - duration: z.number().optional(), - words: z.array( - z.object({ - word: z.string(), - start: z.number(), - end: z.number(), - }), - ), + language: z.string().nullish(), + duration: z.number().nullish(), + words: z + .array( + z.object({ + word: z.string(), + start: z.number(), + end: z.number(), + }), + ) + .nullish(), }); diff --git a/packages/provider/src/transcription-model/v1/transcription-model-v1-call-options.ts b/packages/provider/src/transcription-model/v1/transcription-model-v1-call-options.ts index a2164a14bba6..cd0e94b668c1 100644 --- a/packages/provider/src/transcription-model/v1/transcription-model-v1-call-options.ts +++ b/packages/provider/src/transcription-model/v1/transcription-model-v1-call-options.ts @@ -13,9 +13,11 @@ Accepts a `Uint8Array` or `string`, where `string` is a base64 encoded audio fil audio: Uint8Array | string; /** - The MIME type of the audio data. +The IANA media type of the audio data. 
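The schema change is the core of the resilience fix: `verbose_json` fields that OpenAI may omit are now `nullish`, and the result mapping normalizes them instead of crashing. A self-contained sketch of the same pattern:

```ts
import { z } from 'zod';

// Optional response fields accept null/undefined, so a minimal
// `{ task, text }` body still parses.
const transcriptionResponse = z.object({
  text: z.string(),
  language: z.string().nullish(),
  duration: z.number().nullish(),
  words: z
    .array(z.object({ word: z.string(), start: z.number(), end: z.number() }))
    .nullish(),
});

const parsed = transcriptionResponse.parse({
  task: 'transcribe',
  text: 'Hello from the Vercel AI SDK!',
});

// Downstream code then normalizes missing fields:
const segments =
  parsed.words?.map(w => ({
    text: w.word,
    startSecond: w.start,
    endSecond: w.end,
  })) ?? [];
const durationInSeconds = parsed.duration ?? undefined;

console.log(segments, durationInSeconds); // -> [] undefined
```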
+ +@see https://www.iana.org/assignments/media-types/media-types.xhtml */ - mimeType: string; + mediaType: string; /** Additional provider-specific options that are passed through to the provider From abf9a792c25f408bafc7e0c277ceafda5ac69b35 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Tue, 8 Apr 2025 15:54:02 +0200 Subject: [PATCH 0049/1307] chore: rename mimeType to mediaType (#5602) --- .changeset/hungry-pets-hear.md | 5 + .../31-generate-object-with-file-prompt.mdx | 2 +- .../23-stream-text-with-file-prompt.mdx | 2 +- content/docs/02-foundations/03-prompts.mdx | 8 +- content/docs/02-guides/05-computer-use.mdx | 2 +- .../15-tools-and-tool-calling.mdx | 2 +- .../03-ai-sdk-core/35-image-generation.mdx | 4 +- content/docs/04-ai-sdk-ui/02-chatbot.mdx | 4 +- .../01-ai-sdk-core/01-generate-text.mdx | 17 +- .../01-ai-sdk-core/02-stream-text.mdx | 28 +-- .../01-ai-sdk-core/03-generate-object.mdx | 9 +- .../01-ai-sdk-core/04-stream-object.mdx | 9 +- .../01-ai-sdk-core/10-generate-image.mdx | 8 +- .../07-reference/01-ai-sdk-core/20-tool.mdx | 4 +- .../01-ai-sdk-core/30-core-message.mdx | 10 +- .../03-ai-sdk-rsc/01-stream-ui.mdx | 9 +- .../27-migration-guide-4-2.mdx | 2 +- .../01-ai-sdk-providers/02-openai.mdx | 10 +- .../01-ai-sdk-providers/03-azure.mdx | 4 +- .../01-ai-sdk-providers/05-anthropic.mdx | 6 +- .../01-ai-sdk-providers/08-amazon-bedrock.mdx | 2 +- .../15-google-generative-ai.mdx | 4 +- .../01-ai-sdk-providers/16-google-vertex.mdx | 4 +- .../01-ai-sdk-providers/20-mistral.mdx | 2 +- .../ai-core/src/e2e/feature-test-suite.ts | 4 +- .../src/e2e/google-vertex-anthropic.test.ts | 2 +- .../ai-core/src/e2e/google-vertex.test.ts | 20 +- .../generate-object/google-gemini-files.ts | 2 +- .../src/generate-object/google-pdf-url.ts | 2 +- .../amazon-bedrock-tool-call-image-result.ts | 2 +- .../anthropic-computer-use-computer.ts | 2 +- .../src/generate-text/anthropic-pdf.ts | 2 +- .../ai-core/src/generate-text/google-audio.ts | 2 +- .../google-chatbot-image-output.ts | 2 +- .../src/generate-text/google-image-output.ts | 2 +- .../ai-core/src/generate-text/google-pdf.ts | 2 +- ...-vertex-anthropic-computer-use-computer.ts | 2 +- .../google-vertex-anthropic-pdf.ts | 2 +- .../src/generate-text/google-vertex-audio.ts | 2 +- .../generate-text/google-vertex-pdf-url.ts | 2 +- .../src/generate-text/google-vertex-pdf.ts | 2 +- .../src/generate-text/mistral-pdf-url.ts | 2 +- .../ai-core/src/generate-text/openai-audio.ts | 2 +- .../src/generate-text/openai-pdf-url.ts | 2 +- .../ai-core/src/generate-text/openai-pdf.ts | 2 +- .../generate-text/openai-responses-pdf-url.ts | 2 +- .../src/generate-text/openai-responses-pdf.ts | 2 +- .../src/stream-text/amazon-bedrock-pdf.ts | 2 +- .../ai-core/src/stream-text/anthropic-pdf.ts | 2 +- .../google-chatbot-image-output.ts | 2 +- .../src/stream-text/google-image-output.ts | 2 +- .../google-vertex-anthropic-pdf.ts | 2 +- .../src/stream-text/google-vertex-pdf-url.ts | 2 +- .../ai-core/src/stream-text/openai-audio.ts | 2 +- .../app/use-chat-image-output/page.tsx | 4 +- .../[id]/chat.tsx | 4 +- .../generate-image/generate-image.test.ts | 10 +- .../ai/core/generate-image/generate-image.ts | 12 +- .../__snapshots__/generate-text.test.ts.snap | 22 +-- .../__snapshots__/stream-text.test.ts.snap | 32 ++-- .../core/generate-text/generate-text.test.ts | 8 +- .../ai/core/generate-text/generate-text.ts | 2 +- .../ai/core/generate-text/generated-file.ts | 16 +- .../generate-text/run-tools-transformation.ts | 2 +- .../ai/core/generate-text/stream-text.test.ts | 4 +- 
packages/ai/core/generate-text/stream-text.ts | 2 +- .../to-response-messages.test.ts | 30 +-- .../generate-text/to-response-messages.ts | 2 +- .../append-response-messages.test.ts.snap | 2 +- .../convert-to-core-messages.test.ts.snap | 2 +- .../prompt/append-response-messages.test.ts | 2 +- .../core/prompt/append-response-messages.ts | 2 +- .../ai/core/prompt/attachments-to-parts.ts | 10 +- packages/ai/core/prompt/content-part.ts | 24 ++- .../prompt/convert-to-core-messages.test.ts | 6 +- .../core/prompt/convert-to-core-messages.ts | 9 +- .../convert-to-language-model-prompt.test.ts | 98 +++++----- .../convert-to-language-model-prompt.ts | 50 ++--- packages/ai/core/prompt/split-data-url.ts | 6 +- .../ai/core/prompt/tool-result-content.ts | 7 +- packages/ai/core/transcribe/transcribe.ts | 10 +- ...type.test.ts => detect-media-type.test.ts} | 172 ++++++++++-------- ...etect-mimetype.ts => detect-media-type.ts} | 49 ++--- packages/ai/util/download.test.ts | 2 +- packages/ai/util/download.ts | 4 +- .../src/bedrock-prepare-tools.ts | 2 +- .../convert-to-bedrock-chat-messages.test.ts | 10 +- .../src/convert-to-bedrock-chat-messages.ts | 12 +- .../anthropic/src/anthropic-prepare-tools.ts | 2 +- packages/anthropic/src/anthropic-tools.ts | 2 +- ...nvert-to-anthropic-messages-prompt.test.ts | 14 +- .../convert-to-anthropic-messages-prompt.ts | 14 +- packages/cohere/src/cohere-prepare-tools.ts | 2 +- ...t-to-google-generative-ai-messages.test.ts | 12 +- ...onvert-to-google-generative-ai-messages.ts | 17 +- ...oogle-generative-ai-language-model.test.ts | 14 +- .../google-generative-ai-language-model.ts | 4 +- packages/google/src/google-prepare-tools.ts | 2 +- .../src/convert-to-groq-chat-messages.test.ts | 2 +- .../groq/src/convert-to-groq-chat-messages.ts | 8 +- packages/groq/src/groq-prepare-tools.ts | 2 +- .../convert-to-mistral-chat-messages.test.ts | 4 +- .../src/convert-to-mistral-chat-messages.ts | 12 +- packages/mistral/src/mistral-prepare-tools.ts | 2 +- ...to-openai-compatible-chat-messages.test.ts | 10 +- ...vert-to-openai-compatible-chat-messages.ts | 12 +- .../src/openai-compatible-prepare-tools.ts | 2 +- .../convert-to-openai-chat-messages.test.ts | 22 +-- .../src/convert-to-openai-chat-messages.ts | 20 +- packages/openai/src/openai-prepare-tools.ts | 2 +- ...nvert-to-openai-responses-messages.test.ts | 16 +- .../convert-to-openai-responses-messages.ts | 14 +- .../openai-responses-prepare-tools.ts | 2 +- .../v2/language-model-v2-prompt.ts | 14 +- .../language-model/v2/language-model-v2.ts | 26 ++- .../process-chat-response.test.ts.snap | 12 +- packages/ui-utils/src/data-url.ts | 4 +- .../ui-utils/src/process-chat-response.ts | 2 +- packages/ui-utils/src/types.ts | 14 +- 119 files changed, 634 insertions(+), 517 deletions(-) create mode 100644 .changeset/hungry-pets-hear.md rename packages/ai/core/util/{detect-mimetype.test.ts => detect-media-type.test.ts} (72%) rename packages/ai/core/util/{detect-mimetype.ts => detect-media-type.ts} (60%) diff --git a/.changeset/hungry-pets-hear.md b/.changeset/hungry-pets-hear.md new file mode 100644 index 000000000000..a971cdca9127 --- /dev/null +++ b/.changeset/hungry-pets-hear.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: rename mimeType to mediaType diff --git a/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx b/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx index 34c3eba7a223..737bfb1f6013 100644 --- a/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx +++ 
b/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx @@ -88,7 +88,7 @@ export async function POST(request: Request) { { type: 'file', data: await file.arrayBuffer(), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/content/cookbook/05-node/23-stream-text-with-file-prompt.mdx b/content/cookbook/05-node/23-stream-text-with-file-prompt.mdx index 5d735b004b23..f540e5749c34 100644 --- a/content/cookbook/05-node/23-stream-text-with-file-prompt.mdx +++ b/content/cookbook/05-node/23-stream-text-with-file-prompt.mdx @@ -28,7 +28,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/content/docs/02-foundations/03-prompts.mdx b/content/docs/02-foundations/03-prompts.mdx index a7ad75f94d8a..594fc6494b36 100644 --- a/content/docs/02-foundations/03-prompts.mdx +++ b/content/docs/02-foundations/03-prompts.mdx @@ -235,7 +235,7 @@ const result = await generateText({ { type: 'text', text: 'What is the file about?' }, { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: fs.readFileSync('./data/example.pdf'), filename: 'example.pdf', // optional, not used by all providers }, @@ -260,7 +260,7 @@ const result = await generateText({ { type: 'text', text: 'What is the audio saying?' }, { type: 'file', - mimeType: 'audio/mpeg', + mediaType: 'audio/mpeg', data: fs.readFileSync('./data/galileo.mp3'), }, ], @@ -341,7 +341,7 @@ const result = await generateText({ content: [ { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: fs.readFileSync('./data/roquefort.jpg'), }, ], @@ -448,7 +448,7 @@ const result = await generateText({ { type: 'image', data: fs.readFileSync('./data/roquefort-nutrition-data.png'), - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, diff --git a/content/docs/02-guides/05-computer-use.mdx b/content/docs/02-guides/05-computer-use.mdx index a3304b60fdb8..94251247c004 100644 --- a/content/docs/02-guides/05-computer-use.mdx +++ b/content/docs/02-guides/05-computer-use.mdx @@ -110,7 +110,7 @@ const computerTool = anthropic.tools.computer_20241022({ experimental_toToolResultContent(result) { return typeof result === 'string' ? [{ type: 'text', text: result }] - : [{ type: 'image', data: result.data, mimeType: 'image/png' }]; + : [{ type: 'image', data: result.data, mediaType: 'image/png' }]; }, }); ``` diff --git a/content/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx b/content/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx index 7b00f125f0b3..cf29f81ecd50 100644 --- a/content/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +++ b/content/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx @@ -592,7 +592,7 @@ const result = await generateText({ experimental_toToolResultContent(result) { return typeof result === 'string' ? 
[{ type: 'text', text: result }] - : [{ type: 'image', data: result.data, mimeType: 'image/png' }]; + : [{ type: 'image', data: result.data, mediaType: 'image/png' }]; }, }), }, diff --git a/content/docs/03-ai-sdk-core/35-image-generation.mdx b/content/docs/03-ai-sdk-core/35-image-generation.mdx index d85f138e2523..d0a3dcc68ac4 100644 --- a/content/docs/03-ai-sdk-core/35-image-generation.mdx +++ b/content/docs/03-ai-sdk-core/35-image-generation.mdx @@ -228,12 +228,12 @@ const result = await generateText({ }); for (const file of result.files) { - if (file.mimeType.startsWith('image/')) { + if (file.mediaType.startsWith('image/')) { // The file object provides multiple data formats: // Access images as base64 string, Uint8Array binary data, or check type // - file.base64: string (data URL format) // - file.uint8Array: Uint8Array (binary data) - // - file.mimeType: string (e.g. "image/png") + // - file.mediaType: string (e.g. "image/png") } } ``` diff --git a/content/docs/04-ai-sdk-ui/02-chatbot.mdx b/content/docs/04-ai-sdk-ui/02-chatbot.mdx index 8611213b06ba..222e6495b6a3 100644 --- a/content/docs/04-ai-sdk-ui/02-chatbot.mdx +++ b/content/docs/04-ai-sdk-ui/02-chatbot.mdx @@ -624,9 +624,9 @@ messages.map(message => ( {message.parts.map((part, index) => { if (part.type === 'text') { return
<div key={index}>{part.text}</div>
; - } else if (part.type === 'file' && part.mimeType.startsWith('image/')) { + } else if (part.type === 'file' && part.mediaType.startsWith('image/')) { return ( - + ); } })} diff --git a/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx b/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx index ebecf44467b9..1f7f2c66c896 100644 --- a/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx @@ -113,10 +113,11 @@ To see `generateText` in action, check out [these examples](#examples). 'The image content of the message part. String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', + description: + 'The IANA media type of the image. Optional.', isOptional: true, - description: 'The mime type of the image. Optional.', }, ], }, @@ -135,9 +136,9 @@ To see `generateText` in action, check out [these examples](#examples). 'The file content of the message part. String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'The mime type of the file.', + description: 'The IANA media type of the file.', }, ], }, @@ -849,9 +850,9 @@ To see `generateText` in action, check out [these examples](#examples). description: 'File as a Uint8Array.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'MIME type of the file.', + description: 'The IANA media type of the file.', }, ], }, @@ -1118,9 +1119,9 @@ To see `generateText` in action, check out [these examples](#examples). description: 'File as a Uint8Array.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'MIME type of the file.', + description: 'The IANA media type of the file.', }, ], }, diff --git a/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx b/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx index 8b1215e5aba1..d0966bf3fc30 100644 --- a/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx @@ -115,10 +115,10 @@ To see `streamText` in action, check out [these examples](#examples). 'The image content of the message part. String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', isOptional: true, - description: 'The mime type of the image. Optional.', + description: 'The IANA media type of the image.', }, ], }, @@ -137,9 +137,9 @@ To see `streamText` in action, check out [these examples](#examples). 'The file content of the message part. String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'The mime type of the file.', + description: 'The IANA media type of the file.', }, ], }, @@ -961,9 +961,9 @@ To see `streamText` in action, check out [these examples](#examples). description: 'File as a Uint8Array.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'MIME type of the file.', + description: 'The IANA media type of the file.', }, ], }, @@ -1205,9 +1205,9 @@ To see `streamText` in action, check out [these examples](#examples). 
description: 'File as a Uint8Array.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'MIME type of the file.', + description: 'The IANA media type of the file.', }, ], }, @@ -1447,9 +1447,9 @@ To see `streamText` in action, check out [these examples](#examples). description: 'File as a Uint8Array.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'MIME type of the file.', + description: 'The IANA media type of the file.', }, ], }, @@ -1621,9 +1621,9 @@ To see `streamText` in action, check out [these examples](#examples). description: 'File as a Uint8Array.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'MIME type of the file.', + description: 'The IANA media type of the file.', }, ], }, @@ -1866,9 +1866,9 @@ To see `streamText` in action, check out [these examples](#examples). description: 'File as a Uint8Array.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'MIME type of the file.', + description: 'The IANA media type of the file.', }, ], }, diff --git a/content/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx b/content/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx index 3d86128832ac..d2009b943db7 100644 --- a/content/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx @@ -224,10 +224,11 @@ To see `generateObject` in action, check out the [additional examples](#more-exa 'The image content of the message part. String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', + description: + 'The IANA media type of the image. Optional.', isOptional: true, - description: 'The mime type of the image. Optional.', }, ], }, @@ -246,9 +247,9 @@ To see `generateObject` in action, check out the [additional examples](#more-exa 'The file content of the message part. String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'The mime type of the file.', + description: 'The IANA media type of the file.', }, ], }, diff --git a/content/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx b/content/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx index 4f52c8ce2aad..f70d42a33c30 100644 --- a/content/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx @@ -211,10 +211,11 @@ To see `streamObject` in action, check out the [additional examples](#more-examp 'The image content of the message part. String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', isOptional: true, - description: 'The mime type of the image. Optional.', + description: + 'The IANA media type of the image. Optional.', }, ], }, @@ -233,9 +234,9 @@ To see `streamObject` in action, check out the [additional examples](#more-examp 'The file content of the message part. 
String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'The mime type of the file.', + description: 'The IANA media type of the file.', }, ], }, diff --git a/content/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx b/content/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx index a68a7e48fd3a..dcb0e91976ca 100644 --- a/content/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx @@ -124,9 +124,9 @@ console.log(images); description: 'Image as a Uint8Array.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'MIME type of the image.', + description: 'The IANA media type of the image.', }, ], }, @@ -151,9 +151,9 @@ console.log(images); description: 'Image as a Uint8Array.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'MIME type of the image.', + description: 'The IANA media type of the image.', }, ], }, diff --git a/content/docs/07-reference/01-ai-sdk-core/20-tool.mdx b/content/docs/07-reference/01-ai-sdk-core/20-tool.mdx index 6cad42435bb5..6117f26ec1be 100644 --- a/content/docs/07-reference/01-ai-sdk-core/20-tool.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/20-tool.mdx @@ -124,10 +124,10 @@ export const weatherTool = tool({ description: 'The base64 encoded png image.' }, { - name: 'mimeType', + name: 'mediaType', isOptional: true, type: 'string', - description: 'The mime type of the image.' + description: 'The IANA media type of the image.' } ] } diff --git a/content/docs/07-reference/01-ai-sdk-core/30-core-message.mdx b/content/docs/07-reference/01-ai-sdk-core/30-core-message.mdx index 51df4d27c62d..43fa20a218e6 100644 --- a/content/docs/07-reference/01-ai-sdk-core/30-core-message.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/30-core-message.mdx @@ -107,10 +107,10 @@ export interface ImagePart { image: DataContent | URL; /** - * Optional mime type of the image. + * Optional IANA media type of the image. * We recommend leaving this out as it will be detected automatically. */ - mimeType?: string; + mediaType?: string; } ``` @@ -130,9 +130,9 @@ export interface FilePart { data: DataContent | URL; /** - * Mime type of the file. + * IANA media type of the file. */ - mimeType: string; + mediaType: string; } ``` @@ -207,7 +207,7 @@ export type ToolResultContent = Array< | { type: 'image'; data: string; // base64 encoded png image, e.g. screenshot - mimeType?: string; // e.g. 'image/png'; + mediaType?: string; // e.g. 'image/png'; } >; ``` diff --git a/content/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx b/content/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx index 3f117be99e80..46b33733f4cf 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx @@ -109,10 +109,11 @@ To see `streamUI` in action, check out [these examples](#examples). 'The image content of the message part. String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', isOptional: true, - description: 'The mime type of the image. Optional.', + description: + 'The IANA media type of the image. Optional.', }, ], }, @@ -131,9 +132,9 @@ To see `streamUI` in action, check out [these examples](#examples). 'The file content of the message part. 
String are either base64 encoded content, base64 data URLs, or http(s) URLs.', }, { - name: 'mimeType', + name: 'mediaType', type: 'string', - description: 'The mime type of the file.', + description: 'The IANA media type of the file.', }, ], }, diff --git a/content/docs/08-migration-guides/27-migration-guide-4-2.mdx b/content/docs/08-migration-guides/27-migration-guide-4-2.mdx index 8ff88c5ce480..39e4f3b8b302 100644 --- a/content/docs/08-migration-guides/27-migration-guide-4-2.mdx +++ b/content/docs/08-migration-guides/27-migration-guide-4-2.mdx @@ -87,7 +87,7 @@ function Chat() { return ( ); } diff --git a/content/providers/01-ai-sdk-providers/02-openai.mdx b/content/providers/01-ai-sdk-providers/02-openai.mdx index c86f18f0fc4c..fec02f83dab9 100644 --- a/content/providers/01-ai-sdk-providers/02-openai.mdx +++ b/content/providers/01-ai-sdk-providers/02-openai.mdx @@ -336,7 +336,7 @@ const result = await generateText({ { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', filename: 'ai.pdf', // optional }, ], @@ -348,7 +348,7 @@ const result = await generateText({ The model will have access to the contents of the PDF file and respond to questions about it. The PDF file should be passed using the `data` field, -and the `mimeType` should be set to `'application/pdf'`. +and the `mediaType` should be set to `'application/pdf'`. #### Predicted Outputs @@ -506,7 +506,7 @@ const result = await generateText({ { type: 'text', text: 'What is the audio saying?' }, { type: 'file', - mimeType: 'audio/mpeg', + mediaType: 'audio/mpeg', data: fs.readFileSync('./data/galileo.mp3'), }, ], @@ -641,7 +641,7 @@ const result = await generateText({ { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', filename: 'ai.pdf', // optional }, ], @@ -653,7 +653,7 @@ const result = await generateText({ The model will have access to the contents of the PDF file and respond to questions about it. The PDF file should be passed using the `data` field, -and the `mimeType` should be set to `'application/pdf'`. +and the `mediaType` should be set to `'application/pdf'`. ### Completion Models diff --git a/content/providers/01-ai-sdk-providers/03-azure.mdx b/content/providers/01-ai-sdk-providers/03-azure.mdx index 46e03042a833..2b52aefa25cd 100644 --- a/content/providers/01-ai-sdk-providers/03-azure.mdx +++ b/content/providers/01-ai-sdk-providers/03-azure.mdx @@ -349,7 +349,7 @@ const result = await generateText({ { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', filename: 'ai.pdf', // optional }, ], @@ -361,7 +361,7 @@ const result = await generateText({ The model will have access to the contents of the PDF file and respond to questions about it. The PDF file should be passed using the `data` field, -and the `mimeType` should be set to `'application/pdf'`. +and the `mediaType` should be set to `'application/pdf'`. ### Completion Models diff --git a/content/providers/01-ai-sdk-providers/05-anthropic.mdx b/content/providers/01-ai-sdk-providers/05-anthropic.mdx index 3f48eed03923..93b2580446a6 100644 --- a/content/providers/01-ai-sdk-providers/05-anthropic.mdx +++ b/content/providers/01-ai-sdk-providers/05-anthropic.mdx @@ -326,7 +326,7 @@ const computerTool = anthropic.tools.computer_20241022({ experimental_toToolResultContent(result) { return typeof result === 'string' ? 
[{ type: 'text', text: result }] - : [{ type: 'image', data: result.data, mimeType: 'image/png' }]; + : [{ type: 'image', data: result.data, mediaType: 'image/png' }]; }, }); ``` @@ -358,7 +358,7 @@ const result = await generateText({ { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -369,7 +369,7 @@ const result = await generateText({ The model will have access to the contents of the PDF file and respond to questions about it. The PDF file should be passed using the `data` field, -and the `mimeType` should be set to `'application/pdf'`. +and the `mediaType` should be set to `'application/pdf'`. ### Model Capabilities diff --git a/content/providers/01-ai-sdk-providers/08-amazon-bedrock.mdx b/content/providers/01-ai-sdk-providers/08-amazon-bedrock.mdx index d32e5051d257..de7347a9c7b5 100644 --- a/content/providers/01-ai-sdk-providers/08-amazon-bedrock.mdx +++ b/content/providers/01-ai-sdk-providers/08-amazon-bedrock.mdx @@ -218,7 +218,7 @@ const result = await generateText({ { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/content/providers/01-ai-sdk-providers/15-google-generative-ai.mdx b/content/providers/01-ai-sdk-providers/15-google-generative-ai.mdx index 2fba02438350..bf806fc4ed3d 100644 --- a/content/providers/01-ai-sdk-providers/15-google-generative-ai.mdx +++ b/content/providers/01-ai-sdk-providers/15-google-generative-ai.mdx @@ -191,7 +191,7 @@ const result = await generateText({ { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -391,7 +391,7 @@ const result = await generateText({ }); for (const file of result.files) { - if (file.mimeType.startsWith('image/')) { + if (file.mediaType.startsWith('image/')) { // show the image } } diff --git a/content/providers/01-ai-sdk-providers/16-google-vertex.mdx b/content/providers/01-ai-sdk-providers/16-google-vertex.mdx index b65b0f6b0819..3e5710e997aa 100644 --- a/content/providers/01-ai-sdk-providers/16-google-vertex.mdx +++ b/content/providers/01-ai-sdk-providers/16-google-vertex.mdx @@ -337,7 +337,7 @@ const { text } = await generateText({ { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -1063,7 +1063,7 @@ const computerTool = vertexAnthropic.tools.computer_20241022({ experimental_toToolResultContent(result) { return typeof result === 'string' ? 
[{ type: 'text', text: result }] - : [{ type: 'image', data: result.data, mimeType: 'image/png' }]; + : [{ type: 'image', data: result.data, mediaType: 'image/png' }]; }, }); ``` diff --git a/content/providers/01-ai-sdk-providers/20-mistral.mdx b/content/providers/01-ai-sdk-providers/20-mistral.mdx index 7db1f0feb943..aab658fe59ad 100644 --- a/content/providers/01-ai-sdk-providers/20-mistral.mdx +++ b/content/providers/01-ai-sdk-providers/20-mistral.mdx @@ -113,7 +113,7 @@ const result = await generateText({ data: new URL( 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/ai.pdf?raw=true', ), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/e2e/feature-test-suite.ts b/examples/ai-core/src/e2e/feature-test-suite.ts index b9b768fa6917..fe369a9326de 100644 --- a/examples/ai-core/src/e2e/feature-test-suite.ts +++ b/examples/ai-core/src/e2e/feature-test-suite.ts @@ -863,7 +863,7 @@ export function createFeatureTestSuite({ data: fs .readFileSync('./data/ai.pdf') .toString('base64'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -897,7 +897,7 @@ export function createFeatureTestSuite({ data: Buffer.from( fs.readFileSync('./data/galileo.mp3'), ), - mimeType: 'audio/mpeg', + mediaType: 'audio/mpeg', }, ], }, diff --git a/examples/ai-core/src/e2e/google-vertex-anthropic.test.ts b/examples/ai-core/src/e2e/google-vertex-anthropic.test.ts index 3fe37311c6d6..e550ca4c58d8 100644 --- a/examples/ai-core/src/e2e/google-vertex-anthropic.test.ts +++ b/examples/ai-core/src/e2e/google-vertex-anthropic.test.ts @@ -144,7 +144,7 @@ const toolTests = (model: LanguageModelV2) => { { type: 'image', data: result.data, - mimeType: 'image/png', + mediaType: 'image/png', }, ]; }, diff --git a/examples/ai-core/src/e2e/google-vertex.test.ts b/examples/ai-core/src/e2e/google-vertex.test.ts index 505465624074..77212bb3b1ce 100644 --- a/examples/ai-core/src/e2e/google-vertex.test.ts +++ b/examples/ai-core/src/e2e/google-vertex.test.ts @@ -122,22 +122,22 @@ describe.each(Object.values(RUNTIME_VARIANTS))( }, ); -const mimeTypeSignatures = [ - { mimeType: 'image/gif' as const, bytes: [0x47, 0x49, 0x46] }, - { mimeType: 'image/png' as const, bytes: [0x89, 0x50, 0x4e, 0x47] }, - { mimeType: 'image/jpeg' as const, bytes: [0xff, 0xd8] }, - { mimeType: 'image/webp' as const, bytes: [0x52, 0x49, 0x46, 0x46] }, +const mediaTypeSignatures = [ + { mediaType: 'image/gif' as const, bytes: [0x47, 0x49, 0x46] }, + { mediaType: 'image/png' as const, bytes: [0x89, 0x50, 0x4e, 0x47] }, + { mediaType: 'image/jpeg' as const, bytes: [0xff, 0xd8] }, + { mediaType: 'image/webp' as const, bytes: [0x52, 0x49, 0x46, 0x46] }, ]; -function detectImageMimeType( +function detectImageMediaType( image: Uint8Array, ): 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp' | undefined { - for (const { bytes, mimeType } of mimeTypeSignatures) { + for (const { bytes, mediaType } of mediaTypeSignatures) { if ( image.length >= bytes.length && bytes.every((byte, index) => image[index] === byte) ) { - return mimeType; + return mediaType; } } @@ -166,8 +166,8 @@ const imageTest = (model: ImageModelV1) => { expect(image.uint8Array.length).toBeLessThan(10 * 1024 * 1024); // Verify PNG format - const mimeType = detectImageMimeType(image.uint8Array); - expect(mimeType).toBe('image/png'); + const mediaType = detectImageMediaType(image.uint8Array); + expect(mediaType).toBe('image/png'); // Create a temporary buffer to verify image dimensions const tempBuffer = 
Buffer.from(image.uint8Array); diff --git a/examples/ai-core/src/generate-object/google-gemini-files.ts b/examples/ai-core/src/generate-object/google-gemini-files.ts index 3293ddae6be4..04163bb357c5 100644 --- a/examples/ai-core/src/generate-object/google-gemini-files.ts +++ b/examples/ai-core/src/generate-object/google-gemini-files.ts @@ -34,7 +34,7 @@ async function main() { { type: 'file', data: geminiFile.file.uri, - mimeType: geminiFile.file.mimeType, + mediaType: geminiFile.file.mimeType, }, ], }, diff --git a/examples/ai-core/src/generate-object/google-pdf-url.ts b/examples/ai-core/src/generate-object/google-pdf-url.ts index 3eb557d454c4..3d44ad8b1262 100644 --- a/examples/ai-core/src/generate-object/google-pdf-url.ts +++ b/examples/ai-core/src/generate-object/google-pdf-url.ts @@ -24,7 +24,7 @@ async function main() { data: 'https://user.phil.hhu.de/~cwurm/wp-content/uploads/' + '2020/01/7181-attention-is-all-you-need.pdf', - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/generate-text/amazon-bedrock-tool-call-image-result.ts b/examples/ai-core/src/generate-text/amazon-bedrock-tool-call-image-result.ts index 0b1184025191..c5daed8d4709 100644 --- a/examples/ai-core/src/generate-text/amazon-bedrock-tool-call-image-result.ts +++ b/examples/ai-core/src/generate-text/amazon-bedrock-tool-call-image-result.ts @@ -34,7 +34,7 @@ async function main() { { type: 'image', data: Buffer.from(result.bytes).toString('base64'), - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, ]; }, diff --git a/examples/ai-core/src/generate-text/anthropic-computer-use-computer.ts b/examples/ai-core/src/generate-text/anthropic-computer-use-computer.ts index b5145f6f8a75..c26869c04942 100644 --- a/examples/ai-core/src/generate-text/anthropic-computer-use-computer.ts +++ b/examples/ai-core/src/generate-text/anthropic-computer-use-computer.ts @@ -36,7 +36,7 @@ async function main() { experimental_toToolResultContent(result) { return typeof result === 'string' ? [{ type: 'text', text: result }] - : [{ type: 'image', data: result.data, mimeType: 'image/png' }]; + : [{ type: 'image', data: result.data, mediaType: 'image/png' }]; }, }), }, diff --git a/examples/ai-core/src/generate-text/anthropic-pdf.ts b/examples/ai-core/src/generate-text/anthropic-pdf.ts index 1c7e94a2dc44..9b1128f1484f 100644 --- a/examples/ai-core/src/generate-text/anthropic-pdf.ts +++ b/examples/ai-core/src/generate-text/anthropic-pdf.ts @@ -17,7 +17,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/generate-text/google-audio.ts b/examples/ai-core/src/generate-text/google-audio.ts index 491f06537477..6cb9d82d2ec9 100644 --- a/examples/ai-core/src/generate-text/google-audio.ts +++ b/examples/ai-core/src/generate-text/google-audio.ts @@ -13,7 +13,7 @@ async function main() { { type: 'text', text: 'What is the audio saying?' 
}, { type: 'file', - mimeType: 'audio/mpeg', + mediaType: 'audio/mpeg', data: fs.readFileSync('./data/galileo.mp3'), }, ], diff --git a/examples/ai-core/src/generate-text/google-chatbot-image-output.ts b/examples/ai-core/src/generate-text/google-chatbot-image-output.ts index d18bd9fbd935..1a4211672a64 100644 --- a/examples/ai-core/src/generate-text/google-chatbot-image-output.ts +++ b/examples/ai-core/src/generate-text/google-chatbot-image-output.ts @@ -28,7 +28,7 @@ async function main() { } for (const file of result.files) { - if (file.mimeType.startsWith('image/')) { + if (file.mediaType.startsWith('image/')) { await presentImages([file]); } } diff --git a/examples/ai-core/src/generate-text/google-image-output.ts b/examples/ai-core/src/generate-text/google-image-output.ts index 408cdc0a594c..d2d52e19da62 100644 --- a/examples/ai-core/src/generate-text/google-image-output.ts +++ b/examples/ai-core/src/generate-text/google-image-output.ts @@ -15,7 +15,7 @@ async function main() { console.log(result.text); for (const file of result.files) { - if (file.mimeType.startsWith('image/')) { + if (file.mediaType.startsWith('image/')) { await presentImages([file]); } } diff --git a/examples/ai-core/src/generate-text/google-pdf.ts b/examples/ai-core/src/generate-text/google-pdf.ts index 56e7a40e2572..6a15c19e12b2 100644 --- a/examples/ai-core/src/generate-text/google-pdf.ts +++ b/examples/ai-core/src/generate-text/google-pdf.ts @@ -17,7 +17,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/generate-text/google-vertex-anthropic-computer-use-computer.ts b/examples/ai-core/src/generate-text/google-vertex-anthropic-computer-use-computer.ts index 84c305b97fef..ce0963f7460b 100644 --- a/examples/ai-core/src/generate-text/google-vertex-anthropic-computer-use-computer.ts +++ b/examples/ai-core/src/generate-text/google-vertex-anthropic-computer-use-computer.ts @@ -36,7 +36,7 @@ async function main() { experimental_toToolResultContent(result) { return typeof result === 'string' ? 
[{ type: 'text', text: result }] - : [{ type: 'image', data: result.data, mimeType: 'image/png' }]; + : [{ type: 'image', data: result.data, mediaType: 'image/png' }]; }, }), }, diff --git a/examples/ai-core/src/generate-text/google-vertex-anthropic-pdf.ts b/examples/ai-core/src/generate-text/google-vertex-anthropic-pdf.ts index 99a536aa7f1f..644a4df9e2d5 100644 --- a/examples/ai-core/src/generate-text/google-vertex-anthropic-pdf.ts +++ b/examples/ai-core/src/generate-text/google-vertex-anthropic-pdf.ts @@ -17,7 +17,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/generate-text/google-vertex-audio.ts b/examples/ai-core/src/generate-text/google-vertex-audio.ts index 174c13c3f098..910668dcc4f7 100644 --- a/examples/ai-core/src/generate-text/google-vertex-audio.ts +++ b/examples/ai-core/src/generate-text/google-vertex-audio.ts @@ -17,7 +17,7 @@ async function main() { { type: 'file', data: Buffer.from(fs.readFileSync('./data/galileo.mp3')), - mimeType: 'audio/mpeg', + mediaType: 'audio/mpeg', }, ], }, diff --git a/examples/ai-core/src/generate-text/google-vertex-pdf-url.ts b/examples/ai-core/src/generate-text/google-vertex-pdf-url.ts index e14116c91861..c020a199d928 100644 --- a/examples/ai-core/src/generate-text/google-vertex-pdf-url.ts +++ b/examples/ai-core/src/generate-text/google-vertex-pdf-url.ts @@ -16,7 +16,7 @@ async function main() { { type: 'file', data: 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/ai.pdf?raw=true', - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/generate-text/google-vertex-pdf.ts b/examples/ai-core/src/generate-text/google-vertex-pdf.ts index 0b82cf84dfcb..c6420705d90f 100644 --- a/examples/ai-core/src/generate-text/google-vertex-pdf.ts +++ b/examples/ai-core/src/generate-text/google-vertex-pdf.ts @@ -17,7 +17,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/generate-text/mistral-pdf-url.ts b/examples/ai-core/src/generate-text/mistral-pdf-url.ts index 25c82924dcf1..85fda1b29659 100644 --- a/examples/ai-core/src/generate-text/mistral-pdf-url.ts +++ b/examples/ai-core/src/generate-text/mistral-pdf-url.ts @@ -18,7 +18,7 @@ async function main() { data: new URL( 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/ai.pdf?raw=true', ), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/generate-text/openai-audio.ts b/examples/ai-core/src/generate-text/openai-audio.ts index 3cb063906bbd..0535787d06e1 100644 --- a/examples/ai-core/src/generate-text/openai-audio.ts +++ b/examples/ai-core/src/generate-text/openai-audio.ts @@ -13,7 +13,7 @@ async function main() { { type: 'text', text: 'What is the audio saying?' 
}, { type: 'file', - mimeType: 'audio/mpeg', + mediaType: 'audio/mpeg', data: fs.readFileSync('./data/galileo.mp3'), }, ], diff --git a/examples/ai-core/src/generate-text/openai-pdf-url.ts b/examples/ai-core/src/generate-text/openai-pdf-url.ts index 0285489f515c..2e93127b09f2 100644 --- a/examples/ai-core/src/generate-text/openai-pdf-url.ts +++ b/examples/ai-core/src/generate-text/openai-pdf-url.ts @@ -18,7 +18,7 @@ async function main() { data: new URL( 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/ai.pdf?raw=true', ), - mimeType: 'application/pdf', + mediaType: 'application/pdf', filename: 'ai.pdf', }, ], diff --git a/examples/ai-core/src/generate-text/openai-pdf.ts b/examples/ai-core/src/generate-text/openai-pdf.ts index 348738ce18ce..08871bc6a2f5 100644 --- a/examples/ai-core/src/generate-text/openai-pdf.ts +++ b/examples/ai-core/src/generate-text/openai-pdf.ts @@ -17,7 +17,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', // filename: 'ai.pdf', }, ], diff --git a/examples/ai-core/src/generate-text/openai-responses-pdf-url.ts b/examples/ai-core/src/generate-text/openai-responses-pdf-url.ts index fb2db083b9ec..6e5168fb1d1e 100644 --- a/examples/ai-core/src/generate-text/openai-responses-pdf-url.ts +++ b/examples/ai-core/src/generate-text/openai-responses-pdf-url.ts @@ -18,7 +18,7 @@ async function main() { data: new URL( 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/ai.pdf?raw=true', ), - mimeType: 'application/pdf', + mediaType: 'application/pdf', filename: 'ai.pdf', }, ], diff --git a/examples/ai-core/src/generate-text/openai-responses-pdf.ts b/examples/ai-core/src/generate-text/openai-responses-pdf.ts index 75943c666509..592f39dd1362 100644 --- a/examples/ai-core/src/generate-text/openai-responses-pdf.ts +++ b/examples/ai-core/src/generate-text/openai-responses-pdf.ts @@ -17,7 +17,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', // filename: 'ai.pdf', }, ], diff --git a/examples/ai-core/src/stream-text/amazon-bedrock-pdf.ts b/examples/ai-core/src/stream-text/amazon-bedrock-pdf.ts index e1409b62574e..a9e109bef38b 100644 --- a/examples/ai-core/src/stream-text/amazon-bedrock-pdf.ts +++ b/examples/ai-core/src/stream-text/amazon-bedrock-pdf.ts @@ -14,7 +14,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/stream-text/anthropic-pdf.ts b/examples/ai-core/src/stream-text/anthropic-pdf.ts index b300f1bbd975..8b59bdb79deb 100644 --- a/examples/ai-core/src/stream-text/anthropic-pdf.ts +++ b/examples/ai-core/src/stream-text/anthropic-pdf.ts @@ -17,7 +17,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/stream-text/google-chatbot-image-output.ts b/examples/ai-core/src/stream-text/google-chatbot-image-output.ts index 8f917e525096..1370e134be97 100644 --- a/examples/ai-core/src/stream-text/google-chatbot-image-output.ts +++ b/examples/ai-core/src/stream-text/google-chatbot-image-output.ts @@ -32,7 +32,7 @@ async function main() { } case 'file': { - if (delta.mimeType.startsWith('image/')) { + if (delta.mediaType.startsWith('image/')) { console.log(); await presentImages([delta]); } diff --git 
a/examples/ai-core/src/stream-text/google-image-output.ts b/examples/ai-core/src/stream-text/google-image-output.ts index 5a9991bafb2c..98f9852acfd7 100644 --- a/examples/ai-core/src/stream-text/google-image-output.ts +++ b/examples/ai-core/src/stream-text/google-image-output.ts @@ -20,7 +20,7 @@ async function main() { } case 'file': { - if (part.mimeType.startsWith('image/')) { + if (part.mediaType.startsWith('image/')) { await presentImages([part]); } diff --git a/examples/ai-core/src/stream-text/google-vertex-anthropic-pdf.ts b/examples/ai-core/src/stream-text/google-vertex-anthropic-pdf.ts index 0441b560f09c..fe32ce1cbd10 100644 --- a/examples/ai-core/src/stream-text/google-vertex-anthropic-pdf.ts +++ b/examples/ai-core/src/stream-text/google-vertex-anthropic-pdf.ts @@ -17,7 +17,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/stream-text/google-vertex-pdf-url.ts b/examples/ai-core/src/stream-text/google-vertex-pdf-url.ts index a9ec56292dc4..7afafde9a6c7 100644 --- a/examples/ai-core/src/stream-text/google-vertex-pdf-url.ts +++ b/examples/ai-core/src/stream-text/google-vertex-pdf-url.ts @@ -16,7 +16,7 @@ async function main() { { type: 'file', data: 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/ai.pdf?raw=true', - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/examples/ai-core/src/stream-text/openai-audio.ts b/examples/ai-core/src/stream-text/openai-audio.ts index 3cc97dd3febd..6d2cec39ffde 100644 --- a/examples/ai-core/src/stream-text/openai-audio.ts +++ b/examples/ai-core/src/stream-text/openai-audio.ts @@ -13,7 +13,7 @@ async function main() { { type: 'text', text: 'What is the audio saying?' }, { type: 'file', - mimeType: 'audio/mpeg', + mediaType: 'audio/mpeg', data: fs.readFileSync('./data/galileo.mp3'), }, ], diff --git a/examples/next-openai/app/use-chat-image-output/page.tsx b/examples/next-openai/app/use-chat-image-output/page.tsx index 8e33d4dc3079..33f1583fcc41 100644 --- a/examples/next-openai/app/use-chat-image-output/page.tsx +++ b/examples/next-openai/app/use-chat-image-output/page.tsx @@ -17,13 +17,13 @@ export default function Chat() { return
<div>{part.text}</div>
; } else if ( part.type === 'file' && - part.mimeType.startsWith('image/') + part.mediaType.startsWith('image/') ) { return ( // eslint-disable-next-line @next/next/no-img-element, jsx-a11y/alt-text - <img src={`data:${part.mimeType};base64,${part.data}`} /> + <img src={`data:${part.mediaType};base64,${part.data}`} /> ); } diff --git a/examples/next-openai/app/use-chat-persistence-single-message-image-output/[id]/chat.tsx b/examples/next-openai/app/use-chat-persistence-single-message-image-output/[id]/chat.tsx index acfce0ae23e8..3cebb5cce1c8 100644 --- a/examples/next-openai/app/use-chat-persistence-single-message-image-output/[id]/chat.tsx +++ b/examples/next-openai/app/use-chat-persistence-single-message-image-output/[id]/chat.tsx @@ -30,13 +30,13 @@ export default function Chat({ return
<div>{part.text}</div>
; } else if ( part.type === 'file' && - part.mimeType.startsWith('image/') + part.mediaType.startsWith('image/') ) { return ( // eslint-disable-next-line @next/next/no-img-element, jsx-a11y/alt-text - <img src={`data:${part.mimeType};base64,${part.data}`} /> + <img src={`data:${part.mediaType};base64,${part.data}`} /> ); } diff --git a/packages/ai/core/generate-image/generate-image.test.ts b/packages/ai/core/generate-image/generate-image.test.ts index ded347131aa8..25437df7dc40 100644 --- a/packages/ai/core/generate-image/generate-image.test.ts +++ b/packages/ai/core/generate-image/generate-image.test.ts @@ -109,18 +109,18 @@ describe('generateImage', () => { result.images.map(image => ({ base64: image.base64, uint8Array: image.uint8Array, - mimeType: image.mimeType, + mediaType: image.mediaType, })), ).toStrictEqual([ { base64: pngBase64, uint8Array: convertBase64ToUint8Array(pngBase64), - mimeType: 'image/png', + mediaType: 'image/png', }, { base64: jpegBase64, uint8Array: convertBase64ToUint8Array(jpegBase64), - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, ]); }); @@ -139,11 +139,11 @@ describe('generateImage', () => { expect({ base64: result.image.base64, uint8Array: result.image.uint8Array, - mimeType: result.image.mimeType, + mediaType: result.image.mediaType, }).toStrictEqual({ base64: pngBase64, uint8Array: convertBase64ToUint8Array(pngBase64), - mimeType: 'image/png', + mediaType: 'image/png', }); }); }); diff --git a/packages/ai/core/generate-image/generate-image.ts b/packages/ai/core/generate-image/generate-image.ts index ae0a79a550ec..f5847d586a1b 100644 --- a/packages/ai/core/generate-image/generate-image.ts +++ b/packages/ai/core/generate-image/generate-image.ts @@ -9,9 +9,9 @@ import { ImageGenerationWarning } from '../types/image-model'; import { ImageModelResponseMetadata } from '../types/image-model-response-metadata'; import { GenerateImageResult } from './generate-image-result'; import { - detectMimeType, - imageMimeTypeSignatures, -} from '../util/detect-mimetype'; + detectMediaType, + imageMediaTypeSignatures, +} from '../util/detect-media-type'; /** Generates images using an image model. @@ -149,10 +149,10 @@ Only applicable for HTTP-based providers. image => new DefaultGeneratedFile({ data: image, - mimeType: - detectMimeType({ + mediaType: + detectMediaType({ data: image, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }) ?? 
'image/png', }), ), diff --git a/packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap b/packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap index 7a5e1fe83f5c..91610e90df58 100644 --- a/packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap +++ b/packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap @@ -411,7 +411,7 @@ exports[`options.maxSteps > 4 steps: initial, continue, continue, continue > onS "files": [ DefaultGeneratedFile { "base64Data": undefined, - "mimeType": "image/png", + "mediaType": "image/png", "uint8ArrayData": Uint8Array [ 1, 2, @@ -554,7 +554,7 @@ exports[`options.maxSteps > 4 steps: initial, continue, continue, continue > onS "files": [ DefaultGeneratedFile { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "uint8ArrayData": undefined, }, ], @@ -618,7 +618,7 @@ exports[`options.maxSteps > 4 steps: initial, continue, continue, continue > res [ DefaultGeneratedFile { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "uint8ArrayData": undefined, }, ] @@ -712,7 +712,7 @@ exports[`options.maxSteps > 4 steps: initial, continue, continue, continue > res "files": [ DefaultGeneratedFile { "base64Data": undefined, - "mimeType": "image/png", + "mediaType": "image/png", "uint8ArrayData": Uint8Array [ 1, 2, @@ -855,7 +855,7 @@ exports[`options.maxSteps > 4 steps: initial, continue, continue, continue > res "files": [ DefaultGeneratedFile { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "uint8ArrayData": undefined, }, ], @@ -919,7 +919,7 @@ exports[`result.files > should contain files 1`] = ` [ DefaultGeneratedFile { "base64Data": undefined, - "mimeType": "image/png", + "mediaType": "image/png", "uint8ArrayData": Uint8Array [ 1, 2, @@ -928,7 +928,7 @@ exports[`result.files > should contain files 1`] = ` }, DefaultGeneratedFile { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "uint8ArrayData": undefined, }, ] @@ -1155,7 +1155,7 @@ exports[`result.steps > should contain files 1`] = ` "files": [ DefaultGeneratedFile { "base64Data": undefined, - "mimeType": "image/png", + "mediaType": "image/png", "uint8ArrayData": Uint8Array [ 1, 2, @@ -1164,7 +1164,7 @@ exports[`result.steps > should contain files 1`] = ` }, DefaultGeneratedFile { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "uint8ArrayData": undefined, }, ], @@ -1184,12 +1184,12 @@ exports[`result.steps > should contain files 1`] = ` "content": [ { "data": "AQID", - "mimeType": "image/png", + "mediaType": "image/png", "type": "file", }, { "data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "type": "file", }, { diff --git a/packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap b/packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap index 03ced770601a..72031e70239b 100644 --- a/packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap +++ b/packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap @@ -2396,13 +2396,13 @@ exports[`streamText > options.onFinish > should send files 1`] = ` "files": [ DefaultGeneratedFileWithType { "base64Data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", "uint8ArrayData": undefined, }, DefaultGeneratedFileWithType { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "type": "file", "uint8ArrayData": 
undefined, }, @@ -2421,12 +2421,12 @@ exports[`streamText > options.onFinish > should send files 1`] = ` "content": [ { "data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", }, { "data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "type": "file", }, { @@ -2448,13 +2448,13 @@ exports[`streamText > options.onFinish > should send files 1`] = ` "files": [ DefaultGeneratedFileWithType { "base64Data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", "uint8ArrayData": undefined, }, DefaultGeneratedFileWithType { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "type": "file", "uint8ArrayData": undefined, }, @@ -2474,12 +2474,12 @@ exports[`streamText > options.onFinish > should send files 1`] = ` "content": [ { "data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", }, { "data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "type": "file", }, { @@ -3217,13 +3217,13 @@ exports[`streamText > result.files > should contain files 1`] = ` [ DefaultGeneratedFileWithType { "base64Data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", "uint8ArrayData": undefined, }, DefaultGeneratedFileWithType { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "type": "file", "uint8ArrayData": undefined, }, @@ -3430,7 +3430,7 @@ exports[`streamText > result.fullStream > should send files 1`] = ` }, DefaultGeneratedFileWithType { "base64Data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", "uint8ArrayData": undefined, }, @@ -3440,7 +3440,7 @@ exports[`streamText > result.fullStream > should send files 1`] = ` }, DefaultGeneratedFileWithType { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "type": "file", "uint8ArrayData": undefined, }, @@ -4369,13 +4369,13 @@ exports[`streamText > result.steps > should add the files from the model respons "files": [ DefaultGeneratedFileWithType { "base64Data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", "uint8ArrayData": undefined, }, DefaultGeneratedFileWithType { "base64Data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "type": "file", "uint8ArrayData": undefined, }, @@ -4395,12 +4395,12 @@ exports[`streamText > result.steps > should add the files from the model respons "content": [ { "data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", }, { "data": "QkFVRw==", - "mimeType": "image/jpeg", + "mediaType": "image/jpeg", "type": "file", }, { diff --git a/packages/ai/core/generate-text/generate-text.test.ts b/packages/ai/core/generate-text/generate-text.test.ts index e7df0eb97238..7789da54832b 100644 --- a/packages/ai/core/generate-text/generate-text.test.ts +++ b/packages/ai/core/generate-text/generate-text.test.ts @@ -46,11 +46,11 @@ const modelWithFiles = new MockLanguageModelV2({ files: [ { data: new Uint8Array([1, 2, 3]), - mimeType: 'image/png', + mediaType: 'image/png', }, { data: 'QkFVRw==', - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, ], }), @@ -750,7 +750,7 @@ describe('options.maxSteps', () => { files: [ { data: new Uint8Array([1, 2, 3]), - mimeType: 'image/png', + mediaType: 'image/png', filename: 'test.png', }, ], @@ -856,7 +856,7 @@ describe('options.maxSteps', () => { files: [ { data: 'QkFVRw==', - mimeType: 
'image/jpeg', + mediaType: 'image/jpeg', filename: 'test.jpeg', }, ], diff --git a/packages/ai/core/generate-text/generate-text.ts b/packages/ai/core/generate-text/generate-text.ts index c206a2dcc103..c8cb02e289dd 100644 --- a/packages/ai/core/generate-text/generate-text.ts +++ b/packages/ai/core/generate-text/generate-text.ts @@ -802,7 +802,7 @@ function asFiles( files: | Array<{ data: string | Uint8Array; - mimeType: string; + mediaType: string; }> | undefined, ): Array { diff --git a/packages/ai/core/generate-text/generated-file.ts b/packages/ai/core/generate-text/generated-file.ts index 20b817263a93..6dfbdd9b780b 100644 --- a/packages/ai/core/generate-text/generated-file.ts +++ b/packages/ai/core/generate-text/generated-file.ts @@ -18,28 +18,30 @@ File as a Uint8Array. readonly uint8Array: Uint8Array; /** -MIME type of the file +The IANA media type of the file. + +@see https://www.iana.org/assignments/media-types/media-types.xhtml */ - readonly mimeType: string; + readonly mediaType: string; } export class DefaultGeneratedFile implements GeneratedFile { private base64Data: string | undefined; private uint8ArrayData: Uint8Array | undefined; - readonly mimeType: string; + readonly mediaType: string; constructor({ data, - mimeType, + mediaType, }: { data: string | Uint8Array; - mimeType: string; + mediaType: string; }) { const isUint8Array = data instanceof Uint8Array; this.base64Data = isUint8Array ? undefined : data; this.uint8ArrayData = isUint8Array ? data : undefined; - this.mimeType = mimeType; + this.mediaType = mediaType; } // lazy conversion with caching to avoid unnecessary conversion overhead: @@ -62,7 +64,7 @@ export class DefaultGeneratedFile implements GeneratedFile { export class DefaultGeneratedFileWithType extends DefaultGeneratedFile { readonly type = 'file'; - constructor(options: { data: string | Uint8Array; mimeType: string }) { + constructor(options: { data: string | Uint8Array; mediaType: string }) { super(options); } } diff --git a/packages/ai/core/generate-text/run-tools-transformation.ts b/packages/ai/core/generate-text/run-tools-transformation.ts index 3b0a4291e3cb..3109ac09ff9e 100644 --- a/packages/ai/core/generate-text/run-tools-transformation.ts +++ b/packages/ai/core/generate-text/run-tools-transformation.ts @@ -169,7 +169,7 @@ export function runToolsTransformation({ controller.enqueue( new DefaultGeneratedFileWithType({ data: chunk.data, - mimeType: chunk.mimeType, + mediaType: chunk.mediaType, }), ); break; diff --git a/packages/ai/core/generate-text/stream-text.test.ts b/packages/ai/core/generate-text/stream-text.test.ts index 7aa9565850b2..4c0beea11cfd 100644 --- a/packages/ai/core/generate-text/stream-text.test.ts +++ b/packages/ai/core/generate-text/stream-text.test.ts @@ -114,13 +114,13 @@ const modelWithFiles = new MockLanguageModelV2({ { type: 'file', data: 'Hello World', - mimeType: 'text/plain', + mediaType: 'text/plain', }, { type: 'text-delta', textDelta: 'Hello!' }, { type: 'file', data: 'QkFVRw==', - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, { type: 'finish', diff --git a/packages/ai/core/generate-text/stream-text.ts b/packages/ai/core/generate-text/stream-text.ts index 2ab9cf41a06c..51308ce08605 100644 --- a/packages/ai/core/generate-text/stream-text.ts +++ b/packages/ai/core/generate-text/stream-text.ts @@ -1684,7 +1684,7 @@ However, the LLM results are expected to be small enough to not cause issues. 
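// Note on the `case 'file'` branch below: the stream part itself now
// carries `mediaType`, but the data stream wire format still serializes
// the value under the legacy `mimeType` key, so existing stream consumers
// keep working. A minimal sketch, assuming a `chunk` shaped like the file
// parts above:
//
//   formatDataStreamPart('file', {
//     mimeType: chunk.mediaType, // wire key unchanged; value is the renamed field
//     data: chunk.base64,
//   });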
case 'file': { controller.enqueue( formatDataStreamPart('file', { - mimeType: chunk.mimeType, + mimeType: chunk.mediaType, data: chunk.base64, }), ); diff --git a/packages/ai/core/generate-text/to-response-messages.test.ts b/packages/ai/core/generate-text/to-response-messages.test.ts index 0d74122b21c5..b66168d7783d 100644 --- a/packages/ai/core/generate-text/to-response-messages.test.ts +++ b/packages/ai/core/generate-text/to-response-messages.test.ts @@ -194,7 +194,7 @@ describe('toResponseMessages', () => { parameters: z.object({}), execute: async () => 'image-base64', experimental_toToolResultContent(result) { - return [{ type: 'image', data: result, mimeType: 'image/png' }]; + return [{ type: 'image', data: result, mediaType: 'image/png' }]; }, }), }, @@ -241,10 +241,10 @@ describe('toResponseMessages', () => { toolCallId: '123', toolName: 'testTool', result: [ - { type: 'image', data: 'image-base64', mimeType: 'image/png' }, + { type: 'image', data: 'image-base64', mediaType: 'image/png' }, ], experimental_content: [ - { type: 'image', data: 'image-base64', mimeType: 'image/png' }, + { type: 'image', data: 'image-base64', mediaType: 'image/png' }, ], }, ], @@ -256,7 +256,7 @@ describe('toResponseMessages', () => { it('should include images in the assistant message', () => { const pngFile = new DefaultGeneratedFile({ data: new Uint8Array([137, 80, 78, 71, 13, 10, 26, 10]), - mimeType: 'image/png', + mediaType: 'image/png', }); const result = toResponseMessages({ @@ -275,7 +275,7 @@ describe('toResponseMessages', () => { role: 'assistant', id: 'msg-123', content: [ - { type: 'file', data: pngFile.base64, mimeType: pngFile.mimeType }, + { type: 'file', data: pngFile.base64, mediaType: pngFile.mediaType }, { type: 'text', text: 'Here is an image' }, ], }, @@ -285,11 +285,11 @@ describe('toResponseMessages', () => { it('should handle multiple images in the assistant message', () => { const pngFile = new DefaultGeneratedFile({ data: new Uint8Array([137, 80, 78, 71, 13, 10, 26, 10]), - mimeType: 'image/png', + mediaType: 'image/png', }); const jpegFile = new DefaultGeneratedFile({ data: new Uint8Array([255, 216, 255]), - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }); const result = toResponseMessages({ @@ -308,8 +308,12 @@ describe('toResponseMessages', () => { role: 'assistant', id: 'msg-123', content: [ - { type: 'file', data: pngFile.base64, mimeType: pngFile.mimeType }, - { type: 'file', data: jpegFile.base64, mimeType: jpegFile.mimeType }, + { type: 'file', data: pngFile.base64, mediaType: pngFile.mediaType }, + { + type: 'file', + data: jpegFile.base64, + mediaType: jpegFile.mediaType, + }, { type: 'text', text: 'Here are multiple images' }, ], }, @@ -319,7 +323,7 @@ describe('toResponseMessages', () => { it('should handle Uint8Array images', () => { const pngFile = new DefaultGeneratedFile({ data: new Uint8Array([137, 80, 78, 71, 13, 10, 26, 10]), - mimeType: 'image/png', + mediaType: 'image/png', }); const result = toResponseMessages({ @@ -338,7 +342,7 @@ describe('toResponseMessages', () => { role: 'assistant', id: 'msg-123', content: [ - { type: 'file', data: pngFile.base64, mimeType: pngFile.mimeType }, + { type: 'file', data: pngFile.base64, mediaType: pngFile.mediaType }, { type: 'text', text: 'Here is a binary image' }, ], }, @@ -348,7 +352,7 @@ describe('toResponseMessages', () => { it('should include images, reasoning, and tool calls in the correct order', () => { const pngFile = new DefaultGeneratedFile({ data: new Uint8Array([137, 80, 78, 71, 13, 10, 26, 10]), - 
mimeType: 'image/png', + mediaType: 'image/png', }); const result = toResponseMessages({ @@ -380,7 +384,7 @@ describe('toResponseMessages', () => { id: 'msg-123', content: [ { type: 'reasoning', text: 'Thinking text', signature: 'sig' }, - { type: 'file', data: pngFile.base64, mimeType: pngFile.mimeType }, + { type: 'file', data: pngFile.base64, mediaType: pngFile.mediaType }, { type: 'text', text: 'Combined response' }, { type: 'tool-call', diff --git a/packages/ai/core/generate-text/to-response-messages.ts b/packages/ai/core/generate-text/to-response-messages.ts index 1cb3fe8c834d..db762fbf74d6 100644 --- a/packages/ai/core/generate-text/to-response-messages.ts +++ b/packages/ai/core/generate-text/to-response-messages.ts @@ -42,7 +42,7 @@ export function toResponseMessages({ ...files.map(file => ({ type: 'file' as const, data: file.base64, - mimeType: file.mimeType, + mediaType: file.mediaType, })), { type: 'text' as const, text }, ...toolCalls, diff --git a/packages/ai/core/prompt/__snapshots__/append-response-messages.test.ts.snap b/packages/ai/core/prompt/__snapshots__/append-response-messages.test.ts.snap index 3e1e35b082e2..52665e14e8b9 100644 --- a/packages/ai/core/prompt/__snapshots__/append-response-messages.test.ts.snap +++ b/packages/ai/core/prompt/__snapshots__/append-response-messages.test.ts.snap @@ -406,7 +406,7 @@ exports[`appendResponseMessages > after user message > appends assistant message }, { "data": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO+ip1sAAAAASUVORK5CYII=", - "mimeType": "image/png", + "mediaType": "image/png", "type": "file", }, ], diff --git a/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap b/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap index b0b972070651..5271fc1f1817 100644 --- a/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap +++ b/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap @@ -755,7 +755,7 @@ exports[`convertToCoreMessages > user message > should handle user message with }, { "data": "dGVzdA==", - "mimeType": "application/pdf", + "mediaType": "application/pdf", "type": "file", }, ], diff --git a/packages/ai/core/prompt/append-response-messages.test.ts b/packages/ai/core/prompt/append-response-messages.test.ts index 75b4d6cea826..6f131e4fbafc 100644 --- a/packages/ai/core/prompt/append-response-messages.test.ts +++ b/packages/ai/core/prompt/append-response-messages.test.ts @@ -78,7 +78,7 @@ describe('appendResponseMessages', () => { content: [ { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO+ip1sAAAAASUVORK5CYII=', }, ], diff --git a/packages/ai/core/prompt/append-response-messages.ts b/packages/ai/core/prompt/append-response-messages.ts index 0810307284a6..ca263687e44d 100644 --- a/packages/ai/core/prompt/append-response-messages.ts +++ b/packages/ai/core/prompt/append-response-messages.ts @@ -135,7 +135,7 @@ Internal. For test use only. May change without notice. } parts.push({ type: 'file' as const, - mimeType: part.mimeType, + mediaType: part.mediaType ?? 
part.mimeType, data: convertDataContentToBase64String(part.data), }); break; diff --git a/packages/ai/core/prompt/attachments-to-parts.ts b/packages/ai/core/prompt/attachments-to-parts.ts index 91adca52f2e2..fa825fd39a7a 100644 --- a/packages/ai/core/prompt/attachments-to-parts.ts +++ b/packages/ai/core/prompt/attachments-to-parts.ts @@ -39,7 +39,7 @@ export function attachmentsToParts(attachments: Attachment[]): ContentPart[] { parts.push({ type: 'file', data: url, - mimeType: attachment.contentType, + mediaType: attachment.contentType, }); } break; @@ -48,16 +48,16 @@ export function attachmentsToParts(attachments: Attachment[]): ContentPart[] { case 'data:': { let header; let base64Content; - let mimeType; + let mediaType; try { [header, base64Content] = attachment.url.split(','); - mimeType = header.split(';')[0].split(':')[1]; + mediaType = header.split(';')[0].split(':')[1]; } catch (error) { throw new Error(`Error processing data URL: ${attachment.url}`); } - if (mimeType == null || base64Content == null) { + if (mediaType == null || base64Content == null) { throw new Error(`Invalid data URL format: ${attachment.url}`); } @@ -83,7 +83,7 @@ export function attachmentsToParts(attachments: Attachment[]): ContentPart[] { parts.push({ type: 'file', data: base64Content, - mimeType: attachment.contentType, + mediaType: attachment.contentType, }); } diff --git a/packages/ai/core/prompt/content-part.ts b/packages/ai/core/prompt/content-part.ts index 9bcb93309755..c435f947ca14 100644 --- a/packages/ai/core/prompt/content-part.ts +++ b/packages/ai/core/prompt/content-part.ts @@ -59,7 +59,14 @@ Image data. Can either be: image: DataContent | URL; /** -Optional mime type of the image. +Optional IANA media type of the image. + +@see https://www.iana.org/assignments/media-types/media-types.xhtml + */ + mediaType?: string; + + /** +@deprecated Use `mediaType` instead. */ mimeType?: string; @@ -82,6 +89,7 @@ functionality that can be fully encapsulated in the provider. export const imagePartSchema: z.ZodType = z.object({ type: z.literal('image'), image: z.union([dataContentSchema, z.instanceof(URL)]), + mediaType: z.string().optional(), mimeType: z.string().optional(), providerOptions: providerMetadataSchema.optional(), experimental_providerMetadata: providerMetadataSchema.optional(), @@ -107,9 +115,16 @@ Optional filename of the file. filename?: string; /** -Mime type of the file. +IANA media type of the file. + +@see https://www.iana.org/assignments/media-types/media-types.xhtml + */ + mediaType: string; + + /** +@deprecated Use `mediaType` instead. */ - mimeType: string; + mimeType?: string; /** Additional provider-specific metadata. 
They are passed through @@ -131,7 +146,8 @@ export const filePartSchema: z.ZodType = z.object({ type: z.literal('file'), data: z.union([dataContentSchema, z.instanceof(URL)]), filename: z.string().optional(), - mimeType: z.string(), + mediaType: z.string(), + mimeType: z.string().optional(), providerOptions: providerMetadataSchema.optional(), experimental_providerMetadata: providerMetadataSchema.optional(), }); diff --git a/packages/ai/core/prompt/convert-to-core-messages.test.ts b/packages/ai/core/prompt/convert-to-core-messages.test.ts index f8b6eec5d632..a37b9c3b9613 100644 --- a/packages/ai/core/prompt/convert-to-core-messages.test.ts +++ b/packages/ai/core/prompt/convert-to-core-messages.test.ts @@ -93,7 +93,7 @@ describe('convertToCoreMessages', () => { { type: 'file', data: new URL('https://example.com/document.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -282,7 +282,7 @@ describe('convertToCoreMessages', () => { parts: [ { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: 'dGVzdA==', }, ], @@ -292,7 +292,7 @@ describe('convertToCoreMessages', () => { expect(result).toEqual([ { role: 'assistant', - content: [{ type: 'file', mimeType: 'image/png', data: 'dGVzdA==' }], + content: [{ type: 'file', mediaType: 'image/png', data: 'dGVzdA==' }], }, ] satisfies CoreMessage[]); }); diff --git a/packages/ai/core/prompt/convert-to-core-messages.ts b/packages/ai/core/prompt/convert-to-core-messages.ts index 54a79eb4aea9..acec7aacfc03 100644 --- a/packages/ai/core/prompt/convert-to-core-messages.ts +++ b/packages/ai/core/prompt/convert-to-core-messages.ts @@ -82,11 +82,18 @@ export function convertToCoreMessages( for (const part of block) { switch (part.type) { - case 'file': case 'text': { content.push(part); break; } + case 'file': { + content.push({ + type: 'file' as const, + data: part.data, + mediaType: part.mediaType ?? 
(part as any).mimeType, // TODO migration, remove + }); + break; + } case 'reasoning': { for (const detail of part.details) { switch (detail.type) { diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts index 85bc5e35d05a..df9e8dfcfbbb 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts @@ -29,7 +29,7 @@ describe('convertToLanguageModelPrompt', () => { expect(url).toEqual(new URL('https://example.com/image.png')); return { data: new Uint8Array([0, 1, 2, 3]), - mimeType: 'image/png', + mediaType: 'image/png', }; }, }); @@ -40,7 +40,7 @@ describe('convertToLanguageModelPrompt', () => { content: [ { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: 'AAECAw==', }, ], @@ -70,7 +70,7 @@ describe('convertToLanguageModelPrompt', () => { expect(url).toEqual(new URL('https://example.com/image.png')); return { data: new Uint8Array([0, 1, 2, 3]), - mimeType: 'image/png', + mediaType: 'image/png', }; }, }); @@ -81,7 +81,7 @@ describe('convertToLanguageModelPrompt', () => { content: [ { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: 'AAECAw==', }, ], @@ -102,7 +102,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: new URL('https://example.com/document.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -119,7 +119,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: new URL('https://example.com/document.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -137,7 +137,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: new URL('https://example.com/document.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -149,7 +149,7 @@ describe('convertToLanguageModelPrompt', () => { expect(url).toEqual(new URL('https://example.com/document.pdf')); return { data: new Uint8Array([0, 1, 2, 3]), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }; }, }); @@ -160,7 +160,7 @@ describe('convertToLanguageModelPrompt', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: convertUint8ArrayToBase64(new Uint8Array([0, 1, 2, 3])), }, ], @@ -180,7 +180,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: base64Data, - mimeType: 'text/plain', + mediaType: 'text/plain', }, ], }, @@ -197,7 +197,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: base64Data, - mimeType: 'text/plain', + mediaType: 'text/plain', }, ], }, @@ -216,7 +216,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: uint8Data, - mimeType: 'text/plain', + mediaType: 'text/plain', }, ], }, @@ -233,7 +233,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: 'SGVsbG8=', // base64 encoded "Hello" - mimeType: 'text/plain', + mediaType: 'text/plain', }, ], }, @@ -251,7 +251,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: new URL('https://example.com/document.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -263,7 +263,7 @@ describe('convertToLanguageModelPrompt', () => { expect(url).toEqual(new URL('https://example.com/document.pdf')); return { data: new Uint8Array([0, 1, 2, 3]), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }; }, }); @@ -274,7 +274,7 
@@ describe('convertToLanguageModelPrompt', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: convertUint8ArrayToBase64(new Uint8Array([0, 1, 2, 3])), }, ], @@ -293,7 +293,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: 'https://example.com/document.pdf', - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -305,7 +305,7 @@ describe('convertToLanguageModelPrompt', () => { expect(url).toEqual(new URL('https://example.com/document.pdf')); return { data: new Uint8Array([0, 1, 2, 3]), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }; }, }); @@ -316,7 +316,7 @@ describe('convertToLanguageModelPrompt', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: convertUint8ArrayToBase64(new Uint8Array([0, 1, 2, 3])), }, ], @@ -335,7 +335,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: 'https://example.com/document.pdf', - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -348,7 +348,7 @@ describe('convertToLanguageModelPrompt', () => { expect(url).toEqual(new URL('https://example.com/document.pdf')); return { data: new Uint8Array([0, 1, 2, 3]), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }; }, }); @@ -359,7 +359,7 @@ describe('convertToLanguageModelPrompt', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: convertUint8ArrayToBase64(new Uint8Array([0, 1, 2, 3])), }, ], @@ -378,7 +378,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: new URL('https://example.com/document.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -395,7 +395,7 @@ describe('convertToLanguageModelPrompt', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: new URL('https://example.com/document.pdf'), }, ], @@ -414,7 +414,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: 'https://example.com/document.pdf', - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -426,7 +426,7 @@ describe('convertToLanguageModelPrompt', () => { expect(url).toEqual(new URL('https://example.com/document.pdf')); return { data: new Uint8Array([0, 1, 2, 3]), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }; }, }); @@ -437,7 +437,7 @@ describe('convertToLanguageModelPrompt', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: convertUint8ArrayToBase64(new Uint8Array([0, 1, 2, 3])), }, ], @@ -456,7 +456,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: 'SGVsbG8sIFdvcmxkIQ==', // "Hello, World!" 
in base64 - mimeType: 'text/plain', + mediaType: 'text/plain', filename: 'hello.txt', }, ], @@ -474,7 +474,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: 'SGVsbG8sIFdvcmxkIQ==', - mimeType: 'text/plain', + mediaType: 'text/plain', filename: 'hello.txt', }, ], @@ -493,7 +493,7 @@ describe('convertToLanguageModelPrompt', () => { { type: 'file', data: new URL('https://example.com/document.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', filename: 'important-document.pdf', }, ], @@ -506,7 +506,7 @@ describe('convertToLanguageModelPrompt', () => { expect(url).toEqual(new URL('https://example.com/document.pdf')); return { data: new Uint8Array([0, 1, 2, 3]), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }; }, }); @@ -517,7 +517,7 @@ describe('convertToLanguageModelPrompt', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: convertUint8ArrayToBase64(new Uint8Array([0, 1, 2, 3])), filename: 'important-document.pdf', }, @@ -629,7 +629,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: new URL('https://example.com/image.jpg'), - mimeType: 'image/*', // wildcard since we don't know the exact type + mediaType: 'image/*', // wildcard since we don't know the exact type }, ], }); @@ -655,20 +655,20 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: '/9j/3Q==', - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, ], }); }); - it('should prefer detected mimetype', async () => { + it('should prefer detected mediaType', async () => { const result = convertToLanguageModelMessage( { role: 'user', content: [ { type: 'image', - // incorrect mimetype: + // incorrect mediaType: image: 'data:image/png;base64,/9j/3Q==', }, ], @@ -682,7 +682,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: '/9j/3Q==', - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, ], }); @@ -698,7 +698,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: 'https://example.com/image.jpg', - mimeType: 'image/jpg', + mediaType: 'image/jpg', }, ], }, @@ -711,7 +711,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: new URL('https://example.com/image.jpg'), - mimeType: 'image/jpg', + mediaType: 'image/jpg', }, ], }); @@ -725,7 +725,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: 'data:image/jpg;base64,dGVzdA==', - mimeType: 'image/jpg', + mediaType: 'image/jpg', }, ], }, @@ -738,7 +738,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: 'dGVzdA==', - mimeType: 'image/jpg', + mediaType: 'image/jpg', }, ], }); @@ -922,7 +922,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: 'dGVzdA==', // "test" in base64 - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -935,7 +935,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: 'dGVzdA==', - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }); @@ -949,7 +949,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: 'dGVzdA==', - mimeType: 'application/pdf', + mediaType: 'application/pdf', filename: 'test-document.pdf', }, ], @@ -963,7 +963,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: 'dGVzdA==', - mimeType: 'application/pdf', + mediaType: 'application/pdf', filename: 'test-document.pdf', }, ], @@ -978,7 +978,7 @@ describe('convertToLanguageModelMessage', () => { { 
type: 'file', data: 'dGVzdA==', - mimeType: 'application/pdf', + mediaType: 'application/pdf', providerOptions: { 'test-provider': { 'key-a': 'test-value-1', @@ -997,7 +997,7 @@ describe('convertToLanguageModelMessage', () => { { type: 'file', data: 'dGVzdA==', - mimeType: 'application/pdf', + mediaType: 'application/pdf', providerOptions: { 'test-provider': { 'key-a': 'test-value-1', @@ -1124,7 +1124,7 @@ describe('convertToLanguageModelMessage', () => { toolCallId: 'toolCallId', result: { some: 'result' }, experimental_content: [ - { type: 'image', data: 'dGVzdA==', mimeType: 'image/png' }, + { type: 'image', data: 'dGVzdA==', mediaType: 'image/png' }, ], }, ], @@ -1141,7 +1141,7 @@ describe('convertToLanguageModelMessage', () => { toolCallId: 'toolCallId', toolName: 'toolName', content: [ - { type: 'image', data: 'dGVzdA==', mimeType: 'image/png' }, + { type: 'image', data: 'dGVzdA==', mediaType: 'image/png' }, ], }, ], diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.ts index c4f806ac9308..77ba95fc6594 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.ts @@ -8,9 +8,9 @@ import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; import { download } from '../../util/download'; import { CoreMessage } from '../prompt/message'; import { - detectMimeType, - imageMimeTypeSignatures, -} from '../util/detect-mimetype'; + detectMediaType, + imageMediaTypeSignatures, +} from '../util/detect-media-type'; import { FilePart, ImagePart, TextPart } from './content-part'; import { convertDataContentToBase64String, @@ -60,7 +60,7 @@ export function convertToLanguageModelMessage( message: CoreMessage, downloadedAssets: Record< string, - { mimeType: string | undefined; data: Uint8Array } + { mediaType: string | undefined; data: Uint8Array } >, ): LanguageModelV2Message { const role = message.role; @@ -125,7 +125,7 @@ export function convertToLanguageModelMessage( ? part.data : convertDataContentToBase64String(part.data), filename: part.filename, - mimeType: part.mimeType, + mediaType: part.mediaType ?? part.mimeType, providerOptions, }; } @@ -200,7 +200,9 @@ async function downloadAssets( downloadImplementation: typeof download, modelSupportsImageUrls: boolean | undefined, modelSupportsUrl: (url: URL) => boolean, -): Promise> { +): Promise< + Record +> { const urls = messages .filter(message => message.role === 'user') .map(message => message.content) @@ -259,7 +261,7 @@ function convertPartToLanguageModelPart( part: TextPart | ImagePart | FilePart, downloadedAssets: Record< string, - { mimeType: string | undefined; data: Uint8Array } + { mediaType: string | undefined; data: Uint8Array } >, ): LanguageModelV2TextPart | LanguageModelV2FilePart { if (part.type === 'text') { @@ -271,7 +273,7 @@ function convertPartToLanguageModelPart( }; } - let mimeType: string | undefined = part.mimeType; + let mediaType: string | undefined = part.mediaType ?? 
part.mimeType; let data: DataContent | URL; let content: DataContent | URL | string; let normalizedData: Uint8Array | URL; @@ -302,15 +304,15 @@ function convertPartToLanguageModelPart( if (content instanceof URL) { // If the content is a data URL, we want to convert that to a Uint8Array if (content.protocol === 'data:') { - const { mimeType: dataUrlMimeType, base64Content } = splitDataUrl( + const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl( content.toString(), ); - if (dataUrlMimeType == null || base64Content == null) { + if (dataUrlMediaType == null || base64Content == null) { throw new Error(`Invalid data URL format in part ${type}`); } - mimeType = dataUrlMimeType; + mediaType = dataUrlMediaType; normalizedData = convertDataContentToUint8Array(base64Content); } else { /** @@ -321,7 +323,7 @@ function convertPartToLanguageModelPart( const downloadedFile = downloadedAssets[content.toString()]; if (downloadedFile) { normalizedData = downloadedFile.data; - mimeType ??= downloadedFile.mimeType; + mediaType ??= downloadedFile.mediaType; } else { normalizedData = content; } @@ -336,21 +338,21 @@ function convertPartToLanguageModelPart( // we can create the LanguageModelV2Part. switch (type) { case 'image': { - // When possible, try to detect the mimetype automatically - // to deal with incorrect mimetype inputs. - // When detection fails, use provided mimetype. + // When possible, try to detect the media type automatically + // to deal with incorrect media type inputs. + // When detection fails, use provided media type. if (normalizedData instanceof Uint8Array) { - mimeType = - detectMimeType({ + mediaType = + detectMediaType({ data: normalizedData, - signatures: imageMimeTypeSignatures, - }) ?? mimeType; + signatures: imageMediaTypeSignatures, + }) ?? mediaType; } return { type: 'file', - mimeType: mimeType ?? 'image/*', // any image + mediaType: mediaType ?? 'image/*', // any image filename: undefined, data: normalizedData instanceof Uint8Array @@ -362,14 +364,14 @@ function convertPartToLanguageModelPart( } case 'file': { - // We should have a mimeType at this point, if not, throw an error. - if (mimeType == null) { - throw new Error(`Mime type is missing for file part`); + // We should have a mediaType at this point, if not, throw an error. 
+ if (mediaType == null) { + throw new Error(`Media type is missing for file part`); } return { type: 'file', - mimeType, + mediaType, filename: part.filename, data: normalizedData instanceof Uint8Array diff --git a/packages/ai/core/prompt/split-data-url.ts b/packages/ai/core/prompt/split-data-url.ts index dd9697417377..ced71e78c355 100644 --- a/packages/ai/core/prompt/split-data-url.ts +++ b/packages/ai/core/prompt/split-data-url.ts @@ -1,16 +1,16 @@ export function splitDataUrl(dataUrl: string): { - mimeType: string | undefined; + mediaType: string | undefined; base64Content: string | undefined; } { try { const [header, base64Content] = dataUrl.split(','); return { - mimeType: header.split(';')[0].split(':')[1], + mediaType: header.split(';')[0].split(':')[1], base64Content, }; } catch (error) { return { - mimeType: undefined, + mediaType: undefined, base64Content: undefined, }; } diff --git a/packages/ai/core/prompt/tool-result-content.ts b/packages/ai/core/prompt/tool-result-content.ts index 4f5791403bf2..7d0e983e7988 100644 --- a/packages/ai/core/prompt/tool-result-content.ts +++ b/packages/ai/core/prompt/tool-result-content.ts @@ -8,6 +8,11 @@ export type ToolResultContent = Array< | { type: 'image'; data: string; // base64 encoded png image, e.g. screenshot + mediaType?: string; // e.g. 'image/png'; + + /** + * @deprecated Use `mediaType` instead. + */ mimeType?: string; // e.g. 'image/png'; } >; @@ -18,7 +23,7 @@ export const toolResultContentSchema: z.ZodType<ToolResultContent> = z.array( z.object({ type: z.literal('image'), data: z.string(), - mimeType: z.string().optional(), + mediaType: z.string().optional(), }), ]), ); diff --git a/packages/ai/core/transcribe/transcribe.ts b/packages/ai/core/transcribe/transcribe.ts index 1c7029102fc2..4400a91918ad 100644 --- a/packages/ai/core/transcribe/transcribe.ts +++ b/packages/ai/core/transcribe/transcribe.ts @@ -8,9 +8,9 @@ import { ProviderOptions } from '../types/provider-metadata'; import { TranscriptionWarning } from '../types/transcription-model'; import { TranscriptionModelResponseMetadata } from '../types/transcription-model-response-metadata'; import { - audioMimeTypeSignatures, - detectMimeType, -} from '../util/detect-mimetype'; + audioMediaTypeSignatures, + detectMediaType, +} from '../util/detect-media-type'; import { TranscriptionResult } from './transcribe-result'; /** @@ -91,9 +91,9 @@ Only applicable for HTTP-based providers. headers, providerOptions, mediaType: - detectMimeType({ + detectMediaType({ data: audioData, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }) ?? 
'audio/wav', }), ); diff --git a/packages/ai/core/util/detect-mimetype.test.ts b/packages/ai/core/util/detect-media-type.test.ts similarity index 72% rename from packages/ai/core/util/detect-mimetype.test.ts rename to packages/ai/core/util/detect-media-type.test.ts index d965c712d359..505fd71ba1be 100644 --- a/packages/ai/core/util/detect-mimetype.test.ts +++ b/packages/ai/core/util/detect-media-type.test.ts @@ -1,25 +1,28 @@ -import { describe, it, expect } from 'vitest'; +import { describe, expect, it } from 'vitest'; import { - detectMimeType, - imageMimeTypeSignatures, - audioMimeTypeSignatures, -} from './detect-mimetype'; + audioMediaTypeSignatures, + detectMediaType, + imageMediaTypeSignatures, +} from './detect-media-type'; -describe('detectMimeType', () => { +describe('detectMediaType', () => { describe('GIF', () => { it('should detect GIF from bytes', () => { const gifBytes = new Uint8Array([0x47, 0x49, 0x46, 0xff, 0xff]); expect( - detectMimeType({ data: gifBytes, signatures: imageMimeTypeSignatures }), + detectMediaType({ + data: gifBytes, + signatures: imageMediaTypeSignatures, + }), ).toBe('image/gif'); }); it('should detect GIF from base64', () => { const gifBase64 = 'R0lGabc123'; // Base64 string starting with GIF signature expect( - detectMimeType({ + detectMediaType({ data: gifBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/gif'); }); @@ -29,16 +32,19 @@ describe('detectMimeType', () => { it('should detect PNG from bytes', () => { const pngBytes = new Uint8Array([0x89, 0x50, 0x4e, 0x47, 0xff, 0xff]); expect( - detectMimeType({ data: pngBytes, signatures: imageMimeTypeSignatures }), + detectMediaType({ + data: pngBytes, + signatures: imageMediaTypeSignatures, + }), ).toBe('image/png'); }); it('should detect PNG from base64', () => { const pngBase64 = 'iVBORwabc123'; // Base64 string starting with PNG signature expect( - detectMimeType({ + detectMediaType({ data: pngBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/png'); }); @@ -48,9 +54,9 @@ describe('detectMimeType', () => { it('should detect JPEG from bytes', () => { const jpegBytes = new Uint8Array([0xff, 0xd8, 0xff, 0xff]); expect( - detectMimeType({ + detectMediaType({ data: jpegBytes, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/jpeg'); }); @@ -58,9 +64,9 @@ describe('detectMimeType', () => { it('should detect JPEG from base64', () => { const jpegBase64 = '/9j/abc123'; // Base64 string starting with JPEG signature expect( - detectMimeType({ + detectMediaType({ data: jpegBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/jpeg'); }); @@ -70,9 +76,9 @@ describe('detectMimeType', () => { it('should detect WebP from bytes', () => { const webpBytes = new Uint8Array([0x52, 0x49, 0x46, 0x46, 0xff, 0xff]); expect( - detectMimeType({ + detectMediaType({ data: webpBytes, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/webp'); }); @@ -80,9 +86,9 @@ describe('detectMimeType', () => { it('should detect WebP from base64', () => { const webpBase64 = 'UklGRgabc123'; // Base64 string starting with WebP signature expect( - detectMimeType({ + detectMediaType({ data: webpBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/webp'); }); @@ -92,16 +98,19 @@ describe('detectMimeType', () => { it('should detect BMP from bytes', 
() => { const bmpBytes = new Uint8Array([0x42, 0x4d, 0xff, 0xff]); expect( - detectMimeType({ data: bmpBytes, signatures: imageMimeTypeSignatures }), + detectMediaType({ + data: bmpBytes, + signatures: imageMediaTypeSignatures, + }), ).toBe('image/bmp'); }); it('should detect BMP from base64', () => { const bmpBase64 = 'Qkabc123'; // Base64 string starting with BMP signature expect( - detectMimeType({ + detectMediaType({ data: bmpBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/bmp'); }); @@ -111,9 +120,9 @@ describe('detectMimeType', () => { it('should detect TIFF (little endian) from bytes', () => { const tiffLEBytes = new Uint8Array([0x49, 0x49, 0x2a, 0x00, 0xff]); expect( - detectMimeType({ + detectMediaType({ data: tiffLEBytes, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/tiff'); }); @@ -121,9 +130,9 @@ describe('detectMimeType', () => { it('should detect TIFF (little endian) from base64', () => { const tiffLEBase64 = 'SUkqAAabc123'; // Base64 string starting with TIFF LE signature expect( - detectMimeType({ + detectMediaType({ data: tiffLEBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/tiff'); }); @@ -131,9 +140,9 @@ describe('detectMimeType', () => { it('should detect TIFF (big endian) from bytes', () => { const tiffBEBytes = new Uint8Array([0x4d, 0x4d, 0x00, 0x2a, 0xff]); expect( - detectMimeType({ + detectMediaType({ data: tiffBEBytes, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/tiff'); }); @@ -141,9 +150,9 @@ describe('detectMimeType', () => { it('should detect TIFF (big endian) from base64', () => { const tiffBEBase64 = 'TU0AKgabc123'; // Base64 string starting with TIFF BE signature expect( - detectMimeType({ + detectMediaType({ data: tiffBEBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/tiff'); }); @@ -156,9 +165,9 @@ describe('detectMimeType', () => { 0xff, ]); expect( - detectMimeType({ + detectMediaType({ data: avifBytes, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/avif'); }); @@ -166,9 +175,9 @@ describe('detectMimeType', () => { it('should detect AVIF from base64', () => { const avifBase64 = 'AAAAIGZ0eXBhdmlmabc123'; // Base64 string starting with AVIF signature expect( - detectMimeType({ + detectMediaType({ data: avifBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/avif'); }); @@ -181,9 +190,9 @@ describe('detectMimeType', () => { 0xff, ]); expect( - detectMimeType({ + detectMediaType({ data: heicBytes, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/heic'); }); @@ -191,9 +200,9 @@ describe('detectMimeType', () => { it('should detect HEIC from base64', () => { const heicBase64 = 'AAAAIGZ0eXBoZWljabc123'; // Base64 string starting with HEIC signature expect( - detectMimeType({ + detectMediaType({ data: heicBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBe('image/heic'); }); @@ -203,16 +212,19 @@ describe('detectMimeType', () => { it('should detect MP3 from bytes', () => { const mp3Bytes = new Uint8Array([0xff, 0xfb]); expect( - detectMimeType({ data: mp3Bytes, signatures: audioMimeTypeSignatures }), + detectMediaType({ + data: mp3Bytes, + signatures: audioMediaTypeSignatures, + }), 
).toBe('audio/mpeg'); }); it('should detect MP3 from base64', () => { const mp3Base64 = '//s='; // Base64 string starting with MP3 signature expect( - detectMimeType({ + detectMediaType({ data: mp3Base64, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBe('audio/mpeg'); }); @@ -222,16 +234,19 @@ describe('detectMimeType', () => { it('should detect WAV from bytes', () => { const wavBytes = new Uint8Array([0x52, 0x49, 0x46, 0x46]); expect( - detectMimeType({ data: wavBytes, signatures: audioMimeTypeSignatures }), + detectMediaType({ + data: wavBytes, + signatures: audioMediaTypeSignatures, + }), ).toBe('audio/wav'); }); it('should detect WAV from base64', () => { const wavBase64 = 'UklGRiQ='; // Base64 string starting with WAV signature expect( - detectMimeType({ + detectMediaType({ data: wavBase64, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBe('audio/wav'); }); @@ -241,16 +256,19 @@ describe('detectMimeType', () => { it('should detect OGG from bytes', () => { const oggBytes = new Uint8Array([0x4f, 0x67, 0x67, 0x53]); expect( - detectMimeType({ data: oggBytes, signatures: audioMimeTypeSignatures }), + detectMediaType({ + data: oggBytes, + signatures: audioMediaTypeSignatures, + }), ).toBe('audio/ogg'); }); it('should detect OGG from base64', () => { const oggBase64 = 'T2dnUw'; // Base64 string starting with OGG signature expect( - detectMimeType({ + detectMediaType({ data: oggBase64, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBe('audio/ogg'); }); @@ -260,9 +278,9 @@ describe('detectMimeType', () => { it('should detect FLAC from bytes', () => { const flacBytes = new Uint8Array([0x66, 0x4c, 0x61, 0x43]); expect( - detectMimeType({ + detectMediaType({ data: flacBytes, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBe('audio/flac'); }); @@ -270,9 +288,9 @@ describe('detectMimeType', () => { it('should detect FLAC from base64', () => { const flacBase64 = 'ZkxhQw'; // Base64 string starting with FLAC signature expect( - detectMimeType({ + detectMediaType({ data: flacBase64, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBe('audio/flac'); }); @@ -282,16 +300,19 @@ describe('detectMimeType', () => { it('should detect AAC from bytes', () => { const aacBytes = new Uint8Array([0x40, 0x15, 0x00, 0x00]); expect( - detectMimeType({ data: aacBytes, signatures: audioMimeTypeSignatures }), + detectMediaType({ + data: aacBytes, + signatures: audioMediaTypeSignatures, + }), ).toBe('audio/aac'); }); it('should detect AAC from base64', () => { const aacBase64 = 'QBUA'; // Base64 string starting with AAC signature expect( - detectMimeType({ + detectMediaType({ data: aacBase64, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBe('audio/aac'); }); @@ -301,16 +322,19 @@ describe('detectMimeType', () => { it('should detect MP4 from bytes', () => { const mp4Bytes = new Uint8Array([0x66, 0x74, 0x79, 0x70]); expect( - detectMimeType({ data: mp4Bytes, signatures: audioMimeTypeSignatures }), + detectMediaType({ + data: mp4Bytes, + signatures: audioMediaTypeSignatures, + }), ).toBe('audio/mp4'); }); it('should detect MP4 from base64', () => { const mp4Base64 = 'ZnR5cA'; // Base64 string starting with MP4 signature expect( - detectMimeType({ + detectMediaType({ data: mp4Base64, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBe('audio/mp4'); }); 
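Worth noting alongside these test hunks: detection is purely prefix-based, so callers pass the signature list for the modality they expect. A minimal sketch, assuming the `detect-media-type` exports shown in this patch:

```ts
import {
  audioMediaTypeSignatures,
  detectMediaType,
  imageMediaTypeSignatures,
} from './detect-media-type';

// The 'RIFF' prefix (0x52 0x49 0x46 0x46) starts both WAV audio and WebP
// images, so the chosen signature list determines the result.
const riffBytes = new Uint8Array([0x52, 0x49, 0x46, 0x46]);

detectMediaType({ data: riffBytes, signatures: audioMediaTypeSignatures }); // 'audio/wav'
detectMediaType({ data: riffBytes, signatures: imageMediaTypeSignatures }); // 'image/webp'
```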
@@ -320,9 +344,9 @@ describe('detectMimeType', () => { it('should return undefined for unknown image formats', () => { const unknownBytes = new Uint8Array([0x00, 0x01, 0x02, 0x03]); expect( - detectMimeType({ + detectMediaType({ data: unknownBytes, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBeUndefined(); }); @@ -330,9 +354,9 @@ describe('detectMimeType', () => { it('should return undefined for unknown audio formats', () => { const unknownBytes = new Uint8Array([0x00, 0x01, 0x02, 0x03]); expect( - detectMimeType({ + detectMediaType({ data: unknownBytes, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBeUndefined(); }); @@ -340,9 +364,9 @@ describe('detectMimeType', () => { it('should return undefined for empty arrays for image', () => { const emptyBytes = new Uint8Array([]); expect( - detectMimeType({ + detectMediaType({ data: emptyBytes, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBeUndefined(); }); @@ -350,9 +374,9 @@ describe('detectMimeType', () => { it('should return undefined for empty arrays for audio', () => { const emptyBytes = new Uint8Array([]); expect( - detectMimeType({ + detectMediaType({ data: emptyBytes, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBeUndefined(); }); @@ -360,9 +384,9 @@ describe('detectMimeType', () => { it('should return undefined for arrays shorter than signature length for image', () => { const shortBytes = new Uint8Array([0x89, 0x50]); // Incomplete PNG signature expect( - detectMimeType({ + detectMediaType({ data: shortBytes, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBeUndefined(); }); @@ -370,9 +394,9 @@ describe('detectMimeType', () => { it('should return undefined for arrays shorter than signature length for audio', () => { const shortBytes = new Uint8Array([0x4f, 0x67]); // Incomplete OGG signature expect( - detectMimeType({ + detectMediaType({ data: shortBytes, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBeUndefined(); }); @@ -380,9 +404,9 @@ describe('detectMimeType', () => { it('should return undefined for invalid base64 strings for image', () => { const invalidBase64 = 'invalid123'; expect( - detectMimeType({ + detectMediaType({ data: invalidBase64, - signatures: imageMimeTypeSignatures, + signatures: imageMediaTypeSignatures, }), ).toBeUndefined(); }); @@ -390,9 +414,9 @@ describe('detectMimeType', () => { it('should return undefined for invalid base64 strings for audio', () => { const invalidBase64 = 'invalid123'; expect( - detectMimeType({ + detectMediaType({ data: invalidBase64, - signatures: audioMimeTypeSignatures, + signatures: audioMediaTypeSignatures, }), ).toBeUndefined(); }); diff --git a/packages/ai/core/util/detect-mimetype.ts b/packages/ai/core/util/detect-media-type.ts similarity index 60% rename from packages/ai/core/util/detect-mimetype.ts rename to packages/ai/core/util/detect-media-type.ts index 1851e3217a03..1c05dd57fe59 100644 --- a/packages/ai/core/util/detect-mimetype.ts +++ b/packages/ai/core/util/detect-media-type.ts @@ -1,48 +1,48 @@ -export const imageMimeTypeSignatures = [ +export const imageMediaTypeSignatures = [ { - mimeType: 'image/gif' as const, + mediaType: 'image/gif' as const, bytesPrefix: [0x47, 0x49, 0x46], base64Prefix: 'R0lG', }, { - mimeType: 'image/png' as const, + mediaType: 'image/png' as const, bytesPrefix: [0x89, 0x50, 0x4e, 0x47], base64Prefix: 
'iVBORw', }, { - mimeType: 'image/jpeg' as const, + mediaType: 'image/jpeg' as const, bytesPrefix: [0xff, 0xd8], base64Prefix: '/9j/', }, { - mimeType: 'image/webp' as const, + mediaType: 'image/webp' as const, bytesPrefix: [0x52, 0x49, 0x46, 0x46], base64Prefix: 'UklGRg', }, { - mimeType: 'image/bmp' as const, + mediaType: 'image/bmp' as const, bytesPrefix: [0x42, 0x4d], base64Prefix: 'Qk', }, { - mimeType: 'image/tiff' as const, + mediaType: 'image/tiff' as const, bytesPrefix: [0x49, 0x49, 0x2a, 0x00], base64Prefix: 'SUkqAA', }, { - mimeType: 'image/tiff' as const, + mediaType: 'image/tiff' as const, bytesPrefix: [0x4d, 0x4d, 0x00, 0x2a], base64Prefix: 'TU0AKg', }, { - mimeType: 'image/avif' as const, + mediaType: 'image/avif' as const, bytesPrefix: [ 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x61, 0x76, 0x69, 0x66, ], base64Prefix: 'AAAAIGZ0eXBhdmlm', }, { - mimeType: 'image/heic' as const, + mediaType: 'image/heic' as const, bytesPrefix: [ 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70, 0x68, 0x65, 0x69, 0x63, ], @@ -50,46 +50,53 @@ }, ] as const; -export const audioMimeTypeSignatures = [ +export const audioMediaTypeSignatures = [ { - mimeType: 'audio/mpeg' as const, + mediaType: 'audio/mpeg' as const, bytesPrefix: [0xff, 0xfb], base64Prefix: '//s=', }, { - mimeType: 'audio/wav' as const, + mediaType: 'audio/wav' as const, bytesPrefix: [0x52, 0x49, 0x46, 0x46], base64Prefix: 'UklGR', }, { - mimeType: 'audio/ogg' as const, + mediaType: 'audio/ogg' as const, bytesPrefix: [0x4f, 0x67, 0x67, 0x53], base64Prefix: 'T2dnUw', }, { - mimeType: 'audio/flac' as const, + mediaType: 'audio/flac' as const, bytesPrefix: [0x66, 0x4c, 0x61, 0x43], base64Prefix: 'ZkxhQw', }, { - mimeType: 'audio/aac' as const, + mediaType: 'audio/aac' as const, bytesPrefix: [0x40, 0x15, 0x00, 0x00], base64Prefix: 'QBUA', }, { - mimeType: 'audio/mp4' as const, + mediaType: 'audio/mp4' as const, bytesPrefix: [0x66, 0x74, 0x79, 0x70], base64Prefix: 'ZnR5cA', }, ] as const; -export function detectMimeType({ +/** + * Detect the IANA media type of a file using a list of signatures. + * + * @param data - The file data. + * @param signatures - The signatures to use for detection. + * @returns The media type of the file. 
+ */ +export function detectMediaType({ data, signatures, }: { data: Uint8Array | string; - signatures: typeof audioMimeTypeSignatures | typeof imageMimeTypeSignatures; -}): (typeof signatures)[number]['mimeType'] | undefined { + signatures: typeof audioMediaTypeSignatures | typeof imageMediaTypeSignatures; +}): (typeof signatures)[number]['mediaType'] | undefined { for (const signature of signatures) { if ( typeof data === 'string' @@ -97,7 +104,7 @@ export function detectMimeType({ : data.length >= signature.bytesPrefix.length && signature.bytesPrefix.every((byte, index) => data[index] === byte) ) { - return signature.mimeType; + return signature.mediaType; } } diff --git a/packages/ai/util/download.test.ts b/packages/ai/util/download.test.ts index f0f94df897df..ec59cdc36248 100644 --- a/packages/ai/util/download.test.ts +++ b/packages/ai/util/download.test.ts @@ -23,7 +23,7 @@ describe('download', () => { }); expect(result.data).toEqual(expectedBytes); - expect(result.mimeType).toBe('application/octet-stream'); + expect(result.mediaType).toBe('application/octet-stream'); }); it('should throw DownloadError when response is not ok', async () => { diff --git a/packages/ai/util/download.ts b/packages/ai/util/download.ts index 6a0082fb4c33..6685ec19507f 100644 --- a/packages/ai/util/download.ts +++ b/packages/ai/util/download.ts @@ -2,7 +2,7 @@ import { DownloadError } from './download-error'; export async function download({ url }: { url: URL }): Promise<{ data: Uint8Array; - mimeType: string | undefined; + mediaType: string | undefined; }> { const urlText = url.toString(); try { @@ -18,7 +18,7 @@ export async function download({ url }: { url: URL }): Promise<{ return { data: new Uint8Array(await response.arrayBuffer()), - mimeType: response.headers.get('content-type') ?? undefined, + mediaType: response.headers.get('content-type') ?? 
undefined, }; } catch (error) { if (DownloadError.isInstance(error)) { diff --git a/packages/amazon-bedrock/src/bedrock-prepare-tools.ts b/packages/amazon-bedrock/src/bedrock-prepare-tools.ts index 5c7679f9a653..938fe4c1338c 100644 --- a/packages/amazon-bedrock/src/bedrock-prepare-tools.ts +++ b/packages/amazon-bedrock/src/bedrock-prepare-tools.ts @@ -82,7 +82,7 @@ export function prepareTools({ default: { const _exhaustiveCheck: never = type; throw new UnsupportedFunctionalityError({ - functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + functionality: `tool choice type: ${_exhaustiveCheck}`, }); } } diff --git a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts index 721e3337c4d7..ff39725c11c0 100644 --- a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts +++ b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.test.ts @@ -47,7 +47,7 @@ describe('user messages', () => { { type: 'file', data: Buffer.from(imageData).toString('base64'), - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, @@ -80,7 +80,7 @@ describe('user messages', () => { { type: 'file', data: Buffer.from(fileData).toString('base64'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -484,7 +484,7 @@ describe('tool messages', () => { { type: 'image', data: 'base64data', - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, ], }, @@ -527,7 +527,7 @@ describe('tool messages', () => { { type: 'image', data: 'base64data', - mimeType: 'image/webp', // unsupported format + mediaType: 'image/webp', // unsupported format }, ], }, @@ -552,7 +552,7 @@ describe('tool messages', () => { { type: 'image', data: 'base64data', - // missing mimeType + // missing mediaType }, ], }, diff --git a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts index 119e13450ad2..c7b6ff6dd5f9 100644 --- a/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts +++ b/packages/amazon-bedrock/src/convert-to-bedrock-chat-messages.ts @@ -84,11 +84,11 @@ export function convertToBedrockChatMessages(prompt: LanguageModelV2Prompt): { }); } - if (part.mimeType.startsWith('image/')) { + if (part.mediaType.startsWith('image/')) { const bedrockImageFormat = - part.mimeType === 'image/*' + part.mediaType === 'image/*' ? 
undefined - : part.mimeType?.split('/')?.[1]; + : part.mediaType?.split('/')?.[1]; bedrockContent.push({ image: { @@ -99,7 +99,7 @@ export function convertToBedrockChatMessages(prompt: LanguageModelV2Prompt): { } else { bedrockContent.push({ document: { - format: part.mimeType?.split( + format: part.mediaType?.split( '/', )?.[1] as BedrockDocumentFormat, name: generateFileId(), @@ -129,12 +129,12 @@ export function convertToBedrockChatMessages(prompt: LanguageModelV2Prompt): { text: part.text, }; case 'image': - if (!part.mimeType) { + if (!part.mediaType) { throw new Error( 'Image mime type is required in tool result part content', ); } - const format = part.mimeType.split('/')[1]; + const format = part.mediaType.split('/')[1]; if (!isBedrockImageFormat(format)) { throw new Error( `Unsupported image format: ${format}`, diff --git a/packages/anthropic/src/anthropic-prepare-tools.ts b/packages/anthropic/src/anthropic-prepare-tools.ts index 67261abbf59c..84bb8effc39a 100644 --- a/packages/anthropic/src/anthropic-prepare-tools.ts +++ b/packages/anthropic/src/anthropic-prepare-tools.ts @@ -138,7 +138,7 @@ export function prepareTools({ default: { const _exhaustiveCheck: never = type; throw new UnsupportedFunctionalityError({ - functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + functionality: `tool choice type: ${_exhaustiveCheck}`, }); } } diff --git a/packages/anthropic/src/anthropic-tools.ts b/packages/anthropic/src/anthropic-tools.ts index 1434431fb27f..c114a42afa28 100644 --- a/packages/anthropic/src/anthropic-tools.ts +++ b/packages/anthropic/src/anthropic-tools.ts @@ -17,7 +17,7 @@ export type ToolResultContent = Array< | { type: 'image'; data: string; // base64 encoded png image, e.g. screenshot - mimeType?: string; // e.g. 'image/png'; + mediaType?: string; // e.g. 
'image/png'; } >; diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts index 9399c5271784..0f017ffb72f7 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts @@ -51,7 +51,7 @@ describe('user messages', () => { { type: 'file', data: 'AAECAw==', - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, @@ -92,7 +92,7 @@ describe('user messages', () => { { type: 'file', data: new URL('https://example.com/image.png'), - mimeType: 'image/*', + mediaType: 'image/*', }, ], }, @@ -132,7 +132,7 @@ describe('user messages', () => { { type: 'file', data: 'base64PDFdata', - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, @@ -174,7 +174,7 @@ describe('user messages', () => { { type: 'file', data: 'base64data', - mimeType: 'text/plain', + mediaType: 'text/plain', }, ], }, @@ -182,9 +182,7 @@ describe('user messages', () => { sendReasoning: true, warnings: [], }), - ).toThrow( - "'unsupported file content type: text/plain' functionality not supported.", - ); + ).toThrow('media type: text/plain'); }); }); @@ -345,7 +343,7 @@ describe('tool messages', () => { { type: 'image', data: 'AAECAw==', - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts index e4ad87e54dcf..5109bd49bbb5 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts @@ -100,7 +100,7 @@ export function convertToAnthropicMessagesPrompt({ } case 'file': { - if (part.mimeType.startsWith('image/')) { + if (part.mediaType.startsWith('image/')) { anthropicContent.push({ type: 'image', source: @@ -112,14 +112,14 @@ export function convertToAnthropicMessagesPrompt({ : { type: 'base64', media_type: - part.mimeType === 'image/*' + part.mediaType === 'image/*' ? 'image/jpeg' - : part.mimeType, + : part.mediaType, data: part.data, }, cache_control: cacheControl, }); - } else if (part.mimeType === 'application/pdf') { + } else if (part.mediaType === 'application/pdf') { if (part.data instanceof URL) { // The AI SDK automatically downloads files for user file parts with URLs throw new UnsupportedFunctionalityError({ @@ -140,7 +140,7 @@ export function convertToAnthropicMessagesPrompt({ }); } else { throw new UnsupportedFunctionalityError({ - functionality: `unsupported file content type: ${part.mimeType}`, + functionality: `media type: ${part.mediaType}`, }); } @@ -181,7 +181,7 @@ export function convertToAnthropicMessagesPrompt({ type: 'image' as const, source: { type: 'base64' as const, - media_type: part.mimeType ?? 'image/jpeg', + media_type: part.mediaType ?? 
'image/jpeg', data: part.data, }, cache_control: undefined, @@ -300,7 +300,7 @@ export function convertToAnthropicMessagesPrompt({ default: { const _exhaustiveCheck: never = type; - throw new Error(`Unsupported type: ${_exhaustiveCheck}`); + throw new Error(`content type: ${_exhaustiveCheck}`); } } } diff --git a/packages/cohere/src/cohere-prepare-tools.ts b/packages/cohere/src/cohere-prepare-tools.ts index b2ac25418440..2ff698d2f91c 100644 --- a/packages/cohere/src/cohere-prepare-tools.ts +++ b/packages/cohere/src/cohere-prepare-tools.ts @@ -86,7 +86,7 @@ export function prepareTools({ default: { const _exhaustiveCheck: never = type; throw new UnsupportedFunctionalityError({ - functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + functionality: `tool choice type: ${_exhaustiveCheck}`, }); } } diff --git a/packages/google/src/convert-to-google-generative-ai-messages.test.ts b/packages/google/src/convert-to-google-generative-ai-messages.test.ts index 6741e71c01e1..640bc927db5c 100644 --- a/packages/google/src/convert-to-google-generative-ai-messages.test.ts +++ b/packages/google/src/convert-to-google-generative-ai-messages.test.ts @@ -33,7 +33,7 @@ describe('user messages', () => { { type: 'file', data: 'AAECAw==', - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, @@ -61,7 +61,7 @@ describe('user messages', () => { const result = convertToGoogleGenerativeAIMessages([ { role: 'user', - content: [{ type: 'file', data: 'AAECAw==', mimeType: 'image/png' }], + content: [{ type: 'file', data: 'AAECAw==', mediaType: 'image/png' }], }, ]); @@ -127,7 +127,7 @@ describe('assistant messages', () => { const result = convertToGoogleGenerativeAIMessages([ { role: 'assistant', - content: [{ type: 'file', data: 'AAECAw==', mimeType: 'image/png' }], + content: [{ type: 'file', data: 'AAECAw==', mediaType: 'image/png' }], }, ]); @@ -154,7 +154,9 @@ describe('assistant messages', () => { convertToGoogleGenerativeAIMessages([ { role: 'assistant', - content: [{ type: 'file', data: 'AAECAw==', mimeType: 'image/jpeg' }], + content: [ + { type: 'file', data: 'AAECAw==', mediaType: 'image/jpeg' }, + ], }, ]), ).toThrow('Only PNG images are supported in assistant messages'); @@ -169,7 +171,7 @@ describe('assistant messages', () => { { type: 'file', data: new URL('https://example.com/image.png'), - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, diff --git a/packages/google/src/convert-to-google-generative-ai-messages.ts b/packages/google/src/convert-to-google-generative-ai-messages.ts index 48ad729ca1e1..d5946c62a77d 100644 --- a/packages/google/src/convert-to-google-generative-ai-messages.ts +++ b/packages/google/src/convert-to-google-generative-ai-messages.ts @@ -43,13 +43,18 @@ export function convertToGoogleGenerativeAIMessages( case 'file': { // default to image/jpeg for unknown image/* types - const mimeType = - part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + const mediaType = + part.mediaType === 'image/*' ? 'image/jpeg' : part.mediaType; parts.push( part.data instanceof URL - ? { fileData: { mimeType, fileUri: part.data.toString() } } - : { inlineData: { mimeType, data: part.data } }, + ? 
{ + fileData: { + mimeType: mediaType, + fileUri: part.data.toString(), + }, + } + : { inlineData: { mimeType: mediaType, data: part.data } }, ); break; @@ -76,7 +81,7 @@ export function convertToGoogleGenerativeAIMessages( } case 'file': { - if (part.mimeType !== 'image/png') { + if (part.mediaType !== 'image/png') { throw new UnsupportedFunctionalityError({ functionality: 'Only PNG images are supported in assistant messages', @@ -92,7 +97,7 @@ export function convertToGoogleGenerativeAIMessages( return { inlineData: { - mimeType: part.mimeType, + mimeType: part.mediaType, data: part.data, }, }; diff --git a/packages/google/src/google-generative-ai-language-model.test.ts b/packages/google/src/google-generative-ai-language-model.test.ts index 405948232c04..b867118e106e 100644 --- a/packages/google/src/google-generative-ai-language-model.test.ts +++ b/packages/google/src/google-generative-ai-language-model.test.ts @@ -1132,11 +1132,11 @@ describe('doGenerate', () => { expect(files).toStrictEqual([ { data: 'base64encodedimagedata', - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, { data: 'anotherbase64encodedimagedata', - mimeType: 'image/png', + mediaType: 'image/png', }, ]); }); @@ -1187,11 +1187,11 @@ describe('doGenerate', () => { expect(files).toStrictEqual([ { data: 'imagedata1', - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, { data: 'imagedata2', - mimeType: 'image/png', + mediaType: 'image/png', }, ]); }); @@ -1258,11 +1258,11 @@ describe('doGenerate', () => { expect(files).toStrictEqual([ { data: 'validimagedata', - mimeType: 'image/jpeg', + mediaType: 'image/jpeg', }, { data: 'pdfdata', - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ]); }); @@ -1783,7 +1783,7 @@ describe('doStream', () => { expect(events.filter(event => event.type === 'error')).toEqual([]); // no errors expect(events.filter(event => event.type === 'file')).toEqual([ - { type: 'file', mimeType: 'text/plain', data: 'test' }, + { type: 'file', mediaType: 'text/plain', data: 'test' }, ]); }); diff --git a/packages/google/src/google-generative-ai-language-model.ts b/packages/google/src/google-generative-ai-language-model.ts index 6e806e95be12..3c3bace6e226 100644 --- a/packages/google/src/google-generative-ai-language-model.ts +++ b/packages/google/src/google-generative-ai-language-model.ts @@ -199,7 +199,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV2 { text: getTextFromParts(parts), files: getInlineDataParts(parts)?.map(part => ({ data: part.inlineData.data, - mimeType: part.inlineData.mimeType, + mediaType: part.inlineData.mimeType, })), toolCalls, finishReason: mapGoogleGenerativeAIFinishReason({ @@ -310,7 +310,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV2 { for (const part of inlineDataParts) { controller.enqueue({ type: 'file', - mimeType: part.inlineData.mimeType, + mediaType: part.inlineData.mimeType, data: part.inlineData.data, }); } diff --git a/packages/google/src/google-prepare-tools.ts b/packages/google/src/google-prepare-tools.ts index 25b61cca4eb1..b320129efd11 100644 --- a/packages/google/src/google-prepare-tools.ts +++ b/packages/google/src/google-prepare-tools.ts @@ -131,7 +131,7 @@ export function prepareTools({ default: { const _exhaustiveCheck: never = type; throw new UnsupportedFunctionalityError({ - functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + functionality: `tool choice type: ${_exhaustiveCheck}`, }); } } diff --git a/packages/groq/src/convert-to-groq-chat-messages.test.ts 
b/packages/groq/src/convert-to-groq-chat-messages.test.ts index 3d124c860401..56d9ee7eb6f7 100644 --- a/packages/groq/src/convert-to-groq-chat-messages.test.ts +++ b/packages/groq/src/convert-to-groq-chat-messages.test.ts @@ -10,7 +10,7 @@ describe('user messages', () => { { type: 'file', data: 'AAECAw==', - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, diff --git a/packages/groq/src/convert-to-groq-chat-messages.ts b/packages/groq/src/convert-to-groq-chat-messages.ts index 81535300e54c..3fb9792d6fce 100644 --- a/packages/groq/src/convert-to-groq-chat-messages.ts +++ b/packages/groq/src/convert-to-groq-chat-messages.ts @@ -30,14 +30,14 @@ export function convertToGroqChatMessages( return { type: 'text', text: part.text }; } case 'file': { - if (!part.mimeType.startsWith('image/')) { + if (!part.mediaType.startsWith('image/')) { throw new UnsupportedFunctionalityError({ functionality: 'Non-image file content parts', }); } - const mimeType = - part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + const mediaType = + part.mediaType === 'image/*' ? 'image/jpeg' : part.mediaType; return { type: 'image_url', @@ -45,7 +45,7 @@ export function convertToGroqChatMessages( url: part.data instanceof URL ? part.data.toString() - : `data:${mimeType};base64,${part.data}`, + : `data:${mediaType};base64,${part.data}`, }, }; } diff --git a/packages/groq/src/groq-prepare-tools.ts b/packages/groq/src/groq-prepare-tools.ts index ef8a5ef8a8f4..be03925b540a 100644 --- a/packages/groq/src/groq-prepare-tools.ts +++ b/packages/groq/src/groq-prepare-tools.ts @@ -87,7 +87,7 @@ export function prepareTools({ default: { const _exhaustiveCheck: never = type; throw new UnsupportedFunctionalityError({ - functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + functionality: `tool choice type: ${_exhaustiveCheck}`, }); } } diff --git a/packages/mistral/src/convert-to-mistral-chat-messages.test.ts b/packages/mistral/src/convert-to-mistral-chat-messages.test.ts index 82808c3acef3..7ae44aaba1bc 100644 --- a/packages/mistral/src/convert-to-mistral-chat-messages.test.ts +++ b/packages/mistral/src/convert-to-mistral-chat-messages.test.ts @@ -10,7 +10,7 @@ describe('user messages', () => { { type: 'file', data: 'AAECAw==', - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, @@ -28,7 +28,7 @@ describe('user messages', () => { { type: 'file', data: new URL('https://example.com/document.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/packages/mistral/src/convert-to-mistral-chat-messages.ts b/packages/mistral/src/convert-to-mistral-chat-messages.ts index 4dc1ca6bb89a..42ce7f96e7e2 100644 --- a/packages/mistral/src/convert-to-mistral-chat-messages.ts +++ b/packages/mistral/src/convert-to-mistral-chat-messages.ts @@ -29,18 +29,20 @@ export function convertToMistralChatMessages( } case 'file': { - if (part.mimeType.startsWith('image/')) { - const mimeType = - part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + if (part.mediaType.startsWith('image/')) { + const mediaType = + part.mediaType === 'image/*' + ? 'image/jpeg' + : part.mediaType; return { type: 'image_url', image_url: part.data instanceof URL ? 
part.data.toString() - : `data:${mimeType};base64,${part.data}`, + : `data:${mediaType};base64,${part.data}`, }; - } else if (part.mimeType === 'application/pdf') { + } else if (part.mediaType === 'application/pdf') { return { type: 'document_url', document_url: part.data.toString(), diff --git a/packages/mistral/src/mistral-prepare-tools.ts b/packages/mistral/src/mistral-prepare-tools.ts index d6b17f1d9d1a..76d521884688 100644 --- a/packages/mistral/src/mistral-prepare-tools.ts +++ b/packages/mistral/src/mistral-prepare-tools.ts @@ -84,7 +84,7 @@ export function prepareTools({ default: { const _exhaustiveCheck: never = type; throw new UnsupportedFunctionalityError({ - functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + functionality: `tool choice type: ${_exhaustiveCheck}`, }); } } diff --git a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts index 3737dc26f2e5..0af02da91f80 100644 --- a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts +++ b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.test.ts @@ -21,7 +21,7 @@ describe('user messages', () => { { type: 'file', data: Buffer.from([0, 1, 2, 3]).toString('base64'), - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, @@ -49,7 +49,7 @@ describe('user messages', () => { { type: 'file', data: new URL('https://example.com/image.jpg'), - mimeType: 'image/*', + mediaType: 'image/*', }, ], }, @@ -250,7 +250,7 @@ describe('provider-specific metadata merging', () => { { type: 'file', data: imageUrl, - mimeType: 'image/*', + mediaType: 'image/*', providerOptions: { openaiCompatible: { cacheControl: { type: 'ephemeral' }, @@ -312,7 +312,7 @@ describe('provider-specific metadata merging', () => { { type: 'file', data: Buffer.from([0, 1, 2, 3]).toString('base64'), - mimeType: 'image/png', + mediaType: 'image/png', providerOptions: { openaiCompatible: { alt_text: 'A sample image' }, }, @@ -485,7 +485,7 @@ describe('provider-specific metadata merging', () => { { type: 'file', data: Buffer.from([9, 8, 7, 6]).toString('base64'), - mimeType: 'image/png', + mediaType: 'image/png', providerOptions: { openaiCompatible: { imagePartLevel: 'image-data' }, }, diff --git a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts index 6ff96e3edc23..3d9cf894c7ed 100644 --- a/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts +++ b/packages/openai-compatible/src/convert-to-openai-compatible-chat-messages.ts @@ -42,9 +42,11 @@ export function convertToOpenAICompatibleChatMessages( return { type: 'text', text: part.text, ...partMetadata }; } case 'file': { - if (part.mimeType.startsWith('image/')) { - const mimeType = - part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + if (part.mediaType.startsWith('image/')) { + const mediaType = + part.mediaType === 'image/*' + ? 'image/jpeg' + : part.mediaType; return { type: 'image_url', @@ -52,13 +54,13 @@ export function convertToOpenAICompatibleChatMessages( url: part.data instanceof URL ? 
part.data.toString() - : `data:${mimeType};base64,${part.data}`, + : `data:${mediaType};base64,${part.data}`, }, ...partMetadata, }; } else { throw new UnsupportedFunctionalityError({ - functionality: `file part media type ${part.mimeType}`, + functionality: `file part media type ${part.mediaType}`, }); } } diff --git a/packages/openai-compatible/src/openai-compatible-prepare-tools.ts b/packages/openai-compatible/src/openai-compatible-prepare-tools.ts index d9fe75aafec5..fd831439f2bf 100644 --- a/packages/openai-compatible/src/openai-compatible-prepare-tools.ts +++ b/packages/openai-compatible/src/openai-compatible-prepare-tools.ts @@ -85,7 +85,7 @@ export function prepareTools({ default: { const _exhaustiveCheck: never = type; throw new UnsupportedFunctionalityError({ - functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + functionality: `tool choice type: ${_exhaustiveCheck}`, }); } } diff --git a/packages/openai/src/convert-to-openai-chat-messages.test.ts b/packages/openai/src/convert-to-openai-chat-messages.test.ts index 811f6a5bbea6..4132f4a465b9 100644 --- a/packages/openai/src/convert-to-openai-chat-messages.test.ts +++ b/packages/openai/src/convert-to-openai-chat-messages.test.ts @@ -55,7 +55,7 @@ describe('user messages', () => { { type: 'text', text: 'Hello' }, { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: Buffer.from([0, 1, 2, 3]).toString('base64'), }, ], @@ -85,7 +85,7 @@ describe('user messages', () => { content: [ { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: Buffer.from([0, 1, 2, 3]).toString('base64'), providerOptions: { openai: { @@ -125,7 +125,7 @@ describe('user messages', () => { { type: 'file', data: 'AAECAw==', - mimeType: 'application/something', + mediaType: 'application/something', }, ], }, @@ -144,7 +144,7 @@ describe('user messages', () => { { type: 'file', data: new URL('https://example.com/foo.wav'), - mimeType: 'audio/wav', + mediaType: 'audio/wav', }, ], }, @@ -162,7 +162,7 @@ describe('user messages', () => { { type: 'file', data: 'AAECAw==', - mimeType: 'audio/wav', + mediaType: 'audio/wav', }, ], }, @@ -191,7 +191,7 @@ describe('user messages', () => { { type: 'file', data: 'AAECAw==', - mimeType: 'audio/mpeg', + mediaType: 'audio/mpeg', }, ], }, @@ -220,7 +220,7 @@ describe('user messages', () => { { type: 'file', data: 'AAECAw==', - mimeType: 'audio/mp3', // not official but sometimes used + mediaType: 'audio/mp3', // not official but sometimes used }, ], }, @@ -250,7 +250,7 @@ describe('user messages', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: base64Data, filename: 'document.pdf', }, @@ -286,7 +286,7 @@ describe('user messages', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: base64Data, }, ], @@ -322,7 +322,7 @@ describe('user messages', () => { content: [ { type: 'file', - mimeType: 'text/plain', + mediaType: 'text/plain', data: base64Data, }, ], @@ -342,7 +342,7 @@ describe('user messages', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: new URL('https://example.com/document.pdf'), }, ], diff --git a/packages/openai/src/convert-to-openai-chat-messages.ts b/packages/openai/src/convert-to-openai-chat-messages.ts index 3320d303a036..58fb06d9b588 100644 --- a/packages/openai/src/convert-to-openai-chat-messages.ts +++ b/packages/openai/src/convert-to-openai-chat-messages.ts @@ -63,9 +63,11 @@ export function 
convertToOpenAIChatMessages({ return { type: 'text', text: part.text }; } case 'file': { - if (part.mimeType.startsWith('image/')) { - const mimeType = - part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + if (part.mediaType.startsWith('image/')) { + const mediaType = + part.mediaType === 'image/*' + ? 'image/jpeg' + : part.mediaType; return { type: 'image_url', @@ -73,20 +75,20 @@ export function convertToOpenAIChatMessages({ url: part.data instanceof URL ? part.data.toString() - : `data:${mimeType};base64,${part.data}`, + : `data:${mediaType};base64,${part.data}`, // OpenAI specific extension: image detail detail: part.providerOptions?.openai?.imageDetail, }, }; - } else if (part.mimeType.startsWith('audio/')) { + } else if (part.mediaType.startsWith('audio/')) { if (part.data instanceof URL) { throw new UnsupportedFunctionalityError({ functionality: 'audio file parts with URLs', }); } - switch (part.mimeType) { + switch (part.mediaType) { case 'audio/wav': { return { type: 'input_audio', @@ -103,11 +105,11 @@ export function convertToOpenAIChatMessages({ default: { throw new UnsupportedFunctionalityError({ - functionality: `audio content parts with media type ${part.mimeType}`, + functionality: `audio content parts with media type ${part.mediaType}`, }); } } - } else if (part.mimeType === 'application/pdf') { + } else if (part.mediaType === 'application/pdf') { if (part.data instanceof URL) { throw new UnsupportedFunctionalityError({ functionality: 'PDF file parts with URLs', @@ -123,7 +125,7 @@ export function convertToOpenAIChatMessages({ }; } else { throw new UnsupportedFunctionalityError({ - functionality: `file part media type ${part.mimeType}`, + functionality: `file part media type ${part.mediaType}`, }); } } diff --git a/packages/openai/src/openai-prepare-tools.ts b/packages/openai/src/openai-prepare-tools.ts index 47b02d8be868..1e5516296c8a 100644 --- a/packages/openai/src/openai-prepare-tools.ts +++ b/packages/openai/src/openai-prepare-tools.ts @@ -151,7 +151,7 @@ export function prepareTools({ default: { const _exhaustiveCheck: never = type; throw new UnsupportedFunctionalityError({ - functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + functionality: `tool choice type: ${_exhaustiveCheck}`, }); } } diff --git a/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts b/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts index fc47b68c34f2..6531c19dec17 100644 --- a/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts +++ b/packages/openai/src/responses/convert-to-openai-responses-messages.test.ts @@ -58,7 +58,7 @@ describe('convertToOpenAIResponsesMessages', () => { { type: 'text', text: 'Hello' }, { type: 'file', - mimeType: 'image/*', + mediaType: 'image/*', data: new URL('https://example.com/image.jpg'), }, ], @@ -89,7 +89,7 @@ describe('convertToOpenAIResponsesMessages', () => { content: [ { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: Buffer.from([0, 1, 2, 3]).toString('base64'), }, ], @@ -119,7 +119,7 @@ describe('convertToOpenAIResponsesMessages', () => { content: [ { type: 'file', - mimeType: 'image/*', + mediaType: 'image/*', data: Buffer.from([0, 1, 2, 3]).toString('base64'), }, ], @@ -149,7 +149,7 @@ describe('convertToOpenAIResponsesMessages', () => { content: [ { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: Buffer.from([0, 1, 2, 3]).toString('base64'), providerOptions: { openai: { @@ -187,7 +187,7 @@ 
describe('convertToOpenAIResponsesMessages', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: base64Data, filename: 'document.pdf', }, @@ -221,7 +221,7 @@ describe('convertToOpenAIResponsesMessages', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: base64Data, }, ], @@ -255,7 +255,7 @@ describe('convertToOpenAIResponsesMessages', () => { content: [ { type: 'file', - mimeType: 'text/plain', + mediaType: 'text/plain', data: base64Data, }, ], @@ -275,7 +275,7 @@ describe('convertToOpenAIResponsesMessages', () => { content: [ { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: new URL('https://example.com/document.pdf'), }, ], diff --git a/packages/openai/src/responses/convert-to-openai-responses-messages.ts b/packages/openai/src/responses/convert-to-openai-responses-messages.ts index 33b87266dd2d..ce282ab001a8 100644 --- a/packages/openai/src/responses/convert-to-openai-responses-messages.ts +++ b/packages/openai/src/responses/convert-to-openai-responses-messages.ts @@ -56,21 +56,23 @@ export function convertToOpenAIResponsesMessages({ return { type: 'input_text', text: part.text }; } case 'file': { - if (part.mimeType.startsWith('image/')) { - const mimeType = - part.mimeType === 'image/*' ? 'image/jpeg' : part.mimeType; + if (part.mediaType.startsWith('image/')) { + const mediaType = + part.mediaType === 'image/*' + ? 'image/jpeg' + : part.mediaType; return { type: 'input_image', image_url: part.data instanceof URL ? part.data.toString() - : `data:${mimeType};base64,${part.data}`, + : `data:${mediaType};base64,${part.data}`, // OpenAI specific extension: image detail detail: part.providerOptions?.openai?.imageDetail, }; - } else if (part.mimeType === 'application/pdf') { + } else if (part.mediaType === 'application/pdf') { if (part.data instanceof URL) { // The AI SDK automatically downloads files for user file parts with URLs throw new UnsupportedFunctionalityError({ @@ -85,7 +87,7 @@ export function convertToOpenAIResponsesMessages({ }; } else { throw new UnsupportedFunctionalityError({ - functionality: `file part media type ${part.mimeType}`, + functionality: `file part media type ${part.mediaType}`, }); } } diff --git a/packages/openai/src/responses/openai-responses-prepare-tools.ts b/packages/openai/src/responses/openai-responses-prepare-tools.ts index a553b46309f8..631c2a44aea0 100644 --- a/packages/openai/src/responses/openai-responses-prepare-tools.ts +++ b/packages/openai/src/responses/openai-responses-prepare-tools.ts @@ -95,7 +95,7 @@ export function prepareResponsesTools({ default: { const _exhaustiveCheck: never = type; throw new UnsupportedFunctionalityError({ - functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + functionality: `tool choice type: ${_exhaustiveCheck}`, }); } } diff --git a/packages/provider/src/language-model/v2/language-model-v2-prompt.ts b/packages/provider/src/language-model/v2/language-model-v2-prompt.ts index c233c7c777db..1a2bf7324e68 100644 --- a/packages/provider/src/language-model/v2/language-model-v2-prompt.ts +++ b/packages/provider/src/language-model/v2/language-model-v2-prompt.ts @@ -129,13 +129,13 @@ File data as base64 encoded string or as a URL. data: string | URL; /** -Mime type of the file. +IANA media type of the file. Can support wildcards, e.g. `image/*` (in which case the provider needs to take appropriate action). 
+ +@see https://www.iana.org/assignments/media-types/media-types.xhtml */ - // TODO rename to mediaType or contentType - // https://www.iana.org/assignments/media-types/media-types.xhtml - mimeType: string; + mediaType: string; /** * Additional provider-specific options. They are passed through @@ -222,9 +222,11 @@ base-64 encoded image data data: string; /** -Mime type of the image. +IANA media type of the image. + +@see https://www.iana.org/assignments/media-types/media-types.xhtml */ - mimeType?: string; + mediaType?: string; } >; diff --git a/packages/provider/src/language-model/v2/language-model-v2.ts b/packages/provider/src/language-model/v2/language-model-v2.ts index 894108d80319..cc01fd579481 100644 --- a/packages/provider/src/language-model/v2/language-model-v2.ts +++ b/packages/provider/src/language-model/v2/language-model-v2.ts @@ -117,13 +117,23 @@ An optional signature for verifying that the reasoning originated from the model /** Generated files as base64 encoded strings or binary data. The files should be returned without any unnecessary conversion. -If the API returns base64 encoded strings, the files should be returned -as base64 encoded strings. If the API returns binary data, the files should -be returned as binary data. */ files?: Array<{ + /** +Generated file data as base64 encoded strings or binary data. +The file data should be returned without any unnecessary conversion. +If the API returns base64 encoded strings, the file data should be returned +as base64 encoded strings. If the API returns binary data, the file data should +be returned as binary data. + */ data: string | Uint8Array; - mimeType: string; + + /** +The IANA media type of the file. + +@see https://www.iana.org/assignments/media-types/media-types.xhtml + */ + mediaType: string; }>; /** @@ -307,7 +317,13 @@ export type LanguageModelV2StreamPart = // Files: | { type: 'file'; - mimeType: string; + + /** +The IANA media type of the file. + +@see https://www.iana.org/assignments/media-types/media-types.xhtml + */ + mediaType: string; /** Generated file data as base64 encoded strings or binary data. 
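Taken together, these provider-level type changes mean a V2 file part is now keyed by an IANA `mediaType` rather than a `mimeType`. A minimal sketch of a conforming value, assuming the `LanguageModelV2FilePart` shape used by the conversion code earlier in this patch:

```ts
import type { LanguageModelV2FilePart } from '@ai-sdk/provider';

// Sketch: a PDF file part under the renamed field. `mediaType` is an IANA
// media type and may be a wildcard such as 'image/*' for images.
const filePart: LanguageModelV2FilePart = {
  type: 'file',
  mediaType: 'application/pdf',
  filename: 'hello.pdf', // assumed optional, mirroring part.filename above
  data: 'dGVzdA==', // base64 encoded data (a URL is also allowed)
};
```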
diff --git a/packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap b/packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap index afe0536f5c95..5816fdd25b80 100644 --- a/packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap +++ b/packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap @@ -396,12 +396,12 @@ exports[`scenario: server provides file parts > should call the onFinish functio }, { "data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", }, { "data": "{\"key\": \"value\"}", - "mimeType": "application/json", + "mediaType": "application/json", "type": "file", }, ], @@ -448,7 +448,7 @@ exports[`scenario: server provides file parts > should call the update function }, { "data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", }, ], @@ -470,7 +470,7 @@ exports[`scenario: server provides file parts > should call the update function }, { "data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", }, ], @@ -492,12 +492,12 @@ exports[`scenario: server provides file parts > should call the update function }, { "data": "Hello World", - "mimeType": "text/plain", + "mediaType": "text/plain", "type": "file", }, { "data": "{\"key\": \"value\"}", - "mimeType": "application/json", + "mediaType": "application/json", "type": "file", }, ], diff --git a/packages/ui-utils/src/data-url.ts b/packages/ui-utils/src/data-url.ts index 03242433d97b..101349627714 100644 --- a/packages/ui-utils/src/data-url.ts +++ b/packages/ui-utils/src/data-url.ts @@ -3,9 +3,9 @@ */ export function getTextFromDataUrl(dataUrl: string): string { const [header, base64Content] = dataUrl.split(','); - const mimeType = header.split(';')[0].split(':')[1]; + const mediaType = header.split(';')[0].split(':')[1]; - if (mimeType == null || base64Content == null) { + if (mediaType == null || base64Content == null) { throw new Error('Invalid data URL format'); } diff --git a/packages/ui-utils/src/process-chat-response.ts b/packages/ui-utils/src/process-chat-response.ts index 4c74f59ffd78..15c637e43709 100644 --- a/packages/ui-utils/src/process-chat-response.ts +++ b/packages/ui-utils/src/process-chat-response.ts @@ -203,7 +203,7 @@ export async function processChatResponse({ onFilePart(value) { message.parts.push({ type: 'file', - mimeType: value.mimeType, + mediaType: value.mimeType, data: value.data, }); diff --git a/packages/ui-utils/src/types.ts b/packages/ui-utils/src/types.ts index bfc67a517fb8..0db7c1c65cf7 100644 --- a/packages/ui-utils/src/types.ts +++ b/packages/ui-utils/src/types.ts @@ -191,8 +191,18 @@ export type SourceUIPart = { */ export type FileUIPart = { type: 'file'; - mimeType: string; - data: string; // base64 encoded data + + /** + * IANA media type of the file. + * + * @see https://www.iana.org/assignments/media-types/media-types.xhtml + */ + mediaType: string; + + /** + * The base64 encoded data. 
+ */ + data: string; }; /** From 968d17361eff19fbcd85c77f4d60b404a04677c0 Mon Sep 17 00:00:00 2001 From: Nico Albanese <49612682+nicoalbanese@users.noreply.github.com> Date: Tue, 8 Apr 2025 15:13:03 +0100 Subject: [PATCH 0050/1307] docs: update transcription highlights (#5603) --- .../docs/03-ai-sdk-core/36-transcription.mdx | 6 ++--- .../ai-no-transcript-generated-error.mdx | 25 +++++++++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 content/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx diff --git a/content/docs/03-ai-sdk-core/36-transcription.mdx b/content/docs/03-ai-sdk-core/36-transcription.mdx index 24d3dca4f1d1..8cfcdd47087b 100644 --- a/content/docs/03-ai-sdk-core/36-transcription.mdx +++ b/content/docs/03-ai-sdk-core/36-transcription.mdx @@ -38,7 +38,7 @@ const durationInSeconds = transcript.durationInSeconds; // duration of the trans Transcription models often have provider or model-specific settings which you can set using the `providerOptions` parameter. -```ts highlight={"9"} +```ts highlight="8-12" import { experimental_transcribe as transcribe } from 'ai'; import { openai } from '@ai-sdk/openai'; import { readFile } from 'fs/promises'; @@ -60,7 +60,7 @@ const transcript = await transcribe({ type [`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal) that you can use to abort the transcription process or set a timeout. -```ts highlight={"7"} +```ts highlight="8" import { openai } from '@ai-sdk/openai'; import { experimental_transcribe as transcribe } from 'ai'; import { readFile } from 'fs/promises'; @@ -77,7 +77,7 @@ const transcript = await transcribe({ `transcribe` accepts an optional `headers` parameter of type `Record<string, string>` that you can use to add custom headers to the transcription request. -```ts highlight={"7"} +```ts highlight="8" import { openai } from '@ai-sdk/openai'; import { experimental_transcribe as transcribe } from 'ai'; import { readFile } from 'fs/promises'; diff --git a/content/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx b/content/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx new file mode 100644 index 000000000000..c9124974439c --- /dev/null +++ b/content/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx @@ -0,0 +1,25 @@ +--- +title: AI_NoTranscriptGeneratedError +description: Learn how to fix AI_NoTranscriptGeneratedError +--- + +# AI_NoTranscriptGeneratedError + +This error occurs when no transcript could be generated from the input.
+ +## Properties + +- `responses`: Array of responses +- `message`: The error message + +## Checking for this Error + +You can check if an error is an instance of `AI_NoTranscriptGeneratedError` using: + +```typescript +import { NoTranscriptGeneratedError } from 'ai'; + +if (NoTranscriptGeneratedError.isInstance(error)) { + // Handle the error +} +``` From e1cbf8a70e84a6450eec76f145f47f50440e1b37 Mon Sep 17 00:00:00 2001 From: Sam Denty Date: Tue, 8 Apr 2025 18:18:34 +0100 Subject: [PATCH 0051/1307] chore: extract rsc to separate package (#5542) --- .changeset/afraid-moles-cross.md | 6 + content/cookbook/20-rsc/20-stream-text.mdx | 6 +- .../21-stream-text-with-chat-prompt.mdx | 4 +- content/cookbook/20-rsc/40-stream-object.mdx | 4 +- .../20-rsc/60-save-messages-to-database.mdx | 6 +- .../61-restore-messages-from-database.mdx | 6 +- .../90-render-visual-interface-in-chat.mdx | 6 +- ...91-stream-updates-to-visual-interfaces.mdx | 6 +- .../92-stream-ui-record-token-usage.mdx | 6 +- content/docs/02-guides/21-llama-3_1.mdx | 2 +- content/docs/05-ai-sdk-rsc/01-overview.mdx | 4 +- .../02-streaming-react-components.mdx | 2 +- .../05-ai-sdk-rsc/03-generative-ui-state.mdx | 18 +- .../03-saving-and-restoring-states.mdx | 2 +- .../05-ai-sdk-rsc/04-multistep-interfaces.mdx | 10 +- .../05-ai-sdk-rsc/05-streaming-values.mdx | 10 +- .../docs/05-ai-sdk-rsc/06-loading-state.mdx | 12 +- .../docs/05-ai-sdk-rsc/08-error-handling.mdx | 4 +- .../docs/05-ai-sdk-rsc/09-authentication.mdx | 2 +- .../docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx | 18 +- .../06-advanced/05-multiple-streamables.mdx | 2 +- .../07-rendering-ui-with-language-models.mdx | 6 +- .../03-ai-sdk-rsc/01-stream-ui.mdx | 2 +- .../03-ai-sdk-rsc/02-create-ai.mdx | 2 +- .../03-ai-sdk-rsc/03-create-streamable-ui.mdx | 5 +- .../04-create-streamable-value.mdx | 2 +- .../05-read-streamable-value.mdx | 7 +- .../03-ai-sdk-rsc/06-get-ai-state.mdx | 2 +- .../03-ai-sdk-rsc/07-get-mutable-ai-state.mdx | 5 +- .../03-ai-sdk-rsc/08-use-ai-state.mdx | 2 +- .../03-ai-sdk-rsc/09-use-actions.mdx | 2 +- .../03-ai-sdk-rsc/10-use-ui-state.mdx | 2 +- .../03-ai-sdk-rsc/11-use-streamable-value.mdx | 5 +- .../07-reference/03-ai-sdk-rsc/20-render.mdx | 2 +- .../29-migration-guide-4-0.mdx | 4 +- .../39-migration-guide-3-1.mdx | 4 +- .../07-unclosed-streams.mdx | 2 +- .../60-jest-cannot-find-module-ai-rsc.mdx | 10 +- .../app/completion-rsc/generate-completion.ts | 2 +- .../next-openai/app/completion-rsc/page.tsx | 2 +- .../next-openai/app/stream-object/actions.ts | 2 +- .../next-openai/app/stream-object/page.tsx | 2 +- .../next-openai/app/stream-ui/actions.tsx | 2 +- examples/next-openai/app/stream-ui/ai.ts | 2 +- .../next-openai/app/stream-ui/message.tsx | 2 +- examples/next-openai/app/stream-ui/page.tsx | 4 +- examples/next-openai/package.json | 1 + packages/ai/core/prompt/index.ts | 2 + packages/ai/core/types/index.ts | 2 +- packages/ai/internal/index.ts | 8 + packages/ai/package.json | 29 +- packages/ai/rsc/package.json | 13 - packages/ai/tsconfig.json | 2 +- packages/ai/tsup.config.ts | 43 +-- packages/rsc/.eslintrc.js | 4 + packages/rsc/CHANGELOG.md | 0 packages/rsc/README.md | 3 + packages/rsc/package.json | 95 +++++ packages/{ai => rsc}/playwright.config.ts | 0 packages/{ai/rsc => rsc/src}/ai-state.test.ts | 0 packages/{ai/rsc => rsc/src}/ai-state.tsx | 4 +- packages/{ai/rsc => rsc/src}/index.ts | 0 packages/{ai/rsc => rsc/src}/provider.tsx | 0 packages/{ai/rsc => rsc/src}/rsc-client.ts | 0 packages/{ai/rsc => rsc/src}/rsc-server.ts | 0 packages/{ai/rsc => 
rsc/src}/rsc-shared.mts | 0 .../rsc => rsc/src}/shared-client/context.tsx | 2 +- .../rsc => rsc/src}/shared-client/index.ts | 0 .../__snapshots__/render.ui.test.tsx.snap | 0 .../__snapshots__/stream-ui.ui.test.tsx.snap | 0 .../{ai/rsc => rsc/src}/stream-ui/index.tsx | 0 .../rsc => rsc/src}/stream-ui/stream-ui.tsx | 32 +- .../src}/stream-ui/stream-ui.ui.test.tsx | 2 +- .../streamable-ui/create-streamable-ui.tsx | 4 +- .../create-streamable-ui.ui.test.tsx | 0 .../streamable-ui/create-suspended-chunk.tsx | 2 +- .../create-streamable-value.test.tsx | 0 .../create-streamable-value.ts | 4 +- .../streamable-value/is-streamable-value.ts | 0 .../read-streamable-value.tsx | 0 .../read-streamable-value.ui.test.tsx | 0 .../src}/streamable-value/streamable-value.ts | 0 .../streamable-value/use-streamable-value.tsx | 0 packages/{ai/rsc => rsc/src}/types.test-d.ts | 2 +- packages/{ai/rsc => rsc/src}/types.ts | 0 .../rsc/src/util/create-resolvable-promise.ts | 28 ++ .../src}/util/is-async-generator.ts | 0 packages/{ai => rsc/src}/util/is-function.ts | 0 packages/{ai => rsc/src}/util/is-generator.ts | 0 .../tests/e2e/next-server/CHANGELOG.md | 0 .../tests/e2e/next-server/app/layout.js | 0 .../tests/e2e/next-server/app/page.js | 0 .../tests/e2e/next-server/app/rsc/actions.jsx | 2 +- .../e2e/next-server/app/rsc/client-utils.js | 0 .../tests/e2e/next-server/app/rsc/client.js | 2 +- .../tests/e2e/next-server/app/rsc/page.js | 0 .../tests/e2e/next-server/package.json | 0 .../tests/e2e/spec/streamable.e2e.test.ts | 0 packages/rsc/tsconfig.json | 9 + packages/rsc/tsup.config.ts | 37 ++ packages/rsc/turbo.json | 12 + packages/rsc/vitest.edge.config.js | 18 + packages/rsc/vitest.node.config.js | 18 + .../{ai => rsc}/vitest.ui.react.config.js | 3 +- pnpm-lock.yaml | 326 +++++------------- tools/tsconfig/base.json | 1 + 106 files changed, 495 insertions(+), 433 deletions(-) create mode 100644 .changeset/afraid-moles-cross.md create mode 100644 packages/ai/internal/index.ts delete mode 100644 packages/ai/rsc/package.json create mode 100644 packages/rsc/.eslintrc.js create mode 100644 packages/rsc/CHANGELOG.md create mode 100644 packages/rsc/README.md create mode 100644 packages/rsc/package.json rename packages/{ai => rsc}/playwright.config.ts (100%) rename packages/{ai/rsc => rsc/src}/ai-state.test.ts (100%) rename packages/{ai/rsc => rsc/src}/ai-state.tsx (97%) rename packages/{ai/rsc => rsc/src}/index.ts (100%) rename packages/{ai/rsc => rsc/src}/provider.tsx (100%) rename packages/{ai/rsc => rsc/src}/rsc-client.ts (100%) rename packages/{ai/rsc => rsc/src}/rsc-server.ts (100%) rename packages/{ai/rsc => rsc/src}/rsc-shared.mts (100%) rename packages/{ai/rsc => rsc/src}/shared-client/context.tsx (99%) rename packages/{ai/rsc => rsc/src}/shared-client/index.ts (100%) rename packages/{ai/rsc => rsc/src}/stream-ui/__snapshots__/render.ui.test.tsx.snap (100%) rename packages/{ai/rsc => rsc/src}/stream-ui/__snapshots__/stream-ui.ui.test.tsx.snap (100%) rename packages/{ai/rsc => rsc/src}/stream-ui/index.tsx (100%) rename packages/{ai/rsc => rsc/src}/stream-ui/stream-ui.tsx (90%) rename packages/{ai/rsc => rsc/src}/stream-ui/stream-ui.ui.test.tsx (99%) rename packages/{ai/rsc => rsc/src}/streamable-ui/create-streamable-ui.tsx (96%) rename packages/{ai/rsc => rsc/src}/streamable-ui/create-streamable-ui.ui.test.tsx (100%) rename packages/{ai/rsc => rsc/src}/streamable-ui/create-suspended-chunk.tsx (96%) rename packages/{ai/rsc => rsc/src}/streamable-value/create-streamable-value.test.tsx (100%) rename packages/{ai/rsc => 
rsc/src}/streamable-value/create-streamable-value.ts (98%) rename packages/{ai/rsc => rsc/src}/streamable-value/is-streamable-value.ts (100%) rename packages/{ai/rsc => rsc/src}/streamable-value/read-streamable-value.tsx (100%) rename packages/{ai/rsc => rsc/src}/streamable-value/read-streamable-value.ui.test.tsx (100%) rename packages/{ai/rsc => rsc/src}/streamable-value/streamable-value.ts (100%) rename packages/{ai/rsc => rsc/src}/streamable-value/use-streamable-value.tsx (100%) rename packages/{ai/rsc => rsc/src}/types.test-d.ts (94%) rename packages/{ai/rsc => rsc/src}/types.ts (100%) create mode 100644 packages/rsc/src/util/create-resolvable-promise.ts rename packages/{ai => rsc/src}/util/is-async-generator.ts (100%) rename packages/{ai => rsc/src}/util/is-function.ts (100%) rename packages/{ai => rsc/src}/util/is-generator.ts (100%) rename packages/{ai => rsc}/tests/e2e/next-server/CHANGELOG.md (100%) rename packages/{ai => rsc}/tests/e2e/next-server/app/layout.js (100%) rename packages/{ai => rsc}/tests/e2e/next-server/app/page.js (100%) rename packages/{ai => rsc}/tests/e2e/next-server/app/rsc/actions.jsx (92%) rename packages/{ai => rsc}/tests/e2e/next-server/app/rsc/client-utils.js (100%) rename packages/{ai => rsc}/tests/e2e/next-server/app/rsc/client.js (95%) rename packages/{ai => rsc}/tests/e2e/next-server/app/rsc/page.js (100%) rename packages/{ai => rsc}/tests/e2e/next-server/package.json (100%) rename packages/{ai => rsc}/tests/e2e/spec/streamable.e2e.test.ts (100%) create mode 100644 packages/rsc/tsconfig.json create mode 100644 packages/rsc/tsup.config.ts create mode 100644 packages/rsc/turbo.json create mode 100644 packages/rsc/vitest.edge.config.js create mode 100644 packages/rsc/vitest.node.config.js rename packages/{ai => rsc}/vitest.ui.react.config.js (74%) diff --git a/.changeset/afraid-moles-cross.md b/.changeset/afraid-moles-cross.md new file mode 100644 index 000000000000..1fd84e7e2e93 --- /dev/null +++ b/.changeset/afraid-moles-cross.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/rsc': major +'ai': major +--- + +chore(@ai-sdk/rsc): extract to separate package diff --git a/content/cookbook/20-rsc/20-stream-text.mdx b/content/cookbook/20-rsc/20-stream-text.mdx index f72ca8dc8c96..c10b9fbf696b 100644 --- a/content/cookbook/20-rsc/20-stream-text.mdx +++ b/content/cookbook/20-rsc/20-stream-text.mdx @@ -20,14 +20,14 @@ Text generation can sometimes take a long time to complete, especially when you' ## Client -Let's create a simple React component that will call the `generate` function when a button is clicked. The `generate` function will call the `streamText` function, which will then generate text based on the input prompt. To consume the stream of text in the client, we will use the `readStreamableValue` function from the `ai/rsc` module. +Let's create a simple React component that will call the `generate` function when a button is clicked. The `generate` function will call the `streamText` function, which will then generate text based on the input prompt. To consume the stream of text in the client, we will use the `readStreamableValue` function from the `@ai-sdk/rsc` module. 
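Aside (not part of the cookbook diff): stripped of React state, the client-side consumption is just an async iteration; a minimal sketch, assuming the `generate` action defined in the server snippet below:

```ts
// Sketch only: consuming a streamable value as an async iterator.
// Assumes the `generate` server action from app/actions.ts below,
// which returns { output } as a streamable string value.
import { readStreamableValue } from '@ai-sdk/rsc';
import { generate } from './actions';

export async function logGeneration(input: string) {
  const { output } = await generate(input);

  // Each iteration yields the latest chunk pushed via stream.update() on the server.
  for await (const delta of readStreamableValue(output)) {
    console.log(delta);
  }
}
```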
```tsx filename="app/page.tsx" 'use client'; import { useState } from 'react'; import { generate } from './actions'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; @@ -66,7 +66,7 @@ Using DevTools, we can see the text generation being streamed to the client in r import { streamText } from 'ai'; import { openai } from '@ai-sdk/openai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; export async function generate(input: string) { const stream = createStreamableValue(''); diff --git a/content/cookbook/20-rsc/21-stream-text-with-chat-prompt.mdx b/content/cookbook/20-rsc/21-stream-text-with-chat-prompt.mdx index 0f168eb718d1..bcacb5ee37ad 100644 --- a/content/cookbook/20-rsc/21-stream-text-with-chat-prompt.mdx +++ b/content/cookbook/20-rsc/21-stream-text-with-chat-prompt.mdx @@ -32,7 +32,7 @@ Let's create a simple conversation between a user and a model, and place a butto import { useState } from 'react'; import { Message, continueConversation } from './actions'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; @@ -95,7 +95,7 @@ Now, let's implement the `continueConversation` function that will insert the us import { streamText } from 'ai'; import { openai } from '@ai-sdk/openai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; export interface Message { role: 'user' | 'assistant'; diff --git a/content/cookbook/20-rsc/40-stream-object.mdx b/content/cookbook/20-rsc/40-stream-object.mdx index ec70dd5a022a..9ee1c408637b 100644 --- a/content/cookbook/20-rsc/40-stream-object.mdx +++ b/content/cookbook/20-rsc/40-stream-object.mdx @@ -51,7 +51,7 @@ Let's create a simple React component that will call the `getNotifications` func import { useState } from 'react'; import { generate } from './actions'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; @@ -92,7 +92,7 @@ Now let's implement the `getNotifications` function. 
We'll use the `generateObje import { streamObject } from 'ai'; import { openai } from '@ai-sdk/openai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; import { z } from 'zod'; export async function generate(input: string) { diff --git a/content/cookbook/20-rsc/60-save-messages-to-database.mdx b/content/cookbook/20-rsc/60-save-messages-to-database.mdx index 357e45c8a0ed..766b67651a0e 100644 --- a/content/cookbook/20-rsc/60-save-messages-to-database.mdx +++ b/content/cookbook/20-rsc/60-save-messages-to-database.mdx @@ -41,7 +41,7 @@ export default function RootLayout({ import { useState } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; // Allow streaming responses up to 30 seconds @@ -100,7 +100,7 @@ We will use the callback function to listen to state changes and save the conver ```tsx filename='app/actions.tsx' 'use server'; -import { getAIState, getMutableAIState, streamUI } from 'ai/rsc'; +import { getAIState, getMutableAIState, streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { ReactNode } from 'react'; import { z } from 'zod'; @@ -176,7 +176,7 @@ export async function continueConversation( ``` ```ts filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; export const AI = createAI({ diff --git a/content/cookbook/20-rsc/61-restore-messages-from-database.mdx b/content/cookbook/20-rsc/61-restore-messages-from-database.mdx index 962782f50a32..295e7273331a 100644 --- a/content/cookbook/20-rsc/61-restore-messages-from-database.mdx +++ b/content/cookbook/20-rsc/61-restore-messages-from-database.mdx @@ -39,7 +39,7 @@ export default function RootLayout({ import { useState, useEffect } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; export default function Home() { @@ -97,7 +97,7 @@ export default function Home() { The server-side implementation handles the restoration of messages and their transformation into the appropriate format for display. 
```tsx filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; import { Stock } from '@ai-studio/components/stock'; import { generateId } from 'ai'; @@ -126,7 +126,7 @@ export const AI = createAI({ ```tsx filename='app/actions.tsx' 'use server'; -import { getAIState } from 'ai/rsc'; +import { getAIState } from '@ai-sdk/rsc'; export interface ServerMessage { role: 'user' | 'assistant' | 'function'; diff --git a/content/cookbook/20-rsc/90-render-visual-interface-in-chat.mdx b/content/cookbook/20-rsc/90-render-visual-interface-in-chat.mdx index cdfd36d6958f..8d2a3ae01fbe 100644 --- a/content/cookbook/20-rsc/90-render-visual-interface-in-chat.mdx +++ b/content/cookbook/20-rsc/90-render-visual-interface-in-chat.mdx @@ -17,7 +17,7 @@ When we define multiple functions in [`tools`](/docs/reference/ai-sdk-core/gener import { useState } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; // Allow streaming responses up to 30 seconds @@ -112,7 +112,7 @@ export async function Flight({ flightNumber }) { ```tsx filename='app/actions.tsx' 'use server'; -import { getMutableAIState, streamUI } from 'ai/rsc'; +import { getMutableAIState, streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { ReactNode } from 'react'; import { z } from 'zod'; @@ -206,7 +206,7 @@ export async function continueConversation( ``` ```typescript filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; export const AI = createAI({ diff --git a/content/cookbook/20-rsc/91-stream-updates-to-visual-interfaces.mdx b/content/cookbook/20-rsc/91-stream-updates-to-visual-interfaces.mdx index dc9f3d374bf8..bfc5c75d2f6a 100644 --- a/content/cookbook/20-rsc/91-stream-updates-to-visual-interfaces.mdx +++ b/content/cookbook/20-rsc/91-stream-updates-to-visual-interfaces.mdx @@ -15,7 +15,7 @@ In our previous example we've been streaming react components from the server to import { useState } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; // Allow streaming responses up to 30 seconds @@ -72,7 +72,7 @@ export default function Home() { ```tsx filename='app/actions.tsx' 'use server'; -import { getMutableAIState, streamUI } from 'ai/rsc'; +import { getMutableAIState, streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { ReactNode } from 'react'; import { z } from 'zod'; @@ -137,7 +137,7 @@ export async function continueConversation( ``` ```typescript filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; export const AI = createAI({ diff --git a/content/cookbook/20-rsc/92-stream-ui-record-token-usage.mdx b/content/cookbook/20-rsc/92-stream-ui-record-token-usage.mdx index 09481680764d..864e70b8a997 100644 --- a/content/cookbook/20-rsc/92-stream-ui-record-token-usage.mdx +++ b/content/cookbook/20-rsc/92-stream-ui-record-token-usage.mdx @@ -19,7 +19,7 @@ It is called when the stream is finished. 
import { useState } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; // Allow streaming responses up to 30 seconds @@ -76,7 +76,7 @@ export default function Home() { ```tsx filename='app/actions.tsx' highlight={"57-63"} 'use server'; -import { createAI, getMutableAIState, streamUI } from 'ai/rsc'; +import { createAI, getMutableAIState, streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { ReactNode } from 'react'; import { z } from 'zod'; @@ -148,7 +148,7 @@ export async function continueConversation( ``` ```typescript filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; export const AI = createAI({ diff --git a/content/docs/02-guides/21-llama-3_1.mdx b/content/docs/02-guides/21-llama-3_1.mdx index 95d59a2f8fd4..527cc202937d 100644 --- a/content/docs/02-guides/21-llama-3_1.mdx +++ b/content/docs/02-guides/21-llama-3_1.mdx @@ -254,7 +254,7 @@ First, create a Server Action. ```tsx filename="app/actions.tsx" 'use server'; -import { streamUI } from 'ai/rsc'; +import { streamUI } from '@ai-sdk/rsc'; import { deepinfra } from '@ai-sdk/deepinfra'; import { z } from 'zod'; diff --git a/content/docs/05-ai-sdk-rsc/01-overview.mdx b/content/docs/05-ai-sdk-rsc/01-overview.mdx index 7eaaff560cb0..bbd221ff7553 100644 --- a/content/docs/05-ai-sdk-rsc/01-overview.mdx +++ b/content/docs/05-ai-sdk-rsc/01-overview.mdx @@ -12,8 +12,8 @@ description: An overview of AI SDK RSC.
- The `ai/rsc` package is compatible with frameworks that support React Server - Components. + The `@ai-sdk/rsc` package is compatible with frameworks that support React + Server Components. [React Server Components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) (RSC) allow you to write UI that can be rendered on the server and streamed to the client. RSCs enable [ Server Actions ](https://nextjs.org/docs/app/building-your-application/data-fetching/server-actions-and-mutations#with-client-components), a new way to call server-side code directly from the client just like any other function with end-to-end type-safety. This combination opens the door to a new way of building AI applications, allowing the large language model (LLM) to generate and stream UI directly from the server to the client. diff --git a/content/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx b/content/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx index b9000118db86..38b0042bfdc2 100644 --- a/content/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +++ b/content/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx @@ -112,7 +112,7 @@ Create a Server Action at `app/actions.tsx` and add the following code: ```tsx filename="app/actions.tsx" 'use server'; -import { streamUI } from 'ai/rsc'; +import { streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { z } from 'zod'; diff --git a/content/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx b/content/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx index 258a4c58f043..766a6783d61f 100644 --- a/content/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +++ b/content/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx @@ -56,7 +56,7 @@ AI SDK RSC simplifies managing AI and UI state across your application by provid Notably, this means you do not have to pass the message history to the server explicitly for each request. You also can access and update your application state in any child component of the context provider. As you begin building [multistep generative interfaces](/docs/ai-sdk-rsc/multistep-interfaces), this will be particularly helpful. 
-To use `ai/rsc` to manage AI and UI State in your application, you can create a React context using [`createAI`](/docs/reference/ai-sdk-rsc/create-ai): +To use `@ai-sdk/rsc` to manage AI and UI State in your application, you can create a React context using [`createAI`](/docs/reference/ai-sdk-rsc/create-ai): ```tsx filename='app/actions.tsx' // Define the AI state and UI state types @@ -78,7 +78,7 @@ export const sendMessage = async (input: string): Promise<ClientMessage> => { ``` ```tsx filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ClientMessage, ServerMessage, sendMessage } from './actions'; export type AIState = ServerMessage[]; @@ -124,7 +124,7 @@ The UI state can be accessed in Client Components using the [`useUIState`](/docs ```tsx filename='app/page.tsx' 'use client'; -import { useUIState } from 'ai/rsc'; +import { useUIState } from '@ai-sdk/rsc'; export default function Page() { const [messages, setMessages] = useUIState(); @@ -146,7 +146,7 @@ The AI state can be accessed in Client Components using the [`useAIState`](/docs ```tsx filename='app/page.tsx' 'use client'; -import { useAIState } from 'ai/rsc'; +import { useAIState } from '@ai-sdk/rsc'; export default function Page() { const [messages, setMessages] = useAIState(); @@ -166,7 +166,7 @@ export default function Page() { The AI State can be accessed within any Server Action provided to the `createAI` context using the [`getAIState`](/docs/reference/ai-sdk-rsc/get-ai-state) function. It returns the current AI state as a read-only value: ```tsx filename='app/actions.ts' -import { getAIState } from 'ai/rsc'; +import { getAIState } from '@ai-sdk/rsc'; export async function sendMessage(message: string) { 'use server'; @@ -192,7 +192,7 @@ export async function sendMessage(message: string) { The AI State can also be updated from within your Server Action with the [`getMutableAIState`](/docs/reference/ai-sdk-rsc/get-mutable-ai-state) function. This function is similar to `getAIState`, but it returns the state with methods to read and update it: ```tsx filename='app/actions.ts' -import { getMutableAIState } from 'ai/rsc'; +import { getMutableAIState } from '@ai-sdk/rsc'; export async function sendMessage(message: string) { 'use server'; @@ -226,7 +226,7 @@ export async function sendMessage(message: string) { To call the `sendMessage` action from the client, you can use the [`useActions`] ```tsx filename='app/page.tsx' 'use client'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { AI } from './ai'; export default function Page() { @@ -272,8 +272,8 @@ When the user submits a message, the `sendMessage` action is called with the mes Action otherwise the streamed component will not show in the UI.
-To learn more, check out this [example](/examples/next-app/state-management/ai-ui-states) on managing AI and UI state using `ai/rsc`. +To learn more, check out this [example](/examples/next-app/state-management/ai-ui-states) on managing AI and UI state using `@ai-sdk/rsc`. --- -Next, you will learn how you can save and restore state with `ai/rsc`. +Next, you will learn how you can save and restore state with `@ai-sdk/rsc`. diff --git a/content/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx b/content/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx index 81f98ec5ec0a..8e9462ba128f 100644 --- a/content/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +++ b/content/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx @@ -102,4 +102,4 @@ To learn more, check out this [example](/examples/next-app/state-management/save --- -Next, you will learn how you can use `ai/rsc` functions like `useActions` and `useUIState` to create interactive, multistep interfaces. +Next, you will learn how you can use `@ai-sdk/rsc` functions like `useActions` and `useUIState` to create interactive, multistep interfaces. diff --git a/content/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx b/content/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx index 500a6d486b42..9828090cf8d3 100644 --- a/content/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +++ b/content/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx @@ -27,7 +27,7 @@ To build this kind of application you will leverage two concepts, **tool composi ## Overview -In order to build a multistep interface with `ai/rsc`, you will need a few things: +In order to build a multistep interface with `@ai-sdk/rsc`, you will need a few things: - A Server Action that calls and returns the result from the `streamUI` function - Tool(s) (sub-tasks necessary to complete your overall task) @@ -49,7 +49,7 @@ The turn-by-turn implementation is the simplest form of multistep interfaces. In In the following example, you specify two tools (`searchFlights` and `lookupFlight`) that the model can use to search for flights and lookup details for a specific flight. ```tsx filename="app/actions.tsx" -import { streamUI } from 'ai/rsc'; +import { streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { z } from 'zod'; @@ -137,7 +137,7 @@ export async function submitUserMessage(input: string) { Next, create an AI context that will hold the UI State and AI State. 
```ts filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { submitUserMessage } from './actions'; export const AI = createAI({ @@ -175,7 +175,7 @@ To call your Server Action, update your root page with the following: import { useState } from 'react'; import { AI } from './ai'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; export default function Page() { const [input, setInput] = useState(''); @@ -224,7 +224,7 @@ To add user interaction, you will have to convert the component into a client co ```tsx filename="components/flights.tsx" 'use client'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { ReactNode } from 'react'; interface FlightsProps { diff --git a/content/docs/05-ai-sdk-rsc/05-streaming-values.mdx b/content/docs/05-ai-sdk-rsc/05-streaming-values.mdx index 779dfeae07df..ebeb28c72c67 100644 --- a/content/docs/05-ai-sdk-rsc/05-streaming-values.mdx +++ b/content/docs/05-ai-sdk-rsc/05-streaming-values.mdx @@ -42,12 +42,12 @@ This is useful when you want to stream: ## Creating a Streamable Value -You can import `createStreamableValue` from `ai/rsc` and use it to create a streamable value. +You can import `createStreamableValue` from `@ai-sdk/rsc` and use it to create a streamable value. ```tsx file='app/actions.ts' 'use server'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; export const runThread = async () => { const streamableStatus = createStreamableValue('thread.init'); @@ -70,7 +70,7 @@ export const runThread = async () => { You can read streamable values on the client using `readStreamableValue`. It returns an async iterator that yields the value of the streamable as it is updated: ```tsx file='app/page.tsx' -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; import { runThread } from '@/actions'; export default function Page() { @@ -103,7 +103,7 @@ Let's look at how you can use the `createStreamableUI` function with a Server Ac ```tsx filename='app/actions.tsx' 'use server'; -import { createStreamableUI } from 'ai/rsc'; +import { createStreamableUI } from '@ai-sdk/rsc'; export async function getWeather() { const weatherUI = createStreamableUI(); @@ -128,7 +128,7 @@ On the client side, you can call the `getWeather` Server Action and render the r 'use client'; import { useState } from 'react'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; import { getWeather } from '@/actions'; export default function Page() { diff --git a/content/docs/05-ai-sdk-rsc/06-loading-state.mdx b/content/docs/05-ai-sdk-rsc/06-loading-state.mdx index bd14cf1eb057..ca11831e056d 100644 --- a/content/docs/05-ai-sdk-rsc/06-loading-state.mdx +++ b/content/docs/05-ai-sdk-rsc/06-loading-state.mdx @@ -30,7 +30,7 @@ Let's create a simple Next.js page that will call the `generateResponse` functio import { useState } from 'react'; import { generateResponse } from './actions'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; // Force the page to be dynamic and allow streaming responses up to 30 seconds export const maxDuration = 30; @@ -84,7 +84,7 @@ Now let's implement the `generateResponse` function. 
Use the `streamText` functi import { streamText } from 'ai'; import { openai } from '@ai-sdk/openai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; export async function generateResponse(prompt: string) { const stream = createStreamableValue(); @@ -117,7 +117,7 @@ If you are looking to track loading state on a more granular level, you can crea import { streamText } from 'ai'; import { openai } from '@ai-sdk/openai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; export async function generateResponse(prompt: string) { const stream = createStreamableValue(); @@ -148,7 +148,7 @@ export async function generateResponse(prompt: string) { import { useState } from 'react'; import { generateResponse } from './actions'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; // Force the page to be dynamic and allow streaming responses up to 30 seconds export const maxDuration = 30; @@ -210,7 +210,7 @@ If you are using the [ `streamUI` ](/docs/reference/ai-sdk-rsc/stream-ui) functi 'use server'; import { openai } from '@ai-sdk/openai'; -import { streamUI } from 'ai/rsc'; +import { streamUI } from '@ai-sdk/rsc'; export async function generateResponse(prompt: string) { const result = await streamUI({ @@ -238,7 +238,7 @@ export async function generateResponse(prompt: string) { import { useState } from 'react'; import { generateResponse } from './actions'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; // Force the page to be dynamic and allow streaming responses up to 30 seconds export const maxDuration = 30; diff --git a/content/docs/05-ai-sdk-rsc/08-error-handling.mdx b/content/docs/05-ai-sdk-rsc/08-error-handling.mdx index d01d75ddfb1d..b797f65a1f67 100644 --- a/content/docs/05-ai-sdk-rsc/08-error-handling.mdx +++ b/content/docs/05-ai-sdk-rsc/08-error-handling.mdx @@ -20,7 +20,7 @@ To handle errors while generating UI, the [`streamableUI`](/docs/reference/ai-sd ```tsx filename='app/actions.tsx' 'use server'; -import { createStreamableUI } from 'ai/rsc'; +import { createStreamableUI } from '@ai-sdk/rsc'; export async function getStreamedUI() { const ui = createStreamableUI(); @@ -70,7 +70,7 @@ To handle other errors while streaming, you can return an error object that the ```tsx filename='app/actions.tsx' 'use server'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; import { fetchData, emptyData } from '../utils/data'; export const getStreamedData = async () => { diff --git a/content/docs/05-ai-sdk-rsc/09-authentication.mdx b/content/docs/05-ai-sdk-rsc/09-authentication.mdx index b4dd37fb2d31..bb6f1b221639 100644 --- a/content/docs/05-ai-sdk-rsc/09-authentication.mdx +++ b/content/docs/05-ai-sdk-rsc/09-authentication.mdx @@ -19,7 +19,7 @@ Server Actions are exposed as public, unprotected endpoints. 
As a result, you sh 'use server'; import { cookies } from 'next/headers'; -import { createStremableUI } from 'ai/rsc'; +import { createStreamableUI } from '@ai-sdk/rsc'; import { validateToken } from '../utils/auth'; export const getWeather = async () => { diff --git a/content/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx b/content/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx index 34c28b957af0..2e6f05c57411 100644 --- a/content/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +++ b/content/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx @@ -37,7 +37,7 @@ The `streamUI` function executes as part of a server action as illustrated below ```tsx filename="@/app/actions.tsx" import { openai } from '@ai-sdk/openai'; -import { getMutableAIState, streamUI } from 'ai/rsc'; +import { getMutableAIState, streamUI } from '@ai-sdk/rsc'; export async function sendMessage(message: string) { 'use server'; @@ -70,7 +70,7 @@ The chat interface calls the server action. The response is then saved using the 'use client'; import { useState, ReactNode } from 'react'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; export default function Page() { const { sendMessage } = useActions(); @@ -175,7 +175,7 @@ The `streamUI` function uses `tools` as a way to execute functions based on user ```tsx filename="@/app/actions.tsx" import { z } from 'zod'; -import { streamUI } from 'ai/rsc'; +import { streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { getWeather } from '@/utils/queries'; import { Weather } from '@/components/weather'; @@ -314,7 +314,7 @@ With AI SDK RSC, components streamed to the client can trigger subsequent genera ```tsx filename="@/app/components/list-flights.tsx" 'use client'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; export function ListFlights({ flights }) { const { sendMessage } = useActions(); @@ -385,7 +385,7 @@ In AI SDK RSC, you can use the `initial` parameter of `streamUI` to define the c ```tsx filename="@/app/actions.tsx" import { openai } from '@ai-sdk/openai'; -import { streamUI } from 'ai/rsc'; +import { streamUI } from '@ai-sdk/rsc'; const { value: stream } = await streamUI({ model: openai('gpt-4o'), @@ -457,7 +457,7 @@ Before implementing `streamUI` as a server action, you should create an `<AI>` #### Before: Save chats using callback function of context provider ```ts filename="@/app/actions.ts" -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { saveChat } from '@/utils/queries'; export const AI = createAI({ @@ -519,7 +519,7 @@ Similar to how you typically save chats in AI SDK RSC, you should use the `onGet #### Before: Load chat from database using callback function of context provider ```ts filename="@/app/actions.ts" -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { loadChatFromDB, convertToUIState } from '@/utils/queries'; export const AI = createAI({ @@ -605,7 +605,7 @@ The `createStreamableValue` function streams any serializable data from the serv ```ts filename="@/app/actions.ts" import { streamObject } from 'ai'; import { openai } from '@ai-sdk/openai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; import { notificationsSchema } from '@/utils/schemas'; export async function generateSampleNotifications() { @@ -638,7 +638,7 @@ export async function generateSampleNotifications() { 'use client'; import { useState } from 'react'; -import
{ readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; import { generateSampleNotifications } from '@/app/actions'; export default function Page() { diff --git a/content/docs/06-advanced/05-multiple-streamables.mdx b/content/docs/06-advanced/05-multiple-streamables.mdx index e6d264da5b33..bcad57613680 100644 --- a/content/docs/06-advanced/05-multiple-streamables.mdx +++ b/content/docs/06-advanced/05-multiple-streamables.mdx @@ -12,7 +12,7 @@ The AI SDK RSC APIs allow you to compose and return any number of streamable UIs ```tsx file='app/actions.tsx' 'use server'; -import { createStreamableUI } from 'ai/rsc'; +import { createStreamableUI } from '@ai-sdk/rsc'; export async function getWeather() { const weatherUI = createStreamableUI(); diff --git a/content/docs/06-advanced/07-rendering-ui-with-language-models.mdx b/content/docs/06-advanced/07-rendering-ui-with-language-models.mdx index 7ba96ed5d282..64c624686c6a 100644 --- a/content/docs/06-advanced/07-rendering-ui-with-language-models.mdx +++ b/content/docs/06-advanced/07-rendering-ui-with-language-models.mdx @@ -141,12 +141,12 @@ For example, a tool that searches for courses can return a list of courses, whil ## Rendering User Interfaces on the Server -The **AI SDK RSC (`ai/rsc`)** takes advantage of RSCs to solve the problem of managing all your React components on the client side, allowing you to render React components on the server and stream them to the client. +The **AI SDK RSC (`@ai-sdk/rsc`)** takes advantage of RSCs to solve the problem of managing all your React components on the client side, allowing you to render React components on the server and stream them to the client. Rather than conditionally rendering user interfaces on the client based on the data returned by the language model, you can directly stream them from the server during a model generation. ```tsx highlight="3,22-31,38" filename="app/action.ts" -import { createStreamableUI } from 'ai/rsc' +import { createStreamableUI } from '@ai-sdk/rsc' const uiStream = createStreamableUI(); @@ -187,7 +187,7 @@ return { } ``` -The [`createStreamableUI`](/docs/reference/ai-sdk-rsc/create-streamable-ui) function belongs to the `ai/rsc` module and creates a stream that can send React components to the client. +The [`createStreamableUI`](/docs/reference/ai-sdk-rsc/create-streamable-ui) function belongs to the `@ai-sdk/rsc` module and creates a stream that can send React components to the client. On the server, you render the `` component with the props passed to it, and then stream it to the client. On the client side, you only need to render the UI that is streamed from the server. diff --git a/content/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx b/content/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx index 46b33733f4cf..257493378b58 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx @@ -17,7 +17,7 @@ To see `streamUI` in action, check out [these examples](#examples). 
## Import -<Snippet text="import { streamUI } from 'ai/rsc'" prompt={false} /> +<Snippet text="import { streamUI } from '@ai-sdk/rsc'" prompt={false} /> ## Parameters diff --git a/content/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx b/content/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx index e19491bb966f..ad6508bd254d 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx @@ -15,7 +15,7 @@ Creates a client-server context provider that can be used to wrap parts of your ## Import -<Snippet text="import { createAI } from 'ai/rsc'" prompt={false} /> +<Snippet text="import { createAI } from '@ai-sdk/rsc'" prompt={false} /> ## API Signature diff --git a/content/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx b/content/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx index 585f61b988d5..fe602d330320 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx @@ -15,7 +15,10 @@ Create a stream that sends UI from the server to the client. On the client side, ## Import -<Snippet text="import { createStreamableUI } from 'ai/rsc'" prompt={false} /> +<Snippet +  text="import { createStreamableUI } from '@ai-sdk/rsc'" +  prompt={false} +/> ## API Signature diff --git a/content/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx b/content/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx index d462fb7c8e66..e1e96c6aa029 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx @@ -16,7 +16,7 @@ Create a stream that sends values from the server to the client. The value can b ## Import <Snippet -  text="import { createStreamableValue } from 'ai/rsc'" +  text="import { createStreamableValue } from '@ai-sdk/rsc'" prompt={false} /> diff --git a/content/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx b/content/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx index 48a32a1a6837..e89260cb3520 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx @@ -15,7 +15,10 @@ It is a function that helps you read the streamable value from the client that w ## Import -<Snippet text="import { readStreamableValue } from 'ai/rsc'" prompt={false} /> +<Snippet +  text="import { readStreamableValue } from '@ai-sdk/rsc'" +  prompt={false} +/> ## Example @@ -33,7 +36,7 @@ async function generate() { ``` ```tsx filename="app/page.tsx" highlight="12" -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; export default function Page() { const [generation, setGeneration] = useState(''); diff --git a/content/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx b/content/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx index 6d4ea54787a8..373c7dfd9b36 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx @@ -15,7 +15,7 @@ Get the current AI state. ## Import -<Snippet text="import { getAIState } from 'ai/rsc'" prompt={false} /> +<Snippet text="import { getAIState } from '@ai-sdk/rsc'" prompt={false} /> ## API Signature diff --git a/content/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx b/content/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx index 066aca3edeaf..b650e7775c96 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx @@ -15,7 +15,10 @@ Get a mutable copy of the AI state.
You can use this to update the state in the ## Import -<Snippet text="import { getMutableAIState } from 'ai/rsc'" prompt={false} /> +<Snippet +  text="import { getMutableAIState } from '@ai-sdk/rsc'" +  prompt={false} +/> ## API Signature diff --git a/content/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx b/content/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx index 94bc68f70731..2d535b66822d 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx @@ -17,7 +17,7 @@ The AI state is intended to contain context and information shared with the AI m ## Import -<Snippet text="import { useAIState } from 'ai/rsc'" prompt={false} /> +<Snippet text="import { useAIState } from '@ai-sdk/rsc'" prompt={false} /> ## API Signature diff --git a/content/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx b/content/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx index 93db17e184e4..911eab4030b3 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx @@ -17,7 +17,7 @@ It is required to access these server actions via this hook because they are pat ## Import -<Snippet text="import { useActions } from 'ai/rsc'" prompt={false} /> +<Snippet text="import { useActions } from '@ai-sdk/rsc'" prompt={false} /> ## API Signature diff --git a/content/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx b/content/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx index 95c8f1f02938..d9b8535f1202 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx @@ -15,7 +15,7 @@ It is a hook that enables you to read and update the UI State. The state is clie ## Import -<Snippet text="import { useUIState } from 'ai/rsc'" prompt={false} /> +<Snippet text="import { useUIState } from '@ai-sdk/rsc'" prompt={false} /> ## API Signature diff --git a/content/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx b/content/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx index 6ef94f4c3a9b..352ef4d5534e 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx @@ -15,7 +15,10 @@ It is a React hook that takes a streamable value created using [`createStreamabl ## Import -<Snippet text="import { useStreamableValue } from 'ai/rsc'" prompt={false} /> +<Snippet +  text="import { useStreamableValue } from '@ai-sdk/rsc'" +  prompt={false} +/> ## Example diff --git a/content/docs/07-reference/03-ai-sdk-rsc/20-render.mdx b/content/docs/07-reference/03-ai-sdk-rsc/20-render.mdx index 305fbeaec055..dd452bf2ad40 100644 --- a/content/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +++ b/content/docs/07-reference/03-ai-sdk-rsc/20-render.mdx @@ -19,7 +19,7 @@ A helper function to create a streamable UI from LLM providers. This function is ## Import -<Snippet text="import { render } from 'ai/rsc'" prompt={false} /> +<Snippet text="import { render } from '@ai-sdk/rsc'" prompt={false} /> ## API Signature diff --git a/content/docs/08-migration-guides/29-migration-guide-4-0.mdx b/content/docs/08-migration-guides/29-migration-guide-4-0.mdx index 1f856f4bb734..6218cf43dd14 100644 --- a/content/docs/08-migration-guides/29-migration-guide-4-0.mdx +++ b/content/docs/08-migration-guides/29-migration-guide-4-0.mdx @@ -731,11 +731,11 @@ The AI SDK RSC 3.0 `render` function has been removed. Please use the `streamUI` function instead or [switch to AI SDK UI](/docs/ai-sdk-rsc/migrating-to-ui). ```ts filename="AI SDK 3.0" -import { render } from 'ai/rsc'; +import { render } from '@ai-sdk/rsc'; ``` ```ts filename="AI SDK 4.0" -import { streamUI } from 'ai/rsc'; +import { streamUI } from '@ai-sdk/rsc'; ``` ## AI SDK UI Changes diff --git a/content/docs/08-migration-guides/39-migration-guide-3-1.mdx b/content/docs/08-migration-guides/39-migration-guide-3-1.mdx index 4392c1a0014e..e2c2dac7b217 100644 --- a/content/docs/08-migration-guides/39-migration-guide-3-1.mdx +++ b/content/docs/08-migration-guides/39-migration-guide-3-1.mdx @@ -90,7 +90,7 @@ The AI SDK RSC API was launched as part of version 3.0. This API introduced the The following example Server Action uses the `render` function using the model provider directly from OpenAI.
You first create an OpenAI provider instance with the OpenAI SDK. Then, you pass it to the provider key of the render function alongside a tool that returns a React Server Component, defined in the `render` key of the tool. ```tsx -import { render } from 'ai/rsc'; +import { render } from '@ai-sdk/rsc'; import OpenAI from 'openai'; import { z } from 'zod'; import { Spinner, Weather } from '@/components'; @@ -131,7 +131,7 @@ async function submitMessage(userInput = 'What is the weather in SF?') { With the new [`streamUI`](/docs/reference/ai-sdk-rsc/stream-ui) function, you can now use any compatible AI SDK provider. In this example, you import the AI SDK OpenAI provider. Then, you pass it to the [`model`](/docs/reference/ai-sdk-rsc/stream-ui#model) key of the new [`streamUI`](/docs/reference/ai-sdk-rsc/stream-ui) function. Finally, you declare a tool and return a React Server Component, defined in the [`generate`](/docs/reference/ai-sdk-rsc/stream-ui#tools-generate) key of the tool. ```tsx -import { streamUI } from 'ai/rsc'; +import { streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { z } from 'zod'; import { Spinner, Weather } from '@/components'; diff --git a/content/docs/09-troubleshooting/07-unclosed-streams.mdx b/content/docs/09-troubleshooting/07-unclosed-streams.mdx index 5ea1e7b9d000..d85635f9633e 100644 --- a/content/docs/09-troubleshooting/07-unclosed-streams.mdx +++ b/content/docs/09-troubleshooting/07-unclosed-streams.mdx @@ -18,7 +18,7 @@ In order to fix this, you must ensure you close the stream by calling the [`.don This will ensure the stream is closed. ```tsx file='app/actions.tsx' -import { createStreamableUI } from 'ai/rsc'; +import { createStreamableUI } from '@ai-sdk/rsc'; const submitMessage = async () => { 'use server'; diff --git a/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx b/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx index 6626d31c1bf0..b5ca97c59b35 100644 --- a/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +++ b/content/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx @@ -1,15 +1,15 @@ --- -title: "Jest: cannot find module 'ai/rsc'" -description: "Troubleshooting AI SDK errors related to the Jest: cannot find module 'ai/rsc' error" +title: "Jest: cannot find module '@ai-sdk/rsc'" +description: "Troubleshooting AI SDK errors related to the Jest: cannot find module '@ai-sdk/rsc' error" --- -# Jest: cannot find module 'ai/rsc' +# Jest: cannot find module '@ai-sdk/rsc' ## Issue I am using AI SDK RSC and am writing tests for my RSC components with Jest. -I am getting the following error: `Cannot find module 'ai/rsc'`. +I am getting the following error: `Cannot find module '@ai-sdk/rsc'`. 
## Solution @@ -17,6 +17,6 @@ Configure the module resolution via `jest config update` in `moduleNameMapper`: ```json filename="jest.config.js" "moduleNameMapper": { - "^ai/rsc$": "<rootDir>/node_modules/ai/rsc/dist" + "^@ai-sdk/rsc$": "<rootDir>/node_modules/@ai-sdk/rsc/dist" } ``` diff --git a/examples/next-openai/app/completion-rsc/generate-completion.ts b/examples/next-openai/app/completion-rsc/generate-completion.ts index ab751a3647b5..7f9dbea77c02 100644 --- a/examples/next-openai/app/completion-rsc/generate-completion.ts +++ b/examples/next-openai/app/completion-rsc/generate-completion.ts @@ -2,7 +2,7 @@ import { openai } from '@ai-sdk/openai'; import { streamText } from 'ai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; export async function generateCompletion(prompt: string) { const result = streamText({ diff --git a/examples/next-openai/app/completion-rsc/page.tsx b/examples/next-openai/app/completion-rsc/page.tsx index 37de30d00711..7c96c2dbc150 100644 --- a/examples/next-openai/app/completion-rsc/page.tsx +++ b/examples/next-openai/app/completion-rsc/page.tsx @@ -1,6 +1,6 @@ 'use client'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; import { useState } from 'react'; import { generateCompletion } from './generate-completion'; diff --git a/examples/next-openai/app/stream-object/actions.ts b/examples/next-openai/app/stream-object/actions.ts index 3700cf9ce3c9..236c1e0dc654 100644 --- a/examples/next-openai/app/stream-object/actions.ts +++ b/examples/next-openai/app/stream-object/actions.ts @@ -2,7 +2,7 @@ import { openai } from '@ai-sdk/openai'; import { streamObject } from 'ai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; import { PartialNotification, notificationSchema } from './schema'; export async function generateNotifications(context: string) { diff --git a/examples/next-openai/app/stream-object/page.tsx b/examples/next-openai/app/stream-object/page.tsx index b85c1c167620..7700ef7cf41d 100644 --- a/examples/next-openai/app/stream-object/page.tsx +++ b/examples/next-openai/app/stream-object/page.tsx @@ -1,6 +1,6 @@ 'use client'; -import { StreamableValue, useStreamableValue } from 'ai/rsc'; +import { StreamableValue, useStreamableValue } from '@ai-sdk/rsc'; import { useState } from 'react'; import { generateNotifications } from './actions'; import { PartialNotification } from './schema'; diff --git a/examples/next-openai/app/stream-ui/actions.tsx b/examples/next-openai/app/stream-ui/actions.tsx index 24bbe6eb4b68..bace01b6b522 100644 --- a/examples/next-openai/app/stream-ui/actions.tsx +++ b/examples/next-openai/app/stream-ui/actions.tsx @@ -5,7 +5,7 @@ import { createStreamableValue, getMutableAIState as $getMutableAIState, streamUI, -} from 'ai/rsc'; +} from '@ai-sdk/rsc'; import { Message, BotMessage } from './message'; import { z } from 'zod'; diff --git a/examples/next-openai/app/stream-ui/ai.ts b/examples/next-openai/app/stream-ui/ai.ts index 1023c2f283d6..e187f5a691a9 100644 --- a/examples/next-openai/app/stream-ui/ai.ts +++ b/examples/next-openai/app/stream-ui/ai.ts @@ -1,4 +1,4 @@ -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { AIState, submitUserMessage, UIState } from './actions'; import { generateId } from 'ai'; diff --git a/examples/next-openai/app/stream-ui/message.tsx b/examples/next-openai/app/stream-ui/message.tsx index 4553a6b66753..68443650d26e 100644 ---
a/examples/next-openai/app/stream-ui/message.tsx +++ b/examples/next-openai/app/stream-ui/message.tsx @@ -1,6 +1,6 @@ 'use client'; -import { StreamableValue, useStreamableValue } from 'ai/rsc'; +import { StreamableValue, useStreamableValue } from '@ai-sdk/rsc'; export function BotMessage({ textStream }: { textStream: StreamableValue }) { const [text] = useStreamableValue(textStream); diff --git a/examples/next-openai/app/stream-ui/page.tsx b/examples/next-openai/app/stream-ui/page.tsx index 9601842953b1..659c2ba98827 100644 --- a/examples/next-openai/app/stream-ui/page.tsx +++ b/examples/next-openai/app/stream-ui/page.tsx @@ -2,9 +2,9 @@ import { Fragment, useState } from 'react'; import type { AI } from './ai'; -import { useActions } from 'ai/rsc'; +import { useActions } from '@ai-sdk/rsc'; -import { useAIState, useUIState } from 'ai/rsc'; +import { useAIState, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; import { Message } from './message'; diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json index f2384ceb1142..5feded50c027 100644 --- a/examples/next-openai/package.json +++ b/examples/next-openai/package.json @@ -18,6 +18,7 @@ "@ai-sdk/perplexity": "2.0.0-canary.2", "@ai-sdk/ui-utils": "2.0.0-canary.2", "@ai-sdk/react": "2.0.0-canary.2", + "@ai-sdk/rsc": "1.0.0-canary.1", "@vercel/blob": "^0.26.0", "ai": "5.0.0-canary.3", "next": "latest", diff --git a/packages/ai/core/prompt/index.ts b/packages/ai/core/prompt/index.ts index d3a4d1f2d8c2..760c76d258e7 100644 --- a/packages/ai/core/prompt/index.ts +++ b/packages/ai/core/prompt/index.ts @@ -26,3 +26,5 @@ export type { ToolContent, UserContent, } from './message'; +export type { Prompt } from './prompt'; +export type { CallSettings } from './call-settings'; diff --git a/packages/ai/core/types/index.ts b/packages/ai/core/types/index.ts index 78c7d580d7b3..1c77586ff9a3 100644 --- a/packages/ai/core/types/index.ts +++ b/packages/ai/core/types/index.ts @@ -19,5 +19,5 @@ export type { export type { LanguageModelRequestMetadata } from './language-model-request-metadata'; export type { LanguageModelResponseMetadata } from './language-model-response-metadata'; export type { Provider } from './provider'; -export type { ProviderMetadata } from './provider-metadata'; +export type { ProviderOptions, ProviderMetadata } from './provider-metadata'; export type { EmbeddingModelUsage, LanguageModelUsage } from './usage'; diff --git a/packages/ai/internal/index.ts b/packages/ai/internal/index.ts new file mode 100644 index 000000000000..82633c5c9d56 --- /dev/null +++ b/packages/ai/internal/index.ts @@ -0,0 +1,8 @@ +export { standardizePrompt } from '../core/prompt/standardize-prompt'; +export { prepareToolsAndToolChoice } from '../core/prompt/prepare-tools-and-tool-choice'; +export { prepareRetries } from '../core/prompt/prepare-retries'; +export { prepareCallSettings } from '../core/prompt/prepare-call-settings'; +export { convertToLanguageModelPrompt } from '../core/prompt/convert-to-language-model-prompt'; +export { calculateLanguageModelUsage } from '../core/types/usage'; + +export * from '../util/constants'; diff --git a/packages/ai/package.json b/packages/ai/package.json index 924f94cfe142..96a428635187 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -21,18 +21,14 @@ "lint": "eslint \"./**/*.ts*\"", "type-check": "tsc --noEmit", "prettier-check": "prettier --check \"./**/*.ts*\"", - "test": "pnpm test:node && pnpm test:edge && pnpm test:ui && pnpm test:e2e", - "test:e2e": 
"playwright test", + "test": "pnpm test:node && pnpm test:edge", "test:edge": "vitest --config vitest.edge.config.js --run", "test:edge:watch": "vitest --config vitest.edge.config.js", "test:node": "vitest --config vitest.node.config.js --run", "test:node:watch": "vitest --config vitest.node.config.js", "test:node:core": "pnpm vitest --config vitest.node.config.js --run ./core/", "test:node:core:watch": "pnpm vitest --config vitest.node.config.js ./core/", - "test:node:util": "pnpm vitest --config vitest.node.config.js --run ./util/", - "test:ui": "pnpm test:ui:react", - "test:ui:react": "vitest --config vitest.ui.react.config.js --run", - "test:ui:react:watch": "vitest --config vitest.ui.react.config.js" + "test:node:util": "pnpm vitest --config vitest.node.config.js --run ./util/" }, "exports": { "./package.json": "./package.json", @@ -41,17 +37,17 @@ "import": "./dist/index.mjs", "require": "./dist/index.js" }, + "./internal": { + "types": "./dist/internal/index.d.ts", + "import": "./dist/internal/index.mjs", + "require": "./dist/internal/index.js" + }, "./test": { "types": "./test/dist/index.d.ts", "import": "./test/dist/index.mjs", "module": "./test/dist/index.mjs", "require": "./test/dist/index.js" }, - "./rsc": { - "types": "./rsc/dist/index.d.ts", - "react-server": "./rsc/dist/rsc-server.mjs", - "import": "./rsc/dist/rsc-client.mjs" - }, "./mcp-stdio": { "types": "./mcp-stdio/dist/index.d.ts", "import": "./mcp-stdio/dist/index.mjs", @@ -68,27 +64,16 @@ "devDependencies": { "@edge-runtime/vm": "^5.0.0", "@types/node": "20.17.24", - "@types/react": "^18", - "@types/react-dom": "^18", "@vercel/ai-tsconfig": "workspace:*", - "@vitejs/plugin-react": "4.3.3", "eslint": "8.57.1", "eslint-config-vercel-ai": "workspace:*", - "react-dom": "^18", - "react-server-dom-webpack": "18.3.0-canary-eb33bd747-20240312", "tsup": "^7.2.0", "typescript": "5.8.3", "zod": "3.23.8" }, "peerDependencies": { - "react": "^18 || ^19 || ^19.0.0-rc", "zod": "^3.23.8" }, - "peerDependenciesMeta": { - "react": { - "optional": true - } - }, "engines": { "node": ">=18" }, diff --git a/packages/ai/rsc/package.json b/packages/ai/rsc/package.json deleted file mode 100644 index 95363586907a..000000000000 --- a/packages/ai/rsc/package.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "types": "./dist/index.d.ts", - "exports": { - "types": "./dist/index.d.ts", - "react-server": "./dist/rsc-server.mjs", - "import": "./dist/rsc-client.mjs" - }, - "private": true, - "peerDependencies": { - "react": ">=18", - "zod": ">=3" - } -} diff --git a/packages/ai/tsconfig.json b/packages/ai/tsconfig.json index 02fd510cb726..75f7310d2f41 100644 --- a/packages/ai/tsconfig.json +++ b/packages/ai/tsconfig.json @@ -1,5 +1,5 @@ { - "extends": "./node_modules/@vercel/ai-tsconfig/react-library.json", + "extends": "./node_modules/@vercel/ai-tsconfig/base.json", "compilerOptions": { "target": "ES2018", "stripInternal": true, diff --git a/packages/ai/tsup.config.ts b/packages/ai/tsup.config.ts index b2d726105e08..73f2ee479b56 100644 --- a/packages/ai/tsup.config.ts +++ b/packages/ai/tsup.config.ts @@ -9,47 +9,26 @@ export default defineConfig([ dts: true, sourcemap: true, }, - // Test utilities + // Internal APIs { - entry: ['test/index.ts'], - outDir: 'test/dist', + entry: ['internal/index.ts'], + // This bundle isn't actually used, + // we export the internal bundle with @internal from the root package + // and provide different types in package.json for the exports + // to save duplicating 40kb for bundle size + outDir: 'dist/internal', format: ['cjs', 
'esm'], dts: true, sourcemap: true, }, - // RSC APIs - shared client - { - // Entry is `.mts` as the entrypoints that import it will be ESM so it needs exact imports that includes the `.mjs` extension. - entry: ['rsc/rsc-shared.mts'], - outDir: 'rsc/dist', - format: ['esm'], - external: ['react', 'zod'], - dts: true, - sourcemap: true, - }, - // RSC APIs - server, client + // Test utilities { - entry: ['rsc/rsc-server.ts', 'rsc/rsc-client.ts'], - outDir: 'rsc/dist', - format: ['esm'], - external: ['react', 'zod', /\/rsc-shared/], + entry: ['test/index.ts'], + outDir: 'test/dist', + format: ['cjs', 'esm'], dts: true, sourcemap: true, }, - // RSC APIs - types - { - entry: ['rsc/index.ts'], - outDir: 'rsc/dist', - dts: true, - outExtension() { - return { - // It must be `.d.ts` instead of `.d.mts` to support node resolution. - // See https://github.com/vercel/ai/issues/1028. - dts: '.d.ts', - js: '.mjs', - }; - }, - }, // MCP stdio { entry: ['mcp-stdio/index.ts'], diff --git a/packages/rsc/.eslintrc.js b/packages/rsc/.eslintrc.js new file mode 100644 index 000000000000..2ad0d0686d9c --- /dev/null +++ b/packages/rsc/.eslintrc.js @@ -0,0 +1,4 @@ +module.exports = { + root: true, + extends: ['vercel-ai'], +}; diff --git a/packages/rsc/CHANGELOG.md b/packages/rsc/CHANGELOG.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/rsc/README.md b/packages/rsc/README.md new file mode 100644 index 000000000000..1ce96cbfd0e4 --- /dev/null +++ b/packages/rsc/README.md @@ -0,0 +1,3 @@ +# AI SDK: React Server Components + +[React Server Components](https://react.dev/reference/rsc/server-components) for the [AI SDK](https://sdk.vercel.ai/docs): diff --git a/packages/rsc/package.json b/packages/rsc/package.json new file mode 100644 index 000000000000..75df8e7e513b --- /dev/null +++ b/packages/rsc/package.json @@ -0,0 +1,95 @@ +{ + "name": "@ai-sdk/rsc", + "version": "1.0.0-canary.1", + "license": "Apache-2.0", + "sideEffects": false, + "main": "./dist/rsc-client.mjs", + "module": "./dist/rsc-client.mjs", + "types": "./dist/index.d.ts", + "scripts": { + "build": "tsup", + "build:watch": "tsup --watch", + "clean": "rm -rf dist", + "lint": "eslint \"./**/*.ts*\"", + "type-check": "tsc --noEmit", + "prettier-check": "prettier --check \"./**/*.ts*\"", + "test:watch": "vitest --config vitest.config.js", + "test": "pnpm test:node && pnpm test:ui && pnpm test:e2e && pnpm test:edge", + "test:e2e": "playwright test", + "test:edge": "vitest --config vitest.edge.config.js --run", + "test:edge:watch": "vitest --config vitest.edge.config.js", + "test:node": "vitest --config vitest.node.config.js --run", + "test:node:watch": "vitest --config vitest.node.config.js", + "test:ui": "pnpm test:ui:react", + "test:ui:react": "vitest --config vitest.ui.react.config.js --run", + "test:ui:react:watch": "vitest --config vitest.ui.react.config.js" + }, + "exports": { + "./package.json": "./package.json", + ".": { + "types": "./dist/index.d.ts", + "react-server": "./dist/rsc-server.mjs", + "module": "./dist/rsc-client.mjs", + "import": "./dist/rsc-client.mjs" + } + }, + "files": [ + "dist/**/*", + "CHANGELOG.md" + ], + "dependencies": { + "ai": "5.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.1", + "@ai-sdk/provider-utils": "3.0.0-canary.2", + "jsondiffpatch": "0.6.0" + }, + "devDependencies": { + "@testing-library/jest-dom": "^6.6.3", + "@testing-library/user-event": "^14.5.2", + "@testing-library/react": "^16.0.1", + "@types/node": "20.17.24", + "@types/react": "^18", + "@types/react-dom": "^18", + 
"@vitejs/plugin-react": "4.3.3", + "react-dom": "^18", + "react": "^18", + "react-server-dom-webpack": "18.3.0-canary-eb33bd747-20240312", + "@vercel/ai-tsconfig": "workspace:*", + "eslint": "8.57.1", + "eslint-config-vercel-ai": "workspace:*", + "jsdom": "^24.0.0", + "msw": "2.6.4", + "tsup": "^7.2.0", + "typescript": "5.8.3", + "zod": "3.23.8" + }, + "peerDependencies": { + "react": "^18 || ^19 || ^19.0.0-rc", + "zod": "^3.23.8" + }, + "peerDependenciesMeta": { + "zod": { + "optional": true + } + }, + "engines": { + "node": ">=18" + }, + "publishConfig": { + "access": "public" + }, + "homepage": "https://sdk.vercel.ai/docs", + "repository": { + "type": "git", + "url": "git+https://github.com/vercel/ai.git" + }, + "bugs": { + "url": "https://github.com/vercel/ai/issues" + }, + "keywords": [ + "ai", + "react", + "rsc", + "server-components" + ] +} diff --git a/packages/ai/playwright.config.ts b/packages/rsc/playwright.config.ts similarity index 100% rename from packages/ai/playwright.config.ts rename to packages/rsc/playwright.config.ts diff --git a/packages/ai/rsc/ai-state.test.ts b/packages/rsc/src/ai-state.test.ts similarity index 100% rename from packages/ai/rsc/ai-state.test.ts rename to packages/rsc/src/ai-state.test.ts diff --git a/packages/ai/rsc/ai-state.tsx b/packages/rsc/src/ai-state.tsx similarity index 97% rename from packages/ai/rsc/ai-state.tsx rename to packages/rsc/src/ai-state.tsx index 178e21ca5782..a4bdf56a601f 100644 --- a/packages/ai/rsc/ai-state.tsx +++ b/packages/rsc/src/ai-state.tsx @@ -1,7 +1,7 @@ import * as jsondiffpatch from 'jsondiffpatch'; import { AsyncLocalStorage } from 'node:async_hooks'; -import { createResolvablePromise } from '../util/create-resolvable-promise'; -import { isFunction } from '../util/is-function'; +import { createResolvablePromise } from './util/create-resolvable-promise'; +import { isFunction } from './util/is-function'; import type { AIProvider, InferAIState, diff --git a/packages/ai/rsc/index.ts b/packages/rsc/src/index.ts similarity index 100% rename from packages/ai/rsc/index.ts rename to packages/rsc/src/index.ts diff --git a/packages/ai/rsc/provider.tsx b/packages/rsc/src/provider.tsx similarity index 100% rename from packages/ai/rsc/provider.tsx rename to packages/rsc/src/provider.tsx diff --git a/packages/ai/rsc/rsc-client.ts b/packages/rsc/src/rsc-client.ts similarity index 100% rename from packages/ai/rsc/rsc-client.ts rename to packages/rsc/src/rsc-client.ts diff --git a/packages/ai/rsc/rsc-server.ts b/packages/rsc/src/rsc-server.ts similarity index 100% rename from packages/ai/rsc/rsc-server.ts rename to packages/rsc/src/rsc-server.ts diff --git a/packages/ai/rsc/rsc-shared.mts b/packages/rsc/src/rsc-shared.mts similarity index 100% rename from packages/ai/rsc/rsc-shared.mts rename to packages/rsc/src/rsc-shared.mts diff --git a/packages/ai/rsc/shared-client/context.tsx b/packages/rsc/src/shared-client/context.tsx similarity index 99% rename from packages/ai/rsc/shared-client/context.tsx rename to packages/rsc/src/shared-client/context.tsx index 64d279981e46..54664d3a5814 100644 --- a/packages/ai/rsc/shared-client/context.tsx +++ b/packages/rsc/src/shared-client/context.tsx @@ -4,7 +4,7 @@ import * as React from 'react'; import * as jsondiffpatch from 'jsondiffpatch'; -import { isFunction } from '../../util/is-function'; +import { isFunction } from '../util/is-function'; import type { AIProvider, InferActions, diff --git a/packages/ai/rsc/shared-client/index.ts b/packages/rsc/src/shared-client/index.ts similarity index 100% 
rename from packages/ai/rsc/shared-client/index.ts rename to packages/rsc/src/shared-client/index.ts diff --git a/packages/ai/rsc/stream-ui/__snapshots__/render.ui.test.tsx.snap b/packages/rsc/src/stream-ui/__snapshots__/render.ui.test.tsx.snap similarity index 100% rename from packages/ai/rsc/stream-ui/__snapshots__/render.ui.test.tsx.snap rename to packages/rsc/src/stream-ui/__snapshots__/render.ui.test.tsx.snap diff --git a/packages/ai/rsc/stream-ui/__snapshots__/stream-ui.ui.test.tsx.snap b/packages/rsc/src/stream-ui/__snapshots__/stream-ui.ui.test.tsx.snap similarity index 100% rename from packages/ai/rsc/stream-ui/__snapshots__/stream-ui.ui.test.tsx.snap rename to packages/rsc/src/stream-ui/__snapshots__/stream-ui.ui.test.tsx.snap diff --git a/packages/ai/rsc/stream-ui/index.tsx b/packages/rsc/src/stream-ui/index.tsx similarity index 100% rename from packages/ai/rsc/stream-ui/index.tsx rename to packages/rsc/src/stream-ui/index.tsx diff --git a/packages/ai/rsc/stream-ui/stream-ui.tsx b/packages/rsc/src/stream-ui/stream-ui.tsx similarity index 90% rename from packages/ai/rsc/stream-ui/stream-ui.tsx rename to packages/rsc/src/stream-ui/stream-ui.tsx index b9dca3368743..3d0956501665 100644 --- a/packages/ai/rsc/stream-ui/stream-ui.tsx +++ b/packages/rsc/src/stream-ui/stream-ui.tsx @@ -2,29 +2,29 @@ import { LanguageModelV2 } from '@ai-sdk/provider'; import { safeParseJSON } from '@ai-sdk/provider-utils'; import { ReactNode } from 'react'; import { z } from 'zod'; -import { CallSettings } from '../../core/prompt/call-settings'; -import { convertToLanguageModelPrompt } from '../../core/prompt/convert-to-language-model-prompt'; -import { prepareCallSettings } from '../../core/prompt/prepare-call-settings'; -import { prepareRetries } from '../../core/prompt/prepare-retries'; -import { prepareToolsAndToolChoice } from '../../core/prompt/prepare-tools-and-tool-choice'; -import { Prompt } from '../../core/prompt/prompt'; -import { standardizePrompt } from '../../core/prompt/standardize-prompt'; import { CallWarning, FinishReason, ProviderMetadata, + ProviderOptions, + LanguageModelUsage, ToolChoice, -} from '../../core/types'; -import { ProviderOptions } from '../../core/types/provider-metadata'; + Prompt, + CallSettings, + InvalidToolArgumentsError, + NoSuchToolError, +} from 'ai'; import { - LanguageModelUsage, calculateLanguageModelUsage, -} from '../../core/types/usage'; -import { InvalidToolArgumentsError } from '../../errors/invalid-tool-arguments-error'; -import { NoSuchToolError } from '../../errors/no-such-tool-error'; -import { createResolvablePromise } from '../../util/create-resolvable-promise'; -import { isAsyncGenerator } from '../../util/is-async-generator'; -import { isGenerator } from '../../util/is-generator'; + standardizePrompt, + prepareToolsAndToolChoice, + prepareRetries, + prepareCallSettings, + convertToLanguageModelPrompt, +} from 'ai/internal'; +import { createResolvablePromise } from '../util/create-resolvable-promise'; +import { isAsyncGenerator } from '../util/is-async-generator'; +import { isGenerator } from '../util/is-generator'; import { createStreamableUI } from '../streamable-ui/create-streamable-ui'; type Streamable = ReactNode | Promise<ReactNode>; diff --git a/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx b/packages/rsc/src/stream-ui/stream-ui.ui.test.tsx similarity index 99% rename from packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx rename to packages/rsc/src/stream-ui/stream-ui.ui.test.tsx index be65786d7f3f..863d4933304d 100644 ---
a/packages/ai/rsc/stream-ui/stream-ui.ui.test.tsx +++ b/packages/rsc/src/stream-ui/stream-ui.ui.test.tsx @@ -1,6 +1,6 @@ import { delay } from '@ai-sdk/provider-utils'; import { convertArrayToReadableStream } from '@ai-sdk/provider-utils/test'; -import { MockLanguageModelV2 } from '../../core/test/mock-language-model-v1'; +import { MockLanguageModelV2 } from 'ai/test'; import { streamUI } from './stream-ui'; import { z } from 'zod'; diff --git a/packages/ai/rsc/streamable-ui/create-streamable-ui.tsx b/packages/rsc/src/streamable-ui/create-streamable-ui.tsx similarity index 96% rename from packages/ai/rsc/streamable-ui/create-streamable-ui.tsx rename to packages/rsc/src/streamable-ui/create-streamable-ui.tsx index ba286adc32e4..4cc895929d20 100644 --- a/packages/ai/rsc/streamable-ui/create-streamable-ui.tsx +++ b/packages/rsc/src/streamable-ui/create-streamable-ui.tsx @@ -1,5 +1,5 @@ -import { HANGING_STREAM_WARNING_TIME_MS } from '../../util/constants'; -import { createResolvablePromise } from '../../util/create-resolvable-promise'; +import { HANGING_STREAM_WARNING_TIME_MS } from 'ai/internal'; +import { createResolvablePromise } from '../util/create-resolvable-promise'; import { createSuspendedChunk } from './create-suspended-chunk'; // It's necessary to define the type manually here, otherwise TypeScript compiler diff --git a/packages/ai/rsc/streamable-ui/create-streamable-ui.ui.test.tsx b/packages/rsc/src/streamable-ui/create-streamable-ui.ui.test.tsx similarity index 100% rename from packages/ai/rsc/streamable-ui/create-streamable-ui.ui.test.tsx rename to packages/rsc/src/streamable-ui/create-streamable-ui.ui.test.tsx diff --git a/packages/ai/rsc/streamable-ui/create-suspended-chunk.tsx b/packages/rsc/src/streamable-ui/create-suspended-chunk.tsx similarity index 96% rename from packages/ai/rsc/streamable-ui/create-suspended-chunk.tsx rename to packages/rsc/src/streamable-ui/create-suspended-chunk.tsx index 8684f61f7c24..a10769e40f2f 100644 --- a/packages/ai/rsc/streamable-ui/create-suspended-chunk.tsx +++ b/packages/rsc/src/streamable-ui/create-suspended-chunk.tsx @@ -1,5 +1,5 @@ import React, { Suspense } from 'react'; -import { createResolvablePromise } from '../../util/create-resolvable-promise'; +import { createResolvablePromise } from '../util/create-resolvable-promise'; // Recursive type for the chunk. 
type ChunkType = diff --git a/packages/ai/rsc/streamable-value/create-streamable-value.test.tsx b/packages/rsc/src/streamable-value/create-streamable-value.test.tsx similarity index 100% rename from packages/ai/rsc/streamable-value/create-streamable-value.test.tsx rename to packages/rsc/src/streamable-value/create-streamable-value.test.tsx diff --git a/packages/ai/rsc/streamable-value/create-streamable-value.ts b/packages/rsc/src/streamable-value/create-streamable-value.ts similarity index 98% rename from packages/ai/rsc/streamable-value/create-streamable-value.ts rename to packages/rsc/src/streamable-value/create-streamable-value.ts index 97108f38cf07..9edd018349e4 100644 --- a/packages/ai/rsc/streamable-value/create-streamable-value.ts +++ b/packages/rsc/src/streamable-value/create-streamable-value.ts @@ -1,5 +1,5 @@ -import { HANGING_STREAM_WARNING_TIME_MS } from '../../util/constants'; -import { createResolvablePromise } from '../../util/create-resolvable-promise'; +import { HANGING_STREAM_WARNING_TIME_MS } from 'ai/internal'; +import { createResolvablePromise } from '../util/create-resolvable-promise'; import { STREAMABLE_VALUE_TYPE, StreamablePatch, diff --git a/packages/ai/rsc/streamable-value/is-streamable-value.ts b/packages/rsc/src/streamable-value/is-streamable-value.ts similarity index 100% rename from packages/ai/rsc/streamable-value/is-streamable-value.ts rename to packages/rsc/src/streamable-value/is-streamable-value.ts diff --git a/packages/ai/rsc/streamable-value/read-streamable-value.tsx b/packages/rsc/src/streamable-value/read-streamable-value.tsx similarity index 100% rename from packages/ai/rsc/streamable-value/read-streamable-value.tsx rename to packages/rsc/src/streamable-value/read-streamable-value.tsx diff --git a/packages/ai/rsc/streamable-value/read-streamable-value.ui.test.tsx b/packages/rsc/src/streamable-value/read-streamable-value.ui.test.tsx similarity index 100% rename from packages/ai/rsc/streamable-value/read-streamable-value.ui.test.tsx rename to packages/rsc/src/streamable-value/read-streamable-value.ui.test.tsx diff --git a/packages/ai/rsc/streamable-value/streamable-value.ts b/packages/rsc/src/streamable-value/streamable-value.ts similarity index 100% rename from packages/ai/rsc/streamable-value/streamable-value.ts rename to packages/rsc/src/streamable-value/streamable-value.ts diff --git a/packages/ai/rsc/streamable-value/use-streamable-value.tsx b/packages/rsc/src/streamable-value/use-streamable-value.tsx similarity index 100% rename from packages/ai/rsc/streamable-value/use-streamable-value.tsx rename to packages/rsc/src/streamable-value/use-streamable-value.tsx diff --git a/packages/ai/rsc/types.test-d.ts b/packages/rsc/src/types.test-d.ts similarity index 94% rename from packages/ai/rsc/types.test-d.ts rename to packages/rsc/src/types.test-d.ts index d344efa07ff2..e8d370adcc2e 100644 --- a/packages/ai/rsc/types.test-d.ts +++ b/packages/rsc/src/types.test-d.ts @@ -1,6 +1,6 @@ import { expectTypeOf } from 'vitest'; -import type { StreamableValue } from './dist'; +import type { StreamableValue } from '../dist'; describe('StreamableValue type', () => { it('should not contain types marked with @internal after compilation', () => { diff --git a/packages/ai/rsc/types.ts b/packages/rsc/src/types.ts similarity index 100% rename from packages/ai/rsc/types.ts rename to packages/rsc/src/types.ts diff --git a/packages/rsc/src/util/create-resolvable-promise.ts b/packages/rsc/src/util/create-resolvable-promise.ts new file mode 100644 index 
000000000000..80ebc1753c94 --- /dev/null +++ b/packages/rsc/src/util/create-resolvable-promise.ts @@ -0,0 +1,28 @@ +/** + * Creates a Promise with externally accessible resolve and reject functions. + * + * @template T - The type of the value that the Promise will resolve to. + * @returns An object containing: + * - promise: A Promise that can be resolved or rejected externally. + * - resolve: A function to resolve the Promise with a value of type T. + * - reject: A function to reject the Promise with an error. + */ +export function createResolvablePromise<T = any>(): { + promise: Promise<T>; + resolve: (value: T) => void; + reject: (error: unknown) => void; +} { + let resolve: (value: T) => void; + let reject: (error: unknown) => void; + + const promise = new Promise<T>((res, rej) => { + resolve = res; + reject = rej; + }); + + return { + promise, + resolve: resolve!, + reject: reject!, + }; +} diff --git a/packages/ai/util/is-async-generator.ts b/packages/rsc/src/util/is-async-generator.ts similarity index 100% rename from packages/ai/util/is-async-generator.ts rename to packages/rsc/src/util/is-async-generator.ts diff --git a/packages/ai/util/is-function.ts b/packages/rsc/src/util/is-function.ts similarity index 100% rename from packages/ai/util/is-function.ts rename to packages/rsc/src/util/is-function.ts diff --git a/packages/ai/util/is-generator.ts b/packages/rsc/src/util/is-generator.ts similarity index 100% rename from packages/ai/util/is-generator.ts rename to packages/rsc/src/util/is-generator.ts diff --git a/packages/ai/tests/e2e/next-server/CHANGELOG.md b/packages/rsc/tests/e2e/next-server/CHANGELOG.md similarity index 100% rename from packages/ai/tests/e2e/next-server/CHANGELOG.md rename to packages/rsc/tests/e2e/next-server/CHANGELOG.md diff --git a/packages/ai/tests/e2e/next-server/app/layout.js b/packages/rsc/tests/e2e/next-server/app/layout.js similarity index 100% rename from packages/ai/tests/e2e/next-server/app/layout.js rename to packages/rsc/tests/e2e/next-server/app/layout.js diff --git a/packages/ai/tests/e2e/next-server/app/page.js b/packages/rsc/tests/e2e/next-server/app/page.js similarity index 100% rename from packages/ai/tests/e2e/next-server/app/page.js rename to packages/rsc/tests/e2e/next-server/app/page.js diff --git a/packages/ai/tests/e2e/next-server/app/rsc/actions.jsx b/packages/rsc/tests/e2e/next-server/app/rsc/actions.jsx similarity index 92% rename from packages/ai/tests/e2e/next-server/app/rsc/actions.jsx rename to packages/rsc/tests/e2e/next-server/app/rsc/actions.jsx index 64afa6a9fead..db570bdd209f 100644 --- a/packages/ai/tests/e2e/next-server/app/rsc/actions.jsx +++ b/packages/rsc/tests/e2e/next-server/app/rsc/actions.jsx @@ -1,6 +1,6 @@ 'use server'; -import { createStreamableUI, createStreamableValue } from 'ai/rsc'; +import { createStreamableUI, createStreamableValue } from '@ai-sdk/rsc'; import { ClientInfo } from './client-utils'; function sleep(ms = 0) { diff --git a/packages/ai/tests/e2e/next-server/app/rsc/client-utils.js b/packages/rsc/tests/e2e/next-server/app/rsc/client-utils.js similarity index 100% rename from packages/ai/tests/e2e/next-server/app/rsc/client-utils.js rename to packages/rsc/tests/e2e/next-server/app/rsc/client-utils.js diff --git a/packages/ai/tests/e2e/next-server/app/rsc/client.js b/packages/rsc/tests/e2e/next-server/app/rsc/client.js similarity index 95% rename from packages/ai/tests/e2e/next-server/app/rsc/client.js rename to packages/rsc/tests/e2e/next-server/app/rsc/client.js index 50ee26a1271e..87766afa28f3 100644 ---
a/packages/ai/tests/e2e/next-server/app/rsc/client.js +++ b/packages/rsc/tests/e2e/next-server/app/rsc/client.js @@ -1,7 +1,7 @@ 'use client'; import { useState } from 'react'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; export function Client({ actions }) { const [log, setLog] = useState(''); diff --git a/packages/ai/tests/e2e/next-server/app/rsc/page.js b/packages/rsc/tests/e2e/next-server/app/rsc/page.js similarity index 100% rename from packages/ai/tests/e2e/next-server/app/rsc/page.js rename to packages/rsc/tests/e2e/next-server/app/rsc/page.js diff --git a/packages/ai/tests/e2e/next-server/package.json b/packages/rsc/tests/e2e/next-server/package.json similarity index 100% rename from packages/ai/tests/e2e/next-server/package.json rename to packages/rsc/tests/e2e/next-server/package.json diff --git a/packages/ai/tests/e2e/spec/streamable.e2e.test.ts b/packages/rsc/tests/e2e/spec/streamable.e2e.test.ts similarity index 100% rename from packages/ai/tests/e2e/spec/streamable.e2e.test.ts rename to packages/rsc/tests/e2e/spec/streamable.e2e.test.ts diff --git a/packages/rsc/tsconfig.json b/packages/rsc/tsconfig.json new file mode 100644 index 000000000000..73b8d8e639ec --- /dev/null +++ b/packages/rsc/tsconfig.json @@ -0,0 +1,9 @@ +{ + "extends": "./node_modules/@vercel/ai-tsconfig/react-library.json", + "compilerOptions": { + "target": "ES2018", + "stripInternal": true + }, + "include": ["."], + "exclude": ["*/dist", "dist", "build", "node_modules"] +} diff --git a/packages/rsc/tsup.config.ts b/packages/rsc/tsup.config.ts new file mode 100644 index 000000000000..36b7f95f0435 --- /dev/null +++ b/packages/rsc/tsup.config.ts @@ -0,0 +1,37 @@ +import { defineConfig } from 'tsup'; + +export default defineConfig([ + // RSC APIs - shared client + { + // Entry is `.mts` as the entrypoints that import it will be ESM so it needs exact imports that includes the `.mjs` extension. + entry: ['src/rsc-shared.mts'], + outDir: 'dist', + format: ['esm'], + external: ['react', 'zod'], + dts: true, + sourcemap: true, + }, + // RSC APIs - server, client + { + entry: ['src/rsc-server.ts', 'src/rsc-client.ts'], + outDir: 'dist', + format: ['esm'], + external: ['react', 'zod', /\/rsc-shared/], + dts: true, + sourcemap: true, + }, + // RSC APIs - types + { + entry: ['src/index.ts'], + outDir: 'dist', + dts: true, + outExtension() { + return { + // It must be `.d.ts` instead of `.d.mts` to support node resolution. + // See https://github.com/vercel/ai/issues/1028. 
+ dts: '.d.ts', + js: '.mjs', + }; + }, + }, +]); diff --git a/packages/rsc/turbo.json b/packages/rsc/turbo.json new file mode 100644 index 000000000000..620b8380e744 --- /dev/null +++ b/packages/rsc/turbo.json @@ -0,0 +1,12 @@ +{ + "extends": [ + "//" + ], + "tasks": { + "build": { + "outputs": [ + "**/dist/**" + ] + } + } +} diff --git a/packages/rsc/vitest.edge.config.js b/packages/rsc/vitest.edge.config.js new file mode 100644 index 000000000000..6ae6d084183e --- /dev/null +++ b/packages/rsc/vitest.edge.config.js @@ -0,0 +1,18 @@ +import { defineConfig } from 'vite'; + +// https://vitejs.dev/config/ +export default defineConfig({ + test: { + environment: 'edge-runtime', + globals: true, + include: ['**/*.test.ts{,x}'], + exclude: [ + '**/*.ui.test.ts{,x}', + '**/*.e2e.test.ts{,x}', + '**/node_modules/**', + ], + typecheck: { + enabled: true, + }, + }, +}); diff --git a/packages/rsc/vitest.node.config.js b/packages/rsc/vitest.node.config.js new file mode 100644 index 000000000000..ab37155d56dc --- /dev/null +++ b/packages/rsc/vitest.node.config.js @@ -0,0 +1,18 @@ +import { defineConfig } from 'vite'; + +// https://vitejs.dev/config/ +export default defineConfig({ + test: { + environment: 'node', + globals: true, + include: ['**/*.test.ts{,x}'], + exclude: [ + '**/*.ui.test.ts{,x}', + '**/*.e2e.test.ts{,x}', + '**/node_modules/**', + ], + typecheck: { + enabled: true, + }, + }, +}); diff --git a/packages/ai/vitest.ui.react.config.js b/packages/rsc/vitest.ui.react.config.js similarity index 74% rename from packages/ai/vitest.ui.react.config.js rename to packages/rsc/vitest.ui.react.config.js index d5b58155aa94..2c08b5adb1fb 100644 --- a/packages/ai/vitest.ui.react.config.js +++ b/packages/rsc/vitest.ui.react.config.js @@ -7,7 +7,6 @@ export default defineConfig({ test: { environment: 'jsdom', globals: true, - include: ['rsc/**/*.ui.test.ts{,x}'], - exclude: ['**/node_modules/**'], + include: ['src/**/*.ui.test.ts', 'src/**/*.ui.test.tsx'], }, }); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ac0d96d5e0ae..bdc0ede86f24 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -557,6 +557,9 @@ importers: '@ai-sdk/react': specifier: 2.0.0-canary.2 version: link:../../packages/react + '@ai-sdk/rsc': + specifier: 1.0.0-canary.1 + version: link:../../packages/rsc '@ai-sdk/ui-utils': specifier: 2.0.0-canary.2 version: link:../../packages/ui-utils @@ -1105,9 +1108,6 @@ importers: jsondiffpatch: specifier: 0.6.0 version: 0.6.0 - react: - specifier: ^18 || ^19 || ^19.0.0-rc - version: 18.3.1 devDependencies: '@edge-runtime/vm': specifier: ^5.0.0 @@ -1115,30 +1115,15 @@ importers: '@types/node': specifier: 20.17.24 version: 20.17.24 - '@types/react': - specifier: ^18 - version: 18.3.3 - '@types/react-dom': - specifier: ^18 - version: 18.3.0 '@vercel/ai-tsconfig': specifier: workspace:* version: link:../../tools/tsconfig - '@vitejs/plugin-react': - specifier: 4.3.3 - version: 4.3.3(vite@6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) eslint: specifier: 8.57.1 version: 8.57.1 eslint-config-vercel-ai: specifier: workspace:* version: link:../../tools/eslint-config - react-dom: - specifier: ^18 - version: 18.3.1(react@18.3.1) - react-server-dom-webpack: - specifier: 18.3.0-canary-eb33bd747-20240312 - version: 18.3.0-canary-eb33bd747-20240312(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(webpack@5.96.1(esbuild@0.18.20)) tsup: specifier: ^7.2.0 version: 7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) @@ -1149,21 +1134,6 @@ 
importers: specifier: 3.23.8 version: 3.23.8 - packages/ai/tests/e2e/next-server: - dependencies: - ai: - specifier: workspace:* - version: link:../../.. - next: - specifier: canary - version: 15.3.0-canary.10(@opentelemetry/api@1.9.0)(@playwright/test@1.50.1)(react-dom@19.0.0-rc.1(react@19.0.0-rc.1))(react@19.0.0-rc.1) - react: - specifier: rc - version: 19.0.0-rc.1 - react-dom: - specifier: rc - version: 19.0.0-rc.1(react@19.0.0-rc.1) - packages/amazon-bedrock: dependencies: '@ai-sdk/provider': @@ -1810,6 +1780,76 @@ importers: specifier: 3.23.8 version: 3.23.8 + packages/rsc: + dependencies: + '@ai-sdk/provider': + specifier: 2.0.0-canary.1 + version: link:../provider + '@ai-sdk/provider-utils': + specifier: 3.0.0-canary.2 + version: link:../provider-utils + ai: + specifier: 5.0.0-canary.3 + version: link:../ai + jsondiffpatch: + specifier: 0.6.0 + version: 0.6.0 + devDependencies: + '@testing-library/jest-dom': + specifier: ^6.6.3 + version: 6.6.3 + '@testing-library/react': + specifier: ^16.0.1 + version: 16.0.1(@testing-library/dom@10.4.0)(@types/react-dom@18.3.0)(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@testing-library/user-event': + specifier: ^14.5.2 + version: 14.5.2(@testing-library/dom@10.4.0) + '@types/node': + specifier: 20.17.24 + version: 20.17.24 + '@types/react': + specifier: ^18 + version: 18.3.3 + '@types/react-dom': + specifier: ^18 + version: 18.3.0 + '@vercel/ai-tsconfig': + specifier: workspace:* + version: link:../../tools/tsconfig + '@vitejs/plugin-react': + specifier: 4.3.3 + version: 4.3.3(vite@6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) + eslint: + specifier: 8.57.1 + version: 8.57.1 + eslint-config-vercel-ai: + specifier: workspace:* + version: link:../../tools/eslint-config + jsdom: + specifier: ^24.0.0 + version: 24.0.0 + msw: + specifier: 2.6.4 + version: 2.6.4(@types/node@20.17.24)(typescript@5.8.3) + react: + specifier: ^18 + version: 18.3.1 + react-dom: + specifier: ^18 + version: 18.3.1(react@18.3.1) + react-server-dom-webpack: + specifier: 18.3.0-canary-eb33bd747-20240312 + version: 18.3.0-canary-eb33bd747-20240312(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(webpack@5.96.1) + tsup: + specifier: ^7.2.0 + version: 7.2.0(postcss@8.5.3)(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.8.3))(typescript@5.8.3) + typescript: + specifier: 5.8.3 + version: 5.8.3 + zod: + specifier: 3.23.8 + version: 3.23.8 + packages/svelte: dependencies: '@ai-sdk/provider-utils': @@ -4816,9 +4856,6 @@ packages: '@next/env@15.2.2': resolution: {integrity: sha512-yWgopCfA9XDR8ZH3taB5nRKtKJ1Q5fYsTOuYkzIIoS8TJ0UAUKAGF73JnGszbjk2ufAQDj6mDdgsJAFx5CLtYQ==} - '@next/env@15.3.0-canary.10': - resolution: {integrity: sha512-W/JkRHQyELbL9iwqE0hNPmkewolbyumHQdRlVxIOvRI8lVfCICqAvl27MhQ4UweKi5wHMov+oPhNGgAAhekGxA==} - '@next/eslint-plugin-next@14.2.3': resolution: {integrity: sha512-L3oDricIIjgj1AVnRdRor21gI7mShlSwU/1ZGHmqM3LzHhXXhdkrfeNY5zif25Bi5Dd7fiJHsbhoZCHfXYvlAw==} @@ -4834,12 +4871,6 @@ packages: cpu: [arm64] os: [darwin] - '@next/swc-darwin-arm64@15.3.0-canary.10': - resolution: {integrity: sha512-1ZXY8o08g/PwV07G+vTPyxQYk8gLH5tdZmdC/bpgLeidqx8YG+2F8tSRqUpjYeblOAl46v/0mhLByLWjmFGFKw==} - engines: {node: '>= 10'} - cpu: [arm64] - os: [darwin] - '@next/swc-darwin-x64@15.0.0-canary.23': resolution: {integrity: sha512-TRYY7ka1QRJlYer//aDz1dsLOLb+M7ODgYHA1DDtLLrovL3dbrD70mjWDiWdiP91EMpukGatNJaVpFCaAi0yuA==} engines: {node: '>= 10'} @@ -4852,12 +4883,6 @@ packages: cpu: [x64] os: [darwin] - 
'@next/swc-darwin-x64@15.3.0-canary.10': - resolution: {integrity: sha512-nlI1CiO4rbc+D7cjX9hCBTzdeFTffQpr9zAgxPx0YzgUEArnNlUifv49jh/R6TTaPOwWoqjFvOTFOBQ3xr+XjQ==} - engines: {node: '>= 10'} - cpu: [x64] - os: [darwin] - '@next/swc-linux-arm64-gnu@15.0.0-canary.23': resolution: {integrity: sha512-VceywUaF8xg9zZ2PnElzMOI1CjmKFSV1EJ0O1V/OSnf8RPtD+yisXvaFSvMH1b1aZsb1wBhYFZSjVSDNFsQrzA==} engines: {node: '>= 10'} @@ -4870,12 +4895,6 @@ packages: cpu: [arm64] os: [linux] - '@next/swc-linux-arm64-gnu@15.3.0-canary.10': - resolution: {integrity: sha512-mKz3G7OA0U1SS+uFUycJlONBcBsUg84p7sGPqQxk01WagW7UYgW1axbQBIPHxhAYz5o3+iLrdu7w5WeuHJcDRg==} - engines: {node: '>= 10'} - cpu: [arm64] - os: [linux] - '@next/swc-linux-arm64-musl@15.0.0-canary.23': resolution: {integrity: sha512-vcy0jSSr7br9UhnRH0tfll40l22NmHBD/A0cjpTBXGH8qxH6FmdkGIy56sJO+OAvZzPX1ZYVy3nAoZlFeMrjyQ==} engines: {node: '>= 10'} @@ -4888,12 +4907,6 @@ packages: cpu: [arm64] os: [linux] - '@next/swc-linux-arm64-musl@15.3.0-canary.10': - resolution: {integrity: sha512-1q6HWePFxOtiLtZZKh1eF0XPzp8EM36aMJZqamQj+yPYdL8X0Iqin5QPEKqX5QeOwpsiXo82TZ2qnDyb/ps9Wg==} - engines: {node: '>= 10'} - cpu: [arm64] - os: [linux] - '@next/swc-linux-x64-gnu@15.0.0-canary.23': resolution: {integrity: sha512-3CY4UcqOc3LPJ9iIXRN8/UHNZ96f5VKNaWWLq+EYqDOKRx5RBwedNSoaap/fx3+MHIw+lUpizWxHFx+amlMVZQ==} engines: {node: '>= 10'} @@ -4906,12 +4919,6 @@ packages: cpu: [x64] os: [linux] - '@next/swc-linux-x64-gnu@15.3.0-canary.10': - resolution: {integrity: sha512-aA04C+JDYlblbCdepWeP3UfPHXp0Bh6uNOCC3qH/AUwcdlMkUgaQxqDkn3rYN8DoVjkcvwtucRy5iQAQEWZMLA==} - engines: {node: '>= 10'} - cpu: [x64] - os: [linux] - '@next/swc-linux-x64-musl@15.0.0-canary.23': resolution: {integrity: sha512-DkxO6p9DRs5xTBpQYUuTvaimB68/iwRifpFJQPEA4YTFS8W0G3MR1MZ/Phmz/C0SxMOFiiSraqQ2c12WfvFwQw==} engines: {node: '>= 10'} @@ -4924,12 +4931,6 @@ packages: cpu: [x64] os: [linux] - '@next/swc-linux-x64-musl@15.3.0-canary.10': - resolution: {integrity: sha512-NHLJqblMx4nERqt8FyQhGcnzq/woaZ4zsRFKjTo9eyqihlZCKxoPmN10PP4qQIDhBQfaxHAbzvYl5eAgn53xsA==} - engines: {node: '>= 10'} - cpu: [x64] - os: [linux] - '@next/swc-win32-arm64-msvc@15.0.0-canary.23': resolution: {integrity: sha512-FM2RSVvVOnOWriLZhsZUbXGdBQQErLE1BDObnPQ7T5AtISpjtFXIHt6Oh/bmJ5HmB4logtUGDJYT8kW94E2Wew==} engines: {node: '>= 10'} @@ -4942,12 +4943,6 @@ packages: cpu: [arm64] os: [win32] - '@next/swc-win32-arm64-msvc@15.3.0-canary.10': - resolution: {integrity: sha512-KonCwSez8FNfVTjMkuK7RjBTqelZvT7TLVmni8C8TmulJARJlUwkDwsXSS2sTy9zmxH5XTROdVGppvEx+rLskA==} - engines: {node: '>= 10'} - cpu: [arm64] - os: [win32] - '@next/swc-win32-ia32-msvc@15.0.0-canary.23': resolution: {integrity: sha512-Rgok/7bZ2mA9/N3Qs42EgKiE/0kKj9Jv2EoZlttWXCEIhr6IuXatlbJ3sn76JHnvbc8jQieqq6l0M+nZZ0BzlQ==} engines: {node: '>= 10'} @@ -4966,12 +4961,6 @@ packages: cpu: [x64] os: [win32] - '@next/swc-win32-x64-msvc@15.3.0-canary.10': - resolution: {integrity: sha512-KezU9dlIdQ6fXZb4lHvv+AeUG9RR4J/FVGExiLR/3+rkbHxIJRB8qzM/h/94vecrovvwomKBLiNRS/9ThZsGng==} - engines: {node: '>= 10'} - cpu: [x64] - os: [win32] - '@nodelib/fs.scandir@2.1.5': resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} engines: {node: '>= 8'} @@ -11657,27 +11646,6 @@ packages: sass: optional: true - next@15.3.0-canary.10: - resolution: {integrity: sha512-M4o2+geTFAkeboP7IVCoRTsEfyyt56o+mLof7HW8LKwdG6TydvyCmsPbCvtRbU0V7Nl2IMRfGk/rts3TuB9bug==} - engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} - hasBin: true - peerDependencies: - 
'@opentelemetry/api': ^1.1.0 - '@playwright/test': ^1.41.2 - babel-plugin-react-compiler: '*' - react: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 - react-dom: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 - sass: ^1.3.0 - peerDependenciesMeta: - '@opentelemetry/api': - optional: true - '@playwright/test': - optional: true - babel-plugin-react-compiler: - optional: true - sass: - optional: true - nitropack@2.10.4: resolution: {integrity: sha512-sJiG/MIQlZCVSw2cQrFG1H6mLeSqHlYfFerRjLKz69vUfdu0EL2l0WdOxlQbzJr3mMv/l4cOlCCLzVRzjzzF/g==} engines: {node: ^16.11.0 || >=17.0.0} @@ -12714,11 +12682,6 @@ packages: peerDependencies: react: 19.0.0-rc-cc1ec60d0d-20240607 - react-dom@19.0.0-rc.1: - resolution: {integrity: sha512-k8MfDX+4G+eaa1cXXI9QF4d+pQtYol3nx8vauqRWUEOPqC7NQn2qmEqUsLoSd28rrZUL+R3T2VC+kZ2Hyx1geQ==} - peerDependencies: - react: 19.0.0-rc.1 - react-is@16.13.1: resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} @@ -12758,10 +12721,6 @@ packages: resolution: {integrity: sha512-q8A0/IdJ2wdHsjDNO1igFcSSFIMqSKmO7oJZtAjxIA9g0klK45Lxt15NQJ7z7cBvgD1r3xRTtQ/MAqnmwYHs1Q==} engines: {node: '>=0.10.0'} - react@19.0.0-rc.1: - resolution: {integrity: sha512-NZKln+uyPuyHchzP07I6GGYFxdAoaKhehgpCa3ltJGzwE31OYumLeshGaitA1R/fS5d9D2qpZVwTFAr6zCLM9w==} - engines: {node: '>=0.10.0'} - read-cache@1.0.0: resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} @@ -13058,9 +13017,6 @@ packages: scheduler@0.25.0-rc-cc1ec60d0d-20240607: resolution: {integrity: sha512-yFVKy6SDJkN2bOJSeH6gNo4+1MTygTZXnLRY5IHvEB6P9+O6WYRWz9PkELLjnl64lQwRgiigwzWQRSMNEboOGQ==} - scheduler@0.25.0-rc.1: - resolution: {integrity: sha512-fVinv2lXqYpKConAMdergOl5owd0rY1O4P/QTe0aWKCqGtu7VsCt1iqQFxSJtqK4Lci/upVSBpGwVC7eWcuS9Q==} - schema-utils@3.3.0: resolution: {integrity: sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==} engines: {node: '>= 10.13.0'} @@ -15665,14 +15621,14 @@ snapshots: transitivePeerDependencies: - supports-color - '@babel/plugin-transform-react-jsx-self@7.25.9(@babel/core@7.25.2)': + '@babel/plugin-transform-react-jsx-self@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.2 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-react-jsx-source@7.25.9(@babel/core@7.25.2)': + '@babel/plugin-transform-react-jsx-source@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.2 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 '@babel/plugin-transform-typescript@7.25.9(@babel/core@7.25.2)': @@ -17735,8 +17691,6 @@ snapshots: '@next/env@15.2.2': {} - '@next/env@15.3.0-canary.10': {} - '@next/eslint-plugin-next@14.2.3': dependencies: glob: 10.3.10 @@ -17747,63 +17701,42 @@ snapshots: '@next/swc-darwin-arm64@15.2.2': optional: true - '@next/swc-darwin-arm64@15.3.0-canary.10': - optional: true - '@next/swc-darwin-x64@15.0.0-canary.23': optional: true '@next/swc-darwin-x64@15.2.2': optional: true - '@next/swc-darwin-x64@15.3.0-canary.10': - optional: true - '@next/swc-linux-arm64-gnu@15.0.0-canary.23': optional: true '@next/swc-linux-arm64-gnu@15.2.2': optional: true - '@next/swc-linux-arm64-gnu@15.3.0-canary.10': - optional: true - '@next/swc-linux-arm64-musl@15.0.0-canary.23': optional: true '@next/swc-linux-arm64-musl@15.2.2': optional: true - '@next/swc-linux-arm64-musl@15.3.0-canary.10': - optional: true - '@next/swc-linux-x64-gnu@15.0.0-canary.23': optional: true 
'@next/swc-linux-x64-gnu@15.2.2': optional: true - '@next/swc-linux-x64-gnu@15.3.0-canary.10': - optional: true - '@next/swc-linux-x64-musl@15.0.0-canary.23': optional: true '@next/swc-linux-x64-musl@15.2.2': optional: true - '@next/swc-linux-x64-musl@15.3.0-canary.10': - optional: true - '@next/swc-win32-arm64-msvc@15.0.0-canary.23': optional: true '@next/swc-win32-arm64-msvc@15.2.2': optional: true - '@next/swc-win32-arm64-msvc@15.3.0-canary.10': - optional: true - '@next/swc-win32-ia32-msvc@15.0.0-canary.23': optional: true @@ -17813,9 +17746,6 @@ snapshots: '@next/swc-win32-x64-msvc@15.2.2': optional: true - '@next/swc-win32-x64-msvc@15.3.0-canary.10': - optional: true - '@nodelib/fs.scandir@2.1.5': dependencies: '@nodelib/fs.stat': 2.0.5 @@ -20439,6 +20369,16 @@ snapshots: '@types/react': 18.3.3 '@types/react-dom': 18.2.4 + '@testing-library/react@16.0.1(@testing-library/dom@10.4.0)(@types/react-dom@18.3.0)(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@babel/runtime': 7.25.7 + '@testing-library/dom': 10.4.0 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.3 + '@types/react-dom': 18.3.0 + '@testing-library/svelte@5.2.7(svelte@5.22.4)(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))(vitest@3.0.7(@edge-runtime/vm@5.0.0)(@types/debug@4.1.12)(@types/node@22.7.4)(jiti@2.4.0)(jsdom@26.0.0)(msw@2.7.0(@types/node@22.7.4)(typescript@5.6.3))(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))': dependencies: '@testing-library/dom': 10.4.0 @@ -21094,9 +21034,9 @@ snapshots: '@vitejs/plugin-react@4.3.3(vite@6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))': dependencies: - '@babel/core': 7.25.2 - '@babel/plugin-transform-react-jsx-self': 7.25.9(@babel/core@7.25.2) - '@babel/plugin-transform-react-jsx-source': 7.25.9(@babel/core@7.25.2) + '@babel/core': 7.26.0 + '@babel/plugin-transform-react-jsx-self': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-react-jsx-source': 7.25.9(@babel/core@7.26.0) '@types/babel__core': 7.20.5 react-refresh: 0.14.2 vite: 6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0) @@ -26766,33 +26706,6 @@ snapshots: - '@babel/core' - babel-plugin-macros - next@15.3.0-canary.10(@opentelemetry/api@1.9.0)(@playwright/test@1.50.1)(react-dom@19.0.0-rc.1(react@19.0.0-rc.1))(react@19.0.0-rc.1): - dependencies: - '@next/env': 15.3.0-canary.10 - '@swc/counter': 0.1.3 - '@swc/helpers': 0.5.15 - busboy: 1.6.0 - caniuse-lite: 1.0.30001666 - postcss: 8.4.31 - react: 19.0.0-rc.1 - react-dom: 19.0.0-rc.1(react@19.0.0-rc.1) - styled-jsx: 5.1.6(react@19.0.0-rc.1) - optionalDependencies: - '@next/swc-darwin-arm64': 15.3.0-canary.10 - '@next/swc-darwin-x64': 15.3.0-canary.10 - '@next/swc-linux-arm64-gnu': 15.3.0-canary.10 - '@next/swc-linux-arm64-musl': 15.3.0-canary.10 - '@next/swc-linux-x64-gnu': 15.3.0-canary.10 - '@next/swc-linux-x64-musl': 15.3.0-canary.10 - '@next/swc-win32-arm64-msvc': 15.3.0-canary.10 - '@next/swc-win32-x64-msvc': 15.3.0-canary.10 - '@opentelemetry/api': 1.9.0 - '@playwright/test': 1.50.1 - sharp: 0.33.5 - transitivePeerDependencies: - - '@babel/core' - - babel-plugin-macros - nitropack@2.10.4(@upstash/redis@1.34.3)(typescript@5.8.3): dependencies: '@cloudflare/kv-asset-handler': 0.3.4 @@ -28025,11 +27938,6 @@ snapshots: react: 19.0.0-rc-cc1ec60d0d-20240607 scheduler: 0.25.0-rc-cc1ec60d0d-20240607 - react-dom@19.0.0-rc.1(react@19.0.0-rc.1): - dependencies: - react: 19.0.0-rc.1 - scheduler: 
0.25.0-rc.1 - react-is@16.13.1: {} react-is@17.0.2: {} @@ -28055,13 +27963,13 @@ snapshots: react-refresh@0.14.2: {} - react-server-dom-webpack@18.3.0-canary-eb33bd747-20240312(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(webpack@5.96.1(esbuild@0.18.20)): + react-server-dom-webpack@18.3.0-canary-eb33bd747-20240312(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(webpack@5.96.1): dependencies: acorn-loose: 8.4.0 neo-async: 2.6.2 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - webpack: 5.96.1(esbuild@0.18.20) + webpack: 5.96.1 react@18.2.0: dependencies: @@ -28073,8 +27981,6 @@ snapshots: react@19.0.0-rc-cc1ec60d0d-20240607: {} - react@19.0.0-rc.1: {} - read-cache@1.0.0: dependencies: pify: 2.3.0 @@ -28493,8 +28399,6 @@ snapshots: scheduler@0.25.0-rc-cc1ec60d0d-20240607: {} - scheduler@0.25.0-rc.1: {} - schema-utils@3.3.0: dependencies: '@types/json-schema': 7.0.15 @@ -28924,11 +28828,6 @@ snapshots: client-only: 0.0.1 react: 19.0.0-rc-cc1ec60d0d-20240607 - styled-jsx@5.1.6(react@19.0.0-rc.1): - dependencies: - client-only: 0.0.1 - react: 19.0.0-rc.1 - stylehacks@7.0.4(postcss@8.4.49): dependencies: browserslist: 4.24.0 @@ -29234,17 +29133,6 @@ snapshots: transitivePeerDependencies: - debug - terser-webpack-plugin@5.3.10(esbuild@0.18.20)(webpack@5.96.1(esbuild@0.18.20)): - dependencies: - '@jridgewell/trace-mapping': 0.3.25 - jest-worker: 27.5.1 - schema-utils: 3.3.0 - serialize-javascript: 6.0.2 - terser: 5.31.3 - webpack: 5.96.1(esbuild@0.18.20) - optionalDependencies: - esbuild: 0.18.20 - terser-webpack-plugin@5.3.10(webpack@5.96.1): dependencies: '@jridgewell/trace-mapping': 0.3.25 @@ -30481,36 +30369,6 @@ snapshots: - esbuild - uglify-js - webpack@5.96.1(esbuild@0.18.20): - dependencies: - '@types/eslint-scope': 3.7.7 - '@types/estree': 1.0.6 - '@webassemblyjs/ast': 1.12.1 - '@webassemblyjs/wasm-edit': 1.12.1 - '@webassemblyjs/wasm-parser': 1.12.1 - acorn: 8.14.1 - browserslist: 4.24.0 - chrome-trace-event: 1.0.4 - enhanced-resolve: 5.17.1 - es-module-lexer: 1.5.4 - eslint-scope: 5.1.1 - events: 3.3.0 - glob-to-regexp: 0.4.1 - graceful-fs: 4.2.11 - json-parse-even-better-errors: 2.3.1 - loader-runner: 4.3.0 - mime-types: 2.1.35 - neo-async: 2.6.2 - schema-utils: 3.3.0 - tapable: 2.2.1 - terser-webpack-plugin: 5.3.10(esbuild@0.18.20)(webpack@5.96.1(esbuild@0.18.20)) - watchpack: 2.4.2 - webpack-sources: 3.2.3 - transitivePeerDependencies: - - '@swc/core' - - esbuild - - uglify-js - whatwg-encoding@3.1.1: dependencies: iconv-lite: 0.6.3 diff --git a/tools/tsconfig/base.json b/tools/tsconfig/base.json index 7f822833816f..b0dbe6643238 100644 --- a/tools/tsconfig/base.json +++ b/tools/tsconfig/base.json @@ -9,6 +9,7 @@ "forceConsistentCasingInFileNames": true, "inlineSources": false, "isolatedModules": true, + "module": "ESNext", "moduleResolution": "Bundler", "noUnusedLocals": false, "noUnusedParameters": false, From 2d161eb7a72e9fc8dd242792622bc377e4d47072 Mon Sep 17 00:00:00 2001 From: Sam Denty Date: Tue, 8 Apr 2025 19:06:14 +0100 Subject: [PATCH 0052/1307] fix(#5542): fix release action with moved test dir (#5610) --- pnpm-lock.yaml | 416 +++++++++++++++++++++++++++++++++++++++++++- pnpm-workspace.yaml | 2 +- 2 files changed, 410 insertions(+), 8 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index bdc0ede86f24..f7e4963b26c0 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1850,6 +1850,21 @@ importers: specifier: 3.23.8 version: 3.23.8 + packages/rsc/tests/e2e/next-server: + dependencies: + ai: + specifier: workspace:* + version: link:../../../../ai + next: + specifier: 
canary + version: 15.3.0-canary.43(@opentelemetry/api@1.9.0)(@playwright/test@1.50.1)(react-dom@19.0.0-rc.1(react@19.0.0-rc.1))(react@19.0.0-rc.1) + react: + specifier: rc + version: 19.0.0-rc.1 + react-dom: + specifier: rc + version: 19.0.0-rc.1(react@19.0.0-rc.1) + packages/svelte: dependencies: '@ai-sdk/provider-utils': @@ -2736,6 +2751,9 @@ packages: '@emnapi/runtime@1.2.0': resolution: {integrity: sha512-bV21/9LQmcQeCPEg3BDFtvwL6cwiTMksYNWQQ4KOxCZikEGalWtenoZ0wCiukJINlGCIi2KXx01g4FoH/LxpzQ==} + '@emnapi/runtime@1.4.0': + resolution: {integrity: sha512-64WYIf4UYcdLnbKn/umDlNjQDSS8AgZrI/R9+x5ilkUVFxXcA1Ebl+gQLc/6mERA4407Xof0R7wEyEuj091CVw==} + '@esbuild/aix-ppc64@0.19.12': resolution: {integrity: sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==} engines: {node: '>=12'} @@ -3860,105 +3878,215 @@ packages: cpu: [arm64] os: [darwin] + '@img/sharp-darwin-arm64@0.34.1': + resolution: {integrity: sha512-pn44xgBtgpEbZsu+lWf2KNb6OAf70X68k+yk69Ic2Xz11zHR/w24/U49XT7AeRwJ0Px+mhALhU5LPci1Aymk7A==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [darwin] + '@img/sharp-darwin-x64@0.33.5': resolution: {integrity: sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [darwin] + '@img/sharp-darwin-x64@0.34.1': + resolution: {integrity: sha512-VfuYgG2r8BpYiOUN+BfYeFo69nP/MIwAtSJ7/Zpxc5QF3KS22z8Pvg3FkrSFJBPNQ7mmcUcYQFBmEQp7eu1F8Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [darwin] + '@img/sharp-libvips-darwin-arm64@1.0.4': resolution: {integrity: sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==} cpu: [arm64] os: [darwin] + '@img/sharp-libvips-darwin-arm64@1.1.0': + resolution: {integrity: sha512-HZ/JUmPwrJSoM4DIQPv/BfNh9yrOA8tlBbqbLz4JZ5uew2+o22Ik+tHQJcih7QJuSa0zo5coHTfD5J8inqj9DA==} + cpu: [arm64] + os: [darwin] + '@img/sharp-libvips-darwin-x64@1.0.4': resolution: {integrity: sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==} cpu: [x64] os: [darwin] + '@img/sharp-libvips-darwin-x64@1.1.0': + resolution: {integrity: sha512-Xzc2ToEmHN+hfvsl9wja0RlnXEgpKNmftriQp6XzY/RaSfwD9th+MSh0WQKzUreLKKINb3afirxW7A0fz2YWuQ==} + cpu: [x64] + os: [darwin] + '@img/sharp-libvips-linux-arm64@1.0.4': resolution: {integrity: sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==} cpu: [arm64] os: [linux] + '@img/sharp-libvips-linux-arm64@1.1.0': + resolution: {integrity: sha512-IVfGJa7gjChDET1dK9SekxFFdflarnUB8PwW8aGwEoF3oAsSDuNUTYS+SKDOyOJxQyDC1aPFMuRYLoDInyV9Ew==} + cpu: [arm64] + os: [linux] + '@img/sharp-libvips-linux-arm@1.0.5': resolution: {integrity: sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==} cpu: [arm] os: [linux] + '@img/sharp-libvips-linux-arm@1.1.0': + resolution: {integrity: sha512-s8BAd0lwUIvYCJyRdFqvsj+BJIpDBSxs6ivrOPm/R7piTs5UIwY5OjXrP2bqXC9/moGsyRa37eYWYCOGVXxVrA==} + cpu: [arm] + os: [linux] + + '@img/sharp-libvips-linux-ppc64@1.1.0': + resolution: {integrity: sha512-tiXxFZFbhnkWE2LA8oQj7KYR+bWBkiV2nilRldT7bqoEZ4HiDOcePr9wVDAZPi/Id5fT1oY9iGnDq20cwUz8lQ==} + cpu: [ppc64] + os: [linux] + '@img/sharp-libvips-linux-s390x@1.0.4': resolution: {integrity: sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==} cpu: [s390x] os: [linux] + '@img/sharp-libvips-linux-s390x@1.1.0': + 
resolution: {integrity: sha512-xukSwvhguw7COyzvmjydRb3x/09+21HykyapcZchiCUkTThEQEOMtBj9UhkaBRLuBrgLFzQ2wbxdeCCJW/jgJA==} + cpu: [s390x] + os: [linux] + '@img/sharp-libvips-linux-x64@1.0.4': resolution: {integrity: sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==} cpu: [x64] os: [linux] + '@img/sharp-libvips-linux-x64@1.1.0': + resolution: {integrity: sha512-yRj2+reB8iMg9W5sULM3S74jVS7zqSzHG3Ol/twnAAkAhnGQnpjj6e4ayUz7V+FpKypwgs82xbRdYtchTTUB+Q==} + cpu: [x64] + os: [linux] + '@img/sharp-libvips-linuxmusl-arm64@1.0.4': resolution: {integrity: sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==} cpu: [arm64] os: [linux] + '@img/sharp-libvips-linuxmusl-arm64@1.1.0': + resolution: {integrity: sha512-jYZdG+whg0MDK+q2COKbYidaqW/WTz0cc1E+tMAusiDygrM4ypmSCjOJPmFTvHHJ8j/6cAGyeDWZOsK06tP33w==} + cpu: [arm64] + os: [linux] + '@img/sharp-libvips-linuxmusl-x64@1.0.4': resolution: {integrity: sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==} cpu: [x64] os: [linux] + '@img/sharp-libvips-linuxmusl-x64@1.1.0': + resolution: {integrity: sha512-wK7SBdwrAiycjXdkPnGCPLjYb9lD4l6Ze2gSdAGVZrEL05AOUJESWU2lhlC+Ffn5/G+VKuSm6zzbQSzFX/P65A==} + cpu: [x64] + os: [linux] + '@img/sharp-linux-arm64@0.33.5': resolution: {integrity: sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + '@img/sharp-linux-arm64@0.34.1': + resolution: {integrity: sha512-kX2c+vbvaXC6vly1RDf/IWNXxrlxLNpBVWkdpRq5Ka7OOKj6nr66etKy2IENf6FtOgklkg9ZdGpEu9kwdlcwOQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + '@img/sharp-linux-arm@0.33.5': resolution: {integrity: sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm] os: [linux] + '@img/sharp-linux-arm@0.34.1': + resolution: {integrity: sha512-anKiszvACti2sGy9CirTlNyk7BjjZPiML1jt2ZkTdcvpLU1YH6CXwRAZCA2UmRXnhiIftXQ7+Oh62Ji25W72jA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm] + os: [linux] + '@img/sharp-linux-s390x@0.33.5': resolution: {integrity: sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [s390x] os: [linux] + '@img/sharp-linux-s390x@0.34.1': + resolution: {integrity: sha512-7s0KX2tI9mZI2buRipKIw2X1ufdTeaRgwmRabt5bi9chYfhur+/C1OXg3TKg/eag1W+6CCWLVmSauV1owmRPxA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [s390x] + os: [linux] + '@img/sharp-linux-x64@0.33.5': resolution: {integrity: sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + '@img/sharp-linux-x64@0.34.1': + resolution: {integrity: sha512-wExv7SH9nmoBW3Wr2gvQopX1k8q2g5V5Iag8Zk6AVENsjwd+3adjwxtp3Dcu2QhOXr8W9NusBU6XcQUohBZ5MA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + '@img/sharp-linuxmusl-arm64@0.33.5': resolution: {integrity: sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + '@img/sharp-linuxmusl-arm64@0.34.1': + resolution: {integrity: sha512-DfvyxzHxw4WGdPiTF0SOHnm11Xv4aQexvqhRDAoD00MzHekAj9a/jADXeXYCDFH/DzYruwHbXU7uz+H+nWmSOQ==} + 
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + '@img/sharp-linuxmusl-x64@0.33.5': resolution: {integrity: sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + '@img/sharp-linuxmusl-x64@0.34.1': + resolution: {integrity: sha512-pax/kTR407vNb9qaSIiWVnQplPcGU8LRIJpDT5o8PdAx5aAA7AS3X9PS8Isw1/WfqgQorPotjrZL3Pqh6C5EBg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + '@img/sharp-wasm32@0.33.5': resolution: {integrity: sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [wasm32] + '@img/sharp-wasm32@0.34.1': + resolution: {integrity: sha512-YDybQnYrLQfEpzGOQe7OKcyLUCML4YOXl428gOOzBgN6Gw0rv8dpsJ7PqTHxBnXnwXr8S1mYFSLSa727tpz0xg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [wasm32] + '@img/sharp-win32-ia32@0.33.5': resolution: {integrity: sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [ia32] os: [win32] + '@img/sharp-win32-ia32@0.34.1': + resolution: {integrity: sha512-WKf/NAZITnonBf3U1LfdjoMgNO5JYRSlhovhRhMxXVdvWYveM4kM3L8m35onYIdh75cOMCo1BexgVQcCDzyoWw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ia32] + os: [win32] + '@img/sharp-win32-x64@0.33.5': resolution: {integrity: sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [win32] + '@img/sharp-win32-x64@0.34.1': + resolution: {integrity: sha512-hw1iIAHpNE8q3uMIRCgGOeDoz9KtFNarFLQclLxr/LK1VBkj8nby18RjFvr6aP7USRYAjTZW6yisnBWMX571Tw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [win32] + '@inquirer/confirm@5.0.2': resolution: {integrity: sha512-KJLUHOaKnNCYzwVbryj3TNBxyZIrr56fR5N45v6K9IPrbT6B7DcudBMfylkV1A8PUdJE15mybkEQyp2/ZUpxUA==} engines: {node: '>=18'} @@ -4856,6 +4984,9 @@ packages: '@next/env@15.2.2': resolution: {integrity: sha512-yWgopCfA9XDR8ZH3taB5nRKtKJ1Q5fYsTOuYkzIIoS8TJ0UAUKAGF73JnGszbjk2ufAQDj6mDdgsJAFx5CLtYQ==} + '@next/env@15.3.0-canary.43': + resolution: {integrity: sha512-ogryUGufxhtidxr/42NY6+ADlvxt9Hq6Q/DOMS/20vGuePZ70wO6Z5m1RP3Q397RGgpe1gSWyYdlg+Mt7H4KeQ==} + '@next/eslint-plugin-next@14.2.3': resolution: {integrity: sha512-L3oDricIIjgj1AVnRdRor21gI7mShlSwU/1ZGHmqM3LzHhXXhdkrfeNY5zif25Bi5Dd7fiJHsbhoZCHfXYvlAw==} @@ -4871,6 +5002,12 @@ packages: cpu: [arm64] os: [darwin] + '@next/swc-darwin-arm64@15.3.0-canary.43': + resolution: {integrity: sha512-eXrs8CyIBgdnMTNjku+h/xm61gmRCcKAT+tM2CjpEXbEqXBux5hRIakOk5kJJDu2fA2P3pzQGt5PRD1hg4srXA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + '@next/swc-darwin-x64@15.0.0-canary.23': resolution: {integrity: sha512-TRYY7ka1QRJlYer//aDz1dsLOLb+M7ODgYHA1DDtLLrovL3dbrD70mjWDiWdiP91EMpukGatNJaVpFCaAi0yuA==} engines: {node: '>= 10'} @@ -4883,6 +5020,12 @@ packages: cpu: [x64] os: [darwin] + '@next/swc-darwin-x64@15.3.0-canary.43': + resolution: {integrity: sha512-iRGvblEh/b2grxkkp9pT+yea9EzGNM4tLyUZoCzkejkU2jMLsn2DH6h3bQwCfEYZL3YFGsYmVISrVCOVi8LeMw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + '@next/swc-linux-arm64-gnu@15.0.0-canary.23': resolution: {integrity: sha512-VceywUaF8xg9zZ2PnElzMOI1CjmKFSV1EJ0O1V/OSnf8RPtD+yisXvaFSvMH1b1aZsb1wBhYFZSjVSDNFsQrzA==} engines: {node: '>= 10'} @@ -4895,6 +5038,12 @@ packages: cpu: [arm64] 
os: [linux] + '@next/swc-linux-arm64-gnu@15.3.0-canary.43': + resolution: {integrity: sha512-IqknHGNxpL03uIutIuv7FPjGHuD/AnJVC5exi5g+C7P3f6JVvOjFLS264eqi91tVCXhN2LpcKNGwTlK81bJVVg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + '@next/swc-linux-arm64-musl@15.0.0-canary.23': resolution: {integrity: sha512-vcy0jSSr7br9UhnRH0tfll40l22NmHBD/A0cjpTBXGH8qxH6FmdkGIy56sJO+OAvZzPX1ZYVy3nAoZlFeMrjyQ==} engines: {node: '>= 10'} @@ -4907,6 +5056,12 @@ packages: cpu: [arm64] os: [linux] + '@next/swc-linux-arm64-musl@15.3.0-canary.43': + resolution: {integrity: sha512-FbO0dnFsNe3f1LWTn4vyXTWTSrZdwNRnURYExSQ+0AINHphNfwKQNrPqVLrapQ9CAOCOz8R5p9Kf1++IsH3JJQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + '@next/swc-linux-x64-gnu@15.0.0-canary.23': resolution: {integrity: sha512-3CY4UcqOc3LPJ9iIXRN8/UHNZ96f5VKNaWWLq+EYqDOKRx5RBwedNSoaap/fx3+MHIw+lUpizWxHFx+amlMVZQ==} engines: {node: '>= 10'} @@ -4919,6 +5074,12 @@ packages: cpu: [x64] os: [linux] + '@next/swc-linux-x64-gnu@15.3.0-canary.43': + resolution: {integrity: sha512-MAaLEm8eO6Xir3YC3xLYDHDjLGogAAGRrxhuflvaqAtGQZ6NIMG4YjvAyramYTq/SwrUIDobggKxdQLtu8/pPQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + '@next/swc-linux-x64-musl@15.0.0-canary.23': resolution: {integrity: sha512-DkxO6p9DRs5xTBpQYUuTvaimB68/iwRifpFJQPEA4YTFS8W0G3MR1MZ/Phmz/C0SxMOFiiSraqQ2c12WfvFwQw==} engines: {node: '>= 10'} @@ -4931,6 +5092,12 @@ packages: cpu: [x64] os: [linux] + '@next/swc-linux-x64-musl@15.3.0-canary.43': + resolution: {integrity: sha512-gdwF79/EQjY3zgcolO0jlDe0yfII9tXyXQeqL+uvzA8gZT5FpH0KkwSWzxj8EUswWzZcprbDa87sq8H0Eo+whw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + '@next/swc-win32-arm64-msvc@15.0.0-canary.23': resolution: {integrity: sha512-FM2RSVvVOnOWriLZhsZUbXGdBQQErLE1BDObnPQ7T5AtISpjtFXIHt6Oh/bmJ5HmB4logtUGDJYT8kW94E2Wew==} engines: {node: '>= 10'} @@ -4943,6 +5110,12 @@ packages: cpu: [arm64] os: [win32] + '@next/swc-win32-arm64-msvc@15.3.0-canary.43': + resolution: {integrity: sha512-5WYne3jvo1478kUfe901wFxvPMdC8tRKundKIgU5Upe1HafMMS7ymm1hQ7CUpp3/1vY/R1TV1oKHHJfqDubiNg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + '@next/swc-win32-ia32-msvc@15.0.0-canary.23': resolution: {integrity: sha512-Rgok/7bZ2mA9/N3Qs42EgKiE/0kKj9Jv2EoZlttWXCEIhr6IuXatlbJ3sn76JHnvbc8jQieqq6l0M+nZZ0BzlQ==} engines: {node: '>= 10'} @@ -4961,6 +5134,12 @@ packages: cpu: [x64] os: [win32] + '@next/swc-win32-x64-msvc@15.3.0-canary.43': + resolution: {integrity: sha512-xE3WZhwjb91eezecVsmXn/OtdISfMsIfS3t0ZXsS/+bMvO/LZLdcVBtl0Zy5yR+XJyKfXXmwpdYbL6WH4dGuQg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + '@nodelib/fs.scandir@2.1.5': resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} engines: {node: '>= 8'} @@ -11646,6 +11825,27 @@ packages: sass: optional: true + next@15.3.0-canary.43: + resolution: {integrity: sha512-am6xpZIx2P0VJ26N7K2CImmznYUP65XS0e0nkYtypWf/RiMsScwmCqrA4qrEK9u/tiPlA+583IcQPos9yKLg1Q==} + engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} + hasBin: true + peerDependencies: + '@opentelemetry/api': ^1.1.0 + '@playwright/test': ^1.41.2 + babel-plugin-react-compiler: '*' + react: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + react-dom: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + sass: ^1.3.0 + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@playwright/test': + optional: true + babel-plugin-react-compiler: + optional: true + sass: + optional: true + nitropack@2.10.4: resolution: {integrity: 
sha512-sJiG/MIQlZCVSw2cQrFG1H6mLeSqHlYfFerRjLKz69vUfdu0EL2l0WdOxlQbzJr3mMv/l4cOlCCLzVRzjzzF/g==} engines: {node: ^16.11.0 || >=17.0.0} @@ -12682,6 +12882,11 @@ packages: peerDependencies: react: 19.0.0-rc-cc1ec60d0d-20240607 + react-dom@19.0.0-rc.1: + resolution: {integrity: sha512-k8MfDX+4G+eaa1cXXI9QF4d+pQtYol3nx8vauqRWUEOPqC7NQn2qmEqUsLoSd28rrZUL+R3T2VC+kZ2Hyx1geQ==} + peerDependencies: + react: 19.0.0-rc.1 + react-is@16.13.1: resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} @@ -12721,6 +12926,10 @@ packages: resolution: {integrity: sha512-q8A0/IdJ2wdHsjDNO1igFcSSFIMqSKmO7oJZtAjxIA9g0klK45Lxt15NQJ7z7cBvgD1r3xRTtQ/MAqnmwYHs1Q==} engines: {node: '>=0.10.0'} + react@19.0.0-rc.1: + resolution: {integrity: sha512-NZKln+uyPuyHchzP07I6GGYFxdAoaKhehgpCa3ltJGzwE31OYumLeshGaitA1R/fS5d9D2qpZVwTFAr6zCLM9w==} + engines: {node: '>=0.10.0'} + read-cache@1.0.0: resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} @@ -13017,6 +13226,9 @@ packages: scheduler@0.25.0-rc-cc1ec60d0d-20240607: resolution: {integrity: sha512-yFVKy6SDJkN2bOJSeH6gNo4+1MTygTZXnLRY5IHvEB6P9+O6WYRWz9PkELLjnl64lQwRgiigwzWQRSMNEboOGQ==} + scheduler@0.25.0-rc.1: + resolution: {integrity: sha512-fVinv2lXqYpKConAMdergOl5owd0rY1O4P/QTe0aWKCqGtu7VsCt1iqQFxSJtqK4Lci/upVSBpGwVC7eWcuS9Q==} + schema-utils@3.3.0: resolution: {integrity: sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==} engines: {node: '>= 10.13.0'} @@ -13043,6 +13255,11 @@ packages: engines: {node: '>=10'} hasBin: true + semver@7.7.1: + resolution: {integrity: sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==} + engines: {node: '>=10'} + hasBin: true + send@0.19.0: resolution: {integrity: sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==} engines: {node: '>= 0.8.0'} @@ -13097,6 +13314,10 @@ packages: resolution: {integrity: sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + sharp@0.34.1: + resolution: {integrity: sha512-1j0w61+eVxu7DawFJtnfYcvSv6qPFvfTaqzTQ2BLknVhHTwGS8sc63ZBF4rzkWMBVKybo4S5OBtDdZahh2A1xg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + shebang-command@2.0.0: resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} engines: {node: '>=8'} @@ -15922,6 +16143,11 @@ snapshots: tslib: 2.8.1 optional: true + '@emnapi/runtime@1.4.0': + dependencies: + tslib: 2.8.1 + optional: true + '@esbuild/aix-ppc64@0.19.12': optional: true @@ -16561,76 +16787,154 @@ snapshots: '@img/sharp-libvips-darwin-arm64': 1.0.4 optional: true + '@img/sharp-darwin-arm64@0.34.1': + optionalDependencies: + '@img/sharp-libvips-darwin-arm64': 1.1.0 + optional: true + '@img/sharp-darwin-x64@0.33.5': optionalDependencies: '@img/sharp-libvips-darwin-x64': 1.0.4 optional: true + '@img/sharp-darwin-x64@0.34.1': + optionalDependencies: + '@img/sharp-libvips-darwin-x64': 1.1.0 + optional: true + '@img/sharp-libvips-darwin-arm64@1.0.4': optional: true + '@img/sharp-libvips-darwin-arm64@1.1.0': + optional: true + '@img/sharp-libvips-darwin-x64@1.0.4': optional: true + '@img/sharp-libvips-darwin-x64@1.1.0': + optional: true + '@img/sharp-libvips-linux-arm64@1.0.4': optional: true + '@img/sharp-libvips-linux-arm64@1.1.0': + optional: true + 
'@img/sharp-libvips-linux-arm@1.0.5': optional: true + '@img/sharp-libvips-linux-arm@1.1.0': + optional: true + + '@img/sharp-libvips-linux-ppc64@1.1.0': + optional: true + '@img/sharp-libvips-linux-s390x@1.0.4': optional: true + '@img/sharp-libvips-linux-s390x@1.1.0': + optional: true + '@img/sharp-libvips-linux-x64@1.0.4': optional: true + '@img/sharp-libvips-linux-x64@1.1.0': + optional: true + '@img/sharp-libvips-linuxmusl-arm64@1.0.4': optional: true + '@img/sharp-libvips-linuxmusl-arm64@1.1.0': + optional: true + '@img/sharp-libvips-linuxmusl-x64@1.0.4': optional: true + '@img/sharp-libvips-linuxmusl-x64@1.1.0': + optional: true + '@img/sharp-linux-arm64@0.33.5': optionalDependencies: '@img/sharp-libvips-linux-arm64': 1.0.4 optional: true + '@img/sharp-linux-arm64@0.34.1': + optionalDependencies: + '@img/sharp-libvips-linux-arm64': 1.1.0 + optional: true + '@img/sharp-linux-arm@0.33.5': optionalDependencies: '@img/sharp-libvips-linux-arm': 1.0.5 optional: true + '@img/sharp-linux-arm@0.34.1': + optionalDependencies: + '@img/sharp-libvips-linux-arm': 1.1.0 + optional: true + '@img/sharp-linux-s390x@0.33.5': optionalDependencies: '@img/sharp-libvips-linux-s390x': 1.0.4 optional: true + '@img/sharp-linux-s390x@0.34.1': + optionalDependencies: + '@img/sharp-libvips-linux-s390x': 1.1.0 + optional: true + '@img/sharp-linux-x64@0.33.5': optionalDependencies: '@img/sharp-libvips-linux-x64': 1.0.4 optional: true + '@img/sharp-linux-x64@0.34.1': + optionalDependencies: + '@img/sharp-libvips-linux-x64': 1.1.0 + optional: true + '@img/sharp-linuxmusl-arm64@0.33.5': optionalDependencies: '@img/sharp-libvips-linuxmusl-arm64': 1.0.4 optional: true + '@img/sharp-linuxmusl-arm64@0.34.1': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-arm64': 1.1.0 + optional: true + '@img/sharp-linuxmusl-x64@0.33.5': optionalDependencies: '@img/sharp-libvips-linuxmusl-x64': 1.0.4 optional: true + '@img/sharp-linuxmusl-x64@0.34.1': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-x64': 1.1.0 + optional: true + '@img/sharp-wasm32@0.33.5': dependencies: '@emnapi/runtime': 1.2.0 optional: true + '@img/sharp-wasm32@0.34.1': + dependencies: + '@emnapi/runtime': 1.4.0 + optional: true + '@img/sharp-win32-ia32@0.33.5': optional: true + '@img/sharp-win32-ia32@0.34.1': + optional: true + '@img/sharp-win32-x64@0.33.5': optional: true + '@img/sharp-win32-x64@0.34.1': + optional: true + '@inquirer/confirm@5.0.2(@types/node@20.17.24)': dependencies: '@inquirer/core': 10.1.0(@types/node@20.17.24) @@ -17691,6 +17995,8 @@ snapshots: '@next/env@15.2.2': {} + '@next/env@15.3.0-canary.43': {} + '@next/eslint-plugin-next@14.2.3': dependencies: glob: 10.3.10 @@ -17701,42 +18007,63 @@ snapshots: '@next/swc-darwin-arm64@15.2.2': optional: true + '@next/swc-darwin-arm64@15.3.0-canary.43': + optional: true + '@next/swc-darwin-x64@15.0.0-canary.23': optional: true '@next/swc-darwin-x64@15.2.2': optional: true + '@next/swc-darwin-x64@15.3.0-canary.43': + optional: true + '@next/swc-linux-arm64-gnu@15.0.0-canary.23': optional: true '@next/swc-linux-arm64-gnu@15.2.2': optional: true + '@next/swc-linux-arm64-gnu@15.3.0-canary.43': + optional: true + '@next/swc-linux-arm64-musl@15.0.0-canary.23': optional: true '@next/swc-linux-arm64-musl@15.2.2': optional: true + '@next/swc-linux-arm64-musl@15.3.0-canary.43': + optional: true + '@next/swc-linux-x64-gnu@15.0.0-canary.23': optional: true '@next/swc-linux-x64-gnu@15.2.2': optional: true + '@next/swc-linux-x64-gnu@15.3.0-canary.43': + optional: true + 
'@next/swc-linux-x64-musl@15.0.0-canary.23': optional: true '@next/swc-linux-x64-musl@15.2.2': optional: true + '@next/swc-linux-x64-musl@15.3.0-canary.43': + optional: true + '@next/swc-win32-arm64-msvc@15.0.0-canary.23': optional: true '@next/swc-win32-arm64-msvc@15.2.2': optional: true + '@next/swc-win32-arm64-msvc@15.3.0-canary.43': + optional: true + '@next/swc-win32-ia32-msvc@15.0.0-canary.23': optional: true @@ -17746,6 +18073,9 @@ snapshots: '@next/swc-win32-x64-msvc@15.2.2': optional: true + '@next/swc-win32-x64-msvc@15.3.0-canary.43': + optional: true + '@nodelib/fs.scandir@2.1.5': dependencies: '@nodelib/fs.stat': 2.0.5 @@ -23235,7 +23565,7 @@ snapshots: eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) eslint-plugin-jsx-a11y: 6.9.0(eslint@8.57.1) eslint-plugin-react: 7.35.0(eslint@8.57.1) eslint-plugin-react-hooks: 4.6.2(eslint@8.57.1) @@ -23310,8 +23640,8 @@ snapshots: debug: 4.4.0(supports-color@9.4.0) enhanced-resolve: 5.17.1 eslint: 8.57.1 - eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) fast-glob: 3.3.2 get-tsconfig: 4.7.2 is-core-module: 2.13.1 @@ -23350,7 +23680,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): + eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: @@ -23383,7 +23713,7 @@ snapshots: transitivePeerDependencies: - supports-color - 
eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): + eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: @@ -23432,7 +23762,7 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-plugin-import@2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): + eslint-plugin-import@2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1): dependencies: array-includes: 3.1.8 array.prototype.findlastindex: 1.2.5 @@ -23442,7 +23772,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) hasown: 2.0.2 is-core-module: 2.15.0 is-glob: 4.0.3 @@ -26706,6 +27036,33 @@ snapshots: - '@babel/core' - babel-plugin-macros + next@15.3.0-canary.43(@opentelemetry/api@1.9.0)(@playwright/test@1.50.1)(react-dom@19.0.0-rc.1(react@19.0.0-rc.1))(react@19.0.0-rc.1): + dependencies: + '@next/env': 15.3.0-canary.43 + '@swc/counter': 0.1.3 + '@swc/helpers': 0.5.15 + busboy: 1.6.0 + caniuse-lite: 1.0.30001666 + postcss: 8.4.31 + react: 19.0.0-rc.1 + react-dom: 19.0.0-rc.1(react@19.0.0-rc.1) + styled-jsx: 5.1.6(react@19.0.0-rc.1) + optionalDependencies: + '@next/swc-darwin-arm64': 15.3.0-canary.43 + '@next/swc-darwin-x64': 15.3.0-canary.43 + '@next/swc-linux-arm64-gnu': 15.3.0-canary.43 + '@next/swc-linux-arm64-musl': 15.3.0-canary.43 + '@next/swc-linux-x64-gnu': 15.3.0-canary.43 + '@next/swc-linux-x64-musl': 15.3.0-canary.43 + '@next/swc-win32-arm64-msvc': 15.3.0-canary.43 + '@next/swc-win32-x64-msvc': 15.3.0-canary.43 + '@opentelemetry/api': 1.9.0 + '@playwright/test': 1.50.1 + sharp: 0.34.1 + transitivePeerDependencies: + - '@babel/core' + - babel-plugin-macros + nitropack@2.10.4(@upstash/redis@1.34.3)(typescript@5.8.3): dependencies: '@cloudflare/kv-asset-handler': 0.3.4 @@ -27938,6 +28295,11 @@ snapshots: react: 19.0.0-rc-cc1ec60d0d-20240607 scheduler: 0.25.0-rc-cc1ec60d0d-20240607 + react-dom@19.0.0-rc.1(react@19.0.0-rc.1): + dependencies: + react: 19.0.0-rc.1 + scheduler: 0.25.0-rc.1 + react-is@16.13.1: {} react-is@17.0.2: {} @@ -27981,6 +28343,8 @@ snapshots: react@19.0.0-rc-cc1ec60d0d-20240607: {} + react@19.0.0-rc.1: {} + read-cache@1.0.0: dependencies: pify: 2.3.0 @@ -28399,6 +28763,8 @@ snapshots: scheduler@0.25.0-rc-cc1ec60d0d-20240607: {} + scheduler@0.25.0-rc.1: {} + 
schema-utils@3.3.0: dependencies: '@types/json-schema': 7.0.15 @@ -28417,6 +28783,9 @@ snapshots: semver@7.6.3: {} + semver@7.7.1: + optional: true + send@0.19.0: dependencies: debug: 2.6.9 @@ -28538,6 +28907,34 @@ snapshots: '@img/sharp-win32-ia32': 0.33.5 '@img/sharp-win32-x64': 0.33.5 + sharp@0.34.1: + dependencies: + color: 4.2.3 + detect-libc: 2.0.3 + semver: 7.7.1 + optionalDependencies: + '@img/sharp-darwin-arm64': 0.34.1 + '@img/sharp-darwin-x64': 0.34.1 + '@img/sharp-libvips-darwin-arm64': 1.1.0 + '@img/sharp-libvips-darwin-x64': 1.1.0 + '@img/sharp-libvips-linux-arm': 1.1.0 + '@img/sharp-libvips-linux-arm64': 1.1.0 + '@img/sharp-libvips-linux-ppc64': 1.1.0 + '@img/sharp-libvips-linux-s390x': 1.1.0 + '@img/sharp-libvips-linux-x64': 1.1.0 + '@img/sharp-libvips-linuxmusl-arm64': 1.1.0 + '@img/sharp-libvips-linuxmusl-x64': 1.1.0 + '@img/sharp-linux-arm': 0.34.1 + '@img/sharp-linux-arm64': 0.34.1 + '@img/sharp-linux-s390x': 0.34.1 + '@img/sharp-linux-x64': 0.34.1 + '@img/sharp-linuxmusl-arm64': 0.34.1 + '@img/sharp-linuxmusl-x64': 0.34.1 + '@img/sharp-wasm32': 0.34.1 + '@img/sharp-win32-ia32': 0.34.1 + '@img/sharp-win32-x64': 0.34.1 + optional: true + shebang-command@2.0.0: dependencies: shebang-regex: 3.0.0 @@ -28828,6 +29225,11 @@ snapshots: client-only: 0.0.1 react: 19.0.0-rc-cc1ec60d0d-20240607 + styled-jsx@5.1.6(react@19.0.0-rc.1): + dependencies: + client-only: 0.0.1 + react: 19.0.0-rc.1 + stylehacks@7.0.4(postcss@8.4.49): dependencies: browserslist: 4.24.0 diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index b06605522dea..0f8617056061 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -3,4 +3,4 @@ packages: - 'packages/*' - 'tools/*' - 'examples/*' - - 'packages/ai/tests/e2e/next-server' + - 'packages/rsc/tests/e2e/next-server' From 61ab528fbca7af382015f48e6f061dea1e20da36 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Wed, 9 Apr 2025 10:40:27 +0200 Subject: [PATCH 0053/1307] feat (provider/anthropic): add support for URL-based PDF documents in the Anthropic provider (#5619) Co-authored-by: Anish K Srinivasan --- .changeset/smart-swans-drive.md | 5 +++ .../01-ai-sdk-providers/05-anthropic.mdx | 28 +++++++++++++ .../src/generate-text/anthropic-pdf-url.ts | 31 ++++++++++++++ packages/anthropic/src/anthropic-api-types.ts | 28 ++++++------- .../src/anthropic-messages-language-model.ts | 4 ++ ...nvert-to-anthropic-messages-prompt.test.ts | 42 ++++++++++++++++++- .../convert-to-anthropic-messages-prompt.ts | 23 +++++----- 7 files changed, 133 insertions(+), 28 deletions(-) create mode 100644 .changeset/smart-swans-drive.md create mode 100644 examples/ai-core/src/generate-text/anthropic-pdf-url.ts diff --git a/.changeset/smart-swans-drive.md b/.changeset/smart-swans-drive.md new file mode 100644 index 000000000000..ec8649ef9131 --- /dev/null +++ b/.changeset/smart-swans-drive.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +Add support for URL-based PDF documents in the Anthropic provider diff --git a/content/providers/01-ai-sdk-providers/05-anthropic.mdx b/content/providers/01-ai-sdk-providers/05-anthropic.mdx index 93b2580446a6..e2e0768fb4e6 100644 --- a/content/providers/01-ai-sdk-providers/05-anthropic.mdx +++ b/content/providers/01-ai-sdk-providers/05-anthropic.mdx @@ -344,6 +344,34 @@ These tools can be used in conjunction with the `claude-3-5-sonnet-20240620` model Anthropic Sonnet `claude-3-5-sonnet-20241022` supports reading PDF files.
You can pass PDF files as part of the message content using the `file` type: +Option 1: URL-based PDF document + +```ts +const result = await generateText({ + model: anthropic('claude-3-5-sonnet-20241022'), + messages: [ + { + role: 'user', + content: [ + { + type: 'text', + text: 'What is an embedding model according to this document?', + }, + { + type: 'file', + data: new URL( + 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/ai.pdf?raw=true', + ), + mediaType: 'application/pdf', + }, + ], + }, + ], +}); +``` + +Option 2: Base64-encoded PDF document + ```ts const result = await generateText({ model: anthropic('claude-3-5-sonnet-20241022'), diff --git a/examples/ai-core/src/generate-text/anthropic-pdf-url.ts b/examples/ai-core/src/generate-text/anthropic-pdf-url.ts new file mode 100644 index 000000000000..ecb9d670e59b --- /dev/null +++ b/examples/ai-core/src/generate-text/anthropic-pdf-url.ts @@ -0,0 +1,31 @@ +import { anthropic } from '@ai-sdk/anthropic'; +import { generateText } from 'ai'; +import 'dotenv/config'; + +async function main() { + const result = await generateText({ + model: anthropic('claude-3-5-sonnet-20241022'), + messages: [ + { + role: 'user', + content: [ + { + type: 'text', + text: 'What is an embedding model according to this document?', + }, + { + type: 'file', + data: new URL( + 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/ai.pdf?raw=true', + ), + mediaType: 'application/pdf', + }, + ], + }, + ], + }); + + console.log(result.text); +} + +main().catch(console.error); diff --git a/packages/anthropic/src/anthropic-api-types.ts b/packages/anthropic/src/anthropic-api-types.ts index 205df3859b1c..0e2e0c67ac5c 100644 --- a/packages/anthropic/src/anthropic-api-types.ts +++ b/packages/anthropic/src/anthropic-api-types.ts @@ -48,28 +48,26 @@ export interface AnthropicRedactedThinkingContent { cache_control: AnthropicCacheControl | undefined; } +type AnthropicContentSource = + | { + type: 'base64'; + media_type: string; + data: string; + } + | { + type: 'url'; + url: string; + }; + export interface AnthropicImageContent { type: 'image'; - source: - | { - type: 'base64'; - media_type: string; - data: string; - } - | { - type: 'url'; - url: string; - }; + source: AnthropicContentSource; cache_control: AnthropicCacheControl | undefined; } export interface AnthropicDocumentContent { type: 'document'; - source: { - type: 'base64'; - media_type: 'application/pdf'; - data: string; - }; + source: AnthropicContentSource; cache_control: AnthropicCacheControl | undefined; } diff --git a/packages/anthropic/src/anthropic-messages-language-model.ts b/packages/anthropic/src/anthropic-messages-language-model.ts index e506f98e903b..22ad916aff91 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.ts +++ b/packages/anthropic/src/anthropic-messages-language-model.ts @@ -57,6 +57,10 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV2 { this.config = config; } + supportsUrl(url: URL): boolean { + return url.protocol === 'https:'; + } + get provider(): string { return this.config.provider; } diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts index 0f017ffb72f7..5108704c34d9 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts @@ -123,7 +123,7 @@ describe('user messages', () => { }); }); - it('should add PDF file parts', async () =>
{ + it('should add PDF file parts for base64 PDFs', async () => { const result = convertToAnthropicMessagesPrompt({ prompt: [ { @@ -164,6 +164,46 @@ describe('user messages', () => { }); }); + it('should add PDF file parts for URL PDFs', async () => { + const result = convertToAnthropicMessagesPrompt({ + prompt: [ + { + role: 'user', + content: [ + { + type: 'file', + data: new URL('https://example.com/document.pdf'), + mediaType: 'application/pdf', + }, + ], + }, + ], + sendReasoning: true, + warnings: [], + }); + + expect(result).toEqual({ + prompt: { + messages: [ + { + role: 'user', + content: [ + { + type: 'document', + source: { + type: 'url', + url: 'https://example.com/document.pdf', + }, + }, + ], + }, + ], + system: undefined, + }, + betas: new Set(['pdfs-2024-09-25']), + }); + }); + it('should throw error for non-PDF file types', async () => { expect(() => convertToAnthropicMessagesPrompt({ diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts index 5109bd49bbb5..1bafd457940b 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts @@ -120,22 +120,21 @@ export function convertToAnthropicMessagesPrompt({ cache_control: cacheControl, }); } else if (part.mediaType === 'application/pdf') { - if (part.data instanceof URL) { - // The AI SDK automatically downloads files for user file parts with URLs - throw new UnsupportedFunctionalityError({ - functionality: 'PDF File URLs in user messages', - }); - } - betas.add('pdfs-2024-09-25'); anthropicContent.push({ type: 'document', - source: { - type: 'base64', - media_type: 'application/pdf', - data: part.data, - }, + source: + part.data instanceof URL + ? 
{ + type: 'url', + url: part.data.toString(), + } + : { + type: 'base64', + media_type: 'application/pdf', + data: part.data, + }, cache_control: cacheControl, }); } else { From 59f8222ed91534eb4251765b80973e5128b237f6 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Wed, 9 Apr 2025 11:11:24 +0200 Subject: [PATCH 0054/1307] fix (ci): test move (#5620) --- .github/scripts/cleanup-examples-changesets.mjs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/scripts/cleanup-examples-changesets.mjs b/.github/scripts/cleanup-examples-changesets.mjs index f728fb35a78e..0d132eefc6f5 100644 --- a/.github/scripts/cleanup-examples-changesets.mjs +++ b/.github/scripts/cleanup-examples-changesets.mjs @@ -46,5 +46,5 @@ for (const app of readdirSync(fileURLToPath(examplesUrl))) { // next test server cleanup( '.', - new URL('../../packages/ai/tests/e2e/next-server', import.meta.url), + new URL('../../packages/rsc/tests/e2e/next-server', import.meta.url), ); From d874386fd57b155b06ce9614dcf143b622f7497c Mon Sep 17 00:00:00 2001 From: Nico Albanese <49612682+nicoalbanese@users.noreply.github.com> Date: Wed, 9 Apr 2025 10:17:02 +0100 Subject: [PATCH 0055/1307] docs: update llms txt header and remove outdated troubleshooting pages (#5621) Co-authored-by: Nico Albanese --- content/docs/01-introduction/index.mdx | 2 +- ...ing-not-working-on-vercel-pages-router.mdx | 43 ------------------- .../06-streaming-not-working-on-vercel.mdx | 40 ----------------- 3 files changed, 1 insertion(+), 84 deletions(-) delete mode 100644 content/docs/09-troubleshooting/06-streaming-not-working-on-vercel-pages-router.mdx delete mode 100644 content/docs/09-troubleshooting/06-streaming-not-working-on-vercel.mdx diff --git a/content/docs/01-introduction/index.mdx b/content/docs/01-introduction/index.mdx index b53909739d58..952fb2260ffa 100644 --- a/content/docs/01-introduction/index.mdx +++ b/content/docs/01-introduction/index.mdx @@ -48,7 +48,7 @@ We've built some [templates](https://vercel.com/templates?type=ai) that include If you have questions about anything related to the AI SDK, you're always welcome to ask our community on [GitHub Discussions](https://github.com/vercel/ai/discussions). -## `llms.txt` +## `llms.txt` (for Cursor, Windsurf, Copilot, Claude etc.) You can access the entire AI SDK documentation in Markdown format at [sdk.vercel.ai/llms.txt](/llms.txt). This can be used to ask any LLM (assuming it has a big enough context window) questions about the AI SDK based on the most up-to-date documentation. diff --git a/content/docs/09-troubleshooting/06-streaming-not-working-on-vercel-pages-router.mdx b/content/docs/09-troubleshooting/06-streaming-not-working-on-vercel-pages-router.mdx deleted file mode 100644 index 5425aae848c7..000000000000 --- a/content/docs/09-troubleshooting/06-streaming-not-working-on-vercel-pages-router.mdx +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Streaming Not Working When Deploying on Vercel (Next.js Pages Router) -description: Troubleshooting streaming issues when deploying to Vercel with the Next.js Pages Router. ---- - -# Streaming Not Working When Deploying on Vercel (Next.js Pages Router) - -## Issue - -I'm using the Next.js Pages Router. Streaming with the AI SDK works in my local development environment. -However, when deploying to Vercel, streaming does not work in the deployed app. -Instead of streaming, only the full response is returned after a while. - -## Cause - -The Next.js Pages Router currently does not support streaming with its own routes. 
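
As an illustrative aside on the Anthropic change in #5619 above: a PDF `file` part is converted into an `AnthropicDocumentContent` whose `source` is one of the two members of the `AnthropicContentSource` union. The sketch below shows the two resulting shapes, assuming the types from `anthropic-api-types.ts`; the import path, URL, and base64 payload here are hypothetical, not taken from the patches.

```ts
import type { AnthropicDocumentContent } from './anthropic-api-types';

// URL-based PDF part: the URL is passed through to the Anthropic API
// (supportsUrl accepts https: URLs), so the API fetches the document itself.
const urlDocument: AnthropicDocumentContent = {
  type: 'document',
  source: { type: 'url', url: 'https://example.com/document.pdf' }, // hypothetical URL
  cache_control: undefined,
};

// Base64-encoded PDF part: the document bytes are inlined into the request.
const base64Document: AnthropicDocumentContent = {
  type: 'document',
  source: {
    type: 'base64',
    media_type: 'application/pdf',
    data: 'JVBERi0xLjcK', // hypothetical truncated base64 PDF payload
  },
  cache_control: undefined,
};
```

In both cases the converter also adds the `pdfs-2024-09-25` beta flag, as the tests above verify.
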
- -## Solution - -With Next.js 13+, you can mix and match App Router and Pages Router routes in the same project. -You need to use App Router routes for streaming with the AI SDK. - -Example App Router route: - -```tsx filename="app/api/chat/route.ts" -import { openai } from '@ai-sdk/openai'; -import { StreamingTextResponse, streamText } from 'ai'; - -// Force the route to be dynamic and allow streaming responses up to 30 seconds -export const dynamic = 'force-dynamic'; -export const maxDuration = 30; - -export async function POST(req: Request) { - const { messages } = await req.json(); - - const result = streamText({ - model: openai('gpt-4-turbo'), - messages, - }); - - return result.toDataStreamResponse(); -} -``` diff --git a/content/docs/09-troubleshooting/06-streaming-not-working-on-vercel.mdx b/content/docs/09-troubleshooting/06-streaming-not-working-on-vercel.mdx deleted file mode 100644 index 28ca0f9888a8..000000000000 --- a/content/docs/09-troubleshooting/06-streaming-not-working-on-vercel.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Streaming Not Working When Deploying on Vercel (Next.js App Router) -description: Troubleshooting streaming issues when deploying to Vercel with the Next.js App Router. ---- - -# Streaming Not Working When Deploying on Vercel (Next.js App Router) - -## Issue - -I'm using the Next.js App Router. Streaming with the AI SDK works in my local development environment. -However, when deploying to Vercel, streaming does not work in the deployed app. -Instead of streaming, only the full response is returned after a while. - -## Cause - -The route was inferred as a static route and has been compiled as such. - -## Solution - -You need to explicitly enforce dynamic behavior for the route. - -Add the following to your route file: - -```tsx -export const dynamic = 'force-dynamic'; -``` - -This will enforce dynamic behavior, which is required for streaming. - -> **Note:** When deploying on Vercel you may also need the following workaround: - -```tsx -import { unstable_noStore as noStore } from 'next/cache'; - -export default async function Component() { - noStore(); - const result = await generateText({...}) - ... 
-} -``` From 9f95b35d0f23b3ff326416736a10fea586603250 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Wed, 9 Apr 2025 11:40:14 +0200 Subject: [PATCH 0056/1307] refactor (provider-utils): copy relevant code from `secure-json-parse` into codebase (#5622) Co-authored-by: Gregor Martynus <39992+gr2m@users.noreply.github.com> --- .changeset/late-foxes-battle.md | 5 ++ packages/provider-utils/package.json | 3 +- packages/provider-utils/src/parse-json.ts | 8 +- .../src/secure-json-parse.test.ts | 59 +++++++++++++ .../provider-utils/src/secure-json-parse.ts | 86 +++++++++++++++++++ pnpm-lock.yaml | 3 - 6 files changed, 155 insertions(+), 9 deletions(-) create mode 100644 .changeset/late-foxes-battle.md create mode 100644 packages/provider-utils/src/secure-json-parse.test.ts create mode 100644 packages/provider-utils/src/secure-json-parse.ts diff --git a/.changeset/late-foxes-battle.md b/.changeset/late-foxes-battle.md new file mode 100644 index 000000000000..21d56afcb38b --- /dev/null +++ b/.changeset/late-foxes-battle.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': patch +--- + +refactor (provider-utils): copy relevant code from `secure-json-parse` into codebase diff --git a/packages/provider-utils/package.json b/packages/provider-utils/package.json index 53fbd2dcb2c8..493b198ee21b 100644 --- a/packages/provider-utils/package.json +++ b/packages/provider-utils/package.json @@ -37,8 +37,7 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "secure-json-parse": "^2.7.0" + "@ai-sdk/provider": "2.0.0-canary.1" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/provider-utils/src/parse-json.ts b/packages/provider-utils/src/parse-json.ts index 8fb2035bb095..477ce79ca4e4 100644 --- a/packages/provider-utils/src/parse-json.ts +++ b/packages/provider-utils/src/parse-json.ts @@ -3,7 +3,7 @@ import { JSONValue, TypeValidationError, } from '@ai-sdk/provider'; -import SecureJSON from 'secure-json-parse'; +import { secureJsonParse } from './secure-json-parse'; import { ZodSchema } from 'zod'; import { safeValidateTypes, validateTypes } from './validate-types'; import { Validator } from './validator'; @@ -38,7 +38,7 @@ export function parseJSON({ schema?: ZodSchema | Validator; }): T { try { - const value = SecureJSON.parse(text); + const value = secureJsonParse(text); if (schema == null) { return value; @@ -91,7 +91,7 @@ export function safeParseJSON({ schema?: ZodSchema | Validator; }): ParseResult { try { - const value = SecureJSON.parse(text); + const value = secureJsonParse(text); if (schema == null) { return { success: true, value: value as T, rawValue: value }; @@ -114,7 +114,7 @@ export function safeParseJSON({ export function isParsableJson(input: string): boolean { try { - SecureJSON.parse(input); + secureJsonParse(input); return true; } catch { return false; diff --git a/packages/provider-utils/src/secure-json-parse.test.ts b/packages/provider-utils/src/secure-json-parse.test.ts new file mode 100644 index 000000000000..a69dde9ab2fa --- /dev/null +++ b/packages/provider-utils/src/secure-json-parse.test.ts @@ -0,0 +1,59 @@ +// Licensed under BSD-3-Clause (this file only) +// Code adapted from https://github.com/fastify/secure-json-parse/blob/783fcb1b5434709466759847cec974381939673a/test/index.test.js +// +// Copyright (c) Vercel, Inc. (https://vercel.com) +// Copyright (c) 2019 The Fastify Team +// Copyright (c) 2019, Sideway Inc, and project contributors +// All rights reserved. 
+// +// The complete list of contributors can be found at: +// - https://github.com/hapijs/bourne/graphs/contributors +// - https://github.com/fastify/secure-json-parse/graphs/contributors +// - https://github.com/vercel/ai/commits/main/packages/provider-utils/src/secure-json-parse.test.ts +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import { describe, it, expect } from 'vitest'; +import { secureJsonParse } from './secure-json-parse'; + +describe('secureJsonParse', () => { + it('parses object string', () => { + expect(secureJsonParse('{"a": 5, "b": 6}')).toStrictEqual( + JSON.parse('{"a": 5, "b": 6}'), + ); + }); + + it('parses null string', () => { + expect(secureJsonParse('null')).toStrictEqual(JSON.parse('null')); + }); + + it('parses 0 string', () => { + expect(secureJsonParse('0')).toStrictEqual(JSON.parse('0')); + }); + + it('parses string string', () => { + expect(secureJsonParse('"X"')).toStrictEqual(JSON.parse('"X"')); + }); + + it('errors on constructor property', () => { + const text = + '{ "a": 5, "b": 6, "constructor": { "x": 7 }, "c": { "d": 0, "e": "text", "__proto__": { "y": 8 }, "f": { "g": 2 } } }'; + + expect(() => secureJsonParse(text)).toThrow(SyntaxError); + }); + + it('errors on proto property', () => { + const text = + '{ "a": 5, "b": 6, "__proto__": { "x": 7 }, "c": { "d": 0, "e": "text", "__proto__": { "y": 8 }, "f": { "g": 2 } } }'; + + expect(() => secureJsonParse(text)).toThrow(SyntaxError); + }); +}); diff --git a/packages/provider-utils/src/secure-json-parse.ts b/packages/provider-utils/src/secure-json-parse.ts new file mode 100644 index 000000000000..04917bc503ed --- /dev/null +++ b/packages/provider-utils/src/secure-json-parse.ts @@ -0,0 +1,86 @@ +// Licensed under BSD-3-Clause (this file only) +// Code adapted from https://github.com/fastify/secure-json-parse/blob/783fcb1b5434709466759847cec974381939673a/index.js +// +// Copyright (c) Vercel, Inc. (https://vercel.com) +// Copyright (c) 2019 The Fastify Team +// Copyright (c) 2019, Sideway Inc, and project contributors +// All rights reserved.
+// +// The complete list of contributors can be found at: +// - https://github.com/hapijs/bourne/graphs/contributors +// - https://github.com/fastify/secure-json-parse/graphs/contributors +// - https://github.com/vercel/ai/commits/main/packages/provider-utils/src/secure-json-parse.ts +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +const suspectProtoRx = /"__proto__"\s*:/; +const suspectConstructorRx = /"constructor"\s*:/; + +function _parse(text: string) { + // Parse normally + const obj = JSON.parse(text); + + // Ignore null and non-objects + if (obj === null || typeof obj !== 'object') { + return obj; + } + + if ( + suspectProtoRx.test(text) === false && + suspectConstructorRx.test(text) === false + ) { + return obj; + } + + // Scan result for proto keys + return filter(obj); +} + +function filter(obj: any) { + let next = [obj]; + + while (next.length) { + const nodes = next; + next = []; + + for (const node of nodes) { + if (Object.prototype.hasOwnProperty.call(node, '__proto__')) { + throw new SyntaxError('Object contains forbidden prototype property'); + } + + if ( + Object.prototype.hasOwnProperty.call(node, 'constructor') && + Object.prototype.hasOwnProperty.call(node.constructor, 'prototype') + ) { + throw new SyntaxError('Object contains forbidden prototype property'); + } + + for (const key in node) { + const value = node[key]; + if (value && typeof value === 'object') { + next.push(value); + } + } + } + } + return obj; +} + +export function secureJsonParse(text: string) { + // Performance optimization, see https://github.com/fastify/secure-json-parse/pull/90 + const { stackTraceLimit } = Error; + Error.stackTraceLimit = 0; + try { + return _parse(text); + } finally { + Error.stackTraceLimit = stackTraceLimit; + } +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f7e4963b26c0..24cd3635155c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1668,9 +1668,6 @@ importers: '@ai-sdk/provider': specifier: 2.0.0-canary.1 version: link:../provider - secure-json-parse: - specifier: ^2.7.0 - version: 2.7.0 devDependencies: '@types/node': specifier: 20.17.24 version: From
19b432f13af46cb916648be68c2c160cb559c8b2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 11:44:54 +0200 Subject: [PATCH 0057/1307] Version Packages (canary) (#5591) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .changeset/pre.json | 12 +- examples/ai-core/package.json | 46 +-- examples/express/package.json | 4 +- examples/fastify/package.json | 4 +- examples/hono/package.json | 4 +- examples/mcp/package.json | 4 +- examples/nest/package.json | 4 +- examples/next-fastapi/package.json | 6 +- examples/next-google-vertex/package.json | 4 +- examples/next-langchain/package.json | 4 +- .../package.json | 6 +- examples/next-openai-pages/package.json | 6 +- .../next-openai-telemetry-sentry/package.json | 6 +- examples/next-openai-telemetry/package.json | 6 +- .../package.json | 6 +- examples/next-openai/package.json | 22 +- examples/node-http-server/package.json | 4 +- examples/nuxt-openai/package.json | 6 +- examples/sveltekit-openai/package.json | 10 +- packages/ai/CHANGELOG.md | 20 ++ packages/ai/package.json | 8 +- packages/amazon-bedrock/CHANGELOG.md | 12 + packages/amazon-bedrock/package.json | 6 +- packages/anthropic/CHANGELOG.md | 13 + packages/anthropic/package.json | 6 +- packages/azure/CHANGELOG.md | 14 + packages/azure/package.json | 8 +- packages/cerebras/CHANGELOG.md | 13 + packages/cerebras/package.json | 8 +- packages/cohere/CHANGELOG.md | 12 + packages/cohere/package.json | 6 +- packages/deepinfra/CHANGELOG.md | 13 + packages/deepinfra/package.json | 8 +- packages/deepseek/CHANGELOG.md | 13 + packages/deepseek/package.json | 8 +- packages/fal/CHANGELOG.md | 12 + packages/fal/package.json | 6 +- packages/fireworks/CHANGELOG.md | 13 + packages/fireworks/package.json | 8 +- packages/google-vertex/CHANGELOG.md | 15 + packages/google-vertex/package.json | 10 +- packages/google/CHANGELOG.md | 12 + packages/google/package.json | 6 +- packages/groq/CHANGELOG.md | 12 + packages/groq/package.json | 6 +- packages/luma/CHANGELOG.md | 12 + packages/luma/package.json | 6 +- packages/mistral/CHANGELOG.md | 12 + packages/mistral/package.json | 6 +- packages/openai-compatible/CHANGELOG.md | 12 + packages/openai-compatible/package.json | 6 +- packages/openai/CHANGELOG.md | 15 + packages/openai/package.json | 6 +- packages/perplexity/CHANGELOG.md | 12 + packages/perplexity/package.json | 6 +- packages/provider-utils/CHANGELOG.md | 12 + packages/provider-utils/package.json | 4 +- packages/provider/CHANGELOG.md | 12 + packages/provider/package.json | 2 +- packages/react/CHANGELOG.md | 9 + packages/react/package.json | 6 +- packages/replicate/CHANGELOG.md | 12 + packages/replicate/package.json | 6 +- packages/rsc/CHANGELOG.md | 20 ++ packages/rsc/package.json | 8 +- .../rsc/tests/e2e/next-server/CHANGELOG.md | 10 + packages/svelte/CHANGELOG.md | 9 + packages/svelte/package.json | 6 +- packages/togetherai/CHANGELOG.md | 13 + packages/togetherai/package.json | 8 +- packages/ui-utils/CHANGELOG.md | 12 + packages/ui-utils/package.json | 6 +- packages/valibot/CHANGELOG.md | 10 + packages/valibot/package.json | 4 +- packages/vue/CHANGELOG.md | 9 + packages/vue/package.json | 6 +- packages/xai/CHANGELOG.md | 13 + packages/xai/package.json | 8 +- pnpm-lock.yaml | 282 +++++++++--------- 79 files changed, 700 insertions(+), 312 deletions(-) diff --git a/.changeset/pre.json b/.changeset/pre.json index 9ab0a367ea93..532c03a0c762 100644 --- a/.changeset/pre.json +++ 
b/.changeset/pre.json @@ -53,23 +53,33 @@ "eslint-config-vercel-ai": "0.0.0", "@vercel/ai-tsconfig": "0.0.0", "analyze-downloads": "0.0.0", - "generate-llms-txt": "0.0.0" + "generate-llms-txt": "0.0.0", + "@ai-sdk/rsc": "1.0.0-canary.1" }, "changesets": [ + "afraid-moles-cross", "angry-poems-learn", "beige-ligers-kneel", "beige-penguins-greet", "clean-numbers-cover", "cuddly-icons-kick", "eleven-lobsters-rescue", + "fix-env-mutation", "flat-plums-bake", "green-deers-scream", + "happy-kangaroos-roll", "huge-cloths-burn", + "hungry-pets-hear", + "late-foxes-battle", + "nasty-spiders-sparkle", "pink-deers-switch", + "selfish-rice-own", "seven-pens-itch", "silent-nails-taste", + "smart-swans-drive", "smooth-mirrors-kneel", "tall-rice-flash", + "tender-buses-glow", "thick-chairs-remain", "thin-numbers-shave", "twelve-kids-travel", diff --git a/examples/ai-core/package.json b/examples/ai-core/package.json index f4c59a38ae8d..566e137a9945 100644 --- a/examples/ai-core/package.json +++ b/examples/ai-core/package.json @@ -3,33 +3,33 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/amazon-bedrock": "3.0.0-canary.2", - "@ai-sdk/anthropic": "2.0.0-canary.2", - "@ai-sdk/azure": "2.0.0-canary.3", - "@ai-sdk/cerebras": "1.0.0-canary.2", - "@ai-sdk/cohere": "2.0.0-canary.2", - "@ai-sdk/deepinfra": "1.0.0-canary.2", - "@ai-sdk/deepseek": "1.0.0-canary.2", - "@ai-sdk/fal": "1.0.0-canary.2", - "@ai-sdk/fireworks": "1.0.0-canary.2", - "@ai-sdk/google": "2.0.0-canary.3", - "@ai-sdk/google-vertex": "3.0.0-canary.3", - "@ai-sdk/groq": "2.0.0-canary.3", - "@ai-sdk/luma": "1.0.0-canary.2", - "@ai-sdk/mistral": "2.0.0-canary.2", - "@ai-sdk/openai": "2.0.0-canary.2", - "@ai-sdk/openai-compatible": "1.0.0-canary.2", - "@ai-sdk/perplexity": "2.0.0-canary.2", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/replicate": "1.0.0-canary.2", - "@ai-sdk/togetherai": "1.0.0-canary.2", - "@ai-sdk/xai": "2.0.0-canary.2", - "@ai-sdk/valibot": "1.0.0-canary.3", + "@ai-sdk/amazon-bedrock": "3.0.0-canary.3", + "@ai-sdk/anthropic": "2.0.0-canary.3", + "@ai-sdk/azure": "2.0.0-canary.4", + "@ai-sdk/cerebras": "1.0.0-canary.3", + "@ai-sdk/cohere": "2.0.0-canary.3", + "@ai-sdk/deepinfra": "1.0.0-canary.3", + "@ai-sdk/deepseek": "1.0.0-canary.3", + "@ai-sdk/fal": "1.0.0-canary.3", + "@ai-sdk/fireworks": "1.0.0-canary.3", + "@ai-sdk/google": "2.0.0-canary.4", + "@ai-sdk/google-vertex": "3.0.0-canary.4", + "@ai-sdk/groq": "2.0.0-canary.4", + "@ai-sdk/luma": "1.0.0-canary.3", + "@ai-sdk/mistral": "2.0.0-canary.3", + "@ai-sdk/openai": "2.0.0-canary.3", + "@ai-sdk/openai-compatible": "1.0.0-canary.3", + "@ai-sdk/perplexity": "2.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/replicate": "1.0.0-canary.3", + "@ai-sdk/togetherai": "1.0.0-canary.3", + "@ai-sdk/xai": "2.0.0-canary.3", + "@ai-sdk/valibot": "1.0.0-canary.4", "@google/generative-ai": "0.21.0", "@opentelemetry/auto-instrumentations-node": "0.54.0", "@opentelemetry/sdk-node": "0.54.2", "@opentelemetry/sdk-trace-node": "1.28.0", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "dotenv": "16.4.5", "image-type": "^5.2.0", "mathjs": "14.0.0", diff --git a/examples/express/package.json b/examples/express/package.json index 48fef040055e..7b704236fdac 100644 --- a/examples/express/package.json +++ b/examples/express/package.json @@ -7,8 +7,8 @@ "type-check": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "ai": "5.0.0-canary.3", + "@ai-sdk/openai": "2.0.0-canary.3", + "ai": "5.0.0-canary.4", "dotenv": "16.4.5", "express": 
"5.0.1" }, diff --git a/examples/fastify/package.json b/examples/fastify/package.json index 8f06ce8bcb8f..01be6087731a 100644 --- a/examples/fastify/package.json +++ b/examples/fastify/package.json @@ -3,8 +3,8 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "ai": "5.0.0-canary.3", + "@ai-sdk/openai": "2.0.0-canary.3", + "ai": "5.0.0-canary.4", "dotenv": "16.4.5", "fastify": "5.1.0" }, diff --git a/examples/hono/package.json b/examples/hono/package.json index a1356a16e83d..befbc6776a94 100644 --- a/examples/hono/package.json +++ b/examples/hono/package.json @@ -3,9 +3,9 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.3", "@hono/node-server": "1.13.7", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "dotenv": "16.4.5", "hono": "4.6.9" }, diff --git a/examples/mcp/package.json b/examples/mcp/package.json index 941219e543ed..bbe1ccfd177c 100644 --- a/examples/mcp/package.json +++ b/examples/mcp/package.json @@ -12,9 +12,9 @@ "type-check": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.3", "@modelcontextprotocol/sdk": "^1.7.0", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "dotenv": "16.4.5", "express": "5.0.1", "zod": "3.23.8" diff --git a/examples/nest/package.json b/examples/nest/package.json index 858178b94529..5f154b3f4dfb 100644 --- a/examples/nest/package.json +++ b/examples/nest/package.json @@ -15,11 +15,11 @@ "lint": "eslint \"{src,apps,libs,test}/**/*.ts\" --fix" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.3", "@nestjs/common": "^10.4.15", "@nestjs/core": "^10.0.0", "@nestjs/platform-express": "^10.4.9", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "reflect-metadata": "^0.2.0", "rxjs": "^7.8.1" }, diff --git a/examples/next-fastapi/package.json b/examples/next-fastapi/package.json index fa74f66ab028..9bd664fb1b06 100644 --- a/examples/next-fastapi/package.json +++ b/examples/next-fastapi/package.json @@ -11,9 +11,9 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/ui-utils": "2.0.0-canary.2", - "@ai-sdk/react": "2.0.0-canary.2", - "ai": "5.0.0-canary.3", + "@ai-sdk/ui-utils": "2.0.0-canary.3", + "@ai-sdk/react": "2.0.0-canary.3", + "ai": "5.0.0-canary.4", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-google-vertex/package.json b/examples/next-google-vertex/package.json index 945c0c039c60..7fc931f1fbe3 100644 --- a/examples/next-google-vertex/package.json +++ b/examples/next-google-vertex/package.json @@ -9,8 +9,8 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/google-vertex": "3.0.0-canary.3", - "ai": "5.0.0-canary.3", + "@ai-sdk/google-vertex": "3.0.0-canary.4", + "ai": "5.0.0-canary.4", "geist": "^1.3.1", "next": "latest", "react": "^18", diff --git a/examples/next-langchain/package.json b/examples/next-langchain/package.json index 9bb911adb485..4ab045b51be6 100644 --- a/examples/next-langchain/package.json +++ b/examples/next-langchain/package.json @@ -9,10 +9,10 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/react": "2.0.0-canary.2", + "@ai-sdk/react": "2.0.0-canary.3", "@langchain/openai": "0.0.28", "@langchain/core": "0.1.63", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "langchain": "0.1.36", "next": "latest", "react": "^18", diff --git a/examples/next-openai-kasada-bot-protection/package.json b/examples/next-openai-kasada-bot-protection/package.json index 
35cfa435e1ff..66e06936cd5c 100644 --- a/examples/next-openai-kasada-bot-protection/package.json +++ b/examples/next-openai-kasada-bot-protection/package.json @@ -9,10 +9,10 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "@ai-sdk/react": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.3", + "@ai-sdk/react": "2.0.0-canary.3", "@vercel/functions": "latest", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai-pages/package.json b/examples/next-openai-pages/package.json index deb0b001b3eb..437c19261d0e 100644 --- a/examples/next-openai-pages/package.json +++ b/examples/next-openai-pages/package.json @@ -9,9 +9,9 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "@ai-sdk/react": "2.0.0-canary.2", - "ai": "5.0.0-canary.3", + "@ai-sdk/openai": "2.0.0-canary.3", + "@ai-sdk/react": "2.0.0-canary.3", + "ai": "5.0.0-canary.4", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry-sentry/package.json b/examples/next-openai-telemetry-sentry/package.json index e6f92690c929..ec55ebf2ce53 100644 --- a/examples/next-openai-telemetry-sentry/package.json +++ b/examples/next-openai-telemetry-sentry/package.json @@ -9,15 +9,15 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "@ai-sdk/react": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.3", + "@ai-sdk/react": "2.0.0-canary.3", "@opentelemetry/api-logs": "0.55.0", "@opentelemetry/instrumentation": "0.52.1", "@opentelemetry/sdk-logs": "0.55.0", "@sentry/nextjs": "^8.42.0", "@sentry/opentelemetry": "8.22.0", "@vercel/otel": "1.10.0", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-telemetry/package.json b/examples/next-openai-telemetry/package.json index 5a90d8d61553..a9e69f97da17 100644 --- a/examples/next-openai-telemetry/package.json +++ b/examples/next-openai-telemetry/package.json @@ -9,13 +9,13 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "@ai-sdk/react": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.3", + "@ai-sdk/react": "2.0.0-canary.3", "@opentelemetry/api-logs": "0.55.0", "@opentelemetry/sdk-logs": "0.55.0", "@opentelemetry/instrumentation": "0.52.1", "@vercel/otel": "1.10.0", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/next-openai-upstash-rate-limits/package.json b/examples/next-openai-upstash-rate-limits/package.json index 4fbe2a3583f1..db8272777a8e 100644 --- a/examples/next-openai-upstash-rate-limits/package.json +++ b/examples/next-openai-upstash-rate-limits/package.json @@ -9,11 +9,11 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "@ai-sdk/react": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.3", + "@ai-sdk/react": "2.0.0-canary.3", "@upstash/ratelimit": "^0.4.3", "@vercel/kv": "^0.2.2", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "next": "latest", "react": "^18", "react-dom": "^18", diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json index 5feded50c027..6292d1abee6b 100644 --- a/examples/next-openai/package.json +++ b/examples/next-openai/package.json @@ -9,18 +9,18 @@ "lint": "next lint" }, "dependencies": { - "@ai-sdk/anthropic": "2.0.0-canary.2", - "@ai-sdk/deepseek": "1.0.0-canary.2", - 
"@ai-sdk/fireworks": "1.0.0-canary.2", - "@ai-sdk/openai": "2.0.0-canary.2", - "@ai-sdk/google": "2.0.0-canary.3", - "@ai-sdk/google-vertex": "3.0.0-canary.3", - "@ai-sdk/perplexity": "2.0.0-canary.2", - "@ai-sdk/ui-utils": "2.0.0-canary.2", - "@ai-sdk/react": "2.0.0-canary.2", - "@ai-sdk/rsc": "1.0.0-canary.1", + "@ai-sdk/anthropic": "2.0.0-canary.3", + "@ai-sdk/deepseek": "1.0.0-canary.3", + "@ai-sdk/fireworks": "1.0.0-canary.3", + "@ai-sdk/openai": "2.0.0-canary.3", + "@ai-sdk/google": "2.0.0-canary.4", + "@ai-sdk/google-vertex": "3.0.0-canary.4", + "@ai-sdk/perplexity": "2.0.0-canary.3", + "@ai-sdk/ui-utils": "2.0.0-canary.3", + "@ai-sdk/react": "2.0.0-canary.3", + "@ai-sdk/rsc": "1.0.0-canary.2", "@vercel/blob": "^0.26.0", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "next": "latest", "openai": "4.52.6", "react": "^18", diff --git a/examples/node-http-server/package.json b/examples/node-http-server/package.json index dc1059c2b35b..54bc72f58692 100644 --- a/examples/node-http-server/package.json +++ b/examples/node-http-server/package.json @@ -3,8 +3,8 @@ "version": "0.0.0", "private": true, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "ai": "5.0.0-canary.3", + "@ai-sdk/openai": "2.0.0-canary.3", + "ai": "5.0.0-canary.4", "dotenv": "16.4.5", "zod": "3.23.8", "zod-to-json-schema": "3.23.5" diff --git a/examples/nuxt-openai/package.json b/examples/nuxt-openai/package.json index ebfca76cd68c..97df84724bba 100644 --- a/examples/nuxt-openai/package.json +++ b/examples/nuxt-openai/package.json @@ -9,9 +9,9 @@ "postinstall": "nuxt prepare" }, "dependencies": { - "@ai-sdk/vue": "2.0.0-canary.2", - "@ai-sdk/openai": "2.0.0-canary.2", - "ai": "5.0.0-canary.3", + "@ai-sdk/vue": "2.0.0-canary.3", + "@ai-sdk/openai": "2.0.0-canary.3", + "ai": "5.0.0-canary.4", "zod": "3.23.8" }, "devDependencies": { diff --git a/examples/sveltekit-openai/package.json b/examples/sveltekit-openai/package.json index ea52dc39a68a..0c07734d8d4b 100644 --- a/examples/sveltekit-openai/package.json +++ b/examples/sveltekit-openai/package.json @@ -16,16 +16,16 @@ }, "type": "module", "devDependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "@ai-sdk/provider-utils": "3.0.0-canary.2", - "@ai-sdk/svelte": "3.0.0-canary.2", - "@ai-sdk/ui-utils": "2.0.0-canary.2", + "@ai-sdk/openai": "2.0.0-canary.3", + "@ai-sdk/provider-utils": "3.0.0-canary.3", + "@ai-sdk/svelte": "3.0.0-canary.3", + "@ai-sdk/ui-utils": "2.0.0-canary.3", "@eslint/compat": "^1.2.5", "@eslint/js": "^9.18.0", "@sveltejs/adapter-vercel": "^5.5.2", "@sveltejs/kit": "^2.16.0", "@sveltejs/vite-plugin-svelte": "^5.0.0", - "ai": "5.0.0-canary.3", + "ai": "5.0.0-canary.4", "autoprefixer": "^10.4.20", "bits-ui": "^1.3.9", "clsx": "^2.1.1", diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index 5a2b43e3eb6b..be9583ccc7d7 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -1,5 +1,25 @@ # ai +## 5.0.0-canary.4 + +### Major Changes + +- e1cbf8a: chore(@ai-sdk/rsc): extract to separate package + +### Patch Changes + +- 225f087: fix (ai/mcp): prevent mutation of customEnv +- a166433: feat: add transcription with experimental_transcribe +- 0a87932: core (ai): change transcription model mimeType to mediaType +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + - @ai-sdk/ui-utils@2.0.0-canary.3 + ## 5.0.0-canary.3 ### Patch 
Changes diff --git a/packages/ai/package.json b/packages/ai/package.json index 96a428635187..3a67a636dd74 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -1,6 +1,6 @@ { "name": "ai", - "version": "5.0.0-canary.3", + "version": "5.0.0-canary.4", "description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript", "license": "Apache-2.0", "sideEffects": false, @@ -55,9 +55,9 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2", - "@ai-sdk/ui-utils": "2.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3", + "@ai-sdk/ui-utils": "2.0.0-canary.3", "@opentelemetry/api": "1.9.0", "jsondiffpatch": "0.6.0" }, diff --git a/packages/amazon-bedrock/CHANGELOG.md b/packages/amazon-bedrock/CHANGELOG.md index 0c3189462f18..8b0e6789613b 100644 --- a/packages/amazon-bedrock/CHANGELOG.md +++ b/packages/amazon-bedrock/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/amazon-bedrock +## 3.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 3.0.0-canary.2 ### Patch Changes diff --git a/packages/amazon-bedrock/package.json b/packages/amazon-bedrock/package.json index a9b496e4fb56..002b3f74f0a1 100644 --- a/packages/amazon-bedrock/package.json +++ b/packages/amazon-bedrock/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/amazon-bedrock", - "version": "3.0.0-canary.2", + "version": "3.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3", "@smithy/eventstream-codec": "^4.0.1", "@smithy/util-utf8": "^4.0.0", "aws4fetch": "^1.0.20" diff --git a/packages/anthropic/CHANGELOG.md b/packages/anthropic/CHANGELOG.md index 2b1ed92d9c57..08bfcb6af6d6 100644 --- a/packages/anthropic/CHANGELOG.md +++ b/packages/anthropic/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/anthropic +## 2.0.0-canary.3 + +### Patch Changes + +- 61ab528: Add support for URL-based PDF documents in the Anthropic provider +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/anthropic/package.json b/packages/anthropic/package.json index 199f541d4b3b..d51febeb2890 100644 --- a/packages/anthropic/package.json +++ b/packages/anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/anthropic", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,8 +37,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/azure/CHANGELOG.md b/packages/azure/CHANGELOG.md index cddca167afe6..e696a14beed2 100644 --- a/packages/azure/CHANGELOG.md +++ b/packages/azure/CHANGELOG.md @@ -1,5 +1,19 @@ # @ai-sdk/azure +## 2.0.0-canary.4 + 
+### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] +- Updated dependencies [0a87932] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + - @ai-sdk/openai@2.0.0-canary.3 + ## 2.0.0-canary.3 ### Patch Changes diff --git a/packages/azure/package.json b/packages/azure/package.json index bcaaa7f5e6c1..8780147b7209 100644 --- a/packages/azure/package.json +++ b/packages/azure/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/azure", - "version": "2.0.0-canary.3", + "version": "2.0.0-canary.4", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,9 +31,9 @@ } }, "dependencies": { - "@ai-sdk/openai": "2.0.0-canary.2", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/openai": "2.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/cerebras/CHANGELOG.md b/packages/cerebras/CHANGELOG.md index 9136348e4c5a..56219b8c4634 100644 --- a/packages/cerebras/CHANGELOG.md +++ b/packages/cerebras/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/cerebras +## 1.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + - @ai-sdk/openai-compatible@1.0.0-canary.3 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/cerebras/package.json b/packages/cerebras/package.json index 23d07aebae92..3d5d42c0617b 100644 --- a/packages/cerebras/package.json +++ b/packages/cerebras/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/cerebras", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.2", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/openai-compatible": "1.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/cohere/CHANGELOG.md b/packages/cohere/CHANGELOG.md index b95c3dcc4396..7a29eb2cb6da 100644 --- a/packages/cohere/CHANGELOG.md +++ b/packages/cohere/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/cohere +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/cohere/package.json b/packages/cohere/package.json index ddebede1e18c..4f8d0b6d055a 100644 --- a/packages/cohere/package.json +++ b/packages/cohere/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/cohere", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { 
"@types/node": "20.17.24", diff --git a/packages/deepinfra/CHANGELOG.md b/packages/deepinfra/CHANGELOG.md index 27ad85010af8..19e3335004e7 100644 --- a/packages/deepinfra/CHANGELOG.md +++ b/packages/deepinfra/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/deepinfra +## 1.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + - @ai-sdk/openai-compatible@1.0.0-canary.3 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/deepinfra/package.json b/packages/deepinfra/package.json index 82c5562e5cd3..b2fb3323132f 100644 --- a/packages/deepinfra/package.json +++ b/packages/deepinfra/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/deepinfra", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.2", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/openai-compatible": "1.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/deepseek/CHANGELOG.md b/packages/deepseek/CHANGELOG.md index 6ec3ad950894..1fcdfe3d1f62 100644 --- a/packages/deepseek/CHANGELOG.md +++ b/packages/deepseek/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/deepseek +## 1.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + - @ai-sdk/openai-compatible@1.0.0-canary.3 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/deepseek/package.json b/packages/deepseek/package.json index 284c9c8573b8..da8d59851763 100644 --- a/packages/deepseek/package.json +++ b/packages/deepseek/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/deepseek", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.2", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/openai-compatible": "1.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/fal/CHANGELOG.md b/packages/fal/CHANGELOG.md index 5631b24c2b15..25c359306f51 100644 --- a/packages/fal/CHANGELOG.md +++ b/packages/fal/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/fal +## 1.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/fal/package.json b/packages/fal/package.json index 69f8f46b0fe2..09904a5d9ce5 100644 --- a/packages/fal/package.json +++ b/packages/fal/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/fal", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, 
"main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/fireworks/CHANGELOG.md b/packages/fireworks/CHANGELOG.md index a8528ebe09db..866404f2e867 100644 --- a/packages/fireworks/CHANGELOG.md +++ b/packages/fireworks/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/fireworks +## 1.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + - @ai-sdk/openai-compatible@1.0.0-canary.3 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/fireworks/package.json b/packages/fireworks/package.json index 4ca5c76e47b8..fba370399738 100644 --- a/packages/fireworks/package.json +++ b/packages/fireworks/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/fireworks", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.2", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/openai-compatible": "1.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/google-vertex/CHANGELOG.md b/packages/google-vertex/CHANGELOG.md index 01a571f6834f..4e7dbd52cd9a 100644 --- a/packages/google-vertex/CHANGELOG.md +++ b/packages/google-vertex/CHANGELOG.md @@ -1,5 +1,20 @@ # @ai-sdk/google-vertex +## 3.0.0-canary.4 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] +- Updated dependencies [61ab528] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + - @ai-sdk/anthropic@2.0.0-canary.3 + - @ai-sdk/google@2.0.0-canary.4 + ## 3.0.0-canary.3 ### Patch Changes diff --git a/packages/google-vertex/package.json b/packages/google-vertex/package.json index 268c001d7bbf..213733393714 100644 --- a/packages/google-vertex/package.json +++ b/packages/google-vertex/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google-vertex", - "version": "3.0.0-canary.3", + "version": "3.0.0-canary.4", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -49,10 +49,10 @@ } }, "dependencies": { - "@ai-sdk/anthropic": "2.0.0-canary.2", - "@ai-sdk/google": "2.0.0-canary.3", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2", + "@ai-sdk/anthropic": "2.0.0-canary.3", + "@ai-sdk/google": "2.0.0-canary.4", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3", "google-auth-library": "^9.15.0" }, "devDependencies": { diff --git a/packages/google/CHANGELOG.md b/packages/google/CHANGELOG.md index 902bbf056b3b..20f7f402c4ef 100644 --- a/packages/google/CHANGELOG.md +++ b/packages/google/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/google +## 2.0.0-canary.4 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated 
dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 2.0.0-canary.3 ### Patch Changes diff --git a/packages/google/package.json b/packages/google/package.json index 443f0eb97e5c..d1dff37712b6 100644 --- a/packages/google/package.json +++ b/packages/google/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google", - "version": "2.0.0-canary.3", + "version": "2.0.0-canary.4", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,8 +38,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/groq/CHANGELOG.md b/packages/groq/CHANGELOG.md index efc8dc247844..5949afebd8a1 100644 --- a/packages/groq/CHANGELOG.md +++ b/packages/groq/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/groq +## 2.0.0-canary.4 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 2.0.0-canary.3 ### Patch Changes diff --git a/packages/groq/package.json b/packages/groq/package.json index d8dc0148190d..3eac89ab010e 100644 --- a/packages/groq/package.json +++ b/packages/groq/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/groq", - "version": "2.0.0-canary.3", + "version": "2.0.0-canary.4", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/luma/CHANGELOG.md b/packages/luma/CHANGELOG.md index 70a3f5c2df27..386d89141380 100644 --- a/packages/luma/CHANGELOG.md +++ b/packages/luma/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/luma +## 1.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/luma/package.json b/packages/luma/package.json index 7392b8209004..8369a06c2d6c 100644 --- a/packages/luma/package.json +++ b/packages/luma/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/luma", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/mistral/CHANGELOG.md b/packages/mistral/CHANGELOG.md index 023782c0a67c..534e3cdb50d6 100644 --- a/packages/mistral/CHANGELOG.md +++ b/packages/mistral/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/mistral +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - 
@ai-sdk/provider@2.0.0-canary.2 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/mistral/package.json b/packages/mistral/package.json index 85a2726d6cf2..57586933f260 100644 --- a/packages/mistral/package.json +++ b/packages/mistral/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/mistral", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/openai-compatible/CHANGELOG.md b/packages/openai-compatible/CHANGELOG.md index b911e6fc791b..32a690ad474f 100644 --- a/packages/openai-compatible/CHANGELOG.md +++ b/packages/openai-compatible/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/openai-compatible +## 1.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/openai-compatible/package.json b/packages/openai-compatible/package.json index 8d412f348986..e9a7744cf569 100644 --- a/packages/openai-compatible/package.json +++ b/packages/openai-compatible/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai-compatible", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,8 +38,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/openai/CHANGELOG.md b/packages/openai/CHANGELOG.md index 5954dacabeb0..2d82800e3daf 100644 --- a/packages/openai/CHANGELOG.md +++ b/packages/openai/CHANGELOG.md @@ -1,5 +1,20 @@ # @ai-sdk/openai +## 2.0.0-canary.3 + +### Patch Changes + +- a166433: feat: add transcription with experimental_transcribe +- 0a87932: core (ai): change transcription model mimeType to mediaType +- 0a87932: fix (provider/openai): increase transcription model resilience +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/openai/package.json b/packages/openai/package.json index 1b08d27f63a0..454f9d4e6809 100644 --- a/packages/openai/package.json +++ b/packages/openai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -38,8 +38,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/perplexity/CHANGELOG.md b/packages/perplexity/CHANGELOG.md index d21a577dc0e6..25f6a45d9545 100644 --- a/packages/perplexity/CHANGELOG.md +++ 
b/packages/perplexity/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/perplexity +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/perplexity/package.json b/packages/perplexity/package.json index d19e58753996..660c0b8111c2 100644 --- a/packages/perplexity/package.json +++ b/packages/perplexity/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/perplexity", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/provider-utils/CHANGELOG.md b/packages/provider-utils/CHANGELOG.md index 4759f5e441ab..5a9a1dcfacd3 100644 --- a/packages/provider-utils/CHANGELOG.md +++ b/packages/provider-utils/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/provider-utils +## 3.0.0-canary.3 + +### Patch Changes + +- a166433: feat: add transcription with experimental_transcribe +- 9f95b35: refactor (provider-utils): copy relevant code from `secure-json-parse` into codebase +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider@2.0.0-canary.2 + ## 3.0.0-canary.2 ### Patch Changes diff --git a/packages/provider-utils/package.json b/packages/provider-utils/package.json index 493b198ee21b..186ac57c28c1 100644 --- a/packages/provider-utils/package.json +++ b/packages/provider-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/provider-utils", - "version": "3.0.0-canary.2", + "version": "3.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,7 +37,7 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1" + "@ai-sdk/provider": "2.0.0-canary.2" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/provider/CHANGELOG.md b/packages/provider/CHANGELOG.md index 1644ce63e9ab..51a8d85c2e6d 100644 --- a/packages/provider/CHANGELOG.md +++ b/packages/provider/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/provider +## 2.0.0-canary.2 + +### Major Changes + +- abf9a79: chore: rename mimeType to mediaType +- 6dc848c: chore (provider): remove image parts + +### Patch Changes + +- a166433: feat: add transcription with experimental_transcribe +- 0a87932: core (ai): change transcription model mimeType to mediaType + ## 2.0.0-canary.1 ### Major Changes diff --git a/packages/provider/package.json b/packages/provider/package.json index e305411faca3..b4690ac1cabd 100644 --- a/packages/provider/package.json +++ b/packages/provider/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/provider", - "version": "2.0.0-canary.1", + "version": "2.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", diff --git a/packages/react/CHANGELOG.md b/packages/react/CHANGELOG.md index 93beba822479..568d4e136f93 100644 --- a/packages/react/CHANGELOG.md +++ b/packages/react/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/react +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [9f95b35] + - 
@ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/ui-utils@2.0.0-canary.3 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/react/package.json b/packages/react/package.json index 77e390f77e7d..581915f14723 100644 --- a/packages/react/package.json +++ b/packages/react/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/react", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "3.0.0-canary.2", - "@ai-sdk/ui-utils": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3", + "@ai-sdk/ui-utils": "2.0.0-canary.3", "swr": "^2.2.5", "throttleit": "2.1.0" }, diff --git a/packages/replicate/CHANGELOG.md b/packages/replicate/CHANGELOG.md index 63661c7dbf22..1349fe8b6690 100644 --- a/packages/replicate/CHANGELOG.md +++ b/packages/replicate/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/replicate +## 1.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/replicate/package.json b/packages/replicate/package.json index 47af0b8ea9a5..6d97f4c3a16b 100644 --- a/packages/replicate/package.json +++ b/packages/replicate/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/replicate", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/rsc/CHANGELOG.md b/packages/rsc/CHANGELOG.md index e69de29bb2d1..44ee35f6f832 100644 --- a/packages/rsc/CHANGELOG.md +++ b/packages/rsc/CHANGELOG.md @@ -0,0 +1,20 @@ +# @ai-sdk/rsc + +## 1.0.0-canary.2 + +### Major Changes + +- e1cbf8a: chore(@ai-sdk/rsc): extract to separate package + +### Patch Changes + +- Updated dependencies [e1cbf8a] +- Updated dependencies [225f087] +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - ai@5.0.0-canary.4 + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 diff --git a/packages/rsc/package.json b/packages/rsc/package.json index 75df8e7e513b..5f5bbebdc403 100644 --- a/packages/rsc/package.json +++ b/packages/rsc/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/rsc", - "version": "1.0.0-canary.1", + "version": "1.0.0-canary.2", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/rsc-client.mjs", @@ -38,9 +38,9 @@ "CHANGELOG.md" ], "dependencies": { - "ai": "5.0.0-canary.3", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2", + "ai": "5.0.0-canary.4", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3", "jsondiffpatch": "0.6.0" }, "devDependencies": { diff --git a/packages/rsc/tests/e2e/next-server/CHANGELOG.md b/packages/rsc/tests/e2e/next-server/CHANGELOG.md index 66a4fc665081..294ec78febc0 100644 --- a/packages/rsc/tests/e2e/next-server/CHANGELOG.md +++ b/packages/rsc/tests/e2e/next-server/CHANGELOG.md @@ -4,6 
+4,16 @@ ### Patch Changes +- Updated dependencies [e1cbf8a] +- Updated dependencies [225f087] +- Updated dependencies [a166433] +- Updated dependencies [0a87932] + - ai@5.0.0-canary.4 + +## 0.0.1-canary.0 + +### Patch Changes + - ai@5.0.0-canary.3 ## 0.0.1-canary.0 diff --git a/packages/svelte/CHANGELOG.md b/packages/svelte/CHANGELOG.md index 6577d86f0cbf..58ec995e17d9 100644 --- a/packages/svelte/CHANGELOG.md +++ b/packages/svelte/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/svelte +## 3.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [9f95b35] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/ui-utils@2.0.0-canary.3 + ## 3.0.0-canary.2 ### Patch Changes diff --git a/packages/svelte/package.json b/packages/svelte/package.json index 262cd6666678..2f3a17220f19 100644 --- a/packages/svelte/package.json +++ b/packages/svelte/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/svelte", - "version": "3.0.0-canary.2", + "version": "3.0.0-canary.3", "license": "Apache-2.0", "scripts": { "build": "pnpm prepack", @@ -51,8 +51,8 @@ } }, "dependencies": { - "@ai-sdk/provider-utils": "3.0.0-canary.2", - "@ai-sdk/ui-utils": "2.0.0-canary.2" + "@ai-sdk/provider-utils": "3.0.0-canary.3", + "@ai-sdk/ui-utils": "2.0.0-canary.3" }, "devDependencies": { "@eslint/compat": "^1.2.5", diff --git a/packages/togetherai/CHANGELOG.md b/packages/togetherai/CHANGELOG.md index 741563541183..6dbb5022b916 100644 --- a/packages/togetherai/CHANGELOG.md +++ b/packages/togetherai/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/togetherai +## 1.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + - @ai-sdk/openai-compatible@1.0.0-canary.3 + ## 1.0.0-canary.2 ### Patch Changes diff --git a/packages/togetherai/package.json b/packages/togetherai/package.json index ff3b81dc508b..ffe07c8c853a 100644 --- a/packages/togetherai/package.json +++ b/packages/togetherai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/togetherai", - "version": "1.0.0-canary.2", + "version": "1.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.2", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/openai-compatible": "1.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/ui-utils/CHANGELOG.md b/packages/ui-utils/CHANGELOG.md index 050c9c4da5e5..f9fd305e005f 100644 --- a/packages/ui-utils/CHANGELOG.md +++ b/packages/ui-utils/CHANGELOG.md @@ -1,5 +1,17 @@ # @ai-sdk/ui-utils +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/ui-utils/package.json b/packages/ui-utils/package.json index 9f3aa26e484e..9f96c4278edf 100644 --- a/packages/ui-utils/package.json +++ b/packages/ui-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/ui-utils", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": 
"Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -31,8 +31,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2", + "@ai-sdk/provider": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3", "zod-to-json-schema": "^3.24.1" }, "devDependencies": { diff --git a/packages/valibot/CHANGELOG.md b/packages/valibot/CHANGELOG.md index ebf897a183e1..a22ed3bbed1e 100644 --- a/packages/valibot/CHANGELOG.md +++ b/packages/valibot/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/valibot +## 1.0.0-canary.4 + +### Patch Changes + +- Updated dependencies [e1cbf8a] +- Updated dependencies [225f087] +- Updated dependencies [a166433] +- Updated dependencies [0a87932] + - ai@5.0.0-canary.4 + ## 1.0.0-canary.3 ### Patch Changes diff --git a/packages/valibot/package.json b/packages/valibot/package.json index 3ab7957a42b3..6a9e148fd891 100644 --- a/packages/valibot/package.json +++ b/packages/valibot/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/valibot", - "version": "1.0.0-canary.3", + "version": "1.0.0-canary.4", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -27,7 +27,7 @@ } }, "dependencies": { - "ai": "5.0.0-canary.3" + "ai": "5.0.0-canary.4" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/packages/vue/CHANGELOG.md b/packages/vue/CHANGELOG.md index 9b7adeaa39d9..12dd0c08cef2 100644 --- a/packages/vue/CHANGELOG.md +++ b/packages/vue/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/vue +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [9f95b35] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/ui-utils@2.0.0-canary.3 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/vue/package.json b/packages/vue/package.json index 44c9a57331e2..b4afdedb16ae 100644 --- a/packages/vue/package.json +++ b/packages/vue/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/vue", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "3.0.0-canary.2", - "@ai-sdk/ui-utils": "2.0.0-canary.2", + "@ai-sdk/provider-utils": "3.0.0-canary.3", + "@ai-sdk/ui-utils": "2.0.0-canary.3", "swrv": "^1.0.4" }, "devDependencies": { diff --git a/packages/xai/CHANGELOG.md b/packages/xai/CHANGELOG.md index fca755a5d9cb..b792c027df24 100644 --- a/packages/xai/CHANGELOG.md +++ b/packages/xai/CHANGELOG.md @@ -1,5 +1,18 @@ # @ai-sdk/xai +## 2.0.0-canary.3 + +### Patch Changes + +- Updated dependencies [a166433] +- Updated dependencies [abf9a79] +- Updated dependencies [9f95b35] +- Updated dependencies [0a87932] +- Updated dependencies [6dc848c] + - @ai-sdk/provider-utils@3.0.0-canary.3 + - @ai-sdk/provider@2.0.0-canary.2 + - @ai-sdk/openai-compatible@1.0.0-canary.3 + ## 2.0.0-canary.2 ### Patch Changes diff --git a/packages/xai/package.json b/packages/xai/package.json index 54968246730b..1eeaa3252861 100644 --- a/packages/xai/package.json +++ b/packages/xai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/xai", - "version": "2.0.0-canary.2", + "version": "2.0.0-canary.3", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai-compatible": "1.0.0-canary.2", - "@ai-sdk/provider": "2.0.0-canary.1", - "@ai-sdk/provider-utils": "3.0.0-canary.2" + "@ai-sdk/openai-compatible": "1.0.0-canary.3", + "@ai-sdk/provider": "2.0.0-canary.2", + 
"@ai-sdk/provider-utils": "3.0.0-canary.3" }, "devDependencies": { "@types/node": "20.17.24", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 24cd3635155c..9bf5b5a1f508 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -60,70 +60,70 @@ importers: examples/ai-core: dependencies: '@ai-sdk/amazon-bedrock': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../../packages/amazon-bedrock '@ai-sdk/anthropic': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/anthropic '@ai-sdk/azure': - specifier: 2.0.0-canary.3 + specifier: 2.0.0-canary.4 version: link:../../packages/azure '@ai-sdk/cerebras': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/cerebras '@ai-sdk/cohere': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/cohere '@ai-sdk/deepinfra': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/deepinfra '@ai-sdk/deepseek': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/deepseek '@ai-sdk/fal': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/fal '@ai-sdk/fireworks': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 2.0.0-canary.3 + specifier: 2.0.0-canary.4 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.3 + specifier: 3.0.0-canary.4 version: link:../../packages/google-vertex '@ai-sdk/groq': - specifier: 2.0.0-canary.3 + specifier: 2.0.0-canary.4 version: link:../../packages/groq '@ai-sdk/luma': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/luma '@ai-sdk/mistral': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/mistral '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/openai-compatible '@ai-sdk/perplexity': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/perplexity '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../../packages/provider '@ai-sdk/replicate': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/replicate '@ai-sdk/togetherai': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/togetherai '@ai-sdk/valibot': - specifier: 1.0.0-canary.3 + specifier: 1.0.0-canary.4 version: link:../../packages/valibot '@ai-sdk/xai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/xai '@google/generative-ai': specifier: 0.21.0 @@ -138,7 +138,7 @@ importers: specifier: 1.28.0 version: 1.28.0(@opentelemetry/api@1.9.0) ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -178,10 +178,10 @@ importers: examples/express: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -206,10 +206,10 @@ importers: examples/fastify: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.3 + specifier: 
5.0.0-canary.4 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -231,13 +231,13 @@ importers: examples/hono: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@hono/node-server': specifier: 1.13.7 version: 1.13.7(hono@4.6.9) ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -259,13 +259,13 @@ importers: examples/mcp: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@modelcontextprotocol/sdk': specifier: ^1.7.0 version: 1.7.0 ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -293,7 +293,7 @@ importers: examples/nest: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@nestjs/common': specifier: ^10.4.15 @@ -305,7 +305,7 @@ importers: specifier: ^10.4.9 version: 10.4.9(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2) ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai reflect-metadata: specifier: ^0.2.0 @@ -381,13 +381,13 @@ importers: examples/next-fastapi: dependencies: '@ai-sdk/react': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/react '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/ui-utils ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -436,10 +436,10 @@ importers: examples/next-google-vertex: dependencies: '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.3 + specifier: 3.0.0-canary.4 version: link:../../packages/google-vertex ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai geist: specifier: ^1.3.1 @@ -479,7 +479,7 @@ importers: examples/next-langchain: dependencies: '@ai-sdk/react': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/react '@langchain/core': specifier: 0.1.63 @@ -488,7 +488,7 @@ importers: specifier: 0.0.28 version: 0.0.28 ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai langchain: specifier: 0.1.36 @@ -534,40 +534,40 @@ importers: examples/next-openai: dependencies: '@ai-sdk/anthropic': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/anthropic '@ai-sdk/deepseek': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/deepseek '@ai-sdk/fireworks': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../../packages/fireworks '@ai-sdk/google': - specifier: 2.0.0-canary.3 + specifier: 2.0.0-canary.4 version: link:../../packages/google '@ai-sdk/google-vertex': - specifier: 3.0.0-canary.3 + specifier: 3.0.0-canary.4 version: link:../../packages/google-vertex '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@ai-sdk/perplexity': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/perplexity '@ai-sdk/react': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/react '@ai-sdk/rsc': - specifier: 1.0.0-canary.1 + specifier: 1.0.0-canary.2 version: link:../../packages/rsc '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.2 + specifier: 
2.0.0-canary.3 version: link:../../packages/ui-utils '@vercel/blob': specifier: ^0.26.0 version: 0.26.0 ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai next: specifier: latest @@ -619,16 +619,16 @@ importers: examples/next-openai-kasada-bot-protection: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/react '@vercel/functions': specifier: latest version: 2.0.0(@aws-sdk/credential-provider-web-identity@3.662.0(@aws-sdk/client-sts@3.662.0)) ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai next: specifier: latest @@ -674,13 +674,13 @@ importers: examples/next-openai-pages: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/react ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai next: specifier: latest @@ -729,10 +729,10 @@ importers: examples/next-openai-telemetry: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/react '@opentelemetry/api-logs': specifier: 0.55.0 @@ -747,7 +747,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.29.0(@opentelemetry/api@1.9.0)) ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai next: specifier: latest @@ -796,10 +796,10 @@ importers: examples/next-openai-telemetry-sentry: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/react '@opentelemetry/api-logs': specifier: 0.55.0 @@ -820,7 +820,7 @@ importers: specifier: 1.10.0 version: 1.10.0(@opentelemetry/api-logs@0.55.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.55.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.29.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.28.0(@opentelemetry/api@1.9.0)) ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai next: specifier: latest @@ -869,10 +869,10 @@ importers: examples/next-openai-upstash-rate-limits: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@ai-sdk/react': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/react '@upstash/ratelimit': specifier: ^0.4.3 @@ -881,7 +881,7 @@ importers: specifier: ^0.2.2 version: 0.2.4 ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai next: specifier: latest @@ -927,10 +927,10 @@ importers: 
examples/node-http-server: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai dotenv: specifier: 16.4.5 @@ -955,13 +955,13 @@ importers: examples/nuxt-openai: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@ai-sdk/vue': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/vue ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai zod: specifier: 3.23.8 @@ -1010,16 +1010,16 @@ importers: examples/sveltekit-openai: devDependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/openai '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../../packages/provider-utils '@ai-sdk/svelte': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../../packages/svelte '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../../packages/ui-utils '@eslint/compat': specifier: ^1.2.5 @@ -1037,7 +1037,7 @@ importers: specifier: ^5.0.0 version: 5.0.3(svelte@5.22.4)(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../../packages/ai autoprefixer: specifier: ^10.4.20 @@ -1094,13 +1094,13 @@ importers: packages/ai: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../ui-utils '@opentelemetry/api': specifier: 1.9.0 @@ -1137,10 +1137,10 @@ importers: packages/amazon-bedrock: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils '@smithy/eventstream-codec': specifier: ^4.0.1 @@ -1171,10 +1171,10 @@ importers: packages/anthropic: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1196,13 +1196,13 @@ importers: packages/azure: dependencies: '@ai-sdk/openai': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../openai '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1224,13 +1224,13 @@ importers: packages/cerebras: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1301,10 +1301,10 @@ importers: packages/cohere: dependencies: '@ai-sdk/provider': - specifier: 
2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1326,13 +1326,13 @@ importers: packages/deepinfra: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1354,13 +1354,13 @@ importers: packages/deepseek: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1382,10 +1382,10 @@ importers: packages/fal: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1407,13 +1407,13 @@ importers: packages/fireworks: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1435,10 +1435,10 @@ importers: packages/google: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1460,16 +1460,16 @@ importers: packages/google-vertex: dependencies: '@ai-sdk/anthropic': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../anthropic '@ai-sdk/google': - specifier: 2.0.0-canary.3 + specifier: 2.0.0-canary.4 version: link:../google '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils google-auth-library: specifier: ^9.15.0 @@ -1494,10 +1494,10 @@ importers: packages/groq: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1519,10 +1519,10 @@ importers: packages/luma: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1544,10 +1544,10 @@ importers: packages/mistral: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 
3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1569,10 +1569,10 @@ importers: packages/openai: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1594,10 +1594,10 @@ importers: packages/openai-compatible: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1619,10 +1619,10 @@ importers: packages/perplexity: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1666,7 +1666,7 @@ importers: packages/provider-utils: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider devDependencies: '@types/node': @@ -1691,10 +1691,10 @@ importers: packages/react: dependencies: '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../ui-utils react: specifier: ^18 || ^19 || ^19.0.0-rc @@ -1755,10 +1755,10 @@ importers: packages/replicate: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1780,13 +1780,13 @@ importers: packages/rsc: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: link:../ai jsondiffpatch: specifier: 0.6.0 @@ -1865,10 +1865,10 @@ importers: packages/svelte: dependencies: '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../ui-utils devDependencies: '@eslint/compat': @@ -1929,13 +1929,13 @@ importers: packages/togetherai: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': @@ -1957,10 +1957,10 @@ importers: packages/ui-utils: dependencies: '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils zod-to-json-schema: specifier: ^3.24.1 @@ -1994,7 +1994,7 @@ importers: specifier: ^1.0.0-rc.0 || ^1.0.0 version: 1.0.0-rc.0(valibot@1.0.0-rc.0(typescript@5.8.3)) ai: - specifier: 5.0.0-canary.3 + specifier: 5.0.0-canary.4 version: 
link:../ai devDependencies: '@types/node': @@ -2016,10 +2016,10 @@ importers: packages/vue: dependencies: '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 2.0.0-canary.2 + specifier: 2.0.0-canary.3 version: link:../ui-utils swrv: specifier: ^1.0.4 @@ -2071,13 +2071,13 @@ importers: packages/xai: dependencies: '@ai-sdk/openai-compatible': - specifier: 1.0.0-canary.2 + specifier: 1.0.0-canary.3 version: link:../openai-compatible '@ai-sdk/provider': - specifier: 2.0.0-canary.1 + specifier: 2.0.0-canary.2 version: link:../provider '@ai-sdk/provider-utils': - specifier: 3.0.0-canary.2 + specifier: 3.0.0-canary.3 version: link:../provider-utils devDependencies: '@types/node': From b71fe8d21159d97233a5d92d5304ce0ad68531af Mon Sep 17 00:00:00 2001 From: Sam Denty Date: Wed, 9 Apr 2025 11:34:12 +0100 Subject: [PATCH 0058/1307] fix(ai): remove jsondiffpatch dependency (#5623) --- .changeset/lemon-terms-hug.md | 5 +++++ packages/ai/package.json | 3 +-- pnpm-lock.yaml | 3 --- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 .changeset/lemon-terms-hug.md diff --git a/.changeset/lemon-terms-hug.md b/.changeset/lemon-terms-hug.md new file mode 100644 index 000000000000..3585ea8b54d9 --- /dev/null +++ b/.changeset/lemon-terms-hug.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix(ai): remove jsondiffpatch dependency diff --git a/packages/ai/package.json b/packages/ai/package.json index 3a67a636dd74..ab43a193255d 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -58,8 +58,7 @@ "@ai-sdk/provider": "2.0.0-canary.2", "@ai-sdk/provider-utils": "3.0.0-canary.3", "@ai-sdk/ui-utils": "2.0.0-canary.3", - "@opentelemetry/api": "1.9.0", - "jsondiffpatch": "0.6.0" + "@opentelemetry/api": "1.9.0" }, "devDependencies": { "@edge-runtime/vm": "^5.0.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9bf5b5a1f508..efdae054d906 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1105,9 +1105,6 @@ importers: '@opentelemetry/api': specifier: 1.9.0 version: 1.9.0 - jsondiffpatch: - specifier: 0.6.0 - version: 0.6.0 devDependencies: '@edge-runtime/vm': specifier: ^5.0.0 From d91b50d90a91277f5f9711988b45a3cbd533ccd2 Mon Sep 17 00:00:00 2001 From: Sam Denty Date: Wed, 9 Apr 2025 12:30:24 +0100 Subject: [PATCH 0059/1307] chore(ui-utils): merge into ai package (#5624) --- .changeset/cuddly-icons-kick.md | 1 - .changeset/green-deers-scream.md | 1 - .changeset/pre.json | 1 - .changeset/seven-pens-itch.md | 1 - .changeset/tiny-deers-kick.md | 5 + .../cookbook/01-next/75-human-in-the-loop.mdx | 3 +- content/docs/04-ai-sdk-ui/02-chatbot.mdx | 2 +- .../(examples)/03-chat-attachments/page.tsx | 2 +- examples/next-fastapi/package.json | 1 - .../api/use-chat-human-in-the-loop/utils.ts | 3 +- .../app/use-chat-attachments-append/page.tsx | 2 +- .../app/use-chat-attachments-url/page.tsx | 2 +- .../app/use-chat-attachments/page.tsx | 2 +- examples/next-openai/package.json | 1 - examples/sveltekit-openai/package.json | 1 - .../create-data-stream-response.test.ts | 2 +- .../data-stream/create-data-stream.test.ts | 2 +- .../ai/core/data-stream/create-data-stream.ts | 2 +- .../ai/core/data-stream/data-stream-writer.ts | 2 +- .../pipe-data-stream-to-response.test.ts | 2 +- .../generate-object/generate-object.test.ts | 2 +- .../core/generate-object/generate-object.ts | 2 +- .../core/generate-object/output-strategy.ts | 2 +- .../generate-object/stream-object.test.ts | 2 +- 
From d91b50d90a91277f5f9711988b45a3cbd533ccd2 Mon Sep 17 00:00:00 2001
From: Sam Denty
Date: Wed, 9 Apr 2025 12:30:24 +0100
Subject: [PATCH 0059/1307] chore(ui-utils): merge into ai package (#5624)

---
 .changeset/cuddly-icons-kick.md | 1 -
 .changeset/green-deers-scream.md | 1 -
 .changeset/pre.json | 1 -
 .changeset/seven-pens-itch.md | 1 -
 .changeset/tiny-deers-kick.md | 5 +
 .../cookbook/01-next/75-human-in-the-loop.mdx | 3 +-
 content/docs/04-ai-sdk-ui/02-chatbot.mdx | 2 +-
 .../(examples)/03-chat-attachments/page.tsx | 2 +-
 examples/next-fastapi/package.json | 1 -
 .../api/use-chat-human-in-the-loop/utils.ts | 3 +-
 .../app/use-chat-attachments-append/page.tsx | 2 +-
 .../app/use-chat-attachments-url/page.tsx | 2 +-
 .../app/use-chat-attachments/page.tsx | 2 +-
 examples/next-openai/package.json | 1 -
 examples/sveltekit-openai/package.json | 1 -
 .../create-data-stream-response.test.ts | 2 +-
 .../data-stream/create-data-stream.test.ts | 2 +-
 .../ai/core/data-stream/create-data-stream.ts | 2 +-
 .../ai/core/data-stream/data-stream-writer.ts | 2 +-
 .../pipe-data-stream-to-response.test.ts | 2 +-
 .../generate-object/generate-object.test.ts | 2 +-
 .../core/generate-object/generate-object.ts | 2 +-
 .../core/generate-object/output-strategy.ts | 2 +-
 .../generate-object/stream-object.test.ts | 2 +-
 .../ai/core/generate-object/stream-object.ts | 2 +-
 .../validate-object-generation-input.ts | 2 +-
 .../core/generate-text/generate-text.test.ts | 2 +-
 packages/ai/core/generate-text/output.ts | 7 +-
 .../ai/core/generate-text/parse-tool-call.ts | 2 +-
 .../generate-text/run-tools-transformation.ts | 2 +-
 .../ai/core/generate-text/stream-text.test.ts | 2 +-
 packages/ai/core/generate-text/stream-text.ts | 2 +-
 packages/ai/core/index.ts | 24 +-
 .../core/prompt/append-client-message.test.ts | 2 +-
 .../ai/core/prompt/append-client-message.ts | 2 +-
 .../core/prompt/append-response-messages.ts | 4 +-
 .../ai/core/prompt/attachments-to-parts.ts | 2 +-
 .../prompt/convert-to-core-messages.test.ts | 2 +-
 .../core/prompt/convert-to-core-messages.ts | 2 +-
 .../ai/core/prompt/detect-prompt-type.test.ts | 2 +-
 .../core/prompt/message-conversion-error.ts | 2 +-
 .../prompt/prepare-tools-and-tool-choice.ts | 2 +-
 packages/ai/core/prompt/prompt.ts | 2 +-
 packages/ai/core/prompt/standardize-prompt.ts | 2 +-
 packages/ai/core/tool/mcp/mcp-client.ts | 2 +-
 packages/ai/core/tool/tool.ts | 2 +-
 .../src => ai/core/types}/duplicated/usage.ts | 0
 packages/ai/core/types/index.ts | 2 +
 .../types.ts => ai/core/types/messages.ts} | 0
 .../process-chat-response.test.ts.snap | 0
 .../process-chat-text-response.test.ts.snap | 0
 .../__snapshots__/zod-schema.test.ts.snap | 0
 .../src => ai/core/util}/call-chat-api.ts | 2 +-
 .../core/util}/call-completion-api.ts | 2 +-
 .../core/util}/data-stream-parts.test.ts | 0
 .../src => ai/core/util}/data-stream-parts.ts | 2 +-
 .../src => ai/core/util}/data-url.ts | 0
 .../src => ai/core/util}/deep-partial.ts | 0
 .../util}/extract-max-tool-invocation-step.ts | 2 +-
 .../core/util}/fill-message-parts.ts | 2 +-
 .../src => ai/core/util}/fix-json.test.ts | 0
 .../src => ai/core/util}/fix-json.ts | 0
 .../core/util}/get-message-parts.test.ts | 0
 .../src => ai/core/util}/get-message-parts.ts | 2 +-
 .../{ui-utils/src => ai/core/util}/index.ts | 2 -
 .../core/util}/is-deep-equal-data.test.ts | 0
 .../core/util}/is-deep-equal-data.ts | 0
 .../core/util}/parse-partial-json.test.ts | 0
 .../core/util}/parse-partial-json.ts | 0
 .../util}/prepare-attachments-for-request.ts | 2 +-
 .../core/util}/process-chat-response.test.ts | 4 +-
 .../core/util}/process-chat-response.ts | 4 +-
 .../util}/process-chat-text-response.test.ts | 2 +-
 .../core/util}/process-chat-text-response.ts | 2 +-
 .../core/util}/process-data-stream.test.ts | 0
 .../core/util}/process-data-stream.ts | 0
 .../core/util}/process-text-stream.test.ts | 0
 .../core/util}/process-text-stream.ts | 0
 .../{ui-utils/src => ai/core/util}/schema.ts | 0
 .../util}/should-resubmit-messages.test.ts | 0
 .../core/util}/should-resubmit-messages.ts | 2 +-
 .../core/util}/update-tool-call-result.ts | 2 +-
 .../src => ai/core/util}/zod-schema.test.ts | 0
 .../src => ai/core/util}/zod-schema.ts | 0
 packages/ai/package.json | 7 +-
 packages/ai/streams/langchain-adapter.ts | 2 +-
 packages/ai/streams/llamaindex-adapter.ts | 2 +-
 packages/ai/streams/stream-data.ts | 2 +-
 packages/ai/tsup.config.ts | 4 -
 packages/react/package.json | 2 +-
 packages/react/src/use-chat.ts | 4 +-
 packages/react/src/use-chat.ui.test.tsx | 2 +-
 packages/react/src/use-completion.ts | 2 +-
 packages/react/src/use-object.ts | 2 +-
 packages/react/src/util/use-stable-value.ts | 2 +-
 packages/svelte/package.json | 4 +-
 packages/svelte/src/chat-context.svelte.ts | 2 +-
 packages/svelte/src/chat.svelte.test.ts | 2 +-
 packages/svelte/src/chat.svelte.ts | 2 +-
 .../svelte/src/completion-context.svelte.ts | 2 +-
 packages/svelte/src/completion.svelte.ts | 2 +-
 .../src/structured-object-context.svelte.ts | 2 +-
 .../svelte/src/structured-object.svelte.ts | 2 +-
 .../structured-object-synchronization.svelte | 2 +-
 packages/ui-utils/.eslintrc.js | 4 -
 packages/ui-utils/.gitignore | 2 -
 packages/ui-utils/CHANGELOG.md | 790 ------------------
 packages/ui-utils/README.md | 3 -
 packages/ui-utils/package.json | 67 --
 packages/ui-utils/tsconfig.json | 9 -
 packages/ui-utils/tsup.config.ts | 10 -
 packages/ui-utils/turbo.json | 12 -
 packages/ui-utils/vitest.edge.config.js | 10 -
 packages/ui-utils/vitest.node.config.js | 10 -
 packages/vue/package.json | 2 +-
 .../vue/src/TestChatAttachmentsComponent.vue | 2 +-
 .../TestChatPrepareRequestBodyComponent.vue | 2 +-
 packages/vue/src/use-chat.ts | 4 +-
 packages/vue/src/use-chat.ui.test.tsx | 2 +-
 packages/vue/src/use-completion.ts | 8 +-
 pnpm-lock.yaml | 84 +-
 121 files changed, 118 insertions(+), 1107 deletions(-)
 create mode 100644 .changeset/tiny-deers-kick.md
 rename packages/{ui-utils/src => ai/core/types}/duplicated/usage.ts (100%)
 rename packages/{ui-utils/src/types.ts => ai/core/types/messages.ts} (100%)
 rename packages/{ui-utils/src => ai/core/util}/__snapshots__/process-chat-response.test.ts.snap (100%)
 rename packages/{ui-utils/src => ai/core/util}/__snapshots__/process-chat-text-response.test.ts.snap (100%)
 rename packages/{ui-utils/src => ai/core/util}/__snapshots__/zod-schema.test.ts.snap (100%)
 rename packages/{ui-utils/src => ai/core/util}/call-chat-api.ts (99%)
 rename packages/{ui-utils/src => ai/core/util}/call-completion-api.ts (98%)
 rename packages/{ui-utils/src => ai/core/util}/data-stream-parts.test.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/data-stream-parts.ts (99%)
 rename packages/{ui-utils/src => ai/core/util}/data-url.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/deep-partial.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/extract-max-tool-invocation-step.ts (84%)
 rename packages/{ui-utils/src => ai/core/util}/fill-message-parts.ts (82%)
 rename packages/{ui-utils/src => ai/core/util}/fix-json.test.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/fix-json.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/get-message-parts.test.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/get-message-parts.ts (98%)
 rename packages/{ui-utils/src => ai/core/util}/index.ts (98%)
 rename packages/{ui-utils/src => ai/core/util}/is-deep-equal-data.test.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/is-deep-equal-data.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/parse-partial-json.test.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/parse-partial-json.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/prepare-attachments-for-request.ts (96%)
 rename packages/{ui-utils/src => ai/core/util}/process-chat-response.test.ts (99%)
 rename packages/{ui-utils/src => ai/core/util}/process-chat-response.ts (99%)
 rename packages/{ui-utils/src => ai/core/util}/process-chat-text-response.test.ts (98%)
 rename packages/{ui-utils/src => ai/core/util}/process-chat-text-response.ts (95%)
 rename packages/{ui-utils/src => ai/core/util}/process-data-stream.test.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/process-data-stream.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/process-text-stream.test.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/process-text-stream.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/schema.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/should-resubmit-messages.test.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/should-resubmit-messages.ts (97%)
 rename packages/{ui-utils/src => ai/core/util}/update-tool-call-result.ts (95%)
 rename packages/{ui-utils/src => ai/core/util}/zod-schema.test.ts (100%)
 rename packages/{ui-utils/src => ai/core/util}/zod-schema.ts (100%)
 delete mode 100644 packages/ui-utils/.eslintrc.js
 delete mode 100644 packages/ui-utils/.gitignore
 delete mode 100644 packages/ui-utils/CHANGELOG.md
 delete mode 100644 packages/ui-utils/README.md
 delete mode 100644 packages/ui-utils/package.json
 delete mode 100644 packages/ui-utils/tsconfig.json
 delete mode 100644 packages/ui-utils/tsup.config.ts
 delete mode 100644 packages/ui-utils/turbo.json
 delete mode 100644 packages/ui-utils/vitest.edge.config.js
 delete mode 100644 packages/ui-utils/vitest.node.config.js

diff --git a/.changeset/cuddly-icons-kick.md b/.changeset/cuddly-icons-kick.md
index f912f644f070..6450dd0ce5ea 100644
--- a/.changeset/cuddly-icons-kick.md
+++ b/.changeset/cuddly-icons-kick.md
@@ -2,7 +2,6 @@
 '@ai-sdk/provider-utils': major
 '@ai-sdk/google-vertex': major
 '@ai-sdk/anthropic': major
-'@ai-sdk/ui-utils': major
 '@ai-sdk/react': major
 '@ai-sdk/vue': major
 'ai': major
diff --git a/.changeset/green-deers-scream.md b/.changeset/green-deers-scream.md
index e629b089f6ac..debc42602568 100644
--- a/.changeset/green-deers-scream.md
+++ b/.changeset/green-deers-scream.md
@@ -1,5 +1,4 @@
 ---
-'@ai-sdk/ui-utils': major
 '@ai-sdk/react': major
 '@ai-sdk/vue': major
 'ai': major
diff --git a/.changeset/pre.json b/.changeset/pre.json
index 532c03a0c762..605156d563d8 100644
--- a/.changeset/pre.json
+++ b/.changeset/pre.json
@@ -46,7 +46,6 @@
     "@ai-sdk/replicate": "0.2.3",
     "@ai-sdk/svelte": "2.1.5",
     "@ai-sdk/togetherai": "0.2.5",
-    "@ai-sdk/ui-utils": "1.2.4",
     "@ai-sdk/valibot": "0.1.10",
     "@ai-sdk/vue": "1.2.4",
     "@ai-sdk/xai": "1.2.6",
diff --git a/.changeset/seven-pens-itch.md b/.changeset/seven-pens-itch.md
index d0c9f2a60649..671981d33d90 100644
--- a/.changeset/seven-pens-itch.md
+++ b/.changeset/seven-pens-itch.md
@@ -1,6 +1,5 @@
 ---
 '@ai-sdk/provider-utils': patch
-'@ai-sdk/ui-utils': patch
 ---
 
 chore (utils): remove unused test helpers
diff --git a/.changeset/tiny-deers-kick.md b/.changeset/tiny-deers-kick.md
new file mode 100644
index 000000000000..5e75717f9f74
--- /dev/null
+++ b/.changeset/tiny-deers-kick.md
@@ -0,0 +1,5 @@
+---
+'ai': patch
+---
+
+chore(ui-utils): merge into ai package
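For downstream applications, the visible effect of this patch is an import-path change: everything `@ai-sdk/ui-utils` used to export now comes from the `ai` package root, as the file diffs below show. A minimal migration sketch (the call site is illustrative, not taken from the repository):

```ts
// before: import { formatDataStreamPart, processDataStream, type Message } from '@ai-sdk/ui-utils';
// after this patch, the same exports resolve from the ai package:
import { formatDataStreamPart, processDataStream, type Message } from 'ai';

// the helpers behave exactly as before, e.g. formatting a text part
// for the data stream protocol:
const part = formatDataStreamPart('text', 'Hello'); // -> '0:"Hello"\n'
```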
diff --git a/content/cookbook/01-next/75-human-in-the-loop.mdx b/content/cookbook/01-next/75-human-in-the-loop.mdx
index 4faba3d440b2..dacb8121e68a 100644
--- a/content/cookbook/01-next/75-human-in-the-loop.mdx
+++ b/content/cookbook/01-next/75-human-in-the-loop.mdx
@@ -333,8 +333,9 @@ The solution above is low-level and not very friendly to use in a production env
 ### Create Utility Functions
 
 ```ts filename="utils.ts"
-import { formatDataStreamPart, Message } from '@ai-sdk/ui-utils';
 import {
+  formatDataStreamPart,
+  Message,
   convertToCoreMessages,
   DataStreamWriter,
   ToolExecutionOptions,
diff --git a/content/docs/04-ai-sdk-ui/02-chatbot.mdx b/content/docs/04-ai-sdk-ui/02-chatbot.mdx
index 222e6495b6a3..b7fe23c4725a 100644
--- a/content/docs/04-ai-sdk-ui/02-chatbot.mdx
+++ b/content/docs/04-ai-sdk-ui/02-chatbot.mdx
@@ -738,7 +738,7 @@ You can also send URLs as attachments along with a message. This can be useful f
 import { useChat } from '@ai-sdk/react';
 import { useState } from 'react';
 
-import { Attachment } from '@ai-sdk/ui-utils';
+import { Attachment } from 'ai';
 
 export default function Page() {
   const { messages, input, handleSubmit, handleInputChange, status } =
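The documented pattern that this import change touches, as a compact sketch; the only difference for users is where `Attachment` comes from (the chat route and component body are assumptions, not the full docs example):

```tsx
'use client';

import { useChat } from '@ai-sdk/react';
import { Attachment } from 'ai'; // previously '@ai-sdk/ui-utils'
import { useState } from 'react';

export default function Page() {
  const { input, handleSubmit, handleInputChange, status } = useChat({
    api: '/api/chat', // hypothetical route
  });

  // URL attachments are plain objects; no FileList is required:
  const [attachments] = useState<Attachment[]>([
    {
      name: 'earth.png',
      contentType: 'image/png',
      url: 'https://example.com/earth.png', // hypothetical URL
    },
  ]);

  return (
    <form
      onSubmit={event =>
        handleSubmit(event, { experimental_attachments: attachments })
      }
    >
      <input
        value={input}
        onChange={handleInputChange}
        disabled={status !== 'ready'}
      />
    </form>
  );
}
```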
diff --git a/examples/next-fastapi/app/(examples)/03-chat-attachments/page.tsx b/examples/next-fastapi/app/(examples)/03-chat-attachments/page.tsx
index 7eeb7d2850bb..fb77d938020e 100644
--- a/examples/next-fastapi/app/(examples)/03-chat-attachments/page.tsx
+++ b/examples/next-fastapi/app/(examples)/03-chat-attachments/page.tsx
@@ -2,7 +2,7 @@
 import { Card } from '@/app/components';
 
 /* eslint-disable @next/next/no-img-element */
-import { getTextFromDataUrl } from '@ai-sdk/ui-utils';
+import { getTextFromDataUrl } from 'ai';
 import { useChat } from '@ai-sdk/react';
 import { useRef, useState } from 'react';
diff --git a/examples/next-fastapi/package.json b/examples/next-fastapi/package.json
index 9bd664fb1b06..4545de250b02 100644
--- a/examples/next-fastapi/package.json
+++ b/examples/next-fastapi/package.json
@@ -11,7 +11,6 @@
     "lint": "next lint"
   },
   "dependencies": {
-    "@ai-sdk/ui-utils": "2.0.0-canary.3",
     "@ai-sdk/react": "2.0.0-canary.3",
     "ai": "5.0.0-canary.4",
     "geist": "^1.3.1",
diff --git a/examples/next-openai/app/api/use-chat-human-in-the-loop/utils.ts b/examples/next-openai/app/api/use-chat-human-in-the-loop/utils.ts
index 5ebb118a6d59..451c0aa1eef1 100644
--- a/examples/next-openai/app/api/use-chat-human-in-the-loop/utils.ts
+++ b/examples/next-openai/app/api/use-chat-human-in-the-loop/utils.ts
@@ -1,6 +1,7 @@
-import { formatDataStreamPart, Message } from '@ai-sdk/ui-utils';
 import {
   convertToCoreMessages,
+  formatDataStreamPart,
+  Message,
   DataStreamWriter,
   ToolExecutionOptions,
   ToolSet,
diff --git a/examples/next-openai/app/use-chat-attachments-append/page.tsx b/examples/next-openai/app/use-chat-attachments-append/page.tsx
index a27c473618dd..60c9c6f87090 100644
--- a/examples/next-openai/app/use-chat-attachments-append/page.tsx
+++ b/examples/next-openai/app/use-chat-attachments-append/page.tsx
@@ -1,7 +1,7 @@
 'use client';
 
 /* eslint-disable @next/next/no-img-element */
-import { getTextFromDataUrl } from '@ai-sdk/ui-utils';
+import { getTextFromDataUrl } from 'ai';
 import { useChat } from '@ai-sdk/react';
 import { useRef, useState } from 'react';
diff --git a/examples/next-openai/app/use-chat-attachments-url/page.tsx b/examples/next-openai/app/use-chat-attachments-url/page.tsx
index e9afe47a9234..56617c4cad47 100644
--- a/examples/next-openai/app/use-chat-attachments-url/page.tsx
+++ b/examples/next-openai/app/use-chat-attachments-url/page.tsx
@@ -4,7 +4,7 @@
 import { useChat } from '@ai-sdk/react';
 import { useRef, useState } from 'react';
 import { upload } from '@vercel/blob/client';
-import { Attachment } from '@ai-sdk/ui-utils';
+import { Attachment } from 'ai';
 
 export default function Page() {
   const { messages, input, handleSubmit, handleInputChange, status } = useChat({
diff --git a/examples/next-openai/app/use-chat-attachments/page.tsx b/examples/next-openai/app/use-chat-attachments/page.tsx
index 6baf27fbbf15..bd48c582be65 100644
--- a/examples/next-openai/app/use-chat-attachments/page.tsx
+++ b/examples/next-openai/app/use-chat-attachments/page.tsx
@@ -1,7 +1,7 @@
 'use client';
 
 /* eslint-disable @next/next/no-img-element */
-import { getTextFromDataUrl } from '@ai-sdk/ui-utils';
+import { getTextFromDataUrl } from 'ai';
 import { useChat } from '@ai-sdk/react';
 import { useRef, useState } from 'react';
diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json
index 6292d1abee6b..d3f0011adeeb 100644
--- a/examples/next-openai/package.json
+++ b/examples/next-openai/package.json
@@ -16,7 +16,6 @@
     "@ai-sdk/google": "2.0.0-canary.4",
     "@ai-sdk/google-vertex": "3.0.0-canary.4",
     "@ai-sdk/perplexity": "2.0.0-canary.3",
-    "@ai-sdk/ui-utils": "2.0.0-canary.3",
     "@ai-sdk/react": "2.0.0-canary.3",
     "@ai-sdk/rsc": "1.0.0-canary.2",
     "@vercel/blob": "^0.26.0",
diff --git a/examples/sveltekit-openai/package.json b/examples/sveltekit-openai/package.json
index 0c07734d8d4b..3121bbd0dedb 100644
--- a/examples/sveltekit-openai/package.json
+++ b/examples/sveltekit-openai/package.json
@@ -19,7 +19,6 @@
     "@ai-sdk/openai": "2.0.0-canary.3",
     "@ai-sdk/provider-utils": "3.0.0-canary.3",
     "@ai-sdk/svelte": "3.0.0-canary.3",
-    "@ai-sdk/ui-utils": "2.0.0-canary.3",
     "@eslint/compat": "^1.2.5",
     "@eslint/js": "^9.18.0",
     "@sveltejs/adapter-vercel": "^5.5.2",
diff --git a/packages/ai/core/data-stream/create-data-stream-response.test.ts b/packages/ai/core/data-stream/create-data-stream-response.test.ts
index 908dde107151..093c447e80b5 100644
--- a/packages/ai/core/data-stream/create-data-stream-response.test.ts
+++ b/packages/ai/core/data-stream/create-data-stream-response.test.ts
@@ -1,7 +1,7 @@
 import { expect, it, describe } from 'vitest';
 import { createDataStreamResponse } from './create-data-stream-response';
 import { convertReadableStreamToArray } from '@ai-sdk/provider-utils/test';
-import { formatDataStreamPart } from '@ai-sdk/ui-utils';
+import { formatDataStreamPart } from 'ai';
 
 describe('createDataStreamResponse', () => {
   it('should create a Response with correct headers and encoded stream', async () => {
diff --git a/packages/ai/core/data-stream/create-data-stream.test.ts b/packages/ai/core/data-stream/create-data-stream.test.ts
index 63eea5c8d13d..2b13129517a2 100644
--- a/packages/ai/core/data-stream/create-data-stream.test.ts
+++ b/packages/ai/core/data-stream/create-data-stream.test.ts
@@ -1,6 +1,6 @@
 import { delay } from '@ai-sdk/provider-utils';
 import { convertReadableStreamToArray } from '@ai-sdk/provider-utils/test';
-import { formatDataStreamPart } from '@ai-sdk/ui-utils';
+import { formatDataStreamPart } from 'ai';
 import { expect, it } from 'vitest';
 import { DelayedPromise } from '../../util/delayed-promise';
 import { Source } from '../types/language-model';
diff --git a/packages/ai/core/data-stream/create-data-stream.ts b/packages/ai/core/data-stream/create-data-stream.ts
index df9b7f5f3266..d09ba7e30ea4 100644
--- a/packages/ai/core/data-stream/create-data-stream.ts
+++ b/packages/ai/core/data-stream/create-data-stream.ts
@@ -1,4 +1,4 @@
-import { DataStreamString, formatDataStreamPart } from '@ai-sdk/ui-utils';
+import { DataStreamString, formatDataStreamPart } from '../util';
 import { DataStreamWriter } from './data-stream-writer';
 
 export function createDataStream({
diff --git a/packages/ai/core/data-stream/data-stream-writer.ts b/packages/ai/core/data-stream/data-stream-writer.ts
index 236652732573..3fbcafa803de 100644
--- a/packages/ai/core/data-stream/data-stream-writer.ts
+++ b/packages/ai/core/data-stream/data-stream-writer.ts
@@ -1,5 +1,5 @@
 import { JSONValue } from '@ai-sdk/provider';
-import { DataStreamString } from '@ai-sdk/ui-utils';
+import { DataStreamString } from '../util';
 import { Source } from '../types/language-model';
 
 export interface DataStreamWriter {
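The data stream helpers whose imports change above are part of the public `ai` surface. A hedged sketch of how they fit together in a route handler (the route shape is an assumption, not from the repository):

```ts
import { createDataStreamResponse, formatDataStreamPart } from 'ai';

export function GET() {
  return createDataStreamResponse({
    execute(dataStream) {
      // write a pre-formatted protocol part via the DataStreamWriter...
      dataStream.write(formatDataStreamPart('text', 'Hello'));
      // ...or attach arbitrary JSON data to the stream:
      dataStream.writeData({ status: 'done' });
    },
  });
}
```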
diff --git a/packages/ai/core/data-stream/pipe-data-stream-to-response.test.ts b/packages/ai/core/data-stream/pipe-data-stream-to-response.test.ts
index 8291d6da89c9..60edeb966e4f 100644
--- a/packages/ai/core/data-stream/pipe-data-stream-to-response.test.ts
+++ b/packages/ai/core/data-stream/pipe-data-stream-to-response.test.ts
@@ -1,6 +1,6 @@
 import { expect, it, describe } from 'vitest';
 import { pipeDataStreamToResponse } from './pipe-data-stream-to-response';
-import { formatDataStreamPart } from '@ai-sdk/ui-utils';
+import { formatDataStreamPart } from 'ai';
 import { createMockServerResponse } from '../test/mock-server-response';
 
 describe('pipeDataStreamToResponse', () => {
diff --git a/packages/ai/core/generate-object/generate-object.test.ts b/packages/ai/core/generate-object/generate-object.test.ts
index 47c38592b12d..63e6c2431e08 100644
--- a/packages/ai/core/generate-object/generate-object.test.ts
+++ b/packages/ai/core/generate-object/generate-object.test.ts
@@ -1,9 +1,9 @@
 import { convertReadableStreamToArray } from '@ai-sdk/provider-utils/test';
-import { jsonSchema } from '@ai-sdk/ui-utils';
 import assert, { fail } from 'node:assert';
 import { z } from 'zod';
 import { verifyNoObjectGeneratedError as originalVerifyNoObjectGeneratedError } from '../../errors/no-object-generated-error';
 import { MockLanguageModelV2 } from '../test/mock-language-model-v1';
+import { jsonSchema } from '../util';
 import { MockTracer } from '../test/mock-tracer';
 import { generateObject } from './generate-object';
 import { JSONParseError, TypeValidationError } from '@ai-sdk/provider';
diff --git a/packages/ai/core/generate-object/generate-object.ts b/packages/ai/core/generate-object/generate-object.ts
index 3b6c3822272e..4e2f366cd6d4 100644
--- a/packages/ai/core/generate-object/generate-object.ts
+++ b/packages/ai/core/generate-object/generate-object.ts
@@ -4,7 +4,7 @@ import {
   TypeValidationError,
 } from '@ai-sdk/provider';
 import { createIdGenerator, safeParseJSON } from '@ai-sdk/provider-utils';
-import { Schema } from '@ai-sdk/ui-utils';
+import { Schema } from '../util';
 import { z } from 'zod';
 import { NoObjectGeneratedError } from '../../errors/no-object-generated-error';
 import { CallSettings } from '../prompt/call-settings';
diff --git a/packages/ai/core/generate-object/output-strategy.ts b/packages/ai/core/generate-object/output-strategy.ts
index 6da624bd331c..5e0dd03c06a6 100644
--- a/packages/ai/core/generate-object/output-strategy.ts
+++ b/packages/ai/core/generate-object/output-strategy.ts
@@ -8,7 +8,7 @@ import {
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import { safeValidateTypes, ValidationResult } from '@ai-sdk/provider-utils';
-import { asSchema, DeepPartial, Schema } from '@ai-sdk/ui-utils';
+import { asSchema, DeepPartial, Schema } from '../util';
 import { z } from 'zod';
 import { NoObjectGeneratedError } from '../../errors/no-object-generated-error';
 import {
diff --git a/packages/ai/core/generate-object/stream-object.test.ts b/packages/ai/core/generate-object/stream-object.test.ts
index 5e6b8f1a9fa1..6abc1b559738 100644
--- a/packages/ai/core/generate-object/stream-object.test.ts
+++ b/packages/ai/core/generate-object/stream-object.test.ts
@@ -3,7 +3,7 @@ import {
   convertAsyncIterableToArray,
   convertReadableStreamToArray,
 } from '@ai-sdk/provider-utils/test';
-import { jsonSchema } from '@ai-sdk/ui-utils';
+import { jsonSchema } from '../util';
 import assert, { fail } from 'node:assert';
 import { z } from 'zod';
 import {
diff --git a/packages/ai/core/generate-object/stream-object.ts b/packages/ai/core/generate-object/stream-object.ts
index 883add9ce324..41a77f536f3f 100644
--- a/packages/ai/core/generate-object/stream-object.ts
+++ b/packages/ai/core/generate-object/stream-object.ts
@@ -10,7 +10,7 @@ import {
   Schema,
   isDeepEqualData,
   parsePartialJson,
-} from '@ai-sdk/ui-utils';
+} from '../util';
 import { ServerResponse } from 'http';
 import { z } from 'zod';
 import { NoObjectGeneratedError } from '../../errors/no-object-generated-error';
diff --git a/packages/ai/core/generate-object/validate-object-generation-input.ts b/packages/ai/core/generate-object/validate-object-generation-input.ts
index 1f997a887a94..1ecdc6ffd461 100644
--- a/packages/ai/core/generate-object/validate-object-generation-input.ts
+++ b/packages/ai/core/generate-object/validate-object-generation-input.ts
@@ -1,6 +1,6 @@
 import { z } from 'zod';
 import { InvalidArgumentError } from '../../errors/invalid-argument-error';
-import { Schema } from '@ai-sdk/ui-utils';
+import { Schema } from '../util';
 
 export function validateObjectGenerationInput({
   output,
diff --git a/packages/ai/core/generate-text/generate-text.test.ts b/packages/ai/core/generate-text/generate-text.test.ts
index 7789da54832b..7badb1e0ae11 100644
--- a/packages/ai/core/generate-text/generate-text.test.ts
+++ b/packages/ai/core/generate-text/generate-text.test.ts
@@ -1,6 +1,6 @@
 import { LanguageModelV2CallOptions } from '@ai-sdk/provider';
 import { mockId } from '@ai-sdk/provider-utils/test';
-import { jsonSchema } from '@ai-sdk/ui-utils';
+import { jsonSchema } from '../util';
 import assert from 'node:assert';
 import { z } from 'zod';
 import { Output } from '.';
diff --git a/packages/ai/core/generate-text/output.ts b/packages/ai/core/generate-text/output.ts
index 1ca8ab5e4634..c331fb97ae61 100644
--- a/packages/ai/core/generate-text/output.ts
+++ b/packages/ai/core/generate-text/output.ts
@@ -1,10 +1,5 @@
 import { safeParseJSON, safeValidateTypes } from '@ai-sdk/provider-utils';
-import {
-  asSchema,
-  DeepPartial,
-  parsePartialJson,
-  Schema,
-} from '@ai-sdk/ui-utils';
+import { asSchema, DeepPartial, parsePartialJson, Schema } from '../../core';
 import { z } from 'zod';
 import { NoObjectGeneratedError } from '../../errors';
 import { injectJsonInstruction } from '../generate-object/inject-json-instruction';
diff --git a/packages/ai/core/generate-text/parse-tool-call.ts b/packages/ai/core/generate-text/parse-tool-call.ts
index 177724f24db2..02e3337b323c 100644
--- a/packages/ai/core/generate-text/parse-tool-call.ts
+++ b/packages/ai/core/generate-text/parse-tool-call.ts
@@ -1,6 +1,6 @@
 import { LanguageModelV2FunctionToolCall } from '@ai-sdk/provider';
 import { safeParseJSON, safeValidateTypes } from '@ai-sdk/provider-utils';
-import { Schema, asSchema } from '@ai-sdk/ui-utils';
+import { Schema, asSchema } from '../util';
 import { InvalidToolArgumentsError } from '../../errors/invalid-tool-arguments-error';
 import { NoSuchToolError } from '../../errors/no-such-tool-error';
 import { ToolCallRepairError } from '../../errors/tool-call-repair-error';
diff --git a/packages/ai/core/generate-text/run-tools-transformation.ts b/packages/ai/core/generate-text/run-tools-transformation.ts
index 3109ac09ff9e..30952e233f59 100644
--- a/packages/ai/core/generate-text/run-tools-transformation.ts
+++ b/packages/ai/core/generate-text/run-tools-transformation.ts
@@ -1,5 +1,5 @@
 import { LanguageModelV2StreamPart } from '@ai-sdk/provider';
-import { generateId } from '@ai-sdk/ui-utils';
+import { generateId } from '../util';
 import { Tracer } from '@opentelemetry/api';
 import { ToolExecutionError } from '../../errors';
 import { CoreMessage } from '../prompt/message';
diff --git a/packages/ai/core/generate-text/stream-text.test.ts b/packages/ai/core/generate-text/stream-text.test.ts
index 4c0beea11cfd..5781f8d170b7 100644
--- a/packages/ai/core/generate-text/stream-text.test.ts
+++ b/packages/ai/core/generate-text/stream-text.test.ts
@@ -12,7 +12,7 @@ import {
   convertResponseStreamToArray,
   mockId,
 } from '@ai-sdk/provider-utils/test';
-import { jsonSchema } from '@ai-sdk/ui-utils';
+import { jsonSchema } from '../util';
 import assert from 'node:assert';
 import { z } from 'zod';
 import { ToolExecutionError } from '../../errors/tool-execution-error';
diff --git a/packages/ai/core/generate-text/stream-text.ts b/packages/ai/core/generate-text/stream-text.ts
index 51308ce08605..e5f2717f7f7e 100644
--- a/packages/ai/core/generate-text/stream-text.ts
+++ b/packages/ai/core/generate-text/stream-text.ts
@@ -1,6 +1,6 @@
 import { AISDKError, LanguageModelV2Source } from '@ai-sdk/provider';
 import { createIdGenerator, IDGenerator } from '@ai-sdk/provider-utils';
-import { DataStreamString, formatDataStreamPart } from '@ai-sdk/ui-utils';
+import { DataStreamString, formatDataStreamPart } from '../util';
 import { Span } from '@opentelemetry/api';
 import { ServerResponse } from 'node:http';
 import { InvalidArgumentError } from '../../errors/invalid-argument-error';
diff --git a/packages/ai/core/index.ts b/packages/ai/core/index.ts
index e553b3df5b71..cd797283e0ed 100644
--- a/packages/ai/core/index.ts
+++ b/packages/ai/core/index.ts
@@ -1,29 +1,6 @@
 // re-exports:
 export { createIdGenerator, generateId } from '@ai-sdk/provider-utils';
 export type { IDGenerator } from '@ai-sdk/provider-utils';
-export {
-  formatDataStreamPart,
-  jsonSchema,
-  parseDataStreamPart,
-  processDataStream,
-  processTextStream,
-  zodSchema,
-} from '@ai-sdk/ui-utils';
-export type {
-  Attachment,
-  ChatRequest,
-  ChatRequestOptions,
-  CreateMessage,
-  DataStreamPart,
-  DeepPartial,
-  IdGenerator,
-  JSONValue,
-  Message,
-  UIMessage,
-  RequestOptions,
-  Schema,
-  ToolInvocation,
-} from '@ai-sdk/ui-utils';
 
 // directory exports:
 export * from './data-stream';
@@ -37,6 +14,7 @@ export * from './prompt';
 export * from './registry';
 export * from './tool';
 export * from './types';
+export * from './util';
 
 // telemetry types:
 export type { TelemetrySettings } from './telemetry/telemetry-settings';
diff --git a/packages/ai/core/prompt/append-client-message.test.ts b/packages/ai/core/prompt/append-client-message.test.ts
index 699d4008dad6..c5d491869301 100644
--- a/packages/ai/core/prompt/append-client-message.test.ts
+++ b/packages/ai/core/prompt/append-client-message.test.ts
@@ -1,5 +1,5 @@
 import { appendClientMessage } from './append-client-message';
-import { Message } from '@ai-sdk/ui-utils';
+import { Message } from '../types';
 
 describe('appendClientMessage', () => {
   it('should append a new message to an empty array', () => {
diff --git a/packages/ai/core/prompt/append-client-message.ts b/packages/ai/core/prompt/append-client-message.ts
index 524da2ccb991..0ae709a5e3a8 100644
--- a/packages/ai/core/prompt/append-client-message.ts
+++ b/packages/ai/core/prompt/append-client-message.ts
@@ -1,4 +1,4 @@
-import { Message } from '@ai-sdk/ui-utils';
+import { Message } from '../types';
 
 /**
  * Appends a client message to the messages array.
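The `core/index.ts` change above replaces the long named re-export list with `export * from './util'`, so helpers such as `jsonSchema` keep their public path on the `ai` root. A sketch of the kind of call site that stays stable (the tool name and schema are illustrative assumptions):

```ts
import { jsonSchema, tool } from 'ai';

// jsonSchema (formerly re-exported from @ai-sdk/ui-utils) still types a
// raw JSON Schema for tool parameters:
const weatherTool = tool({
  description: 'Get the weather for a city',
  parameters: jsonSchema<{ city: string }>({
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
  }),
  execute: async ({ city }) => `It is sunny in ${city}.`,
});
```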
diff --git a/packages/ai/core/prompt/append-response-messages.ts b/packages/ai/core/prompt/append-response-messages.ts
index ca263687e44d..522f2d766b23 100644
--- a/packages/ai/core/prompt/append-response-messages.ts
+++ b/packages/ai/core/prompt/append-response-messages.ts
@@ -1,5 +1,4 @@
 import {
-  extractMaxToolInvocationStep,
   FileUIPart,
   Message,
   ReasoningUIPart,
@@ -7,7 +6,8 @@ import {
   TextUIPart,
   ToolInvocation,
   ToolInvocationUIPart,
-} from '@ai-sdk/ui-utils';
+} from '../types';
+import { extractMaxToolInvocationStep } from '../util';
 import { ResponseMessage } from '../generate-text/step-result';
 import { convertDataContentToBase64String } from './data-content';
 import { AISDKError } from '@ai-sdk/provider';
diff --git a/packages/ai/core/prompt/attachments-to-parts.ts b/packages/ai/core/prompt/attachments-to-parts.ts
index fa825fd39a7a..b60fd182293f 100644
--- a/packages/ai/core/prompt/attachments-to-parts.ts
+++ b/packages/ai/core/prompt/attachments-to-parts.ts
@@ -1,4 +1,4 @@
-import { Attachment } from '@ai-sdk/ui-utils';
+import { Attachment } from '../types';
 import { FilePart, ImagePart, TextPart } from './content-part';
 import {
   convertDataContentToUint8Array,
diff --git a/packages/ai/core/prompt/convert-to-core-messages.test.ts b/packages/ai/core/prompt/convert-to-core-messages.test.ts
index a37b9c3b9613..d223f601d286 100644
--- a/packages/ai/core/prompt/convert-to-core-messages.test.ts
+++ b/packages/ai/core/prompt/convert-to-core-messages.test.ts
@@ -1,4 +1,4 @@
-import { Attachment, Message } from '@ai-sdk/ui-utils';
+import { Attachment, Message } from '../types';
 import { convertToCoreMessages } from './convert-to-core-messages';
 import { tool } from '../tool/tool';
 import { z } from 'zod';
diff --git a/packages/ai/core/prompt/convert-to-core-messages.ts b/packages/ai/core/prompt/convert-to-core-messages.ts
index acec7aacfc03..98a5e5adc77f 100644
--- a/packages/ai/core/prompt/convert-to-core-messages.ts
+++ b/packages/ai/core/prompt/convert-to-core-messages.ts
@@ -4,7 +4,7 @@ import {
   ReasoningUIPart,
   TextUIPart,
   ToolInvocationUIPart,
-} from '@ai-sdk/ui-utils';
+} from '../types';
 import { ToolSet } from '../generate-text/tool-set';
 import {
   AssistantContent,
diff --git a/packages/ai/core/prompt/detect-prompt-type.test.ts b/packages/ai/core/prompt/detect-prompt-type.test.ts
index d0aca919722c..33288c1ba465 100644
--- a/packages/ai/core/prompt/detect-prompt-type.test.ts
+++ b/packages/ai/core/prompt/detect-prompt-type.test.ts
@@ -1,4 +1,4 @@
-import { Message } from '@ai-sdk/ui-utils';
+import { Message } from '../types';
 import { detectPromptType } from './detect-prompt-type';
 import type { CoreMessage } from './message';
 
diff --git a/packages/ai/core/prompt/message-conversion-error.ts b/packages/ai/core/prompt/message-conversion-error.ts
index 6472b828b35b..9c13ffb16eb2 100644
--- a/packages/ai/core/prompt/message-conversion-error.ts
+++ b/packages/ai/core/prompt/message-conversion-error.ts
@@ -1,5 +1,5 @@
 import { AISDKError } from '@ai-sdk/provider';
-import { Message } from '@ai-sdk/ui-utils';
+import { Message } from '../types';
 
 const name = 'AI_MessageConversionError';
 const marker = `vercel.ai.error.${name}`;
diff --git a/packages/ai/core/prompt/prepare-tools-and-tool-choice.ts b/packages/ai/core/prompt/prepare-tools-and-tool-choice.ts
index 200c6175f3e4..92c021778715 100644
--- a/packages/ai/core/prompt/prepare-tools-and-tool-choice.ts
+++ b/packages/ai/core/prompt/prepare-tools-and-tool-choice.ts
@@ -3,7 +3,7 @@ import {
   LanguageModelV2ProviderDefinedTool,
   LanguageModelV2ToolChoice,
 } from '@ai-sdk/provider';
-import { asSchema } from '@ai-sdk/ui-utils';
+import { asSchema } from '../util';
 import { ToolSet } from '../generate-text';
 import { ToolChoice } from '../types/language-model';
 import { isNonEmptyObject } from '../util/is-non-empty-object';
diff --git a/packages/ai/core/prompt/prompt.ts b/packages/ai/core/prompt/prompt.ts
index 5688b1609f23..51261e7e81de 100644
--- a/packages/ai/core/prompt/prompt.ts
+++ b/packages/ai/core/prompt/prompt.ts
@@ -1,4 +1,4 @@
-import { Message } from '@ai-sdk/ui-utils';
+import { Message } from '../types';
 import { CoreMessage } from './message';
 
 /**
diff --git a/packages/ai/core/prompt/standardize-prompt.ts b/packages/ai/core/prompt/standardize-prompt.ts
index 8e1c68aefe24..f914559cdbaa 100644
--- a/packages/ai/core/prompt/standardize-prompt.ts
+++ b/packages/ai/core/prompt/standardize-prompt.ts
@@ -1,6 +1,6 @@
 import { InvalidPromptError } from '@ai-sdk/provider';
 import { safeValidateTypes } from '@ai-sdk/provider-utils';
-import { Message } from '@ai-sdk/ui-utils';
+import { Message } from '../types';
 import { z } from 'zod';
 import { ToolSet } from '../generate-text/tool-set';
 import { convertToCoreMessages } from './convert-to-core-messages';
diff --git a/packages/ai/core/tool/mcp/mcp-client.ts b/packages/ai/core/tool/mcp/mcp-client.ts
index 1b860b7b55b3..28dd46b1ab97 100644
--- a/packages/ai/core/tool/mcp/mcp-client.ts
+++ b/packages/ai/core/tool/mcp/mcp-client.ts
@@ -1,5 +1,5 @@
 import { JSONSchema7 } from '@ai-sdk/provider';
-import { jsonSchema } from '@ai-sdk/ui-utils';
+import { jsonSchema } from '../../util';
 import { z, ZodType } from 'zod';
 import { MCPClientError } from '../../../errors';
 import { inferParameters, tool, Tool, ToolExecutionOptions } from '../tool';
diff --git a/packages/ai/core/tool/tool.ts b/packages/ai/core/tool/tool.ts
index 58d71ac89fab..6d9b17d4c62b 100644
--- a/packages/ai/core/tool/tool.ts
+++ b/packages/ai/core/tool/tool.ts
@@ -1,4 +1,4 @@
-import { Schema } from '@ai-sdk/ui-utils';
+import { Schema } from '../util';
 import { z } from 'zod';
 import { ToolResultContent } from '../prompt/tool-result-content';
 import { CoreMessage } from '../prompt/message';
diff --git a/packages/ui-utils/src/duplicated/usage.ts b/packages/ai/core/types/duplicated/usage.ts
similarity index 100%
rename from packages/ui-utils/src/duplicated/usage.ts
rename to packages/ai/core/types/duplicated/usage.ts
diff --git a/packages/ai/core/types/index.ts b/packages/ai/core/types/index.ts
index 1c77586ff9a3..1fca5d1bfb6a 100644
--- a/packages/ai/core/types/index.ts
+++ b/packages/ai/core/types/index.ts
@@ -21,3 +21,5 @@ export type { LanguageModelResponseMetadata } from './language-model-response-me
 export type { Provider } from './provider';
 export type { ProviderOptions, ProviderMetadata } from './provider-metadata';
 export type { EmbeddingModelUsage, LanguageModelUsage } from './usage';
+
+export * from './messages';
diff --git a/packages/ui-utils/src/types.ts b/packages/ai/core/types/messages.ts
similarity index 100%
rename from packages/ui-utils/src/types.ts
rename to packages/ai/core/types/messages.ts
diff --git a/packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap b/packages/ai/core/util/__snapshots__/process-chat-response.test.ts.snap
similarity index 100%
rename from packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap
rename to packages/ai/core/util/__snapshots__/process-chat-response.test.ts.snap
diff --git a/packages/ui-utils/src/__snapshots__/process-chat-text-response.test.ts.snap b/packages/ai/core/util/__snapshots__/process-chat-text-response.test.ts.snap
similarity index 100%
rename from packages/ui-utils/src/__snapshots__/process-chat-text-response.test.ts.snap
rename to packages/ai/core/util/__snapshots__/process-chat-text-response.test.ts.snap
diff --git a/packages/ui-utils/src/__snapshots__/zod-schema.test.ts.snap b/packages/ai/core/util/__snapshots__/zod-schema.test.ts.snap
similarity index 100%
rename from packages/ui-utils/src/__snapshots__/zod-schema.test.ts.snap
rename to packages/ai/core/util/__snapshots__/zod-schema.test.ts.snap
diff --git a/packages/ui-utils/src/call-chat-api.ts b/packages/ai/core/util/call-chat-api.ts
similarity index 99%
rename from packages/ui-utils/src/call-chat-api.ts
rename to packages/ai/core/util/call-chat-api.ts
index 80fc1636f8eb..27e95b55c150 100644
--- a/packages/ui-utils/src/call-chat-api.ts
+++ b/packages/ai/core/util/call-chat-api.ts
@@ -1,6 +1,6 @@
 import { processChatResponse } from './process-chat-response';
 import { processChatTextResponse } from './process-chat-text-response';
-import { IdGenerator, JSONValue, UIMessage, UseChatOptions } from './types';
+import { IdGenerator, JSONValue, UIMessage, UseChatOptions } from '../types';
 
 // use function to allow for mocking in tests:
 const getOriginalFetch = () => fetch;
diff --git a/packages/ui-utils/src/call-completion-api.ts b/packages/ai/core/util/call-completion-api.ts
similarity index 98%
rename from packages/ui-utils/src/call-completion-api.ts
rename to packages/ai/core/util/call-completion-api.ts
index fe0980f68ae2..2ddb06bc395f 100644
--- a/packages/ui-utils/src/call-completion-api.ts
+++ b/packages/ai/core/util/call-completion-api.ts
@@ -1,6 +1,6 @@
 import { processTextStream } from './process-text-stream';
 import { processDataStream } from './process-data-stream';
-import { JSONValue } from './types';
+import { JSONValue } from '../types';
 
 // use function to allow for mocking in tests:
 const getOriginalFetch = () => fetch;
diff --git a/packages/ui-utils/src/data-stream-parts.test.ts b/packages/ai/core/util/data-stream-parts.test.ts
similarity index 100%
rename from packages/ui-utils/src/data-stream-parts.test.ts
rename to packages/ai/core/util/data-stream-parts.test.ts
diff --git a/packages/ui-utils/src/data-stream-parts.ts b/packages/ai/core/util/data-stream-parts.ts
similarity index 99%
rename from packages/ui-utils/src/data-stream-parts.ts
rename to packages/ai/core/util/data-stream-parts.ts
index 4fe395fe84c8..0b758e1b0231 100644
--- a/packages/ui-utils/src/data-stream-parts.ts
+++ b/packages/ai/core/util/data-stream-parts.ts
@@ -3,7 +3,7 @@ import {
   LanguageModelV2Source,
 } from '@ai-sdk/provider';
 import { ToolCall, ToolResult } from '@ai-sdk/provider-utils';
-import { JSONValue } from './types';
+import { JSONValue } from '../types';
 
 export type DataStreamString =
   `${(typeof DataStreamStringPrefixes)[keyof typeof DataStreamStringPrefixes]}:${string}\n`;
diff --git a/packages/ui-utils/src/data-url.ts b/packages/ai/core/util/data-url.ts
similarity index 100%
rename from packages/ui-utils/src/data-url.ts
rename to packages/ai/core/util/data-url.ts
diff --git a/packages/ui-utils/src/deep-partial.ts b/packages/ai/core/util/deep-partial.ts
similarity index 100%
rename from packages/ui-utils/src/deep-partial.ts
rename to packages/ai/core/util/deep-partial.ts
diff --git a/packages/ui-utils/src/extract-max-tool-invocation-step.ts b/packages/ai/core/util/extract-max-tool-invocation-step.ts
similarity index 84%
rename from packages/ui-utils/src/extract-max-tool-invocation-step.ts
rename to packages/ai/core/util/extract-max-tool-invocation-step.ts
index e2e06ec8c4be..3d5c37325ffa 100644
--- a/packages/ui-utils/src/extract-max-tool-invocation-step.ts
+++ b/packages/ai/core/util/extract-max-tool-invocation-step.ts
@@ -1,4 +1,4 @@
-import { ToolInvocation } from './types';
+import { ToolInvocation } from '../types';
 
 export function extractMaxToolInvocationStep(
   toolInvocations: ToolInvocation[] | undefined,
diff --git a/packages/ui-utils/src/fill-message-parts.ts b/packages/ai/core/util/fill-message-parts.ts
similarity index 82%
rename from packages/ui-utils/src/fill-message-parts.ts
rename to packages/ai/core/util/fill-message-parts.ts
index 84e9d3b36273..b512a54dcc73 100644
--- a/packages/ui-utils/src/fill-message-parts.ts
+++ b/packages/ai/core/util/fill-message-parts.ts
@@ -1,5 +1,5 @@
 import { getMessageParts } from './get-message-parts';
-import { Message, UIMessage } from './types';
+import { Message, UIMessage } from '../types';
 
 export function fillMessageParts(messages: Message[]): UIMessage[] {
   return messages.map(message => ({
diff --git a/packages/ui-utils/src/fix-json.test.ts b/packages/ai/core/util/fix-json.test.ts
similarity index 100%
rename from packages/ui-utils/src/fix-json.test.ts
rename to packages/ai/core/util/fix-json.test.ts
diff --git a/packages/ui-utils/src/fix-json.ts b/packages/ai/core/util/fix-json.ts
similarity index 100%
rename from packages/ui-utils/src/fix-json.ts
rename to packages/ai/core/util/fix-json.ts
diff --git a/packages/ui-utils/src/get-message-parts.test.ts b/packages/ai/core/util/get-message-parts.test.ts
similarity index 100%
rename from packages/ui-utils/src/get-message-parts.test.ts
rename to packages/ai/core/util/get-message-parts.test.ts
diff --git a/packages/ui-utils/src/get-message-parts.ts b/packages/ai/core/util/get-message-parts.ts
similarity index 98%
rename from packages/ui-utils/src/get-message-parts.ts
rename to packages/ai/core/util/get-message-parts.ts
index 8ab0c6ba5af5..f17e42ecf310 100644
--- a/packages/ui-utils/src/get-message-parts.ts
+++ b/packages/ai/core/util/get-message-parts.ts
@@ -8,7 +8,7 @@ import {
   TextUIPart,
   ToolInvocationUIPart,
   UIMessage,
-} from './types';
+} from '../types';
 
 export function getMessageParts(
   message: Message | CreateMessage | UIMessage,
diff --git a/packages/ui-utils/src/index.ts b/packages/ai/core/util/index.ts
similarity index 98%
rename from packages/ui-utils/src/index.ts
rename to packages/ai/core/util/index.ts
index 308df0be4d3e..8451a3c07c2d 100644
--- a/packages/ui-utils/src/index.ts
+++ b/packages/ai/core/util/index.ts
@@ -1,5 +1,3 @@
-export * from './types';
-
 export { generateId } from '@ai-sdk/provider-utils';
 
 // Export stream data utilities for custom stream implementations,
diff --git a/packages/ui-utils/src/is-deep-equal-data.test.ts b/packages/ai/core/util/is-deep-equal-data.test.ts
similarity index 100%
rename from packages/ui-utils/src/is-deep-equal-data.test.ts
rename to packages/ai/core/util/is-deep-equal-data.test.ts
diff --git a/packages/ui-utils/src/is-deep-equal-data.ts b/packages/ai/core/util/is-deep-equal-data.ts
similarity index 100%
rename from packages/ui-utils/src/is-deep-equal-data.ts
rename to packages/ai/core/util/is-deep-equal-data.ts
diff --git a/packages/ui-utils/src/parse-partial-json.test.ts b/packages/ai/core/util/parse-partial-json.test.ts
similarity index 100%
rename from packages/ui-utils/src/parse-partial-json.test.ts
rename to packages/ai/core/util/parse-partial-json.test.ts
diff --git a/packages/ui-utils/src/parse-partial-json.ts b/packages/ai/core/util/parse-partial-json.ts
similarity index 100%
rename from packages/ui-utils/src/parse-partial-json.ts
rename to packages/ai/core/util/parse-partial-json.ts
diff --git a/packages/ui-utils/src/prepare-attachments-for-request.ts b/packages/ai/core/util/prepare-attachments-for-request.ts
similarity index 96%
rename from packages/ui-utils/src/prepare-attachments-for-request.ts
rename to packages/ai/core/util/prepare-attachments-for-request.ts
index f542b0446d6e..aa8b46eaf0b8 100644
--- a/packages/ui-utils/src/prepare-attachments-for-request.ts
+++ b/packages/ai/core/util/prepare-attachments-for-request.ts
@@ -1,4 +1,4 @@
-import { Attachment } from './types';
+import { Attachment } from '../types';
 
 export async function prepareAttachmentsForRequest(
   attachmentsFromOptions: FileList | Array<Attachment> | undefined,
diff --git a/packages/ui-utils/src/process-chat-response.test.ts b/packages/ai/core/util/process-chat-response.test.ts
similarity index 99%
rename from packages/ui-utils/src/process-chat-response.test.ts
rename to packages/ai/core/util/process-chat-response.test.ts
index f7805ee3e343..81befb7495d2 100644
--- a/packages/ui-utils/src/process-chat-response.test.ts
+++ b/packages/ai/core/util/process-chat-response.test.ts
@@ -2,9 +2,9 @@ import { LanguageModelV2FinishReason } from '@ai-sdk/provider';
 import { convertArrayToReadableStream } from '@ai-sdk/provider-utils/test';
 import { describe, expect, it, vi } from 'vitest';
 import { DataStreamString, formatDataStreamPart } from './data-stream-parts';
-import { LanguageModelUsage } from './duplicated/usage';
+import { LanguageModelUsage } from '../types/duplicated/usage';
 import { processChatResponse } from './process-chat-response';
-import { JSONValue, Message } from './types';
+import { JSONValue, Message } from '../types';
 
 function createDataProtocolStream(
   dataPartTexts: DataStreamString[],
diff --git a/packages/ui-utils/src/process-chat-response.ts b/packages/ai/core/util/process-chat-response.ts
similarity index 99%
rename from packages/ui-utils/src/process-chat-response.ts
rename to packages/ai/core/util/process-chat-response.ts
index 15c637e43709..4ff965cf2c8e 100644
--- a/packages/ui-utils/src/process-chat-response.ts
+++ b/packages/ai/core/util/process-chat-response.ts
@@ -3,7 +3,7 @@ import { generateId as generateIdFunction } from '@ai-sdk/provider-utils';
 import {
   calculateLanguageModelUsage,
   LanguageModelUsage,
-} from './duplicated/usage';
+} from '../types/duplicated/usage';
 import { parsePartialJson } from './parse-partial-json';
 import { processDataStream } from './process-data-stream';
 import type {
@@ -14,7 +14,7 @@ import type {
   ToolInvocationUIPart,
   UIMessage,
   UseChatOptions,
-} from './types';
+} from '../types';
 
 export async function processChatResponse({
   stream,
diff --git a/packages/ui-utils/src/process-chat-text-response.test.ts b/packages/ai/core/util/process-chat-text-response.test.ts
similarity index 98%
rename from packages/ui-utils/src/process-chat-text-response.test.ts
rename to packages/ai/core/util/process-chat-text-response.test.ts
index 8a13ae201a88..87eaaba627ed 100644
--- a/packages/ui-utils/src/process-chat-text-response.test.ts
+++ b/packages/ai/core/util/process-chat-text-response.test.ts
@@ -1,7 +1,7 @@
 import { convertArrayToReadableStream } from '@ai-sdk/provider-utils/test';
 import { beforeEach, describe, expect, it } from 'vitest';
 import { processChatTextResponse } from './process-chat-text-response';
-import { Message } from './types';
+import { Message } from '../types';
 
 function createTextStream(chunks: string[]): ReadableStream {
   return convertArrayToReadableStream(chunks).pipeThrough(
diff --git a/packages/ui-utils/src/process-chat-text-response.ts b/packages/ai/core/util/process-chat-text-response.ts
similarity index 95%
rename from packages/ui-utils/src/process-chat-text-response.ts
rename to packages/ai/core/util/process-chat-text-response.ts
index d673f46d8e1b..756d40f0d93c 100644
--- a/packages/ui-utils/src/process-chat-text-response.ts
+++ b/packages/ai/core/util/process-chat-text-response.ts
@@ -1,7 +1,7 @@
 import { JSONValue } from '@ai-sdk/provider';
 import { generateId as generateIdFunction } from '@ai-sdk/provider-utils';
 import { processTextStream } from './process-text-stream';
-import { TextUIPart, UIMessage, UseChatOptions } from './types';
+import { TextUIPart, UIMessage, UseChatOptions } from '../types';
 
 export async function processChatTextResponse({
   stream,
diff --git a/packages/ui-utils/src/process-data-stream.test.ts b/packages/ai/core/util/process-data-stream.test.ts
similarity index 100%
rename from packages/ui-utils/src/process-data-stream.test.ts
rename to packages/ai/core/util/process-data-stream.test.ts
diff --git a/packages/ui-utils/src/process-data-stream.ts b/packages/ai/core/util/process-data-stream.ts
similarity index 100%
rename from packages/ui-utils/src/process-data-stream.ts
rename to packages/ai/core/util/process-data-stream.ts
diff --git a/packages/ui-utils/src/process-text-stream.test.ts b/packages/ai/core/util/process-text-stream.test.ts
similarity index 100%
rename from packages/ui-utils/src/process-text-stream.test.ts
rename to packages/ai/core/util/process-text-stream.test.ts
diff --git a/packages/ui-utils/src/process-text-stream.ts b/packages/ai/core/util/process-text-stream.ts
similarity index 100%
rename from packages/ui-utils/src/process-text-stream.ts
rename to packages/ai/core/util/process-text-stream.ts
diff --git a/packages/ui-utils/src/schema.ts b/packages/ai/core/util/schema.ts
similarity index 100%
rename from packages/ui-utils/src/schema.ts
rename to packages/ai/core/util/schema.ts
diff --git a/packages/ui-utils/src/should-resubmit-messages.test.ts b/packages/ai/core/util/should-resubmit-messages.test.ts
similarity index 100%
rename from packages/ui-utils/src/should-resubmit-messages.test.ts
rename to packages/ai/core/util/should-resubmit-messages.test.ts
diff --git a/packages/ui-utils/src/should-resubmit-messages.ts b/packages/ai/core/util/should-resubmit-messages.ts
similarity index 97%
rename from packages/ui-utils/src/should-resubmit-messages.ts
rename to packages/ai/core/util/should-resubmit-messages.ts
index 03dbc1996de4..d98e7218f1ff 100644
--- a/packages/ui-utils/src/should-resubmit-messages.ts
+++ b/packages/ai/core/util/should-resubmit-messages.ts
@@ -1,5 +1,5 @@
 import { extractMaxToolInvocationStep } from './extract-max-tool-invocation-step';
-import { UIMessage } from './types';
+import { UIMessage } from '../types';
 
 export function shouldResubmitMessages({
   originalMaxToolInvocationStep,
diff --git a/packages/ui-utils/src/update-tool-call-result.ts b/packages/ai/core/util/update-tool-call-result.ts
similarity index 95%
rename from packages/ui-utils/src/update-tool-call-result.ts
rename to packages/ai/core/util/update-tool-call-result.ts
index 3421b52015ec..787565a8994c 100644
--- a/packages/ui-utils/src/update-tool-call-result.ts
+++ b/packages/ai/core/util/update-tool-call-result.ts
@@ -1,4 +1,4 @@
-import { ToolInvocationUIPart, UIMessage } from './types';
+import { ToolInvocationUIPart, UIMessage } from '../types';
 
 /**
  * Updates the result of a specific tool invocation in the last message of the given messages array.
diff --git a/packages/ui-utils/src/zod-schema.test.ts b/packages/ai/core/util/zod-schema.test.ts
similarity index 100%
rename from packages/ui-utils/src/zod-schema.test.ts
rename to packages/ai/core/util/zod-schema.test.ts
diff --git a/packages/ui-utils/src/zod-schema.ts b/packages/ai/core/util/zod-schema.ts
similarity index 100%
rename from packages/ui-utils/src/zod-schema.ts
rename to packages/ai/core/util/zod-schema.ts
diff --git a/packages/ai/package.json b/packages/ai/package.json
index ab43a193255d..84376b18f16a 100644
--- a/packages/ai/package.json
+++ b/packages/ai/package.json
@@ -40,6 +40,7 @@
     "./internal": {
       "types": "./dist/internal/index.d.ts",
       "import": "./dist/internal/index.mjs",
+      "module": "./dist/internal/index.mjs",
       "require": "./dist/internal/index.js"
     },
     "./test": {
@@ -51,16 +52,18 @@
     "./mcp-stdio": {
       "types": "./mcp-stdio/dist/index.d.ts",
       "import": "./mcp-stdio/dist/index.mjs",
+      "module": "./mcp-stdio/dist/index.mjs",
      "require": "./mcp-stdio/dist/index.js"
     }
   },
   "dependencies": {
     "@ai-sdk/provider": "2.0.0-canary.2",
     "@ai-sdk/provider-utils": "3.0.0-canary.3",
-    "@ai-sdk/ui-utils": "2.0.0-canary.3",
-    "@opentelemetry/api": "1.9.0"
+    "@opentelemetry/api": "1.9.0",
+    "zod-to-json-schema": "^3.24.1"
   },
   "devDependencies": {
+    "@types/json-schema": "7.0.15",
     "@edge-runtime/vm": "^5.0.0",
     "@types/node": "20.17.24",
     "@vercel/ai-tsconfig": "workspace:*",
diff --git a/packages/ai/streams/langchain-adapter.ts b/packages/ai/streams/langchain-adapter.ts
index c2b3777cb059..c95358e09a87 100644
--- a/packages/ai/streams/langchain-adapter.ts
+++ b/packages/ai/streams/langchain-adapter.ts
@@ -1,4 +1,4 @@
-import { formatDataStreamPart } from '@ai-sdk/ui-utils';
+import { formatDataStreamPart } from '../core';
 import { DataStreamWriter } from '../core/data-stream/data-stream-writer';
 import { mergeStreams } from '../core/util/merge-streams';
 import { prepareResponseHeaders } from '../core/util/prepare-response-headers';
diff --git a/packages/ai/streams/llamaindex-adapter.ts b/packages/ai/streams/llamaindex-adapter.ts
index 917498ac0db3..744cf3545b46 100644
--- a/packages/ai/streams/llamaindex-adapter.ts
+++ b/packages/ai/streams/llamaindex-adapter.ts
@@ -1,5 +1,5 @@
 import { convertAsyncIteratorToReadableStream } from '@ai-sdk/provider-utils';
-import { formatDataStreamPart } from '@ai-sdk/ui-utils';
+import { formatDataStreamPart } from '../core';
 import { DataStreamWriter } from '../core/data-stream/data-stream-writer';
 import { mergeStreams } from '../core/util/merge-streams';
 import { prepareResponseHeaders } from '../core/util/prepare-response-headers';
diff --git a/packages/ai/streams/stream-data.ts b/packages/ai/streams/stream-data.ts
index c3f0b4408b1d..4320c59f4205 100644
--- a/packages/ai/streams/stream-data.ts
+++ b/packages/ai/streams/stream-data.ts
@@ -1,4 +1,4 @@
-import { JSONValue, formatDataStreamPart } from '@ai-sdk/ui-utils';
+import { JSONValue, formatDataStreamPart } from '../core';
 import { HANGING_STREAM_WARNING_TIME_MS } from '../util/constants';
 
 /**
diff --git a/packages/ai/tsup.config.ts b/packages/ai/tsup.config.ts
index 73f2ee479b56..149691c97bdc 100644
--- a/packages/ai/tsup.config.ts
+++ b/packages/ai/tsup.config.ts
@@ -12,10 +12,6 @@ export default defineConfig([
   // Internal APIs
   {
     entry: ['internal/index.ts'],
-    // This bundle isn't actually used,
-    // we export the internal bundle with @internal from the root package
-    // and provide different types in package.json for the exports
-    // to save duplicating 40kb for bundle size
     outDir: 'dist/internal',
     format: ['cjs', 'esm'],
     dts: true,
diff --git a/packages/react/package.json b/packages/react/package.json
index 581915f14723..fbffbc83d1aa 100644
--- a/packages/react/package.json
+++ b/packages/react/package.json
@@ -29,8 +29,8 @@
     "CHANGELOG.md"
   ],
   "dependencies": {
+    "ai": "5.0.0-canary.4",
     "@ai-sdk/provider-utils": "3.0.0-canary.3",
-    "@ai-sdk/ui-utils": "2.0.0-canary.3",
     "swr": "^2.2.5",
     "throttleit": "2.1.0"
   },
diff --git a/packages/react/src/use-chat.ts b/packages/react/src/use-chat.ts
index 86b0720fbf86..22b7352dbce1 100644
--- a/packages/react/src/use-chat.ts
+++ b/packages/react/src/use-chat.ts
@@ -6,7 +6,7 @@ import type {
   Message,
   UIMessage,
   UseChatOptions,
-} from '@ai-sdk/ui-utils';
+} from 'ai';
 import {
   callChatApi,
   extractMaxToolInvocationStep,
@@ -17,7 +17,7 @@ import {
   prepareAttachmentsForRequest,
   shouldResubmitMessages,
   updateToolCallResult,
-} from '@ai-sdk/ui-utils';
+} from 'ai';
 import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
 import useSWR from 'swr';
 import { throttle } from './throttle';
diff --git a/packages/react/src/use-chat.ui.test.tsx b/packages/react/src/use-chat.ui.test.tsx
index 60823b960082..af90fb637d38 100644
--- a/packages/react/src/use-chat.ui.test.tsx
+++ b/packages/react/src/use-chat.ui.test.tsx
@@ -8,7 +8,7 @@ import {
   generateId,
   getTextFromDataUrl,
   Message,
-} from '@ai-sdk/ui-utils';
+} from 'ai';
 import '@testing-library/jest-dom/vitest';
 import { screen, waitFor } from '@testing-library/react';
 import userEvent from '@testing-library/user-event';
diff --git a/packages/react/src/use-completion.ts b/packages/react/src/use-completion.ts
index fedd8c314154..5746ad1b9429 100644
--- a/packages/react/src/use-completion.ts
+++ b/packages/react/src/use-completion.ts
@@ -3,7 +3,7 @@ import {
   RequestOptions,
   UseCompletionOptions,
   callCompletionApi,
-} from '@ai-sdk/ui-utils';
+} from 'ai';
 import { useCallback, useEffect, useId, useRef, useState } from 'react';
 import useSWR from 'swr';
 import { throttle } from './throttle';
diff --git a/packages/react/src/use-object.ts b/packages/react/src/use-object.ts
index e0f6c5b9c590..7ea08d6f0be3 100644
--- a/packages/react/src/use-object.ts
+++ b/packages/react/src/use-object.ts
@@ -9,7 +9,7 @@ import {
   isDeepEqualData,
   parsePartialJson,
   Schema,
-} from '@ai-sdk/ui-utils';
+} from 'ai';
 import { useCallback, useId, useRef, useState } from 'react';
 import useSWR from 'swr';
 import z from 'zod';
diff --git a/packages/react/src/util/use-stable-value.ts b/packages/react/src/util/use-stable-value.ts
index c7b8b523e14b..3c9510a9019c 100644
--- a/packages/react/src/util/use-stable-value.ts
+++ b/packages/react/src/util/use-stable-value.ts
@@ -1,4 +1,4 @@
-import { isDeepEqualData } from '@ai-sdk/ui-utils';
+import { isDeepEqualData } from 'ai';
 import { useEffect, useState } from 'react';
 
 /**
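The React hooks keep their public shape; only their internal imports now resolve from `ai` (which also becomes a regular dependency of `@ai-sdk/react`). A sketch of a structured-output consumer of the hook touched above (the `/api/object` route and the schema are assumptions):

```tsx
'use client';

import { experimental_useObject as useObject } from '@ai-sdk/react';
import { z } from 'zod';

// Hypothetical schema; a matching server route would stream the object.
const schema = z.object({ recipe: z.object({ name: z.string() }) });

export default function Page() {
  const { object, submit, isLoading } = useObject({
    api: '/api/object',
    schema,
  });

  // object is a DeepPartial of the schema type while streaming:
  return (
    <button onClick={() => submit('Lasagna')} disabled={isLoading}>
      {object?.recipe?.name ?? 'Generate recipe'}
    </button>
  );
}
```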
"@eslint/compat": "^1.2.5", diff --git a/packages/svelte/src/chat-context.svelte.ts b/packages/svelte/src/chat-context.svelte.ts index 26c8dc0083ec..917a4ab6fb40 100644 --- a/packages/svelte/src/chat-context.svelte.ts +++ b/packages/svelte/src/chat-context.svelte.ts @@ -1,4 +1,4 @@ -import type { JSONValue, UIMessage } from '@ai-sdk/ui-utils'; +import type { JSONValue, UIMessage } from 'ai'; import { createContext, KeyedStore } from './utils.svelte.js'; class ChatStore { diff --git a/packages/svelte/src/chat.svelte.test.ts b/packages/svelte/src/chat.svelte.test.ts index d2cd6e1d91d8..8f5c25ca60d5 100644 --- a/packages/svelte/src/chat.svelte.test.ts +++ b/packages/svelte/src/chat.svelte.test.ts @@ -2,7 +2,7 @@ import { createTestServer, TestResponseController, } from '@ai-sdk/provider-utils/test'; -import { formatDataStreamPart, type Message } from '@ai-sdk/ui-utils'; +import { formatDataStreamPart, type Message } from 'ai'; import { render } from '@testing-library/svelte'; import { Chat } from './chat.svelte.js'; import ChatSynchronization from './tests/chat-synchronization.svelte'; diff --git a/packages/svelte/src/chat.svelte.ts b/packages/svelte/src/chat.svelte.ts index 32c6defdd52b..181d8a93cfa6 100644 --- a/packages/svelte/src/chat.svelte.ts +++ b/packages/svelte/src/chat.svelte.ts @@ -15,7 +15,7 @@ import { getMessageParts, updateToolCallResult, isAssistantMessageWithCompletedToolCalls, -} from '@ai-sdk/ui-utils'; +} from 'ai'; import { isAbortError } from '@ai-sdk/provider-utils'; import { KeyedChatStore, diff --git a/packages/svelte/src/completion-context.svelte.ts b/packages/svelte/src/completion-context.svelte.ts index 72de109fd128..2b23893f8339 100644 --- a/packages/svelte/src/completion-context.svelte.ts +++ b/packages/svelte/src/completion-context.svelte.ts @@ -1,4 +1,4 @@ -import type { JSONValue } from '@ai-sdk/ui-utils'; +import type { JSONValue } from 'ai'; import { SvelteMap } from 'svelte/reactivity'; import { createContext, KeyedStore } from './utils.svelte.js'; diff --git a/packages/svelte/src/completion.svelte.ts b/packages/svelte/src/completion.svelte.ts index f7462562b79c..863cb2da1aa5 100644 --- a/packages/svelte/src/completion.svelte.ts +++ b/packages/svelte/src/completion.svelte.ts @@ -4,7 +4,7 @@ import { type JSONValue, type RequestOptions, callCompletionApi, -} from '@ai-sdk/ui-utils'; +} from 'ai'; import { KeyedCompletionStore, getCompletionContext, diff --git a/packages/svelte/src/structured-object-context.svelte.ts b/packages/svelte/src/structured-object-context.svelte.ts index 3a85471f28e5..94aa9d6a2359 100644 --- a/packages/svelte/src/structured-object-context.svelte.ts +++ b/packages/svelte/src/structured-object-context.svelte.ts @@ -1,4 +1,4 @@ -import type { DeepPartial } from '@ai-sdk/ui-utils'; +import type { DeepPartial } from 'ai'; import { createContext, KeyedStore } from './utils.svelte.js'; export class StructuredObjectStore { diff --git a/packages/svelte/src/structured-object.svelte.ts b/packages/svelte/src/structured-object.svelte.ts index 46e6c80a5392..9401769ec4d8 100644 --- a/packages/svelte/src/structured-object.svelte.ts +++ b/packages/svelte/src/structured-object.svelte.ts @@ -10,7 +10,7 @@ import { parsePartialJson, type DeepPartial, type Schema, -} from '@ai-sdk/ui-utils'; +} from 'ai'; import { type z } from 'zod'; import { getStructuredObjectContext, diff --git a/packages/svelte/src/tests/structured-object-synchronization.svelte b/packages/svelte/src/tests/structured-object-synchronization.svelte index cf2f96dfb41c..5c340ced075d 
diff --git a/packages/svelte/src/tests/structured-object-synchronization.svelte b/packages/svelte/src/tests/structured-object-synchronization.svelte
index cf2f96dfb41c..5c340ced075d 100644
--- a/packages/svelte/src/tests/structured-object-synchronization.svelte
+++ b/packages/svelte/src/tests/structured-object-synchronization.svelte
@@ -1,7 +1,7 @@