diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a1800228883..153eea86de4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -120,45 +120,49 @@ Flowise has 3 different modules in a single mono repository. Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables) -| Variable | Description | Type | Default | -| ---------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- | -| PORT | The HTTP port Flowise runs on | Number | 3000 | -| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | | -| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | | -| FLOWISE_USERNAME | Username to login | String | | -| FLOWISE_PASSWORD | Password to login | String | | -| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb | -| DEBUG | Print logs from components | Boolean | | -| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` | -| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` | -| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 | -| APIKEY_STORAGE_TYPE | To store api keys on a JSON file or database. 
Default is `json` | Enum String: `json`, `db` | `json` | -| APIKEY_PATH | Location where api keys are saved when `APIKEY_STORAGE_TYPE` is `json` | String | `your-path/Flowise/packages/server` | -| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | | -| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | | -| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` | -| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` | -| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false | -| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false | -| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` | -| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | | -| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean | | -| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` | -| STORAGE_TYPE | Type of storage for uploaded files. 
default is `local` | Enum String: `s3`, `local` | `local` | -| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` | -| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | | -| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | | -| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | | -| S3_STORAGE_REGION | Region for S3 bucket | String | | -| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | | -| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false | -| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | | -| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | | +| Variable | Description | Type | Default | +| ---------------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- | +| PORT | The HTTP port Flowise runs on | Number | 3000 | +| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | | +| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | | +| FLOWISE_USERNAME | Username to login | String | | +| FLOWISE_PASSWORD | Password to login | String | | +| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb | +| DEBUG | Print logs from components | Boolean | | +| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` | +| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` | +| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 | +| APIKEY_STORAGE_TYPE | To store api keys on a JSON file or database. 
Default is `json` | Enum String: `json`, `db` | `json` | +| APIKEY_PATH | Location where api keys are saved when `APIKEY_STORAGE_TYPE` is `json` | String | `your-path/Flowise/packages/server` | +| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | | +| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | | +| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` | +| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` | +| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false | +| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false | +| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` | +| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | | +| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean | | +| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` | +| STORAGE_TYPE | Type of storage for uploaded files. 
default is `local` | Enum String: `s3`, `local`, `gcs` | `local` | +| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` | +| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | | +| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | | +| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | | +| S3_STORAGE_REGION | Region for S3 bucket | String | | +| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | | +| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false | +| GOOGLE_CLOUD_STORAGE_PROJ_ID | The GCP project id for cloud storage & logging when `STORAGE_TYPE` is `gcs` | String | | +| GOOGLE_CLOUD_STORAGE_CREDENTIAL | The credential key file path when `STORAGE_TYPE` is `gcs` | String | | +| GOOGLE_CLOUD_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `gcs` | String | | +| GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS | Enable uniform bucket level access when `STORAGE_TYPE` is `gcs` | Boolean | true | +| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | | +| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | | You can also specify the env variables when using `npx`. 
For example: diff --git a/Dockerfile b/Dockerfile index dfbf58d1bd0..a824b7f8090 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,6 +12,10 @@ RUN apk add --no-cache build-base cairo-dev pango-dev # Install Chromium RUN apk add --no-cache chromium +# Install curl for container-level health checks +# Fixes: https://github.com/FlowiseAI/Flowise/issues/4126 +RUN apk add --no-cache curl + #install PNPM globaly RUN npm install -g pnpm diff --git a/README.md b/README.md index b4139b6ef1e..5f17d946efd 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ [![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) [![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) -English | [繁體中文](./i18n/README-TW.md) | [簡體中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md) +English | [繁體中文](./i18n/README-TW.md) | [简体中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)

Drag & drop UI to build your customized LLM flow

@@ -182,9 +182,9 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [ [![Deploy on Elestio](https://elest.io/images/logos/deploy-to-elestio-btn.png)](https://elest.io/open-source/flowiseai) - - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + - [Sealos](https://template.sealos.io/deploy?templateName=flowise) - [![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + [![Deploy on Sealos](https://sealos.io/Deploy-on-Sealos.svg)](https://template.sealos.io/deploy?templateName=flowise) - [RepoCloud](https://repocloud.io/details/?app_id=29) diff --git a/docker/.env.example b/docker/.env.example index bff5ef8f922..3f760e2de43 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -48,7 +48,7 @@ BLOB_STORAGE_PATH=/root/.flowise/storage # see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format # MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path -# STORAGE_TYPE=local (local | s3) +# STORAGE_TYPE=local (local | s3 | gcs) # BLOB_STORAGE_PATH=/your_storage_path/.flowise/storage # S3_STORAGE_BUCKET_NAME=flowise # S3_STORAGE_ACCESS_KEY_ID= @@ -56,6 +56,10 @@ BLOB_STORAGE_PATH=/root/.flowise/storage # S3_STORAGE_REGION=us-west-2 # S3_ENDPOINT_URL= # S3_FORCE_PATH_STYLE=false +# GOOGLE_CLOUD_STORAGE_CREDENTIAL=/the/keyfilename/path +# GOOGLE_CLOUD_STORAGE_PROJ_ID= +# GOOGLE_CLOUD_STORAGE_BUCKET_NAME= +# GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=true # SHOW_COMMUNITY_NODES=true # DISABLED_NODES=bufferMemory,chatOpenAI (comma separated list of node names to disable) @@ -86,6 +90,8 @@ BLOB_STORAGE_PATH=/root/.flowise/storage # QUEUE_NAME=flowise-queue # QUEUE_REDIS_EVENT_STREAM_MAX_LEN=100000 # WORKER_CONCURRENCY=100000 +# REMOVE_ON_AGE=86400 +# REMOVE_ON_COUNT=10000 # REDIS_URL= # REDIS_HOST=localhost # REDIS_PORT=6379 diff --git 
a/docker/Dockerfile b/docker/Dockerfile index 762e3d29641..82a55d6a2b4 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -13,7 +13,7 @@ RUN npm install -g flowise FROM node:20-alpine # Install runtime dependencies -RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev +RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev curl # Set the environment variable for Puppeteer to find Chromium ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 42b81bab29c..3806af1d999 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -38,6 +38,8 @@ services: - WORKER_CONCURRENCY=${WORKER_CONCURRENCY} - QUEUE_NAME=${QUEUE_NAME} - QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN} + - REMOVE_ON_AGE=${REMOVE_ON_AGE} + - REMOVE_ON_COUNT=${REMOVE_ON_COUNT} - REDIS_URL=${REDIS_URL} - REDIS_HOST=${REDIS_HOST} - REDIS_PORT=${REDIS_PORT} @@ -49,6 +51,12 @@ services: - REDIS_CA=${REDIS_CA} ports: - '${PORT}:${PORT}' + healthcheck: + test: ['CMD', 'curl', '-f', 'http://localhost:${PORT}/api/v1/ping'] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s volumes: - ~/.flowise:/root/.flowise entrypoint: /bin/sh -c "sleep 3; flowise start" diff --git a/docker/worker/docker-compose.yml b/docker/worker/docker-compose.yml index 88a8631d0df..818bbb35bf7 100644 --- a/docker/worker/docker-compose.yml +++ b/docker/worker/docker-compose.yml @@ -38,6 +38,8 @@ services: - WORKER_CONCURRENCY=${WORKER_CONCURRENCY} - QUEUE_NAME=${QUEUE_NAME} - QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN} + - REMOVE_ON_AGE=${REMOVE_ON_AGE} + - REMOVE_ON_COUNT=${REMOVE_ON_COUNT} - REDIS_URL=${REDIS_URL} - REDIS_HOST=${REDIS_HOST} - REDIS_PORT=${REDIS_PORT} diff --git a/i18n/README-JA.md b/i18n/README-JA.md index c30dbfb28e3..bd3b785dc10 100644 --- a/i18n/README-JA.md +++ b/i18n/README-JA.md @@ -10,7 
+10,7 @@ [![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) [![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) -[English](../README.md) | [繁體中文](./README-TW.md) | [簡體中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md) +[English](../README.md) | [繁體中文](./README-TW.md) | [简体中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md)

ドラッグ&ドロップでカスタマイズした LLM フローを構築できる UI

diff --git a/i18n/README-KR.md b/i18n/README-KR.md index f994580ad76..f11aa3c4ba2 100644 --- a/i18n/README-KR.md +++ b/i18n/README-KR.md @@ -10,7 +10,7 @@ [![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) [![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) -[English](../README.md) | [繁體中文](./README-TW.md) | [簡體中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어 +[English](../README.md) | [繁體中文](./README-TW.md) | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어

드래그 앤 드롭 UI로 맞춤형 LLM 플로우 구축하기

diff --git a/i18n/README-TW.md b/i18n/README-TW.md index 0bf81f74382..f60aa885b85 100644 --- a/i18n/README-TW.md +++ b/i18n/README-TW.md @@ -10,13 +10,13 @@ [![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) [![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) -[English](../README.md) | 繁體中文 | [簡體中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md) +[English](../README.md) | 繁體中文 | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | [한국어](./README-KR.md)

拖放 UI 以構建自定義的 LLM 流程

-## ⚡快速開始 +## ⚡ 快速開始 下載並安裝 [NodeJS](https://nodejs.org/en/download) >= 18.15.0 diff --git a/i18n/README-ZH.md b/i18n/README-ZH.md index 9fba33f9d8b..68a6f834e05 100644 --- a/i18n/README-ZH.md +++ b/i18n/README-ZH.md @@ -10,7 +10,7 @@ [![GitHub星图](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) [![GitHub分支](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) -[English](../README.md) | 簡體中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md) +[English](../README.md) | [繁體中文](./README-TW.md) | 简体中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md)

拖放界面构建定制化的LLM流程

@@ -170,9 +170,9 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package [![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai) - - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + - [Sealos](https://template.sealos.io/deploy?templateName=flowise) - [![部署到 Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + [![部署到 Sealos](https://sealos.io/Deploy-on-Sealos.svg)](https://template.sealos.io/deploy?templateName=flowise) - [RepoCloud](https://repocloud.io/details/?app_id=29) diff --git a/package.json b/package.json index 3a944454b7e..c16dab3d793 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "2.2.7-patch.1", + "version": "2.2.8", "private": true, "homepage": "https://flowiseai.com", "workspaces": [ @@ -83,7 +83,7 @@ "pnpm": ">=9" }, "resolutions": { - "@google/generative-ai": "^0.22.0", + "@google/generative-ai": "^0.24.0", "@grpc/grpc-js": "^1.10.10", "@langchain/core": "0.3.37", "@qdrant/openapi-typescript-fetch": "1.2.6", diff --git a/packages/components/credentials/JiraApi.credential.ts b/packages/components/credentials/JiraApi.credential.ts new file mode 100644 index 00000000000..6638f2e0b40 --- /dev/null +++ b/packages/components/credentials/JiraApi.credential.ts @@ -0,0 +1,33 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class JiraApi implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Jira API' + this.name = 'jiraApi' + this.version = 1.0 + this.description = + 'Refer to official guide on how to get accessToken on Github' + this.inputs = [ + { + label: 'User Name', + name: 'username', + type: 'string', + placeholder: 'username@example.com' + }, + { + label: 'Access 
Token', + name: 'accessToken', + type: 'password', + placeholder: '' + } + ] + } +} + +module.exports = { credClass: JiraApi } diff --git a/packages/components/credentials/Mem0MemoryApi.credential.ts b/packages/components/credentials/Mem0MemoryApi.credential.ts new file mode 100644 index 00000000000..dcb3010d532 --- /dev/null +++ b/packages/components/credentials/Mem0MemoryApi.credential.ts @@ -0,0 +1,27 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class Mem0MemoryApi implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Mem0 Memory API' + this.name = 'mem0MemoryApi' + this.version = 1.0 + this.description = + 'Visit Mem0 Platform to get your API credentials' + this.inputs = [ + { + label: 'API Key', + name: 'apiKey', + type: 'password', + description: 'API Key from Mem0 dashboard' + } + ] + } +} + +module.exports = { credClass: Mem0MemoryApi } diff --git a/packages/components/credentials/NvdiaNIMApi.credential.ts b/packages/components/credentials/NvdiaNIMApi.credential.ts index 3cae6961cfc..4910032df78 100644 --- a/packages/components/credentials/NvdiaNIMApi.credential.ts +++ b/packages/components/credentials/NvdiaNIMApi.credential.ts @@ -1,4 +1,4 @@ -import { INodeParams, INodeCredential } from '../src/Interface' +import { INodeCredential, INodeParams } from '../src/Interface' class NvidiaNIMApi implements INodeCredential { label: string @@ -8,12 +8,12 @@ class NvidiaNIMApi implements INodeCredential { inputs: INodeParams[] constructor() { - this.label = 'Nvdia NIM API Key' + this.label = 'NVIDIA NGC API Key' this.name = 'nvidiaNIMApi' this.version = 1.0 this.inputs = [ { - label: 'Nvidia NIM API Key', + label: 'NVIDIA NGC API Key', name: 'nvidiaNIMApiKey', type: 'password' } diff --git a/packages/components/credentials/OpikApi.credential.ts b/packages/components/credentials/OpikApi.credential.ts new file mode 100644 index 
00000000000..db5d6607709 --- /dev/null +++ b/packages/components/credentials/OpikApi.credential.ts @@ -0,0 +1,39 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class OpikApi implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Opik API' + this.name = 'opikApi' + this.version = 1.0 + this.description = + 'Refer to Opik documentation on how to configure Opik credentials' + this.inputs = [ + { + label: 'API Key', + name: 'opikApiKey', + type: 'password', + placeholder: '' + }, + { + label: 'URL', + name: 'opikUrl', + type: 'string', + placeholder: 'https://www.comet.com/opik/api' + }, + { + label: 'Workspace', + name: 'opikWorkspace', + type: 'string', + placeholder: 'default' + } + ] + } +} + +module.exports = { credClass: OpikApi } diff --git a/packages/components/credentials/TavilyApi.credential.ts b/packages/components/credentials/TavilyApi.credential.ts index 161ff4df0b0..32e1380bf73 100644 --- a/packages/components/credentials/TavilyApi.credential.ts +++ b/packages/components/credentials/TavilyApi.credential.ts @@ -10,8 +10,8 @@ class TavilyApi implements INodeCredential { constructor() { this.label = 'Tavily API' this.name = 'tavilyApi' - this.version = 1.0 - this.description = 'Tavily API is a real-time API to access Google search results' + this.version = 1.1 + this.description = 'Tavily API is a search engine designed for LLMs and AI agents' this.inputs = [ { label: 'Tavily Api Key', diff --git a/packages/components/models.json b/packages/components/models.json index 42d6e8002f3..6b8f5b5ffe9 100644 --- a/packages/components/models.json +++ b/packages/components/models.json @@ -6,87 +6,121 @@ { "label": "anthropic.claude-3-7-sonnet-20250219-v1:0", "name": "anthropic.claude-3-7-sonnet-20250219-v1:0", - "description": "(20250219-v1:0) specific version of Claude Sonnet 3.7 - hybrid reasoning model" + "description": "(20250219-v1:0) 
specific version of Claude Sonnet 3.7 - hybrid reasoning model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "anthropic.claude-3-5-haiku-20241022-v1:0", "name": "anthropic.claude-3-5-haiku-20241022-v1:0", - "description": "(20241022-v1:0) specific version of Claude Haiku 3.5 - fastest model" + "description": "(20241022-v1:0) specific version of Claude Haiku 3.5 - fastest model", + "input_cost": 8e-7, + "output_cost": 4e-6 }, { "label": "anthropic.claude-3.5-sonnet-20241022-v2:0", "name": "anthropic.claude-3-5-sonnet-20241022-v2:0", - "description": "(20241022-v2:0) specific version of Claude Sonnet 3.5 - most intelligent model" + "description": "(20241022-v2:0) specific version of Claude Sonnet 3.5 - most intelligent model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "anthropic.claude-3.5-sonnet-20240620-v1:0", "name": "anthropic.claude-3-5-sonnet-20240620-v1:0", - "description": "(20240620-v1:0) specific version of Claude Sonnet 3.5 - most intelligent model" + "description": "(20240620-v1:0) specific version of Claude Sonnet 3.5 - most intelligent model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "anthropic.claude-3-opus", "name": "anthropic.claude-3-opus-20240229-v1:0", - "description": "Powerful model for highly complex tasks, reasoning and analysis" + "description": "Powerful model for highly complex tasks, reasoning and analysis", + "input_cost": 0.000015, + "output_cost": 0.000075 }, { "label": "anthropic.claude-3-sonnet", "name": "anthropic.claude-3-sonnet-20240229-v1:0", - "description": "Balance of intelligence and speed" + "description": "Balance of intelligence and speed", + "input_cost": 0.000003, + "output_cost": 0.000015 }, { "label": "anthropic.claude-3-haiku", "name": "anthropic.claude-3-haiku-20240307-v1:0", - "description": "Fastest and most compact model for near-instant responsiveness" + "description": "Fastest and most compact model for near-instant responsiveness", + "input_cost": 
2.5e-7, + "output_cost": 1.25e-6 }, { "label": "anthropic.claude-instant-v1", "name": "anthropic.claude-instant-v1", - "description": "Text generation, conversation" + "description": "Text generation, conversation", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "anthropic.claude-v2:1", "name": "anthropic.claude-v2:1", - "description": "Text generation, conversation, complex reasoning and analysis" + "description": "Text generation, conversation, complex reasoning and analysis", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "anthropic.claude-v2", "name": "anthropic.claude-v2", - "description": "Text generation, conversation, complex reasoning and analysis" + "description": "Text generation, conversation, complex reasoning and analysis", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "meta.llama2-13b-chat-v1", "name": "meta.llama2-13b-chat-v1", - "description": "Text generation, conversation" + "description": "Text generation, conversation", + "input_cost": 0.0003, + "output_cost": 0.0006 }, { "label": "meta.llama2-70b-chat-v1", "name": "meta.llama2-70b-chat-v1", - "description": "Text generation, conversation" + "description": "Text generation, conversation", + "input_cost": 0.0003, + "output_cost": 0.0006 }, { "label": "meta.llama3-8b-instruct-v1:0", "name": "meta.llama3-8b-instruct-v1:0", - "description": "Text summarization, text classification, sentiment analysis" + "description": "Text summarization, text classification, sentiment analysis", + "input_cost": 0.0003, + "output_cost": 0.0006 }, { "label": "meta.llama3-70b-instruct-v1:0", "name": "meta.llama3-70b-instruct-v1:0", - "description": "Language modeling, dialog systems, code generation, text summarization, text classification, sentiment analysis" + "description": "Language modeling, dialog systems, code generation, text summarization, text classification, sentiment analysis", + "input_cost": 0.00195, + "output_cost": 0.00256 }, { "label": 
"mistral.mistral-7b-instruct-v0:2", "name": "mistral.mistral-7b-instruct-v0:2", - "description": "Classification, text generation, code generation" + "description": "Classification, text generation, code generation", + "input_cost": 0.002, + "output_cost": 0.006 }, { "label": "mistral.mixtral-8x7b-instruct-v0:1", "name": "mistral.mixtral-8x7b-instruct-v0:1", - "description": "Complex reasoning and analysis, text generation, code generation" + "description": "Complex reasoning and analysis, text generation, code generation", + "input_cost": 0.002, + "output_cost": 0.006 }, { "label": "mistral.mistral-large-2402-v1:0", "name": "mistral.mistral-large-2402-v1:0", - "description": "Complex reasoning and analysis, text generation, code generation, RAG, agents" + "description": "Complex reasoning and analysis, text generation, code generation, RAG, agents", + "input_cost": 0.002, + "output_cost": 0.006 } ], "regions": [ @@ -235,53 +269,83 @@ { "name": "azureChatOpenAI", "models": [ + { + "label": "gpt-4.1", + "name": "gpt-4.1", + "input_cost": 2e-6, + "output_cost": 8e-6 + }, { "label": "o3-mini", - "name": "o3-mini" + "name": "o3-mini", + "input_cost": 1.1e-6, + "output_cost": 4.4e-6 }, { "label": "o1", - "name": "o1" + "name": "o1", + "input_cost": 0.000015, + "output_cost": 0.00006 }, { "label": "o1-preview", - "name": "o1-preview" + "name": "o1-preview", + "input_cost": 0.000015, + "output_cost": 0.00006 }, { "label": "o1-mini", - "name": "o1-mini" + "name": "o1-mini", + "input_cost": 3e-6, + "output_cost": 0.000012 }, { "label": "gpt-4o-mini", - "name": "gpt-4o-mini" + "name": "gpt-4o-mini", + "input_cost": 1.5e-7, + "output_cost": 6e-7 }, { "label": "gpt-4o", - "name": "gpt-4o" + "name": "gpt-4o", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4", - "name": "gpt-4" + "name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 
0.00012 }, { "label": "gpt-35-turbo", - "name": "gpt-35-turbo" + "name": "gpt-35-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { "label": "gpt-35-turbo-16k", - "name": "gpt-35-turbo-16k" + "name": "gpt-35-turbo-16k", + "input_cost": 3e-6, + "output_cost": 4e-6 }, { "label": "gpt-4-vision-preview", - "name": "gpt-4-vision-preview" + "name": "gpt-4-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4.5-preview", - "name": "gpt-4.5-preview" + "name": "gpt-4.5-preview", + "input_cost": 0.000075, + "output_cost": 0.00015 } ] }, @@ -290,39 +354,57 @@ "models": [ { "label": "gpt-4o-mini", - "name": "gpt-4o-mini" + "name": "gpt-4o-mini", + "input_cost": 1.5e-7, + "output_cost": 6e-7 }, { "label": "gpt-4o", - "name": "gpt-4o" + "name": "gpt-4o", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4", - "name": "gpt-4" + "name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-turbo", - "name": "gpt-4-turbo" + "name": "gpt-4-turbo", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { - "label": "gpt-3.5-turbo", - "name": "gpt-3.5-turbo" + "label": "gpt-35-turbo", + "name": "gpt-35-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { - "label": "gpt-3.5-turbo-16k", - "name": "gpt-3.5-turbo-16k" + "label": "gpt-35-turbo-16k", + "name": "gpt-35-turbo-16k", + "input_cost": 5e-7, + "output_cost": 0.0000015 }, { "label": "gpt-4-vision-preview", - "name": "gpt-4-vision-preview" + "name": "gpt-4-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-1106-preview", - "name": "gpt-4-1106-preview" + "name": "gpt-4-1106-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 } ] }, @@ -332,57 +414,79 @@ { "label": "claude-3-7-sonnet-latest", "name": "claude-3-7-sonnet-latest", - "description": "Most recent snapshot version of 
Claude Sonnet 3.7 model - hybrid reasoning model" + "description": "Most recent snapshot version of Claude Sonnet 3.7 model - hybrid reasoning model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3-5-haiku-latest", "name": "claude-3-5-haiku-latest", - "description": "Most recent snapshot version of Claude Haiku 3.5 - fastest model" + "description": "Most recent snapshot version of Claude Haiku 3.5 - fastest model", + "input_cost": 8e-7, + "output_cost": 4e-6 }, { "label": "claude-3.5-sonnet-latest", "name": "claude-3-5-sonnet-latest", - "description": "Most recent snapshot version of Claude Sonnet 3.5 model - most intelligent model" + "description": "Most recent snapshot version of Claude Sonnet 3.5 model - most intelligent model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3.5-sonnet-20241022", "name": "claude-3-5-sonnet-20241022", - "description": "(20241022) specific version of Claude Sonnet 3.5 - most intelligent model" + "description": "(20241022) specific version of Claude Sonnet 3.5 - most intelligent model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3.5-sonnet-20240620", "name": "claude-3-5-sonnet-20240620", - "description": "(20240620) specific version of Claude Sonnet 3.5 - most intelligent model" + "description": "(20240620) specific version of Claude Sonnet 3.5 - most intelligent model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3-opus", "name": "claude-3-opus-20240229", - "description": "Powerful model for highly complex tasks, reasoning and analysis" + "description": "Powerful model for highly complex tasks, reasoning and analysis", + "input_cost": 0.000015, + "output_cost": 0.000075 }, { "label": "claude-3-sonnet", "name": "claude-3-sonnet-20240229", - "description": "Ideal balance of intelligence and speed for enterprise workloads" + "description": "Ideal balance of intelligence and speed for enterprise workloads", + "input_cost": 0.000003, + 
"output_cost": 0.000015 }, { "label": "claude-3-haiku", "name": "claude-3-haiku-20240307", - "description": "Fastest and most compact model, designed for near-instant responsiveness" + "description": "Fastest and most compact model, designed for near-instant responsiveness", + "input_cost": 2.5e-7, + "output_cost": 1.25e-6 }, { "label": "claude-2.0 (legacy)", "name": "claude-2.0", - "description": "Claude 2 latest major version, automatically get updates to the model as they are released" + "description": "Claude 2 latest major version, automatically get updates to the model as they are released", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "claude-2.1 (legacy)", "name": "claude-2.1", - "description": "Claude 2 latest full version" + "description": "Claude 2 latest full version", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "claude-instant-1.2 (legacy)", "name": "claude-instant-1.2", - "description": "Claude Instant latest major version, automatically get updates to the model as they are released" + "description": "Claude Instant latest major version, automatically get updates to the model as they are released", + "input_cost": 0.000008, + "output_cost": 0.000024 } ] }, @@ -392,27 +496,37 @@ { "label": "claude-3-haiku", "name": "claude-3-haiku", - "description": "Fastest and most compact model, designed for near-instant responsiveness" + "description": "Fastest and most compact model, designed for near-instant responsiveness", + "input_cost": 2.5e-7, + "output_cost": 1.25e-6 }, { "label": "claude-3-opus", "name": "claude-3-opus", - "description": "Most powerful model for highly complex tasks" + "description": "Most powerful model for highly complex tasks", + "input_cost": 0.000015, + "output_cost": 0.000075 }, { "label": "claude-3-sonnet", "name": "claude-3-sonnet", - "description": "Ideal balance of intelligence and speed for enterprise workloads" + "description": "Ideal balance of intelligence and speed for enterprise 
workloads", + "input_cost": 0.000003, + "output_cost": 0.000015 }, { "label": "claude-2.1 (legacy)", "name": "claude-2.1", - "description": "Claude 2 latest major version, automatically get updates to the model as they are released" + "description": "Claude 2 latest major version, automatically get updates to the model as they are released", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "claude-instant-1.2 (legacy)", "name": "claude-instant-1.2", - "description": "Claude Instant latest major version, automatically get updates to the model as they are released" + "description": "Claude Instant latest major version, automatically get updates to the model as they are released", + "input_cost": 0.000008, + "output_cost": 0.000024 } ] }, @@ -420,24 +534,40 @@ "name": "chatGoogleGenerativeAI", "models": [ { - "label": "gemini-2.0-flash-001", - "name": "gemini-2.0-flash-001" + "label": "gemini-2.5-pro-preview-03-25", + "name": "gemini-2.5-pro-preview-03-25", + "input_cost": 1.25e-6, + "output_cost": 0.00001 + }, + { + "label": "gemini-2.0-flash", + "name": "gemini-2.0-flash", + "input_cost": 1e-7, + "output_cost": 4e-7 }, { - "label": "gemini-2.0-flash-lite-001", - "name": "gemini-2.0-flash-lite-001" + "label": "gemini-2.0-flash-lite", + "name": "gemini-2.0-flash-lite", + "input_cost": 7.5e-8, + "output_cost": 3e-7 }, { "label": "gemini-1.5-flash", - "name": "gemini-1.5-flash" + "name": "gemini-1.5-flash", + "input_cost": 7.5e-8, + "output_cost": 3e-7 }, { "label": "gemini-1.5-flash-8b", - "name": "gemini-1.5-flash-8b" + "name": "gemini-1.5-flash-8b", + "input_cost": 3.75e-8, + "output_cost": 1.5e-7 }, { "label": "gemini-1.5-pro", - "name": "gemini-1.5-pro" + "name": "gemini-1.5-pro", + "input_cost": 1.25e-6, + "output_cost": 5e-6 } ] }, @@ -446,7 +576,9 @@ "models": [ { "label": "qwen-plus", - "name": "qwen-plus" + "name": "qwen-plus", + "input_cost": 0.0016, + "output_cost": 0.0064 } ] }, @@ -455,57 +587,81 @@ "models": [ { "label": 
"gemini-1.5-flash-002", - "name": "gemini-1.5-flash-002" + "name": "gemini-1.5-flash-002", + "input_cost": 7.5e-8, + "output_cost": 3e-7 }, { "label": "gemini-1.5-flash-001", - "name": "gemini-1.5-flash-001" + "name": "gemini-1.5-flash-001", + "input_cost": 7.5e-8, + "output_cost": 3e-7 }, { "label": "gemini-1.5-pro-002", - "name": "gemini-1.5-pro-002" + "name": "gemini-1.5-pro-002", + "input_cost": 1.25e-6, + "output_cost": 5e-6 }, { "label": "gemini-1.5-pro-001", - "name": "gemini-1.5-pro-001" + "name": "gemini-1.5-pro-001", + "input_cost": 1.25e-6, + "output_cost": 5e-6 }, { "label": "gemini-1.0-pro", - "name": "gemini-1.0-pro" + "name": "gemini-1.0-pro", + "input_cost": 1.25e-7, + "output_cost": 3.75e-7 }, { "label": "gemini-1.0-pro-vision", - "name": "gemini-1.0-pro-vision" + "name": "gemini-1.0-pro-vision", + "input_cost": 1.25e-7, + "output_cost": 3.75e-7 }, { "label": "claude-3-7-sonnet@20250219", "name": "claude-3-7-sonnet@20250219", - "description": "(20250219-v1:0) specific version of Claude Sonnet 3.7 - hybrid reasoning model" + "description": "(20250219-v1:0) specific version of Claude Sonnet 3.7 - hybrid reasoning model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3-5-haiku@20241022", "name": "claude-3-5-haiku@20241022", - "description": "(20241022-v1:0) specific version of Claude Haiku 3.5 - fastest model" + "description": "(20241022-v1:0) specific version of Claude Haiku 3.5 - fastest model", + "input_cost": 8e-7, + "output_cost": 4e-6 }, { "label": "claude-3-5-sonnet-v2@20241022", "name": "claude-3-5-sonnet-v2@20241022", - "description": "(20241022-v2:0) specific version of Claude Sonnet 3.5 - most intelligent model" + "description": "(20241022-v2:0) specific version of Claude Sonnet 3.5 - most intelligent model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3-opus@20240229", "name": "claude-3-opus@20240229", - "description": "Powerful model for highly complex tasks, reasoning and analysis" + 
"description": "Powerful model for highly complex tasks, reasoning and analysis", + "input_cost": 0.000015, + "output_cost": 0.000075 }, { "label": "claude-3-sonnet@20240229", "name": "claude-3-sonnet@20240229", - "description": "Balance of intelligence and speed" + "description": "Balance of intelligence and speed", + "input_cost": 0.000003, + "output_cost": 0.000015 }, { "label": "claude-3-haiku@20240307", "name": "claude-3-haiku@20240307", - "description": "Fastest and most compact model for near-instant responsiveness" + "description": "Fastest and most compact model for near-instant responsiveness", + "input_cost": 2.5e-7, + "output_cost": 1.25e-6 } ] }, @@ -575,6 +731,14 @@ { "label": "mixtral-8x7b-32768", "name": "mixtral-8x7b-32768" + }, + { + "label": "meta-llama/llama-4-maverick-17b-128e-instruct", + "name": "meta-llama/llama-4-maverick-17b-128e-instruct" + }, + { + "label": "meta-llama/llama-4-scout-17b-16e-instruct", + "name": "meta-llama/llama-4-scout-17b-16e-instruct" } ] }, @@ -582,12 +746,16 @@ "name": "chatCohere", "models": [ { - "label": "command-r", - "name": "command-r" + "label": "command-a", + "name": "command-a", + "input_cost": 0.0025, + "output_cost": 0.01 }, { "label": "command-r-plus", - "name": "command-r-plus" + "name": "command-r-plus", + "input_cost": 0.0025, + "output_cost": 0.01 } ] }, @@ -596,132 +764,212 @@ "models": [ { "label": "deepseek-chat", - "name": "deepseek-chat" + "name": "deepseek-chat", + "input_cost": 0.00027, + "output_cost": 0.0011 }, { "label": "deepseek-reasoner", - "name": "deepseek-reasoner" + "name": "deepseek-reasoner", + "input_cost": 0.00055, + "output_cost": 0.00219 } ] }, { "name": "chatOpenAI", "models": [ + { + "label": "gpt-4.1", + "name": "gpt-4.1", + "input_cost": 2e-6, + "output_cost": 8e-6 + }, + { + "label": "gpt-4.1-mini", + "name": "gpt-4.1-mini", + "input_cost": 4e-7, + "output_cost": 1.6e-6 + }, + { + "label": "gpt-4.1-nano", + "name": "gpt-4.1-nano", + "input_cost": 1e-7, + "output_cost": 
4e-7 + }, { "label": "gpt-4.5-preview", - "name": "gpt-4.5-preview" + "name": "gpt-4.5-preview", + "input_cost": 0.000075, + "output_cost": 0.00015 }, { "label": "gpt-4o-mini (latest)", - "name": "gpt-4o-mini" + "name": "gpt-4o-mini", + "input_cost": 1.5e-7, + "output_cost": 6e-7 }, { "label": "gpt-4o-mini-2024-07-18", - "name": "gpt-4o-mini-2024-07-18" + "name": "gpt-4o-mini-2024-07-18", + "input_cost": 1.5e-7, + "output_cost": 6e-7 }, { "label": "gpt-4o (latest)", - "name": "gpt-4o" + "name": "gpt-4o", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4o-2024-11-20", - "name": "gpt-4o-2024-11-20" + "name": "gpt-4o-2024-11-20", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4o-2024-08-06", - "name": "gpt-4o-2024-08-06" + "name": "gpt-4o-2024-08-06", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4o-2024-05-13", - "name": "gpt-4o-2024-05-13" + "name": "gpt-4o-2024-05-13", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "o3-mini (latest)", - "name": "o3-mini" + "name": "o3-mini", + "input_cost": 1.1e-6, + "output_cost": 4.4e-6 }, { "label": "o3-mini-2025-01-31", - "name": "o3-mini-2025-01-31" + "name": "o3-mini-2025-01-31", + "input_cost": 1.1e-6, + "output_cost": 4.4e-6 }, { "label": "o1-preview (latest)", - "name": "o1-preview" + "name": "o1-preview", + "input_cost": 0.000015, + "output_cost": 0.00006 }, { "label": "o1-preview-2024-09-12", - "name": "o1-preview-2024-09-12" + "name": "o1-preview-2024-09-12", + "input_cost": 0.000015, + "output_cost": 0.00006 }, { "label": "o1-mini (latest)", - "name": "o1-mini" + "name": "o1-mini", + "input_cost": 3e-6, + "output_cost": 0.000012 }, { "label": "o1-mini-2024-09-12", - "name": "o1-mini-2024-09-12" + "name": "o1-mini-2024-09-12", + "input_cost": 3e-6, + "output_cost": 0.000012 }, { "label": "gpt-4 (latest)", - "name": "gpt-4" + "name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-turbo (latest)", - "name": 
"gpt-4-turbo" + "name": "gpt-4-turbo", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-turbo-preview", - "name": "gpt-4-turbo-preview" + "name": "gpt-4-turbo-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-0125-preview", - "name": "gpt-4-0125-preview" + "name": "gpt-4-0125-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-1106-preview", - "name": "gpt-4-1106-preview" + "name": "gpt-4-1106-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-1106-vision-preview", - "name": "gpt-4-1106-vision-preview" + "name": "gpt-4-1106-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-vision-preview", - "name": "gpt-4-vision-preview" + "name": "gpt-4-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-0613", - "name": "gpt-4-0613" + "name": "gpt-4-0613", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { "label": "gpt-4-32k-0613", - "name": "gpt-4-32k-0613" + "name": "gpt-4-32k-0613", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { "label": "gpt-3.5-turbo", - "name": "gpt-3.5-turbo" + "name": "gpt-3.5-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { "label": "gpt-3.5-turbo-0125", - "name": "gpt-3.5-turbo-0125" + "name": "gpt-3.5-turbo-0125", + "input_cost": 5e-7, + "output_cost": 0.0000015 }, { "label": "gpt-3.5-turbo-1106", - "name": "gpt-3.5-turbo-1106" + "name": "gpt-3.5-turbo-1106", + "input_cost": 0.000001, + "output_cost": 0.000002 }, { "label": "gpt-3.5-turbo-0613", - "name": "gpt-3.5-turbo-0613" + "name": "gpt-3.5-turbo-0613", + "input_cost": 0.0000015, + "output_cost": 0.000002 }, { "label": "gpt-3.5-turbo-16k", - "name": "gpt-3.5-turbo-16k" + "name": "gpt-3.5-turbo-16k", + "input_cost": 5e-7, + "output_cost": 0.0000015 }, { "label": 
"gpt-3.5-turbo-16k-0613", - "name": "gpt-3.5-turbo-16k-0613" + "name": "gpt-3.5-turbo-16k-0613", + "input_cost": 0.000003, + "output_cost": 0.000004 } ] }, @@ -730,63 +978,93 @@ "models": [ { "label": "gpt-4o", - "name": "gpt-4o" + "name": "gpt-4o", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4", - "name": "gpt-4" + "name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-turbo", - "name": "gpt-4-turbo" + "name": "gpt-4-turbo", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-turbo-preview", - "name": "gpt-4-turbo-preview" + "name": "gpt-4-turbo-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-0125-preview", - "name": "gpt-4-0125-preview" + "name": "gpt-4-0125-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-1106-preview", - "name": "gpt-4-1106-preview" + "name": "gpt-4-1106-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-vision-preview", - "name": "gpt-4-vision-preview" + "name": "gpt-4-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-0613", - "name": "gpt-4-0613" + "name": "gpt-4-0613", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { "label": "gpt-4-32k-0613", - "name": "gpt-4-32k-0613" + "name": "gpt-4-32k-0613", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { "label": "gpt-3.5-turbo", - "name": "gpt-3.5-turbo" + "name": "gpt-3.5-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { "label": "gpt-3.5-turbo-1106", - "name": "gpt-3.5-turbo-1106" + "name": "gpt-3.5-turbo-1106", + "input_cost": 0.000001, + "output_cost": 0.000002 }, { "label": "gpt-3.5-turbo-0613", - "name": "gpt-3.5-turbo-0613" + "name": "gpt-3.5-turbo-0613", + "input_cost": 0.0000015, + "output_cost": 0.000002 }, { "label": "gpt-3.5-turbo-16k", - 
"name": "gpt-3.5-turbo-16k" + "name": "gpt-3.5-turbo-16k", + "input_cost": 5e-7, + "output_cost": 0.0000015 }, { "label": "gpt-3.5-turbo-16k-0613", - "name": "gpt-3.5-turbo-16k-0613" + "name": "gpt-3.5-turbo-16k-0613", + "input_cost": 0.000003, + "output_cost": 0.000004 } ] }, @@ -795,63 +1073,93 @@ "models": [ { "label": "open-mistral-nemo", - "name": "open-mistral-nemo" + "name": "open-mistral-nemo", + "input_cost": 0.00015, + "output_cost": 0.00015 }, { "label": "open-mistral-7b", - "name": "open-mistral-7b" + "name": "open-mistral-7b", + "input_cost": 0.00025, + "output_cost": 0.00025 }, { "label": "mistral-tiny-2312", - "name": "mistral-tiny-2312" + "name": "mistral-tiny-2312", + "input_cost": 0.0007, + "output_cost": 0.0007 }, { "label": "mistral-tiny", - "name": "mistral-tiny" + "name": "mistral-tiny", + "input_cost": 0.0007, + "output_cost": 0.0007 }, { "label": "open-mixtral-8x7b", - "name": "open-mixtral-8x7b" + "name": "open-mixtral-8x7b", + "input_cost": 0.0007, + "output_cost": 0.0007 }, { "label": "open-mixtral-8x22b", - "name": "open-mixtral-8x22b" + "name": "open-mixtral-8x22b", + "input_cost": 0.002, + "output_cost": 0.006 }, { "label": "mistral-small-2312", - "name": "mistral-small-2312" + "name": "mistral-small-2312", + "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-small", - "name": "mistral-small" + "name": "mistral-small", + "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-small-2402", - "name": "mistral-small-2402" + "name": "mistral-small-2402", + "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-small-latest", - "name": "mistral-small-latest" + "name": "mistral-small-latest", + "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-medium-latest", - "name": "mistral-medium-latest" + "name": "mistral-medium-latest", + "input_cost": 0.001, + "output_cost": 0.003 }, { "label": "mistral-medium-2312", - "name": "mistral-medium-2312" + "name": "mistral-medium-2312", + 
"input_cost": 0.001, + "output_cost": 0.003 }, { "label": "mistral-medium", - "name": "mistral-medium" + "name": "mistral-medium", + "input_cost": 0.001, + "output_cost": 0.003 }, { "label": "mistral-large-latest", - "name": "mistral-large-latest" + "name": "mistral-large-latest", + "input_cost": 0.002, + "output_cost": 0.006 }, { "label": "mistral-large-2402", - "name": "mistral-large-2402" + "name": "mistral-large-2402", + "input_cost": 0.002, + "output_cost": 0.006 } ] }, @@ -860,15 +1168,21 @@ "models": [ { "label": "mistral-tiny", - "name": "mistral-tiny" + "name": "mistral-tiny", + "input_cost": 0.0007, + "output_cost": 0.0007 }, { "label": "mistral-small", - "name": "mistral-small" + "name": "mistral-small", + "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-medium", - "name": "mistral-medium" + "name": "mistral-medium", + "input_cost": 0.001, + "output_cost": 0.003 } ] } @@ -887,27 +1201,39 @@ }, { "label": "cohere.command-text-v14", - "name": "cohere.command-text-v14" + "name": "cohere.command-text-v14", + "input_cost": 0.0015, + "output_cost": 0.002 }, { "label": "cohere.command-light-text-v14", - "name": "cohere.command-light-text-v14" + "name": "cohere.command-light-text-v14", + "input_cost": 0.0003, + "output_cost": 0.0006 }, { "label": "ai21.j2-grande-instruct", - "name": "ai21.j2-grande-instruct" + "name": "ai21.j2-grande-instruct", + "input_cost": 0.0005, + "output_cost": 0.0007 }, { "label": "ai21.j2-jumbo-instruct", - "name": "ai21.j2-jumbo-instruct" + "name": "ai21.j2-jumbo-instruct", + "input_cost": 0.0005, + "output_cost": 0.0007 }, { "label": "ai21.j2-mid", - "name": "ai21.j2-mid" + "name": "ai21.j2-mid", + "input_cost": 0.0125, + "output_cost": 0.0125 }, { "label": "ai21.j2-ultra", - "name": "ai21.j2-ultra" + "name": "ai21.j2-ultra", + "input_cost": 0.0188, + "output_cost": 0.0188 } ], "regions": [ @@ -1058,59 +1384,76 @@ "models": [ { "label": "text-davinci-003", - "name": "text-davinci-003" + "name": "text-davinci-003", 
+ "total_cost": 0.00002 }, { "label": "ada", - "name": "ada" + "name": "ada", + "total_cost": 0.00004 }, { "label": "text-ada-001", - "name": "text-ada-001" + "name": "text-ada-001", + "total_cost": 0.00004 }, { "label": "babbage", - "name": "babbage" + "name": "babbage", + "total_cost": 0.00005 }, { "label": "text-babbage-001", - "name": "text-babbage-001" + "name": "text-babbage-001", + "total_cost": 0.00005 }, { "label": "curie", - "name": "curie" + "name": "curie", + "total_cost": 0.00002 }, { "label": "text-curie-001", - "name": "text-curie-001" + "name": "text-curie-001", + "total_cost": 0.00002 }, { "label": "davinci", - "name": "davinci" + "name": "davinci", + "total_cost": 0.00002 }, { "label": "text-davinci-001", - "name": "text-davinci-001" + "name": "text-davinci-001", + "total_cost": 0.00002 }, { "label": "text-davinci-002", - "name": "text-davinci-002" + "name": "text-davinci-002", + "total_cost": 0.00002 }, { "label": "text-davinci-fine-tune-002", - "name": "text-davinci-fine-tune-002" + "name": "text-davinci-fine-tune-002", + "total_cost": 0.00002 }, { "label": "gpt-35-turbo", - "name": "gpt-35-turbo" + "name": "gpt-35-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { "label": "gpt-4", - "name": "gpt-4" + "name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 0.00012 } ] }, @@ -1148,27 +1491,39 @@ "models": [ { "label": "text-bison", - "name": "text-bison" + "name": "text-bison", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": "code-bison", - "name": "code-bison" + "name": "code-bison", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": "code-gecko", - "name": "code-gecko" + "name": "code-gecko", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": "text-bison-32k", - "name": "text-bison-32k" + "name": "text-bison-32k", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": 
"code-bison-32k", - "name": "code-bison-32k" + "name": "code-bison-32k", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": "code-gecko-32k", - "name": "code-gecko-32k" + "name": "code-gecko-32k", + "input_cost": 2.5e-7, + "output_cost": 5e-7 } ] }, @@ -1177,15 +1532,21 @@ "models": [ { "label": "gpt-3.5-turbo-instruct", - "name": "gpt-3.5-turbo-instruct" + "name": "gpt-3.5-turbo-instruct", + "input_cost": 0.0000015, + "output_cost": 0.000002 }, { "label": "babbage-002", - "name": "babbage-002" + "name": "babbage-002", + "input_cost": 4e-7, + "output_cost": 0.0000016 }, { "label": "davinci-002", - "name": "davinci-002" + "name": "davinci-002", + "input_cost": 0.000006, + "output_cost": 0.000012 } ] } @@ -1328,6 +1689,10 @@ { "label": "text-embedding-004", "name": "text-embedding-004" + }, + { + "label": "gemini-embedding-exp-03-07", + "name": "gemini-embedding-exp-03-07" } ] }, diff --git a/packages/components/nodes/agents/ToolAgent/ToolAgent.ts b/packages/components/nodes/agents/ToolAgent/ToolAgent.ts index c2cf38958df..4dce1a46052 100644 --- a/packages/components/nodes/agents/ToolAgent/ToolAgent.ts +++ b/packages/components/nodes/agents/ToolAgent/ToolAgent.ts @@ -24,7 +24,7 @@ import { IUsedTool, IVisionChatModal } from '../../../src/Interface' -import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { ConsoleCallbackHandler, CustomChainHandler, CustomStreamingHandler, additionalCallbacks } from '../../../src/handler' import { AgentExecutor, ToolCallingAgentOutputParser } from '../../../src/agents' import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation' import { formatResponse } from '../../outputparsers/OutputParserHelpers' @@ -101,6 +101,15 @@ class ToolAgent_Agents implements INode { type: 'number', optional: true, additionalParams: true + }, + { + label: 'Enable Detailed Streaming', + name: 'enableDetailedStreaming', + type: 'boolean', + default: false, + 
description: 'Stream detailed intermediate steps during agent execution', + optional: true, + additionalParams: true } ] this.sessionId = fields?.sessionId @@ -113,6 +122,7 @@ class ToolAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory const moderations = nodeData.inputs?.inputModeration as Moderation[] + const enableDetailedStreaming = nodeData.inputs?.enableDetailedStreaming as boolean const shouldStreamResponse = options.shouldStreamResponse const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer @@ -136,6 +146,13 @@ class ToolAgent_Agents implements INode { const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) + // Add custom streaming handler if detailed streaming is enabled + let customStreamingHandler = null + + if (enableDetailedStreaming && shouldStreamResponse) { + customStreamingHandler = new CustomStreamingHandler(sseStreamer, chatId) + } + let res: ChainValues = {} let sourceDocuments: ICommonObject[] = [] let usedTools: IUsedTool[] = [] @@ -143,7 +160,14 @@ class ToolAgent_Agents implements INode { if (shouldStreamResponse) { const handler = new CustomChainHandler(sseStreamer, chatId) - res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] }) + const allCallbacks = [loggerHandler, handler, ...callbacks] + + // Add detailed streaming handler if enabled + if (enableDetailedStreaming && customStreamingHandler) { + allCallbacks.push(customStreamingHandler) + } + + res = await executor.invoke({ input }, { callbacks: allCallbacks }) if (res.sourceDocuments) { if (sseStreamer) { sseStreamer.streamSourceDocumentsEvent(chatId, flatten(res.sourceDocuments)) @@ -174,7 +198,14 @@ class ToolAgent_Agents implements INode { } } } else { - res = await executor.invoke({ input }, { callbacks: 
[loggerHandler, ...callbacks] }) + const allCallbacks = [loggerHandler, ...callbacks] + + // Add detailed streaming handler if enabled + if (enableDetailedStreaming && customStreamingHandler) { + allCallbacks.push(customStreamingHandler) + } + + res = await executor.invoke({ input }, { callbacks: allCallbacks }) if (res.sourceDocuments) { sourceDocuments = res.sourceDocuments } diff --git a/packages/components/nodes/analytic/Opik/Opik.ts b/packages/components/nodes/analytic/Opik/Opik.ts new file mode 100644 index 00000000000..c620bdcc1de --- /dev/null +++ b/packages/components/nodes/analytic/Opik/Opik.ts @@ -0,0 +1,33 @@ +import { INode, INodeParams } from '../../../src/Interface' + +class Opik_Analytic implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs?: INodeParams[] + credential: INodeParams + + constructor() { + this.label = 'Opik' + this.name = 'opik' + this.version = 1.0 + this.type = 'Opik' + this.icon = 'opik.png' + this.category = 'Analytic' + this.baseClasses = [this.type] + this.inputs = [] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['opikApi'] + } + } +} + +module.exports = { nodeClass: Opik_Analytic } diff --git a/packages/components/nodes/analytic/Opik/opik.png b/packages/components/nodes/analytic/Opik/opik.png new file mode 100644 index 00000000000..20de0c39d47 Binary files /dev/null and b/packages/components/nodes/analytic/Opik/opik.png differ diff --git a/packages/components/nodes/cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager.ts b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager.ts new file mode 100644 index 00000000000..dee5160644b --- /dev/null +++ b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager.ts @@ -0,0 +1,44 @@ +import type { CachedContentBase, 
CachedContent, Content } from '@google/generative-ai' +import { GoogleAICacheManager as GoogleAICacheManagerBase } from '@google/generative-ai/server' +import hash from 'object-hash' + +type CacheContentOptions = Omit & { contents?: Content[] } + +export class GoogleAICacheManager extends GoogleAICacheManagerBase { + private ttlSeconds: number + private cachedContents: Map = new Map() + + setTtlSeconds(ttlSeconds: number) { + this.ttlSeconds = ttlSeconds + } + + async lookup(options: CacheContentOptions): Promise { + const { model, tools, contents } = options + if (!contents?.length) { + return undefined + } + const hashKey = hash({ + model, + tools, + contents + }) + if (this.cachedContents.has(hashKey)) { + return this.cachedContents.get(hashKey) + } + const { cachedContents } = await this.list() + const cachedContent = (cachedContents ?? []).find((cache) => cache.displayName === hashKey) + if (cachedContent) { + this.cachedContents.set(hashKey, cachedContent) + return cachedContent + } + const res = await this.create({ + ...(options as CachedContentBase), + displayName: hashKey, + ttlSeconds: this.ttlSeconds + }) + this.cachedContents.set(hashKey, res) + return res + } +} + +export default GoogleAICacheManager diff --git a/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGemini.svg b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGemini.svg new file mode 100644 index 00000000000..53b497fa1a0 --- /dev/null +++ b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGemini.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGenerativeAIContextCache.ts b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGenerativeAIContextCache.ts new file mode 100644 index 00000000000..9e6d283176d --- /dev/null +++ 
b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGenerativeAIContextCache.ts @@ -0,0 +1,53 @@ +import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src' +import FlowiseGoogleAICacheManager from './FlowiseGoogleAICacheManager' + +class GoogleGenerativeAIContextCache implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs: INodeParams[] + credential: INodeParams + + constructor() { + this.label = 'Google GenAI Context Cache' + this.name = 'googleGenerativeAIContextCache' + this.version = 1.0 + this.type = 'GoogleAICacheManager' + this.description = 'Large context cache for Google Gemini large language models' + this.icon = 'GoogleGemini.svg' + this.category = 'Cache' + this.baseClasses = [this.type, ...getBaseClasses(FlowiseGoogleAICacheManager)] + this.inputs = [ + { + label: 'TTL', + name: 'ttl', + type: 'number', + default: 60 * 60 * 24 * 30 + } + ] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['googleGenerativeAI'], + optional: false, + description: 'Google Generative AI credential.' + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const ttl = nodeData.inputs?.ttl as number + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const apiKey = getCredentialParam('googleGenerativeAPIKey', credentialData, nodeData) + const manager = new FlowiseGoogleAICacheManager(apiKey) + manager.setTtlSeconds(ttl) + return manager + } +} + +module.exports = { nodeClass: GoogleGenerativeAIContextCache } diff --git a/packages/components/nodes/chatmodels/AWSBedrock/FlowiseAWSChatBedrock.ts b/packages/components/nodes/chatmodels/AWSBedrock/FlowiseAWSChatBedrock.ts index 4dec41fd1f7..8a2d2cda340 100644 --- a/packages/components/nodes/chatmodels/AWSBedrock/FlowiseAWSChatBedrock.ts +++ b/packages/components/nodes/chatmodels/AWSBedrock/FlowiseAWSChatBedrock.ts @@ -27,7 +27,7 @@ export class BedrockChat extends LCBedrockChat implements IVisionChatModal { } setVisionModel(): void { - if (!this.model.startsWith('claude-3')) { + if (!this.model.includes('claude-3')) { this.model = DEFAULT_IMAGE_MODEL this.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : DEFAULT_IMAGE_MAX_TOKEN } diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts index 3b13ab271aa..93fbad9945f 100644 --- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts +++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts @@ -5,6 +5,7 @@ import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { getModels, MODEL_TYPE } from '../../../src/modelLoader' import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from './FlowiseChatGoogleGenerativeAI' +import type FlowiseGoogleAICacheManager from '../../cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager' class GoogleGenerativeAI_ChatModels implements INode { label: string @@ -42,6 +43,12 @@ class 
GoogleGenerativeAI_ChatModels implements INode { type: 'BaseCache', optional: true }, + { + label: 'Context Cache', + name: 'contextCache', + type: 'GoogleAICacheManager', + optional: true + }, { label: 'Model Name', name: 'modelName', @@ -156,6 +163,14 @@ class GoogleGenerativeAI_ChatModels implements INode { optional: true, additionalParams: true }, + { + label: 'Base URL', + name: 'baseUrl', + type: 'string', + description: 'Base URL for the API. Leave empty to use the default.', + optional: true, + additionalParams: true + }, { label: 'Allow Image Uploads', name: 'allowImageUploads', @@ -188,7 +203,9 @@ class GoogleGenerativeAI_ChatModels implements INode { const harmCategory = nodeData.inputs?.harmCategory as string const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string const cache = nodeData.inputs?.cache as BaseCache + const contextCache = nodeData.inputs?.contextCache as FlowiseGoogleAICacheManager const streaming = nodeData.inputs?.streaming as boolean + const baseUrl = nodeData.inputs?.baseUrl as string | undefined const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean @@ -203,6 +220,7 @@ class GoogleGenerativeAI_ChatModels implements INode { if (topK) obj.topK = parseFloat(topK) if (cache) obj.cache = cache if (temperature) obj.temperature = parseFloat(temperature) + if (baseUrl) obj.baseUrl = baseUrl // Safety Settings let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory) @@ -225,6 +243,7 @@ class GoogleGenerativeAI_ChatModels implements INode { const model = new ChatGoogleGenerativeAI(nodeData.id, obj) model.setMultiModalOption(multiModalOption) + if (contextCache) model.setContextCache(contextCache) return model } diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI.ts index c26bf5a274f..4824810ebde 100644 --- 
a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI.ts +++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI.ts @@ -25,6 +25,7 @@ import { StructuredToolInterface } from '@langchain/core/tools' import { isStructuredTool } from '@langchain/core/utils/function_calling' import { zodToJsonSchema } from 'zod-to-json-schema' import { BaseLanguageModelCallOptions } from '@langchain/core/language_models/base' +import type FlowiseGoogleAICacheManager from '../../cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager' const DEFAULT_IMAGE_MAX_TOKEN = 8192 const DEFAULT_IMAGE_MODEL = 'gemini-1.5-flash-latest' @@ -80,12 +81,16 @@ class LangchainChatGoogleGenerativeAI apiKey?: string + baseUrl?: string + streaming = false streamUsage = true private client: GenerativeModel + private contextCache?: FlowiseGoogleAICacheManager + get _isMultimodalModel() { return this.modelName.includes('vision') || this.modelName.startsWith('gemini-1.5') } @@ -147,20 +152,33 @@ class LangchainChatGoogleGenerativeAI this.getClient() } - getClient(tools?: Tool[]) { - this.client = new GenerativeAI(this.apiKey ?? '').getGenerativeModel({ - model: this.modelName, - tools, - safetySettings: this.safetySettings as SafetySetting[], - generationConfig: { - candidateCount: 1, - stopSequences: this.stopSequences, - maxOutputTokens: this.maxOutputTokens, - temperature: this.temperature, - topP: this.topP, - topK: this.topK + async getClient(prompt?: Content[], tools?: Tool[]) { + this.client = new GenerativeAI(this.apiKey ?? 
'').getGenerativeModel( + { + model: this.modelName, + tools, + safetySettings: this.safetySettings as SafetySetting[], + generationConfig: { + candidateCount: 1, + stopSequences: this.stopSequences, + maxOutputTokens: this.maxOutputTokens, + temperature: this.temperature, + topP: this.topP, + topK: this.topK + } + }, + { + baseUrl: this.baseUrl } - }) + ) + if (this.contextCache) { + const cachedContent = await this.contextCache.lookup({ + contents: prompt ? [{ ...prompt[0], parts: prompt[0].parts.slice(0, 1) }] : [], + model: this.modelName, + tools + }) + this.client.cachedContent = cachedContent as any + } } _combineLLMOutput() { @@ -209,6 +227,16 @@ class LangchainChatGoogleGenerativeAI } } + setContextCache(contextCache: FlowiseGoogleAICacheManager): void { + this.contextCache = contextCache + } + + async getNumTokens(prompt: BaseMessage[]) { + const contents = convertBaseMessagesToContent(prompt, this._isMultimodalModel) + const { totalTokens } = await this.client.countTokens({ contents }) + return totalTokens + } + async _generateNonStreaming( prompt: Content[], options: this['ParsedCallOptions'], @@ -220,9 +248,9 @@ class LangchainChatGoogleGenerativeAI this.convertFunctionResponse(prompt) if (tools.length > 0) { - this.getClient(tools as Tool[]) + await this.getClient(prompt, tools as Tool[]) } else { - this.getClient() + await this.getClient(prompt) } const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => { let output @@ -290,9 +318,9 @@ class LangchainChatGoogleGenerativeAI const tools = options.tools ?? 
[] if (tools.length > 0) { - this.getClient(tools as Tool[]) + await this.getClient(prompt, tools as Tool[]) } else { - this.getClient() + await this.getClient(prompt) } const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => { @@ -394,24 +422,18 @@ function getMessageAuthor(message: BaseMessage) { } function convertAuthorToRole(author: string) { - switch (author) { - /** - * Note: Gemini currently is not supporting system messages - * we will convert them to human messages and merge with following - * */ + switch (author.toLowerCase()) { case 'ai': - case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? type; + case 'assistant': + case 'model': return 'model' - case 'system': - case 'human': - return 'user' case 'function': case 'tool': return 'function' + case 'system': + case 'human': default: - // Instead of throwing, we return model (Needed for Multi Agent) - // throw new Error(`Unknown / unsupported author: ${author}`) - return 'model' + return 'user' } } @@ -499,17 +521,29 @@ function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: b function checkIfEmptyContentAndSameRole(contents: Content[]) { let prevRole = '' - const removedContents: Content[] = [] + const validContents: Content[] = [] + for (const content of contents) { - const role = content.role - if (content.parts.length && content.parts[0].text === '' && role === prevRole) { - removedContents.push(content) + // Skip only if completely empty + if (!content.parts || !content.parts.length) { + continue + } + + // Ensure role is always either 'user' or 'model' + content.role = content.role === 'model' ? 
'model' : 'user' + + // Handle consecutive messages + if (content.role === prevRole && validContents.length > 0) { + // Merge with previous content if same role + validContents[validContents.length - 1].parts.push(...content.parts) + continue } - prevRole = role + validContents.push(content) + prevRole = content.role } - return contents.filter((content) => !removedContents.includes(content)) + return validContents } function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) { @@ -547,7 +581,7 @@ function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel } } let actualRole = role - if (actualRole === 'function') { + if (actualRole === 'function' || actualRole === 'tool') { // GenerativeAI API will throw an error if the role is not "user" or "model." actualRole = 'user' } @@ -649,13 +683,39 @@ function zodToGeminiParameters(zodObj: any) { const jsonSchema: any = zodToJsonSchema(zodObj) // eslint-disable-next-line unused-imports/no-unused-vars const { $schema, additionalProperties, ...rest } = jsonSchema + + // Ensure all properties have type specified if (rest.properties) { Object.keys(rest.properties).forEach((key) => { - if (rest.properties[key].enum?.length) { - rest.properties[key] = { type: 'string', format: 'enum', enum: rest.properties[key].enum } + const prop = rest.properties[key] + + // Handle enum types + if (prop.enum?.length) { + rest.properties[key] = { + type: 'string', + format: 'enum', + enum: prop.enum + } + } + // Handle missing type + else if (!prop.type && !prop.oneOf && !prop.anyOf && !prop.allOf) { + // Infer type from other properties + if (prop.minimum !== undefined || prop.maximum !== undefined) { + prop.type = 'number' + } else if (prop.format === 'date-time') { + prop.type = 'string' + } else if (prop.items) { + prop.type = 'array' + } else if (prop.properties) { + prop.type = 'object' + } else { + // Default to string if type can't be inferred + prop.type = 'string' + } } }) } + return 
rest } diff --git a/packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts b/packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts index 86fdebfbf31..b4636ad3d93 100644 --- a/packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts +++ b/packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts @@ -1,5 +1,5 @@ -import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai' import { BaseCache } from '@langchain/core/caches' +import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' @@ -16,13 +16,13 @@ class ChatNvdiaNIM_ChatModels implements INode { inputs: INodeParams[] constructor() { - this.label = 'Chat Nvidia NIM' - this.name = 'chatNvidiaNIM' - this.version = 1.0 - this.type = 'ChatNvidiaNIM' + this.label = 'Chat NVIDIA NIM' + this.name = 'Chat NVIDIA NIM' + this.version = 1.1 + this.type = 'Chat NVIDIA NIM' this.icon = 'nvdia.svg' this.category = 'Chat Models' - this.description = 'Wrapper around Nvdia NIM Inference API' + this.description = 'Wrapper around NVIDIA NIM Inference API' this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)] this.credential = { label: 'Connect Credential', @@ -153,7 +153,7 @@ class ChatNvdiaNIM_ChatModels implements INode { try { parsedBaseOptions = typeof baseOptions === 'object' ? 
baseOptions : JSON.parse(baseOptions) } catch (exception) { - throw new Error("Invalid JSON in the ChatNvidiaNIM's baseOptions: " + exception) + throw new Error("Invalid JSON in the Chat NVIDIA NIM's baseOptions: " + exception) } } diff --git a/packages/components/nodes/documentloaders/Github/Github.ts b/packages/components/nodes/documentloaders/Github/Github.ts index 116b7f6fd7d..3edef63f342 100644 --- a/packages/components/nodes/documentloaders/Github/Github.ts +++ b/packages/components/nodes/documentloaders/Github/Github.ts @@ -61,6 +61,24 @@ class Github_DocumentLoaders implements INode { optional: true, additionalParams: true }, + { + label: 'Github Base URL', + name: 'githubBaseUrl', + type: 'string', + placeholder: `https://git.example.com`, + description: 'Custom Github Base Url (e.g. Enterprise)', + optional: true, + additionalParams: true + }, + { + label: 'Github Instance API', + name: 'githubInstanceApi', + type: 'string', + placeholder: `https://api.github.com`, + description: 'Custom Github API Url (e.g. Enterprise)', + optional: true, + additionalParams: true + }, { label: 'Ignore Paths', name: 'ignorePath', @@ -134,6 +152,8 @@ class Github_DocumentLoaders implements INode { const ignorePath = nodeData.inputs?.ignorePath as string const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string const output = nodeData.outputs?.output as string + const githubInstanceApi = nodeData.inputs?.githubInstanceApi as string + const githubBaseUrl = nodeData.inputs?.githubBaseUrl as string let omitMetadataKeys: string[] = [] if (_omitMetadataKeys) { @@ -153,6 +173,12 @@ class Github_DocumentLoaders implements INode { if (maxConcurrency) githubOptions.maxConcurrency = parseInt(maxConcurrency, 10) if (maxRetries) githubOptions.maxRetries = parseInt(maxRetries, 10) if (ignorePath) githubOptions.ignorePaths = JSON.parse(ignorePath) + if (githubInstanceApi) { + githubOptions.apiUrl = githubInstanceApi.endsWith('/') ? 
githubInstanceApi.slice(0, -1) : githubInstanceApi + } + if (githubBaseUrl) { + githubOptions.baseUrl = githubBaseUrl.endsWith('/') ? githubBaseUrl.slice(0, -1) : githubBaseUrl + } const loader = new GithubRepoLoader(repoLink, githubOptions) diff --git a/packages/components/nodes/documentloaders/Jira/Jira.ts b/packages/components/nodes/documentloaders/Jira/Jira.ts new file mode 100644 index 00000000000..9c03282effd --- /dev/null +++ b/packages/components/nodes/documentloaders/Jira/Jira.ts @@ -0,0 +1,194 @@ +import { omit } from 'lodash' +import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface' +import { TextSplitter } from 'langchain/text_splitter' +import { JiraProjectLoaderParams, JiraProjectLoader } from '@langchain/community/document_loaders/web/jira' +import { getCredentialData, getCredentialParam, handleEscapeCharacters, INodeOutputsValue } from '../../../src' + +class Jira_DocumentLoaders implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Jira' + this.name = 'jira' + this.version = 1.0 + this.type = 'Document' + this.icon = 'jira.svg' + this.category = 'Document Loaders' + this.description = `Load issues from Jira` + this.baseClasses = [this.type] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + description: 'Jira API Credential', + credentialNames: ['jiraApi'] + } + this.inputs = [ + { + label: 'Host', + name: 'host', + type: 'string', + placeholder: 'https://jira.example.com' + }, + { + label: 'Project Key', + name: 'projectKey', + type: 'string', + default: 'main' + }, + { + label: 'Limit per request', + name: 'limitPerRequest', + type: 'number', + step: 1, + optional: true, + placeholder: '100' + }, + { + label: 'Created after', + name: 
'createdAfter', + type: 'string', + optional: true, + placeholder: '2024-01-01' + }, + { + label: 'Text Splitter', + name: 'textSplitter', + type: 'TextSplitter', + optional: true + }, + { + label: 'Additional Metadata', + name: 'metadata', + type: 'json', + description: 'Additional metadata to be added to the extracted documents', + optional: true, + additionalParams: true + }, + { + label: 'Omit Metadata Keys', + name: 'omitMetadataKeys', + type: 'string', + rows: 4, + description: + 'Each document loader comes with a default set of metadata keys that are extracted from the document. You can use this field to omit some of the default metadata keys. The value should be a list of keys, seperated by comma. Use * to omit all metadata keys execept the ones you specify in the Additional Metadata field', + placeholder: 'key1, key2, key3.nestedKey1', + optional: true, + additionalParams: true + } + ] + this.outputs = [ + { + label: 'Document', + name: 'document', + description: 'Array of document objects containing metadata and pageContent', + baseClasses: [...this.baseClasses, 'json'] + }, + { + label: 'Text', + name: 'text', + description: 'Concatenated string from pageContent of documents', + baseClasses: ['string', 'json'] + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const host = nodeData.inputs?.host as string + const projectKey = nodeData.inputs?.projectKey as string + const limitPerRequest = nodeData.inputs?.limitPerRequest as string + const createdAfter = nodeData.inputs?.createdAfter as string + const textSplitter = nodeData.inputs?.textSplitter as TextSplitter + const metadata = nodeData.inputs?.metadata + const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string + const output = nodeData.outputs?.output as string + + let omitMetadataKeys: string[] = [] + if (_omitMetadataKeys) { + omitMetadataKeys = _omitMetadataKeys.split(',').map((key) => key.trim()) + } + + const credentialData = await 
getCredentialData(nodeData.credential ?? '', options) + const username = getCredentialParam('username', credentialData, nodeData) + const accessToken = getCredentialParam('accessToken', credentialData, nodeData) + + const jiraOptions: JiraProjectLoaderParams = { + projectKey, + host, + username, + accessToken + } + + if (limitPerRequest) { + jiraOptions.limitPerRequest = parseInt(limitPerRequest) + } + + if (createdAfter) { + jiraOptions.createdAfter = new Date(createdAfter) + } + + const loader = new JiraProjectLoader(jiraOptions) + let docs: IDocument[] = [] + + if (textSplitter) { + docs = await loader.load() + docs = await textSplitter.splitDocuments(docs) + } else { + docs = await loader.load() + } + + if (metadata) { + const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata) + docs = docs.map((doc) => ({ + ...doc, + metadata: + _omitMetadataKeys === '*' + ? { + ...parsedMetadata + } + : omit( + { + ...doc.metadata, + ...parsedMetadata + }, + omitMetadataKeys + ) + })) + } else { + docs = docs.map((doc) => ({ + ...doc, + metadata: + _omitMetadataKeys === '*' + ? 
{} + : omit( + { + ...doc.metadata + }, + omitMetadataKeys + ) + })) + } + + if (output === 'document') { + return docs + } else { + let finaltext = '' + for (const doc of docs) { + finaltext += `${doc.pageContent}\n` + } + return handleEscapeCharacters(finaltext, false) + } + } +} + +module.exports = { nodeClass: Jira_DocumentLoaders } diff --git a/packages/components/nodes/documentloaders/Jira/jira.svg b/packages/components/nodes/documentloaders/Jira/jira.svg new file mode 100644 index 00000000000..cb2f2dc3a19 --- /dev/null +++ b/packages/components/nodes/documentloaders/Jira/jira.svg @@ -0,0 +1,2 @@ + + \ No newline at end of file diff --git a/packages/components/nodes/memory/Mem0/Mem0.ts b/packages/components/nodes/memory/Mem0/Mem0.ts new file mode 100644 index 00000000000..ba7960163dc --- /dev/null +++ b/packages/components/nodes/memory/Mem0/Mem0.ts @@ -0,0 +1,375 @@ +import { Mem0Memory as BaseMem0Memory, Mem0MemoryInput, ClientOptions } from '@mem0/community' +import { MemoryOptions, SearchOptions } from 'mem0ai' +import { BaseMessage } from '@langchain/core/messages' +import { InputValues, MemoryVariables, OutputValues } from '@langchain/core/memory' +import { ICommonObject, IDatabaseEntity } from '../../../src' +import { IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam, mapChatMessageToBaseMessage } from '../../../src/utils' +import { DataSource } from 'typeorm' +import { v4 as uuidv4 } from 'uuid' + +interface BufferMemoryExtendedInput { + sessionId: string + appDataSource: DataSource + databaseEntities: IDatabaseEntity + chatflowid: string +} + +class Mem0_Memory implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Mem0' + this.name = 'mem0' + 
this.version = 1.1 + this.type = 'Mem0' + this.icon = 'mem0.svg' + this.category = 'Memory' + this.description = 'Stores and manages chat memory using Mem0 service' + this.baseClasses = [this.type, ...getBaseClasses(BaseMem0Memory)] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + optional: false, + description: 'Configure API Key for Mem0 service', + credentialNames: ['mem0MemoryApi'] + } + this.inputs = [ + { + label: 'User ID', + name: 'user_id', + type: 'string', + description: 'Unique identifier for the user. Required only if "Use Flowise Chat ID" is OFF.', + default: 'flowise-default-user', + optional: true + }, + // Added toggle to use Flowise chat ID + { + label: 'Use Flowise Chat ID', + name: 'useFlowiseChatId', + type: 'boolean', + description: 'Use the Flowise internal Chat ID as the Mem0 User ID, overriding the "User ID" field above.', + default: false, + optional: true + }, + { + label: 'Search Only', + name: 'searchOnly', + type: 'boolean', + description: 'Search only mode', + default: false, + optional: true, + additionalParams: true + }, + { + label: 'Run ID', + name: 'run_id', + type: 'string', + description: 'Unique identifier for the run session', + default: '', + optional: true, + additionalParams: true + }, + { + label: 'Agent ID', + name: 'agent_id', + type: 'string', + description: 'Identifier for the agent', + default: '', + optional: true, + additionalParams: true + }, + { + label: 'App ID', + name: 'app_id', + type: 'string', + description: 'Identifier for the application', + default: '', + optional: true, + additionalParams: true + }, + { + label: 'Project ID', + name: 'project_id', + type: 'string', + description: 'Identifier for the project', + default: '', + optional: true, + additionalParams: true + }, + { + label: 'Organization ID', + name: 'org_id', + type: 'string', + description: 'Identifier for the organization', + default: '', + optional: true, + additionalParams: true + }, + { + 
label: 'Memory Key', + name: 'memoryKey', + type: 'string', + default: 'history', + optional: true, + additionalParams: true + }, + { + label: 'Input Key', + name: 'inputKey', + type: 'string', + default: 'input', + optional: true, + additionalParams: true + }, + { + label: 'Output Key', + name: 'outputKey', + type: 'string', + default: 'text', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + return await initializeMem0(nodeData, options) + } +} + +const initializeMem0 = async (nodeData: INodeData, options: ICommonObject): Promise => { + const initialUserId = nodeData.inputs?.user_id as string + const useFlowiseChatId = nodeData.inputs?.useFlowiseChatId as boolean + + if (!useFlowiseChatId && !initialUserId) { + throw new Error('User ID field cannot be empty when "Use Flowise Chat ID" is OFF.') + } + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const apiKey = getCredentialParam('apiKey', credentialData, nodeData) + + const mem0Options: ClientOptions = { + apiKey: apiKey, + host: nodeData.inputs?.host as string, + organizationId: nodeData.inputs?.org_id as string, + projectId: nodeData.inputs?.project_id as string + } + + const memOptionsUserId = initialUserId + + const constructorSessionId = initialUserId || (useFlowiseChatId ? 
'flowise-chat-id-placeholder' : '') + + const memoryOptions: MemoryOptions & SearchOptions = { + user_id: memOptionsUserId, + run_id: (nodeData.inputs?.run_id as string) || undefined, + agent_id: (nodeData.inputs?.agent_id as string) || undefined, + app_id: (nodeData.inputs?.app_id as string) || undefined, + project_id: (nodeData.inputs?.project_id as string) || undefined, + org_id: (nodeData.inputs?.org_id as string) || undefined, + api_version: (nodeData.inputs?.api_version as string) || undefined, + enable_graph: (nodeData.inputs?.enable_graph as boolean) || false, + metadata: (nodeData.inputs?.metadata as Record) || {}, + filters: (nodeData.inputs?.filters as Record) || {} + } + + const obj: Mem0MemoryInput & Mem0MemoryExtendedInput & BufferMemoryExtendedInput & { searchOnly: boolean; useFlowiseChatId: boolean } = + { + apiKey: apiKey, + humanPrefix: nodeData.inputs?.humanPrefix as string, + aiPrefix: nodeData.inputs?.aiPrefix as string, + inputKey: nodeData.inputs?.inputKey as string, + sessionId: constructorSessionId, + mem0Options: mem0Options, + memoryOptions: memoryOptions, + separateMessages: false, + returnMessages: false, + appDataSource: options.appDataSource as DataSource, + databaseEntities: options.databaseEntities as IDatabaseEntity, + chatflowid: options.chatflowid as string, + searchOnly: (nodeData.inputs?.searchOnly as boolean) || false, + useFlowiseChatId: useFlowiseChatId + } + + return new Mem0MemoryExtended(obj) +} + +interface Mem0MemoryExtendedInput extends Mem0MemoryInput { + memoryOptions?: MemoryOptions | SearchOptions + useFlowiseChatId: boolean +} + +class Mem0MemoryExtended extends BaseMem0Memory implements MemoryMethods { + initialUserId: string + userId: string + memoryKey: string + inputKey: string + appDataSource: DataSource + databaseEntities: IDatabaseEntity + chatflowid: string + searchOnly: boolean + useFlowiseChatId: boolean + + constructor( + fields: Mem0MemoryInput & Mem0MemoryExtendedInput & BufferMemoryExtendedInput & { 
searchOnly: boolean; useFlowiseChatId: boolean } + ) { + super(fields) + this.initialUserId = fields.memoryOptions?.user_id ?? '' + this.userId = this.initialUserId + this.memoryKey = 'history' + this.inputKey = fields.inputKey ?? 'input' + this.appDataSource = fields.appDataSource + this.databaseEntities = fields.databaseEntities + this.chatflowid = fields.chatflowid + this.searchOnly = fields.searchOnly + this.useFlowiseChatId = fields.useFlowiseChatId + } + + // Selects Mem0 user_id based on toggle state (Flowise chat ID or input field) + private getEffectiveUserId(overrideUserId?: string): string { + let effectiveUserId: string | undefined + + if (this.useFlowiseChatId) { + if (overrideUserId) { + effectiveUserId = overrideUserId + } else { + throw new Error('Mem0: "Use Flowise Chat ID" is ON, but no runtime chat ID (overrideUserId) was provided.') + } + } else { + // If toggle is OFF, ALWAYS use the ID from the input field. + effectiveUserId = this.initialUserId + } + + // This check is now primarily for the case where the toggle is OFF and the initialUserId was somehow empty (should be caught by init validation). + if (!effectiveUserId) { + throw new Error('Mem0: Could not determine a valid User ID for the operation. 
Check User ID input field.') + } + return effectiveUserId + } + + async loadMemoryVariables(values: InputValues, overrideUserId = ''): Promise { + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + this.userId = effectiveUserId + if (this.memoryOptions) { + this.memoryOptions.user_id = effectiveUserId + } + return super.loadMemoryVariables(values) + } + + async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideUserId = ''): Promise { + if (this.searchOnly) { + return + } + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + this.userId = effectiveUserId + if (this.memoryOptions) { + this.memoryOptions.user_id = effectiveUserId + } + return super.saveContext(inputValues, outputValues) + } + + async clear(overrideUserId = ''): Promise { + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + this.userId = effectiveUserId + if (this.memoryOptions) { + this.memoryOptions.user_id = effectiveUserId + } + return super.clear() + } + + async getChatMessages( + overrideUserId = '', + returnBaseMessages = false, + prependMessages?: IMessage[] + ): Promise { + const flowiseSessionId = overrideUserId + if (!flowiseSessionId) { + console.warn('Mem0: getChatMessages called without overrideUserId (Flowise Session ID). 
Cannot fetch DB messages.') + return [] + } + + let chatMessage = await this.appDataSource.getRepository(this.databaseEntities['ChatMessage']).find({ + where: { + sessionId: flowiseSessionId, + chatflowid: this.chatflowid + }, + order: { + createdDate: 'DESC' + }, + take: 10 + }) + chatMessage = chatMessage.reverse() + + let returnIMessages: IMessage[] = chatMessage.map((m) => ({ + message: m.content as string, + type: m.role as MessageType + })) + + if (prependMessages?.length) { + returnIMessages.unshift(...prependMessages) + // Reverted to original simpler unshift + chatMessage.unshift(...(prependMessages as any)) // Cast as any + } + + if (returnBaseMessages) { + const memoryVariables = await this.loadMemoryVariables({}, overrideUserId) + const mem0History = memoryVariables[this.memoryKey] + + if (mem0History && typeof mem0History === 'string') { + const systemMessage = { + role: 'apiMessage' as MessageType, + content: mem0History, + id: uuidv4() + } + // Ensure Mem0 history message also conforms structurally if mapChatMessageToBaseMessage is strict + chatMessage.unshift(systemMessage as any) // Cast needed if mixing structures + } else if (mem0History) { + console.warn('Mem0 history is not a string, cannot prepend directly.') + } + + return await mapChatMessageToBaseMessage(chatMessage) + } + + return returnIMessages + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideUserId = ''): Promise { + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input && output) { + const inputValues = { [this.inputKey ?? 
'input']: input.text } + const outputValues = { output: output.text } + await this.saveContext(inputValues, outputValues, effectiveUserId) + } else { + console.warn('Mem0: Could not find both input and output messages to save context.') + } + } + + async clearChatMessages(overrideUserId = ''): Promise { + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + await this.clear(effectiveUserId) + + const flowiseSessionId = overrideUserId + if (flowiseSessionId) { + await this.appDataSource + .getRepository(this.databaseEntities['ChatMessage']) + .delete({ sessionId: flowiseSessionId, chatflowid: this.chatflowid }) + } else { + console.warn('Mem0: clearChatMessages called without overrideUserId (Flowise Session ID). Cannot clear DB messages.') + } + } +} + +module.exports = { nodeClass: Mem0_Memory } diff --git a/packages/components/nodes/memory/Mem0/mem0.svg b/packages/components/nodes/memory/Mem0/mem0.svg new file mode 100644 index 00000000000..42a7d6d9083 --- /dev/null +++ b/packages/components/nodes/memory/Mem0/mem0.svg @@ -0,0 +1,3 @@ + + + diff --git a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts index 44832466ec8..df70c49497a 100644 --- a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts +++ b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts @@ -155,7 +155,10 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { if (input) { const newInputMessage = new HumanMessage(input.text) - const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + const messageToAdd = [newInputMessage].map((msg) => ({ + ...msg.toDict(), + timestamp: new Date() // Add timestamp to the message + })) await collection.updateOne( { sessionId: id }, { @@ -167,7 +170,10 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { if (output) { const newOutputMessage = new AIMessage(output.text) - const messageToAdd = 
[newOutputMessage].map((msg) => msg.toDict()) + const messageToAdd = [newOutputMessage].map((msg) => ({ + ...msg.toDict(), + timestamp: new Date() // Add timestamp to the message + })) await collection.updateOne( { sessionId: id }, { diff --git a/packages/components/nodes/sequentialagents/LLMNode/LLMNode.ts b/packages/components/nodes/sequentialagents/LLMNode/LLMNode.ts index 182f1a41bf4..371a8986ff0 100644 --- a/packages/components/nodes/sequentialagents/LLMNode/LLMNode.ts +++ b/packages/components/nodes/sequentialagents/LLMNode/LLMNode.ts @@ -1,4 +1,4 @@ -import { flatten, uniq } from 'lodash' +import { difference, flatten, uniq } from 'lodash' import { DataSource } from 'typeorm' import { z } from 'zod' import { RunnableSequence, RunnablePassthrough, RunnableConfig } from '@langchain/core/runnables' @@ -430,8 +430,15 @@ class LLMNode_SeqAgents implements INode { const abortControllerSignal = options.signal as AbortController const llmNodeInputVariables = uniq([...getInputVariables(systemPrompt), ...getInputVariables(humanPrompt)]) - if (!llmNodeInputVariables.every((element) => Object.keys(llmNodeInputVariablesValues).includes(element))) { - throw new Error('LLM Node input variables values are not provided!') + const missingInputVars = difference(llmNodeInputVariables, Object.keys(llmNodeInputVariablesValues)).join(' ') + const allVariablesSatisfied = missingInputVars.length === 0 + if (!allVariablesSatisfied) { + const nodeInputVars = llmNodeInputVariables.join(' ') + const providedInputVars = Object.keys(llmNodeInputVariablesValues).join(' ') + + throw new Error( + `LLM Node input variables values are not provided! Required: ${nodeInputVars}, Provided: ${providedInputVars}. 
Missing: ${missingInputVars}` + ) } const workerNode = async (state: ISeqAgentsState, config: RunnableConfig) => { diff --git a/packages/components/nodes/tools/FreeWebScraper/CheerioWebScraper.ts b/packages/components/nodes/tools/FreeWebScraper/CheerioWebScraper.ts new file mode 100644 index 00000000000..b9d2f448734 --- /dev/null +++ b/packages/components/nodes/tools/FreeWebScraper/CheerioWebScraper.ts @@ -0,0 +1,434 @@ +import { INode, INodeParams, INodeData, ICommonObject } from '../../../src/Interface' +import { getBaseClasses } from '../../../src/utils' +import { Tool } from '@langchain/core/tools' +import fetch from 'node-fetch' +import * as cheerio from 'cheerio' +import { URL } from 'url' +import { xmlScrape } from '../../../src/utils' + +interface ScrapedPageData { + url: string + title: string + description: string + body_text: string + error?: string +} + +class WebScraperRecursiveTool extends Tool { + name = 'cheerio_web_scraper' + description = `Scrapes web pages recursively or via default sitemap. Extracts title, description, and paragraph text. Input should be a single URL string. Returns a JSON string array of scraped page data objects.` + + private maxDepth: number + private maxPages: number | null + private timeoutMs: number + private useSitemap: boolean + private visitedUrls: Set + private scrapedPagesCount: number + + constructor(maxDepth: number = 1, maxPages: number | null = 10, timeoutMs: number = 60000, useSitemap: boolean = false) { + super() + + this.maxDepth = Math.max(1, maxDepth) + this.maxPages = maxPages !== null && maxPages > 0 ? maxPages : null + this.timeoutMs = timeoutMs > 0 ? 
timeoutMs : 60000 + this.useSitemap = useSitemap + this.visitedUrls = new Set() + this.scrapedPagesCount = 0 + + let desc = '' + if (this.useSitemap) { + desc = `Scrapes URLs listed in the detected default sitemap (/sitemap.xml)` + if (this.maxPages !== null) { + desc += ` up to ${this.maxPages} pages` + } + desc += `, with a ${ + this.timeoutMs / 1000 + }-second timeout per page. Falls back to Recursive Link Following if sitemap is not found or empty.` + } else { + desc = `Recursively scrapes web pages starting from a given URL` + if (this.maxDepth > 0) { + desc += ` up to ${this.maxDepth} level(s) deep` + } + if (this.maxPages !== null) { + desc += ` or until ${this.maxPages} pages are scraped` + } + desc += `, with a ${this.timeoutMs / 1000}-second timeout per page, whichever comes first.` + } + desc += ` Extracts title, description, and paragraph text. Input should be a single URL string. Returns a JSON string array of scraped page data.` + this.description = desc + } + + private async scrapeSingleUrl(url: string): Promise & { foundLinks: string[] }> { + try { + const response = await fetch(url, { timeout: this.timeoutMs, redirect: 'follow', follow: 5 }) + if (!response.ok) { + const errorText = await response.text() + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `HTTP Error: ${response.status} ${response.statusText}. 
${errorText}` + } + } + const contentType = response.headers.get('content-type') + + if (contentType === null) { + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `Skipped content due to missing Content-Type header` + } + } + + if (!contentType.includes('text/html') && url !== this.visitedUrls.values().next().value) { + if (!contentType.includes('text/xml') && !contentType.includes('application/xml')) { + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `Skipped non-HTML/XML content (Content-Type: ${contentType})` + } + } + + if (!contentType.includes('text/html')) { + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `Skipped non-HTML content (Content-Type: ${contentType})` + } + } + } + + const html = await response.text() + const $ = cheerio.load(html) + const title = $('title').first().text() || 'No title found' + let description = + $('meta[name="description"]').attr('content') || + $('meta[property="og:description"]').attr('content') || + $('meta[name="twitter:description"]').attr('content') || + 'No description found' + const paragraphs: string[] = [] + $('p').each((_i, elem) => { + const paragraphText = $(elem).text() + if (paragraphText) { + paragraphs.push(paragraphText.trim()) + } + }) + const body_text = paragraphs.join(' ').replace(/\s\s+/g, ' ').trim() + const foundLinks: string[] = [] + + $('a').each((_i, elem) => { + const href = $(elem).attr('href') + if (href) { + try { + const absoluteUrl = new URL(href, url).toString() + if (absoluteUrl.startsWith('http') && !absoluteUrl.includes('#')) { + foundLinks.push(absoluteUrl) + } + } catch (e) { + // Ignore invalid URLs + } + } + }) + + return { + title: title.trim(), + description: description.trim(), + body_text: body_text, + foundLinks: [...new Set(foundLinks)] + } + } catch (error: any) { + if (error.type === 'request-timeout') { + return { + title: '', + description: '', + body_text: '', + 
foundLinks: [], + error: `Scraping Error: Request Timeout after ${this.timeoutMs}ms` + } + } + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `Scraping Error: ${error?.message || 'Unknown error'}` + } + } + } + + private async scrapeRecursive(url: string, currentDepth: number): Promise { + if (this.maxPages !== null && this.scrapedPagesCount >= this.maxPages) { + return [] + } + if (currentDepth > this.maxDepth) { + return [] + } + if (this.visitedUrls.has(url)) { + return [] + } + try { + new URL(url) + if (!url.startsWith('http')) throw new Error('Invalid protocol') + } catch (e) { + if (this.maxPages !== null) { + this.scrapedPagesCount++ + } + return [{ url, title: '', description: '', body_text: '', error: `Invalid URL format or protocol` }] + } + this.visitedUrls.add(url) + if (this.maxPages !== null) { + this.scrapedPagesCount++ + } + + const { foundLinks, ...scrapedContent } = await this.scrapeSingleUrl(url) + const currentPageData: ScrapedPageData = { url, ...scrapedContent } + let results: ScrapedPageData[] = [currentPageData] + + if (!currentPageData.error && currentDepth < this.maxDepth && (this.maxPages === null || this.scrapedPagesCount < this.maxPages)) { + const recursivePromises: Promise[] = [] + for (const link of foundLinks) { + if (this.maxPages !== null && this.scrapedPagesCount >= this.maxPages) { + break + } + if (!this.visitedUrls.has(link)) { + recursivePromises.push(this.scrapeRecursive(link, currentDepth + 1)) + } + } + if (recursivePromises.length > 0) { + const nestedResults = await Promise.all(recursivePromises) + results = results.concat(...nestedResults) + } + } else if (currentPageData.error) { + // Do nothing if there was an error scraping the current page + } + return results + } + + private async scrapeUrlsFromList(urlList: string[]): Promise { + const results: ScrapedPageData[] = [] + const scrapePromises: Promise[] = [] + + for (const url of urlList) { + if (this.maxPages !== null && 
this.scrapedPagesCount >= this.maxPages) { + break + } + if (this.visitedUrls.has(url)) { + continue + } + + this.visitedUrls.add(url) + this.scrapedPagesCount++ + + const promise = (async () => { + const { foundLinks: _ignoreLinks, ...scrapedContent } = await this.scrapeSingleUrl(url) + results.push({ url, ...scrapedContent }) + })() + scrapePromises.push(promise) + } + + await Promise.all(scrapePromises) + + return results.slice(0, this.maxPages ?? results.length) + } + + async _call(initialInput: string): Promise { + this.visitedUrls = new Set() + this.scrapedPagesCount = 0 + let performedFallback = false + let sitemapAttempted = false + + if (!initialInput || typeof initialInput !== 'string') { + return JSON.stringify({ error: 'Input must be a single URL string.' }) + } + + try { + let allScrapedData: ScrapedPageData[] = [] + let urlsFromSitemap: string[] = [] + + if (this.useSitemap) { + sitemapAttempted = true + let sitemapUrlToFetch: string | undefined = undefined + + try { + const baseUrl = new URL(initialInput) + sitemapUrlToFetch = new URL('/sitemap.xml', baseUrl.origin).toString() + } catch (e) { + return JSON.stringify({ error: 'Invalid initial URL provided for sitemap detection.' }) + } + + if (!sitemapUrlToFetch) { + return JSON.stringify({ error: 'Could not determine sitemap URL.' }) + } + + try { + const limitParam = this.maxPages === null ? 
Infinity : this.maxPages + urlsFromSitemap = await xmlScrape(sitemapUrlToFetch, limitParam) + } catch (sitemapError) { + urlsFromSitemap = [] + } + + if (urlsFromSitemap.length > 0) { + allScrapedData = await this.scrapeUrlsFromList(urlsFromSitemap) + } else { + performedFallback = true + } + } + + if (!sitemapAttempted || performedFallback) { + allScrapedData = await this.scrapeRecursive(initialInput, 1) + } + + if (this.maxPages !== null && this.scrapedPagesCount >= this.maxPages) { + // Log or indicate that the max page limit was reached during scraping + } + + if (performedFallback) { + const warningResult = { + warning: 'Sitemap not found or empty; fell back to recursive scraping.', + scrapedData: allScrapedData + } + return JSON.stringify(warningResult) + } else { + return JSON.stringify(allScrapedData) + } + } catch (error: any) { + return JSON.stringify({ error: `Failed scrape operation: ${error?.message || 'Unknown error'}` }) + } + } +} + +class WebScraperRecursive_Tools implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs: INodeParams[] + + constructor() { + this.label = 'Cheerio Web Scraper' + this.name = 'cheerioWebScraper' + this.version = 1.0 + this.type = 'Tool' + this.icon = 'cheerioWebScraper.svg' + this.category = 'Tools' + this.description = 'Scrapes web pages recursively by following links OR by fetching URLs from the default sitemap.' + this.baseClasses = [this.type, ...getBaseClasses(WebScraperRecursiveTool)] + this.inputs = [ + { + label: 'Scraping Mode', + name: 'scrapeMode', + type: 'options', + options: [ + { label: 'Recursive Link Following', name: 'recursive' }, + { label: 'Sitemap', name: 'sitemap' } + ], + default: 'recursive', + description: + "Select discovery method: 'Recursive' follows links found on pages (uses Max Depth). 
'Sitemap' tries sitemap.xml first, but falls back to 'Recursive' if the sitemap is not found or empty.", + additionalParams: true + }, + { + label: 'Max Depth', + name: 'maxDepth', + type: 'number', + description: + 'Maximum levels of links to follow (e.g., 1 = only the initial URL, 2 = initial URL + links found on it). Default 1.', + placeholder: '1', + default: 1, + optional: true, + additionalParams: true + }, + { + label: 'Max Pages', + name: 'maxPages', + type: 'number', + description: + 'Maximum total number of pages to scrape, regardless of mode or depth. Stops when this limit is reached. Leave empty for no page limit. Default: 10.', + placeholder: '10', + default: 10, + optional: true, + additionalParams: true + }, + { + label: 'Timeout (s)', + name: 'timeoutS', + type: 'number', + description: 'Maximum time in seconds to wait for each page request to complete. Accepts decimals (e.g., 0.5). Default 60.', + placeholder: '60', + default: 60, + optional: true, + additionalParams: true + }, + { + label: 'Tool Description', + name: 'description', + type: 'string', + description: + 'Custom description of what the tool does. This is for LLM to determine when to use this tool. Overrides the default description.', + rows: 4, + additionalParams: true, + optional: true, + placeholder: `Scrapes web pages recursively or via default sitemap. Extracts title, description, and paragraph text. Input should be a single URL string. Returns a JSON string array of scraped page data objects.` + } + ] + } + + async init(nodeData: INodeData, _: string, _options: ICommonObject): Promise { + const scrapeMode = (nodeData.inputs?.scrapeMode as string) ?? 
'recursive' + const useSitemap = scrapeMode === 'sitemap' + + const maxDepthInput = nodeData.inputs?.maxDepth as string | number | undefined + let maxDepth = 1 + if (maxDepthInput !== undefined && maxDepthInput !== '') { + const parsedDepth = parseInt(String(maxDepthInput), 10) + if (!isNaN(parsedDepth) && parsedDepth > 0) { + maxDepth = parsedDepth + } + } + + const maxPagesInput = nodeData.inputs?.maxPages as string | number | undefined + let maxPages: number | null = 10 + if (maxPagesInput === undefined || maxPagesInput === '') { + maxPages = null + } else { + const parsedPages = parseInt(String(maxPagesInput), 10) + if (!isNaN(parsedPages) && parsedPages > 0) { + maxPages = parsedPages + } else if (parsedPages <= 0) { + maxPages = null + } + } + + const timeoutInputS = nodeData.inputs?.timeoutS as string | number | undefined + let timeoutMs = 60000 + if (timeoutInputS !== undefined && timeoutInputS !== '') { + const parsedTimeoutS = parseFloat(String(timeoutInputS)) + if (!isNaN(parsedTimeoutS) && parsedTimeoutS > 0) { + timeoutMs = Math.round(parsedTimeoutS * 1000) + } + } + + const customDescription = nodeData.inputs?.description as string + + const tool = new WebScraperRecursiveTool(maxDepth, maxPages, timeoutMs, useSitemap) + + if (customDescription) { + tool.description = customDescription + } + + return tool + } +} + +module.exports = { nodeClass: WebScraperRecursive_Tools } diff --git a/packages/components/nodes/tools/FreeWebScraper/cheeriowebscraper.svg b/packages/components/nodes/tools/FreeWebScraper/cheeriowebscraper.svg new file mode 100644 index 00000000000..c753ab17568 --- /dev/null +++ b/packages/components/nodes/tools/FreeWebScraper/cheeriowebscraper.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/tools/MCP/CustomMCP/CustomMCP.ts b/packages/components/nodes/tools/MCP/CustomMCP/CustomMCP.ts index 548c595ecbf..b24144ea561 100644 --- a/packages/components/nodes/tools/MCP/CustomMCP/CustomMCP.ts +++ 
b/packages/components/nodes/tools/MCP/CustomMCP/CustomMCP.ts @@ -104,7 +104,14 @@ class Custom_MCP implements INode { serverParams = JSON.parse(serverParamsString) } - const toolkit = new MCPToolkit(serverParams, 'stdio') + // Compatible with stdio and SSE + let toolkit: MCPToolkit + if (serverParams?.command === undefined) { + toolkit = new MCPToolkit(serverParams, 'sse') + } else { + toolkit = new MCPToolkit(serverParams, 'stdio') + } + await toolkit.initialize() const tools = toolkit.tools ?? [] diff --git a/packages/components/nodes/tools/MCP/core.ts b/packages/components/nodes/tools/MCP/core.ts index 235f9d50f28..226c70b60ac 100644 --- a/packages/components/nodes/tools/MCP/core.ts +++ b/packages/components/nodes/tools/MCP/core.ts @@ -3,20 +3,21 @@ import { Client } from '@modelcontextprotocol/sdk/client/index.js' import { StdioClientTransport, StdioServerParameters } from '@modelcontextprotocol/sdk/client/stdio.js' import { BaseToolkit, tool, Tool } from '@langchain/core/tools' import { z } from 'zod' +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js' +import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js' export class MCPToolkit extends BaseToolkit { tools: Tool[] = [] _tools: ListToolsResult | null = null model_config: any - transport: StdioClientTransport | null = null + transport: StdioClientTransport | SSEClientTransport | StreamableHTTPClientTransport | null = null client: Client | null = null - constructor(serverParams: StdioServerParameters | any, transport: 'stdio' | 'sse') { + serverParams: StdioServerParameters | any + transportType: 'stdio' | 'sse' + constructor(serverParams: StdioServerParameters | any, transportType: 'stdio' | 'sse') { super() - if (transport === 'stdio') { - this.transport = new StdioClientTransport(serverParams as StdioServerParameters) - } else { - // TODO: this.transport = new SSEClientTransport(serverParams.url); - } + this.serverParams = serverParams + 
this.transportType = transportType } async initialize() { if (this._tools === null) { @@ -29,10 +30,30 @@ export class MCPToolkit extends BaseToolkit { capabilities: {} } ) - if (this.transport === null) { - throw new Error('Transport is not initialized') + if (this.transportType === 'stdio') { + // Compatible with overridden PATH configuration + this.serverParams.env = { + ...(this.serverParams.env || {}), + PATH: process.env.PATH + } + + this.transport = new StdioClientTransport(this.serverParams as StdioServerParameters) + await this.client.connect(this.transport) + } else { + if (this.serverParams.url === undefined) { + throw new Error('URL is required for SSE transport') + } + + const baseUrl = new URL(this.serverParams.url) + try { + this.transport = new StreamableHTTPClientTransport(baseUrl) + await this.client.connect(this.transport) + } catch (error) { + this.transport = new SSEClientTransport(baseUrl) + await this.client.connect(this.transport) + } } - await this.client.connect(this.transport) + this._tools = await this.client.request({ method: 'tools/list' }, ListToolsResultSchema) this.tools = await this.get_tools() diff --git a/packages/components/nodes/tools/TavilyAPI/TavilyAPI.ts b/packages/components/nodes/tools/TavilyAPI/TavilyAPI.ts index 68e44c54670..92d07dc30bc 100644 --- a/packages/components/nodes/tools/TavilyAPI/TavilyAPI.ts +++ b/packages/components/nodes/tools/TavilyAPI/TavilyAPI.ts @@ -13,16 +13,145 @@ class TavilyAPI_Tools implements INode { baseClasses: string[] credential: INodeParams inputs: INodeParams[] + additionalParams: boolean constructor() { this.label = 'Tavily API' this.name = 'tavilyAPI' - this.version = 1.0 + this.version = 1.1 this.type = 'TavilyAPI' this.icon = 'tavily.svg' this.category = 'Tools' - this.description = 'Wrapper around TavilyAPI - a real-time API to access Google search results' - this.inputs = [] + this.description = 'Wrapper around TavilyAPI - A specialized search engine designed for LLMs and AI agents' + 
this.inputs = [ + { + label: 'Query', + name: 'query', + type: 'string', + optional: false, + description: 'The search query to execute with Tavily', + additionalParams: true + }, + { + label: 'Topic', + name: 'topic', + type: 'options', + options: [ + { label: 'General', name: 'general' }, + { label: 'News', name: 'news' } + ], + default: 'general', + description: 'The category of the search. News for real-time updates, general for broader searches', + additionalParams: true, + optional: true + }, + { + label: 'Search Depth', + name: 'searchDepth', + type: 'options', + options: [ + { label: 'Basic', name: 'basic' }, + { label: 'Advanced', name: 'advanced' } + ], + default: 'basic', + description: 'The depth of the search. Advanced costs 2 API Credits, basic costs 1', + additionalParams: true, + optional: true + }, + { + label: 'Chunks Per Source', + name: 'chunksPerSource', + type: 'number', + default: 3, + description: 'Number of content chunks per source (1-3). Only for advanced search', + additionalParams: true, + optional: true + }, + { + label: 'Max Results', + name: 'maxResults', + type: 'number', + default: 5, + additionalParams: true, + description: 'Maximum number of search results (0-20)', + optional: true + }, + { + label: 'Time Range', + name: 'timeRange', + type: 'options', + options: [ + { label: 'Day', name: 'day' }, + { label: 'Week', name: 'week' }, + { label: 'Month', name: 'month' }, + { label: 'Year', name: 'year' } + ], + optional: true, + additionalParams: true, + description: 'Time range to filter results' + }, + { + label: 'Days', + name: 'days', + type: 'number', + default: 7, + additionalParams: true, + description: 'Number of days back from current date (only for news topic)', + optional: true + }, + { + label: 'Include Answer', + name: 'includeAnswer', + type: 'boolean', + default: false, + description: 'Include an LLM-generated answer to the query', + additionalParams: true, + optional: true + }, + { + label: 'Include Raw Content', + 
name: 'includeRawContent', + type: 'boolean', + default: false, + description: 'Include cleaned and parsed HTML content of each result', + additionalParams: true, + optional: true + }, + { + label: 'Include Images', + name: 'includeImages', + type: 'boolean', + default: false, + description: 'Include image search results', + additionalParams: true, + optional: true + }, + { + label: 'Include Image Descriptions', + name: 'includeImageDescriptions', + type: 'boolean', + default: false, + description: 'Include descriptive text for each image', + additionalParams: true, + optional: true + }, + { + label: 'Include Domains', + name: 'includeDomains', + type: 'string', + optional: true, + description: 'Comma-separated list of domains to include in results', + additionalParams: true + }, + { + label: 'Exclude Domains', + name: 'excludeDomains', + type: 'string', + optional: true, + description: 'Comma-separated list of domains to exclude from results', + additionalParams: true + } + ] this.credential = { label: 'Connect Credential', name: 'credential', @@ -35,7 +164,40 @@ class TavilyAPI_Tools implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) const tavilyApiKey = getCredentialParam('tavilyApiKey', credentialData, nodeData) - return new TavilySearchResults({ apiKey: tavilyApiKey }) + + const query = nodeData.inputs?.query as string + const topic = nodeData.inputs?.topic as string + const searchDepth = nodeData.inputs?.searchDepth as string + const chunksPerSource = nodeData.inputs?.chunksPerSource as number + const maxResults = nodeData.inputs?.maxResults as number + const timeRange = nodeData.inputs?.timeRange as string + const days = nodeData.inputs?.days as number + const includeAnswer = nodeData.inputs?.includeAnswer as boolean + const includeRawContent = nodeData.inputs?.includeRawContent as boolean + const includeImages = nodeData.inputs?.includeImages as boolean + const includeImageDescriptions = nodeData.inputs?.includeImageDescriptions as boolean + const includeDomains = nodeData.inputs?.includeDomains as string + const excludeDomains = nodeData.inputs?.excludeDomains as string + + const config: any = { + apiKey: tavilyApiKey, + query, + topic, + searchDepth, + maxResults, + includeAnswer, + includeRawContent, + includeImages, + includeImageDescriptions + } + + if (chunksPerSource) config.chunksPerSource = chunksPerSource + if (timeRange) config.timeRange = timeRange + if (days) config.days = days + if (includeDomains) config.includeDomains = includeDomains.split(',').map((d) => d.trim()) + if (excludeDomains) config.excludeDomains = excludeDomains.split(',').map((d) => d.trim()) + + return new TavilySearchResults(config) } } diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts index 7cc96d61d85..ad0f82bb057 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts @@ -12,6 +12,26 @@ import { getContentColumnName, getDatabase, getHost, getPort, getTableName } fro const serverCredentialsExists = 
!!process.env.POSTGRES_VECTORSTORE_USER && !!process.env.POSTGRES_VECTORSTORE_PASSWORD +// added temporarily to fix the base class return for VectorStore when postgres node is using TypeORM +function getVectorStoreBaseClasses() { + // Try getting base classes through the utility function + const baseClasses = getBaseClasses(VectorStore) + + // If we got results, return them + if (baseClasses && baseClasses.length > 0) { + return baseClasses + } + + // If VectorStore is recognized as a class but getBaseClasses returned nothing, + // return the known inheritance chain + if (VectorStore instanceof Function) { + return ['VectorStore'] + } + + // Fallback to minimum required class + return ['VectorStore'] +} + class Postgres_VectorStores implements INode { label: string name: string @@ -195,7 +215,11 @@ class Postgres_VectorStores implements INode { { label: 'Postgres Vector Store', name: 'vectorStore', - baseClasses: [this.type, ...getBaseClasses(VectorStore)] + baseClasses: [ + this.type, + // ...getBaseClasses(VectorStore), // disabled temporarily for using TypeORM + ...getVectorStoreBaseClasses() // added temporarily for using TypeORM + ] } ] } diff --git a/packages/components/package.json b/packages/components/package.json index 36f16153868..7a56eb1b250 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -1,6 +1,6 @@ { "name": "flowise-components", - "version": "2.2.7-patch.1", + "version": "2.2.8", "description": "Flowiseai Components", "main": "dist/src/index", "types": "dist/src/index.d.ts", @@ -35,7 +35,8 @@ "@gomomento/sdk": "^1.51.1", "@gomomento/sdk-core": "^1.51.1", "@google-ai/generativelanguage": "^2.5.0", - "@google/generative-ai": "^0.15.0", + "@google-cloud/storage": "^7.15.2", + "@google/generative-ai": "^0.24.0", "@huggingface/inference": "^2.6.1", "@langchain/anthropic": "0.3.14", "@langchain/aws": "0.1.4", @@ -44,7 +45,7 @@ "@langchain/community": "^0.3.24", "@langchain/core": "0.3.37", "@langchain/exa": 
"^0.0.5", - "@langchain/google-genai": "0.1.9", + "@langchain/google-genai": "0.2.3", "@langchain/google-vertexai": "^0.2.0", "@langchain/groq": "0.1.2", "@langchain/langgraph": "^0.0.22", @@ -56,9 +57,10 @@ "@langchain/qdrant": "^0.0.5", "@langchain/weaviate": "^0.0.1", "@langchain/xai": "^0.0.1", + "@mem0/community": "^0.0.1", "@mendable/firecrawl-js": "^0.0.28", "@mistralai/mistralai": "0.1.3", - "@modelcontextprotocol/sdk": "^1.6.1", + "@modelcontextprotocol/sdk": "^1.10.1", "@modelcontextprotocol/server-brave-search": "^0.6.2", "@modelcontextprotocol/server-github": "^2025.1.23", "@modelcontextprotocol/server-postgres": "^0.6.2", diff --git a/packages/components/src/handler.ts b/packages/components/src/handler.ts index 4018f218574..a4cafe19b27 100644 --- a/packages/components/src/handler.ts +++ b/packages/components/src/handler.ts @@ -30,6 +30,7 @@ import { LangWatch, LangWatchSpan, LangWatchTrace, autoconvertTypedValues } from import { DataSource } from 'typeorm' import { ChatGenerationChunk } from '@langchain/core/outputs' import { AIMessageChunk } from '@langchain/core/messages' +import { Serialized } from '@langchain/core/load/serializable' interface AgentRun extends Run { actions: AgentAction[] @@ -120,6 +121,50 @@ function getPhoenixTracer(options: PhoenixTracerOptions): Tracer | undefined { } } +interface OpikTracerOptions { + apiKey: string + baseUrl: string + projectName: string + workspace: string + sdkIntegration?: string + sessionId?: string + enableCallback?: boolean +} + +function getOpikTracer(options: OpikTracerOptions): Tracer | undefined { + const SEMRESATTRS_PROJECT_NAME = 'openinference.project.name' + try { + const traceExporter = new ProtoOTLPTraceExporter({ + url: `${options.baseUrl}/v1/private/otel/v1/traces`, + headers: { + Authorization: options.apiKey, + projectName: options.projectName, + 'Comet-Workspace': options.workspace + } + }) + const tracerProvider = new NodeTracerProvider({ + resource: new Resource({ + [ATTR_SERVICE_NAME]: 
options.projectName, + [ATTR_SERVICE_VERSION]: '1.0.0', + [SEMRESATTRS_PROJECT_NAME]: options.projectName + }) + }) + tracerProvider.addSpanProcessor(new SimpleSpanProcessor(traceExporter)) + if (options.enableCallback) { + registerInstrumentations({ + instrumentations: [] + }) + const lcInstrumentation = new LangChainInstrumentation() + lcInstrumentation.manuallyInstrument(CallbackManagerModule) + tracerProvider.register() + } + return tracerProvider.getTracer(`opik-tracer-${uuidv4().toString()}`) + } catch (err) { + if (process.env.DEBUG === 'true') console.error(`Error setting up Opik tracer: ${err.message}`) + return undefined + } +} + function tryGetJsonSpaces() { try { return parseInt(getEnvironmentVariable('LOG_JSON_SPACES') ?? '2') @@ -558,6 +603,28 @@ export const additionalCallbacks = async (nodeData: INodeData, options: ICommonO const tracer: Tracer | undefined = getPhoenixTracer(phoenixOptions) callbacks.push(tracer) + } else if (provider === 'opik') { + const opikApiKey = getCredentialParam('opikApiKey', credentialData, nodeData) + const opikEndpoint = getCredentialParam('opikUrl', credentialData, nodeData) + const opikWorkspace = getCredentialParam('opikWorkspace', credentialData, nodeData) + const opikProject = analytic[provider].opikProjectName as string + + let opikOptions: OpikTracerOptions = { + apiKey: opikApiKey, + baseUrl: opikEndpoint ?? 'https://www.comet.com/opik/api', + projectName: opikProject ?? 'default', + workspace: opikWorkspace ?? 
'default', + sdkIntegration: 'Flowise', + enableCallback: true + } + + if (options.chatId) opikOptions.sessionId = options.chatId + if (nodeData?.inputs?.analytics?.opik) { + opikOptions = { ...opikOptions, ...nodeData?.inputs?.analytics?.opik } + } + + const tracer: Tracer | undefined = getOpikTracer(opikOptions) + callbacks.push(tracer) } } } @@ -672,6 +739,25 @@ export class AnalyticHandler { const rootSpan: Span | undefined = undefined this.handlers['phoenix'] = { client: phoenix, phoenixProject, rootSpan } + } else if (provider === 'opik') { + const opikApiKey = getCredentialParam('opikApiKey', credentialData, this.nodeData) + const opikEndpoint = getCredentialParam('opikUrl', credentialData, this.nodeData) + const opikWorkspace = getCredentialParam('opikWorkspace', credentialData, this.nodeData) + const opikProject = analytic[provider].opikProjectName as string + + let opikOptions: OpikTracerOptions = { + apiKey: opikApiKey, + baseUrl: opikEndpoint ?? 'https://www.comet.com/opik/api', + projectName: opikProject ?? 'default', + workspace: opikWorkspace ?? 'default', + sdkIntegration: 'Flowise', + enableCallback: false + } + + const opik: Tracer | undefined = getOpikTracer(opikOptions) + const rootSpan: Span | undefined = undefined + + this.handlers['opik'] = { client: opik, opikProject, rootSpan } } } } @@ -687,7 +773,8 @@ export class AnalyticHandler { lunary: {}, langWatch: {}, arize: {}, - phoenix: {} + phoenix: {}, + opik: {} } if (Object.prototype.hasOwnProperty.call(this.handlers, 'langSmith')) { @@ -869,6 +956,40 @@ export class AnalyticHandler { returnIds['phoenix'].chainSpan = chainSpanId } + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const tracer: Tracer | undefined = this.handlers['opik'].client + let rootSpan: Span | undefined = this.handlers['opik'].rootSpan + + if (!parentIds || !Object.keys(parentIds).length) { + rootSpan = tracer ? 
tracer.startSpan('Flowise') : undefined + if (rootSpan) { + rootSpan.setAttribute('session.id', this.options.chatId) + rootSpan.setAttribute('openinference.span.kind', 'CHAIN') + rootSpan.setAttribute('input.value', input) + rootSpan.setAttribute('input.mime_type', 'text/plain') + rootSpan.setAttribute('output.value', '[Object]') + rootSpan.setAttribute('output.mime_type', 'text/plain') + rootSpan.setStatus({ code: SpanStatusCode.OK }) + rootSpan.end() + } + this.handlers['opik'].rootSpan = rootSpan + } + + const rootSpanContext = rootSpan + ? opentelemetry.trace.setSpan(opentelemetry.context.active(), rootSpan as Span) + : opentelemetry.context.active() + const chainSpan = tracer?.startSpan(name, undefined, rootSpanContext) + if (chainSpan) { + chainSpan.setAttribute('openinference.span.kind', 'CHAIN') + chainSpan.setAttribute('input.value', JSON.stringify(input)) + chainSpan.setAttribute('input.mime_type', 'application/json') + } + const chainSpanId: any = chainSpan?.spanContext().spanId + + this.handlers['opik'].chainSpan = { [chainSpanId]: chainSpan } + returnIds['opik'].chainSpan = chainSpanId + } + return returnIds } @@ -946,6 +1067,16 @@ export class AnalyticHandler { chainSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const chainSpan: Span | undefined = this.handlers['opik'].chainSpan[returnIds['opik'].chainSpan] + if (chainSpan) { + chainSpan.setAttribute('output.value', JSON.stringify(output)) + chainSpan.setAttribute('output.mime_type', 'application/json') + chainSpan.setStatus({ code: SpanStatusCode.OK }) + chainSpan.end() + } + } } async onChainError(returnIds: ICommonObject, error: string | object, shutdown = false) { @@ -1131,6 +1262,25 @@ export class AnalyticHandler { returnIds['phoenix'].llmSpan = llmSpanId } + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const tracer: Tracer | undefined = this.handlers['opik'].client + const rootSpan: Span | undefined = 
this.handlers['opik'].rootSpan + + const rootSpanContext = rootSpan + ? opentelemetry.trace.setSpan(opentelemetry.context.active(), rootSpan as Span) + : opentelemetry.context.active() + const llmSpan = tracer?.startSpan(name, undefined, rootSpanContext) + if (llmSpan) { + llmSpan.setAttribute('openinference.span.kind', 'LLM') + llmSpan.setAttribute('input.value', JSON.stringify(input)) + llmSpan.setAttribute('input.mime_type', 'application/json') + } + const llmSpanId: any = llmSpan?.spanContext().spanId + + this.handlers['opik'].llmSpan = { [llmSpanId]: llmSpan } + returnIds['opik'].llmSpan = llmSpanId + } + return returnIds } @@ -1196,6 +1346,16 @@ export class AnalyticHandler { llmSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const llmSpan: Span | undefined = this.handlers['opik'].llmSpan[returnIds['opik'].llmSpan] + if (llmSpan) { + llmSpan.setAttribute('output.value', JSON.stringify(output)) + llmSpan.setAttribute('output.mime_type', 'application/json') + llmSpan.setStatus({ code: SpanStatusCode.OK }) + llmSpan.end() + } + } } async onLLMError(returnIds: ICommonObject, error: string | object) { @@ -1260,6 +1420,16 @@ export class AnalyticHandler { llmSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const llmSpan: Span | undefined = this.handlers['opik'].llmSpan[returnIds['opik'].llmSpan] + if (llmSpan) { + llmSpan.setAttribute('error.value', JSON.stringify(error)) + llmSpan.setAttribute('error.mime_type', 'application/json') + llmSpan.setStatus({ code: SpanStatusCode.ERROR, message: error.toString() }) + llmSpan.end() + } + } } async onToolStart(name: string, input: string | object, parentIds: ICommonObject) { @@ -1269,7 +1439,8 @@ export class AnalyticHandler { lunary: {}, langWatch: {}, arize: {}, - phoenix: {} + phoenix: {}, + opik: {} } if (Object.prototype.hasOwnProperty.call(this.handlers, 'langSmith')) { @@ -1368,6 +1539,25 @@ export class AnalyticHandler { 
returnIds['phoenix'].toolSpan = toolSpanId } + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const tracer: Tracer | undefined = this.handlers['opik'].client + const rootSpan: Span | undefined = this.handlers['opik'].rootSpan + + const rootSpanContext = rootSpan + ? opentelemetry.trace.setSpan(opentelemetry.context.active(), rootSpan as Span) + : opentelemetry.context.active() + const toolSpan = tracer?.startSpan(name, undefined, rootSpanContext) + if (toolSpan) { + toolSpan.setAttribute('openinference.span.kind', 'TOOL') + toolSpan.setAttribute('input.value', JSON.stringify(input)) + toolSpan.setAttribute('input.mime_type', 'application/json') + } + const toolSpanId: any = toolSpan?.spanContext().spanId + + this.handlers['opik'].toolSpan = { [toolSpanId]: toolSpan } + returnIds['opik'].toolSpan = toolSpanId + } + return returnIds } @@ -1433,6 +1623,16 @@ export class AnalyticHandler { toolSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const toolSpan: Span | undefined = this.handlers['opik'].toolSpan[returnIds['opik'].toolSpan] + if (toolSpan) { + toolSpan.setAttribute('output.value', JSON.stringify(output)) + toolSpan.setAttribute('output.mime_type', 'application/json') + toolSpan.setStatus({ code: SpanStatusCode.OK }) + toolSpan.end() + } + } } async onToolError(returnIds: ICommonObject, error: string | object) { @@ -1497,5 +1697,98 @@ export class AnalyticHandler { toolSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const toolSpan: Span | undefined = this.handlers['opik'].toolSpan[returnIds['opik'].toolSpan] + if (toolSpan) { + toolSpan.setAttribute('error.value', JSON.stringify(error)) + toolSpan.setAttribute('error.mime_type', 'application/json') + toolSpan.setStatus({ code: SpanStatusCode.ERROR, message: error.toString() }) + toolSpan.end() + } + } + } +} + +/** + * Custom callback handler for streaming detailed intermediate information + * during agent 
execution, specifically tool invocation inputs and outputs. + */ +export class CustomStreamingHandler extends BaseCallbackHandler { + name = 'custom_streaming_handler' + + private sseStreamer: IServerSideEventStreamer + private chatId: string + + constructor(sseStreamer: IServerSideEventStreamer, chatId: string) { + super() + this.sseStreamer = sseStreamer + this.chatId = chatId + } + + /** + * Handle the start of a tool invocation + */ + async handleToolStart(tool: Serialized, input: string, runId: string, parentRunId?: string): Promise { + if (!this.sseStreamer) return + + const toolName = typeof tool === 'object' && tool.name ? tool.name : 'unknown-tool' + const toolInput = typeof input === 'string' ? input : JSON.stringify(input, null, 2) + + // Stream the tool invocation details using the agent_trace event type for consistency + this.sseStreamer.streamCustomEvent(this.chatId, 'agent_trace', { + step: 'tool_start', + name: toolName, + input: toolInput, + runId, + parentRunId: parentRunId || null + }) + } + + /** + * Handle the end of a tool invocation + */ + async handleToolEnd(output: string | object, runId: string, parentRunId?: string): Promise { + if (!this.sseStreamer) return + + const toolOutput = typeof output === 'string' ? 
output : JSON.stringify(output, null, 2) + + // Stream the tool output details using the agent_trace event type for consistency + this.sseStreamer.streamCustomEvent(this.chatId, 'agent_trace', { + step: 'tool_end', + output: toolOutput, + runId, + parentRunId: parentRunId || null + }) + } + + /** + * Handle tool errors + */ + async handleToolError(error: Error, runId: string, parentRunId?: string): Promise { + if (!this.sseStreamer) return + + // Stream the tool error details using the agent_trace event type for consistency + this.sseStreamer.streamCustomEvent(this.chatId, 'agent_trace', { + step: 'tool_error', + error: error.message, + runId, + parentRunId: parentRunId || null + }) + } + + /** + * Handle agent actions + */ + async handleAgentAction(action: AgentAction, runId: string, parentRunId?: string): Promise { + if (!this.sseStreamer) return + + // Stream the agent action details using the agent_trace event type for consistency + this.sseStreamer.streamCustomEvent(this.chatId, 'agent_trace', { + step: 'agent_action', + action: JSON.stringify(action), + runId, + parentRunId: parentRunId || null + }) } } diff --git a/packages/components/src/storageUtils.ts b/packages/components/src/storageUtils.ts index 7d214220818..a918c4f002e 100644 --- a/packages/components/src/storageUtils.ts +++ b/packages/components/src/storageUtils.ts @@ -8,6 +8,7 @@ import { S3Client, S3ClientConfig } from '@aws-sdk/client-s3' +import { Storage } from '@google-cloud/storage' import { Readable } from 'node:stream' import { getUserHome } from './utils' import sanitize from 'sanitize-filename' @@ -34,6 +35,25 @@ export const addBase64FilesToStorage = async (fileBase64: string, chatflowid: st }) await s3Client.send(putObjCmd) + fileNames.push(sanitizedFilename) + return 'FILE-STORAGE::' + JSON.stringify(fileNames) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const splitDataURI = fileBase64.split(',') + const filename = splitDataURI.pop()?.split(':')[1] ?? 
'' + const bf = Buffer.from(splitDataURI.pop() || '', 'base64') + const mime = splitDataURI[0].split(':')[1].split(';')[0] + const sanitizedFilename = _sanitizeFilename(filename) + const normalizedChatflowid = chatflowid.replace(/\\/g, '/') + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + const filePath = `${normalizedChatflowid}/${normalizedFilename}` + const file = bucket.file(filePath) + await new Promise((resolve, reject) => { + file.createWriteStream({ contentType: mime, metadata: { contentEncoding: 'base64' } }) + .on('error', (err) => reject(err)) + .on('finish', () => resolve()) + .end(bf) + }) fileNames.push(sanitizedFilename) return 'FILE-STORAGE::' + JSON.stringify(fileNames) } else { @@ -76,6 +96,20 @@ export const addArrayFilesToStorage = async (mime: string, bf: Buffer, fileName: await s3Client.send(putObjCmd) fileNames.push(sanitizedFilename) return 'FILE-STORAGE::' + JSON.stringify(fileNames) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPaths = paths.map((p) => p.replace(/\\/g, '/')) + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + const filePath = [...normalizedPaths, normalizedFilename].join('/') + const file = bucket.file(filePath) + await new Promise((resolve, reject) => { + file.createWriteStream() + .on('error', (err) => reject(err)) + .on('finish', () => resolve()) + .end(bf) + }) + fileNames.push(sanitizedFilename) + return 'FILE-STORAGE::' + JSON.stringify(fileNames) } else { const dir = path.join(getStoragePath(), ...paths.map(_sanitizeFilename)) if (!fs.existsSync(dir)) { @@ -109,6 +143,19 @@ export const addSingleFileToStorage = async (mime: string, bf: Buffer, fileName: }) await s3Client.send(putObjCmd) return 'FILE-STORAGE::' + sanitizedFilename + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPaths = paths.map((p) => p.replace(/\\/g, '/')) + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + 
const filePath = [...normalizedPaths, normalizedFilename].join('/') + const file = bucket.file(filePath) + await new Promise((resolve, reject) => { + file.createWriteStream({ contentType: mime, metadata: { contentEncoding: 'base64' } }) + .on('error', (err) => reject(err)) + .on('finish', () => resolve()) + .end(bf) + }) + return 'FILE-STORAGE::' + sanitizedFilename } else { const dir = path.join(getStoragePath(), ...paths.map(_sanitizeFilename)) if (!fs.existsSync(dir)) { @@ -146,6 +193,11 @@ export const getFileFromUpload = async (filePath: string): Promise => { // @ts-ignore const buffer = Buffer.concat(response.Body.toArray()) return buffer + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const file = bucket.file(filePath) + const [buffer] = await file.download() + return buffer } else { return fs.readFileSync(filePath) } @@ -179,6 +231,14 @@ export const getFileFromStorage = async (file: string, ...paths: string[]): Prom // @ts-ignore const buffer = Buffer.concat(response.Body.toArray()) return buffer + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPaths = paths.map((p) => p.replace(/\\/g, '/')) + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + const filePath = [...normalizedPaths, normalizedFilename].join('/') + const file = bucket.file(filePath) + const [buffer] = await file.download() + return buffer } else { const fileInStorage = path.join(getStoragePath(), ...paths.map(_sanitizeFilename), sanitizedFilename) return fs.readFileSync(fileInStorage) @@ -208,6 +268,10 @@ export const removeFilesFromStorage = async (...paths: string[]) => { Key = Key.substring(1) } await _deleteS3Folder(Key) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPath = paths.map((p) => p.replace(/\\/g, '/')).join('/') + await bucket.deleteFiles({ prefix: `${normalizedPath}/` }) } else { const directory = path.join(getStoragePath(), 
...paths.map(_sanitizeFilename)) _deleteLocalFolderRecursive(directory) @@ -223,6 +287,9 @@ export const removeSpecificFileFromUpload = async (filePath: string) => { Key = Key.substring(1) } await _deleteS3Folder(Key) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + await bucket.file(filePath).delete() } else { fs.unlinkSync(filePath) } @@ -237,6 +304,15 @@ export const removeSpecificFileFromStorage = async (...paths: string[]) => { Key = Key.substring(1) } await _deleteS3Folder(Key) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const fileName = paths.pop() + if (fileName) { + const sanitizedFilename = _sanitizeFilename(fileName) + paths.push(sanitizedFilename) + } + const normalizedPath = paths.map((p) => p.replace(/\\/g, '/')).join('/') + await bucket.file(normalizedPath).delete() } else { const fileName = paths.pop() if (fileName) { @@ -257,6 +333,10 @@ export const removeFolderFromStorage = async (...paths: string[]) => { Key = Key.substring(1) } await _deleteS3Folder(Key) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPath = paths.map((p) => p.replace(/\\/g, '/')).join('/') + await bucket.deleteFiles({ prefix: `${normalizedPath}/` }) } else { const directory = path.join(getStoragePath(), ...paths.map(_sanitizeFilename)) _deleteLocalFolderRecursive(directory, true) @@ -355,6 +435,14 @@ export const streamStorageFile = async ( const blob = await body.transformToByteArray() return Buffer.from(blob) } + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedChatflowId = chatflowId.replace(/\\/g, '/') + const normalizedChatId = chatId.replace(/\\/g, '/') + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + const filePath = `${normalizedChatflowId}/${normalizedChatId}/${normalizedFilename}` + const [buffer] = await bucket.file(filePath).download() + return buffer } else { const filePath = path.join(getStoragePath(), 
chatflowId, chatId, sanitizedFilename) //raise error if file path is not absolute @@ -372,6 +460,28 @@ export const streamStorageFile = async ( } } +export const getGcsClient = () => { + const pathToGcsCredential = process.env.GOOGLE_CLOUD_STORAGE_CREDENTIAL + const projectId = process.env.GOOGLE_CLOUD_STORAGE_PROJ_ID + const bucketName = process.env.GOOGLE_CLOUD_STORAGE_BUCKET_NAME + + if (!pathToGcsCredential) { + throw new Error('GOOGLE_CLOUD_STORAGE_CREDENTIAL env variable is required') + } + if (!bucketName) { + throw new Error('GOOGLE_CLOUD_STORAGE_BUCKET_NAME env variable is required') + } + + const storageConfig = { + keyFilename: pathToGcsCredential, + ...(projectId ? { projectId } : {}) + } + + const storage = new Storage(storageConfig) + const bucket = storage.bucket(bucketName) + return { storage, bucket } +} + export const getS3Config = () => { const accessKeyId = process.env.S3_STORAGE_ACCESS_KEY_ID const secretAccessKey = process.env.S3_STORAGE_SECRET_ACCESS_KEY diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index fb29c129369..7951dc0e75c 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -5,7 +5,7 @@ import * as path from 'path' import { JSDOM } from 'jsdom' import { z } from 'zod' import { DataSource } from 'typeorm' -import { ICommonObject, IDatabaseEntity, IDocument, IMessage, INodeData, IVariable, MessageContentImageUrl } from './Interface' +import { ICommonObject, IDatabaseEntity, IFileUpload, IMessage, INodeData, IVariable, MessageContentImageUrl } from './Interface' import { AES, enc } from 'crypto-js' import { omit } from 'lodash' import { AIMessage, HumanMessage, BaseMessage } from '@langchain/core/messages' @@ -284,14 +284,16 @@ export const getInputVariables = (paramValue: string): string[] => { } /** - * Transform curly braces into double curly braces if the content includes a colon. 
+ * Transform single curly braces into double curly braces if the content includes a colon. * @param input - The original string that may contain { ... } segments. * @returns The transformed string, where { ... } containing a colon has been replaced with {{ ... }}. */ export const transformBracesWithColon = (input: string): string => { - // This regex will match anything of the form `{ ... }` (no nested braces). - // `[^{}]*` means: match any characters that are not `{` or `}` zero or more times. - const regex = /\{([^{}]*?)\}/g + // This regex uses negative lookbehind (? { // groupContent is the text inside the braces `{ ... }`. @@ -541,6 +543,15 @@ const getEncryptionKey = async (): Promise => { return process.env.FLOWISE_SECRETKEY_OVERWRITE } try { + if (USE_AWS_SECRETS_MANAGER && secretsManagerClient) { + const secretId = process.env.SECRETKEY_AWS_NAME || 'FlowiseEncryptionKey' + const command = new GetSecretValueCommand({ SecretId: secretId }) + const response = await secretsManagerClient.send(command) + + if (response.SecretString) { + return response.SecretString + } + } return await fs.promises.readFile(getEncryptionKeyPath(), 'utf8') } catch (error) { throw new Error(error) @@ -559,18 +570,24 @@ const decryptCredentialData = async (encryptedData: string): Promise doc.pageContent).join('\n') - messageWithFileUploads += `${pageContents}\n\n` + const documents: string = await fileLoaderNodeInstance.init(nodeData, '', options) + messageWithFileUploads += `${documents}\n\n` } } const messageContent = messageWithFileUploads ? 
`${messageWithFileUploads}\n\n${message.content}` : message.content diff --git a/packages/server/.env.example b/packages/server/.env.example index dabae5a0268..e1bccc2d06f 100644 --- a/packages/server/.env.example +++ b/packages/server/.env.example @@ -9,6 +9,7 @@ PORT=3000 # SECRETKEY_AWS_ACCESS_KEY= # SECRETKEY_AWS_SECRET_KEY= # SECRETKEY_AWS_REGION=us-west-2 +# SECRETKEY_AWS_NAME=FlowiseEncryptionKey # NUMBER_OF_PROXIES= 1 # CORS_ORIGINS=* @@ -53,6 +54,10 @@ PORT=3000 # S3_STORAGE_REGION=us-west-2 # S3_ENDPOINT_URL= # S3_FORCE_PATH_STYLE=false +# GOOGLE_CLOUD_STORAGE_CREDENTIAL=/the/keyfilename/path +# GOOGLE_CLOUD_STORAGE_PROJ_ID= +# GOOGLE_CLOUD_STORAGE_BUCKET_NAME= +# GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=true # SHOW_COMMUNITY_NODES=true # DISABLED_NODES=bufferMemory,chatOpenAI (comma separated list of node names to disable) @@ -83,6 +88,8 @@ PORT=3000 # QUEUE_NAME=flowise-queue # QUEUE_REDIS_EVENT_STREAM_MAX_LEN=100000 # WORKER_CONCURRENCY=100000 +# REMOVE_ON_AGE=86400 +# REMOVE_ON_COUNT=10000 # REDIS_URL= # REDIS_HOST=localhost # REDIS_PORT=6379 diff --git a/packages/server/marketplaces/agentflows/Prompt Engineering Team.json b/packages/server/marketplaces/agentflows/Prompt Engineering Team.json index a5caa416329..59cda2cca69 100644 --- a/packages/server/marketplaces/agentflows/Prompt Engineering Team.json +++ b/packages/server/marketplaces/agentflows/Prompt Engineering Team.json @@ -167,7 +167,7 @@ ], "inputs": { "workerName": " Prompt Creator", - "workerPrompt": "You are a Prompt Engineer. Your job is to craft system prompts for AI Agents based on user requests.\n\nHere is an example:\n\n1. User asks you to craft two AI Agent prompt messages for \"researching leads and creating personalized email drafts for the sales team\".\n\n2. You generate the following:\n\nAGENT 1\n\nName: \nLead Research\n\nSytyem Prompt: \nAs a member of the sales team at company, your mission is to explore the digital landscape for potential leads. 
Equipped with advanced tools and a strategic approach, you analyze data, trends, and interactions to discover opportunities that others might miss. Your efforts are vital in creating pathways for meaningful engagements and driving the company's growth.\nYour goal is to identify high-value leads that align with our ideal customer profile.\nPerform a thorough analysis of lead_company, a company that has recently shown interest in our solutions. Use all available data sources to create a detailed profile, concentrating on key decision-makers, recent business developments, and potential needs that match our offerings. This task is essential for effectively customizing our engagement strategy.\nAvoid making assumptions and only use information you are certain about.\nYou should produce a comprehensive report on lead_person, including company background, key personnel, recent milestones, and identified needs. Emphasize potential areas where our solutions can add value and suggest tailored engagement strategies. Pass the info to Lead Sales Representative.\n\nAGENT 2\n\nName: \nLead Sales Representative\n\nSystem Prompt: \nYou play a crucial role within company as the link between potential clients and the solutions they need. By crafting engaging, personalized messages, you not only inform leads about our company offerings but also make them feel valued and understood. Your role is essential in transforming interest into action, guiding leads from initial curiosity to committed engagement.\nYour goal is to nurture leads with tailored, compelling communications.\nLeveraging the insights from the lead profiling report on lead_company, create a personalized outreach campaign targeting lead_person, the position of lead_company. he campaign should highlight their recent lead_activity and demonstrate how our solutions can support their objectives. 
Your communication should align with lead_company's company culture and values, showcasing a thorough understanding of their business and needs. Avoid making assumptions and use only verified information.\nThe output should be a series of personalized email drafts customized for lead_company, specifically addressing lead_person. Each draft should present a compelling narrative that connects our solutions to their recent accomplishments and future goals. Ensure the tone is engaging, professional, and consistent with lead_company's corporate identity. Keep in natural, don't use strange and fancy words.\n\n3. IMPORTANT: Notice how the prompts in this example work together and are connected by \"Pass the info to Lead Sales Representative.\" The first prompt focuses on researching leads, while the second leverages that information to create personalized email drafts. This creates a cohesive workflow for the AI Agents.\n\n4. If the AI agent needs to use a tool to perform its task, it will indicate this on the system prompt, but you will not write any code for them (they already have the code for the tools they use).", + "workerPrompt": "You are a Prompt Engineer. Your job is to craft system prompts for AI Agents based on user requests.\n\nHere is an example:\n\n1. User asks you to craft two AI Agent prompt messages for \"researching leads and creating personalized email drafts for the sales team\".\n\n2. You generate the following:\n\nAGENT 1\n\nName: \nLead Research\n\nSystem Prompt: \nAs a member of the sales team at company, your mission is to explore the digital landscape for potential leads. Equipped with advanced tools and a strategic approach, you analyze data, trends, and interactions to discover opportunities that others might miss. 
Your efforts are vital in creating pathways for meaningful engagements and driving the company's growth.\nYour goal is to identify high-value leads that align with our ideal customer profile.\nPerform a thorough analysis of lead_company, a company that has recently shown interest in our solutions. Use all available data sources to create a detailed profile, concentrating on key decision-makers, recent business developments, and potential needs that match our offerings. This task is essential for effectively customizing our engagement strategy.\nAvoid making assumptions and only use information you are certain about.\nYou should produce a comprehensive report on lead_person, including company background, key personnel, recent milestones, and identified needs. Emphasize potential areas where our solutions can add value and suggest tailored engagement strategies. Pass the info to Lead Sales Representative.\n\nAGENT 2\n\nName: \nLead Sales Representative\n\nSystem Prompt: \nYou play a crucial role within company as the link between potential clients and the solutions they need. By crafting engaging, personalized messages, you not only inform leads about our company offerings but also make them feel valued and understood. Your role is essential in transforming interest into action, guiding leads from initial curiosity to committed engagement.\nYour goal is to nurture leads with tailored, compelling communications.\nLeveraging the insights from the lead profiling report on lead_company, create a personalized outreach campaign targeting lead_person, the position of lead_company. The campaign should highlight their recent lead_activity and demonstrate how our solutions can support their objectives. Your communication should align with lead_company's company culture and values, showcasing a thorough understanding of their business and needs. 
Avoid making assumptions and use only verified information.\nThe output should be a series of personalized email drafts customized for lead_company, specifically addressing lead_person. Each draft should present a compelling narrative that connects our solutions to their recent accomplishments and future goals. Ensure the tone is engaging, professional, and consistent with lead_company's corporate identity. Keep it natural, don't use strange and fancy words.\n\n3. IMPORTANT: Notice how the prompts in this example work together and are connected by \"Pass the info to Lead Sales Representative.\" The first prompt focuses on researching leads, while the second leverages that information to create personalized email drafts. This creates a cohesive workflow for the AI Agents.\n\n4. If the AI agent needs to use a tool to perform its task, it will indicate this on the system prompt, but you will not write any code for them (they already have the code for the tools they use).", "tools": "", "supervisor": "{{supervisor_0.data.instance}}", "model": "", diff --git a/packages/server/package.json b/packages/server/package.json index 9e8308f8025..84364cd9d00 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "2.2.7-patch.1", + "version": "2.2.8", "description": "Flowiseai Server", "main": "dist/index", "types": "dist/index.d.ts", @@ -57,6 +57,7 @@ "license": "SEE LICENSE IN LICENSE.md", "dependencies": { "@aws-sdk/client-secrets-manager": "^3.699.0", + "@google-cloud/logging-winston": "^6.0.0", "@oclif/core": "4.0.7", "@opentelemetry/api": "^1.3.0", "@opentelemetry/auto-instrumentations-node": "^0.52.0", @@ -95,9 +96,10 @@ "moment": "^2.29.3", "moment-timezone": "^0.5.34", "multer": "^1.4.5-lts.1", + "multer-cloud-storage": "^4.0.0", "multer-s3": "^3.0.1", "mysql2": "^3.11.3", - "nim-container-manager": "^1.0.5", + "flowise-nim-container-manager": "^1.0.11", "openai": "^4.82.0", "pg": "^8.11.1", 
"posthog-node": "^3.5.0", diff --git a/packages/server/src/commands/base.ts b/packages/server/src/commands/base.ts index 5bed81e5628..1b136144663 100644 --- a/packages/server/src/commands/base.ts +++ b/packages/server/src/commands/base.ts @@ -49,6 +49,10 @@ export abstract class BaseCommand extends Command { S3_STORAGE_REGION: Flags.string(), S3_ENDPOINT_URL: Flags.string(), S3_FORCE_PATH_STYLE: Flags.string(), + GOOGLE_CLOUD_STORAGE_CREDENTIAL: Flags.string(), + GOOGLE_CLOUD_STORAGE_PROJ_ID: Flags.string(), + GOOGLE_CLOUD_STORAGE_BUCKET_NAME: Flags.string(), + GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS: Flags.string(), SHOW_COMMUNITY_NODES: Flags.string(), SECRETKEY_STORAGE_TYPE: Flags.string(), SECRETKEY_PATH: Flags.string(), @@ -61,6 +65,8 @@ export abstract class BaseCommand extends Command { WORKER_CONCURRENCY: Flags.string(), QUEUE_NAME: Flags.string(), QUEUE_REDIS_EVENT_STREAM_MAX_LEN: Flags.string(), + REMOVE_ON_AGE: Flags.string(), + REMOVE_ON_COUNT: Flags.string(), REDIS_URL: Flags.string(), REDIS_HOST: Flags.string(), REDIS_PORT: Flags.string(), @@ -182,6 +188,11 @@ export abstract class BaseCommand extends Command { if (flags.S3_STORAGE_REGION) process.env.S3_STORAGE_REGION = flags.S3_STORAGE_REGION if (flags.S3_ENDPOINT_URL) process.env.S3_ENDPOINT_URL = flags.S3_ENDPOINT_URL if (flags.S3_FORCE_PATH_STYLE) process.env.S3_FORCE_PATH_STYLE = flags.S3_FORCE_PATH_STYLE + if (flags.GOOGLE_CLOUD_STORAGE_CREDENTIAL) process.env.GOOGLE_CLOUD_STORAGE_CREDENTIAL = flags.GOOGLE_CLOUD_STORAGE_CREDENTIAL + if (flags.GOOGLE_CLOUD_STORAGE_PROJ_ID) process.env.GOOGLE_CLOUD_STORAGE_PROJ_ID = flags.GOOGLE_CLOUD_STORAGE_PROJ_ID + if (flags.GOOGLE_CLOUD_STORAGE_BUCKET_NAME) process.env.GOOGLE_CLOUD_STORAGE_BUCKET_NAME = flags.GOOGLE_CLOUD_STORAGE_BUCKET_NAME + if (flags.GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS) + process.env.GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS = flags.GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS // Queue if (flags.MODE) process.env.MODE = flags.MODE @@ -196,6 +207,8 @@ export 
abstract class BaseCommand extends Command { if (flags.REDIS_CA) process.env.REDIS_CA = flags.REDIS_CA if (flags.WORKER_CONCURRENCY) process.env.WORKER_CONCURRENCY = flags.WORKER_CONCURRENCY if (flags.QUEUE_NAME) process.env.QUEUE_NAME = flags.QUEUE_NAME - if (flags.QUEUE_REDIS_EVENT_STREAM_MAX_LEN) process.env.QUEUE_REDIS_EVENT_STREAM_MAX_LEN = flags.QUEUE_REDIS_EVENT_STREAM + if (flags.QUEUE_REDIS_EVENT_STREAM_MAX_LEN) process.env.QUEUE_REDIS_EVENT_STREAM_MAX_LEN = flags.QUEUE_REDIS_EVENT_STREAM_MAX_LEN + if (flags.REMOVE_ON_AGE) process.env.REMOVE_ON_AGE = flags.REMOVE_ON_AGE + if (flags.REMOVE_ON_COUNT) process.env.REMOVE_ON_COUNT = flags.REMOVE_ON_COUNT } } diff --git a/packages/server/src/controllers/documentstore/index.ts b/packages/server/src/controllers/documentstore/index.ts index ccf451ac9d4..36b1402e1d8 100644 --- a/packages/server/src/controllers/documentstore/index.ts +++ b/packages/server/src/controllers/documentstore/index.ts @@ -201,7 +201,8 @@ const processLoader = async (req: Request, res: Response, next: NextFunction) => } const docLoaderId = req.params.loaderId const body = req.body - const apiResponse = await documentStoreService.processLoaderMiddleware(body, docLoaderId) + const isInternalRequest = req.headers['x-request-from'] === 'internal' + const apiResponse = await documentStoreService.processLoaderMiddleware(body, docLoaderId, isInternalRequest) return res.json(apiResponse) } catch (error) { next(error) @@ -334,8 +335,7 @@ const saveVectorStoreConfig = async (req: Request, res: Response, next: NextFunc } const body = req.body const appDataSource = getRunningExpressApp().AppDataSource - const componentNodes = getRunningExpressApp().nodesPool.componentNodes - const apiResponse = await documentStoreService.saveVectorStoreConfig(appDataSource, componentNodes, body) + const apiResponse = await documentStoreService.saveVectorStoreConfig(appDataSource, body) return res.json(apiResponse) } catch (error) { next(error) diff --git 
a/packages/server/src/controllers/feedback/index.ts b/packages/server/src/controllers/feedback/index.ts index 936a3b87912..a7286cf152b 100644 --- a/packages/server/src/controllers/feedback/index.ts +++ b/packages/server/src/controllers/feedback/index.ts @@ -1,5 +1,6 @@ import { Request, Response, NextFunction } from 'express' import feedbackService from '../../services/feedback' +import { validateFeedbackForCreation, validateFeedbackForUpdate } from '../../services/feedback/validation' import { InternalFlowiseError } from '../../errors/internalFlowiseError' import { StatusCodes } from 'http-status-codes' @@ -31,6 +32,7 @@ const createChatMessageFeedbackForChatflow = async (req: Request, res: Response, `Error: feedbackController.createChatMessageFeedbackForChatflow - body not provided!` ) } + await validateFeedbackForCreation(req.body) const apiResponse = await feedbackService.createChatMessageFeedbackForChatflow(req.body) return res.json(apiResponse) } catch (error) { @@ -52,6 +54,7 @@ const updateChatMessageFeedbackForChatflow = async (req: Request, res: Response, `Error: feedbackController.updateChatMessageFeedbackForChatflow - id not provided!` ) } + await validateFeedbackForUpdate(req.params.id, req.body) const apiResponse = await feedbackService.updateChatMessageFeedbackForChatflow(req.params.id, req.body) return res.json(apiResponse) } catch (error) { diff --git a/packages/server/src/controllers/nvidia-nim/index.ts b/packages/server/src/controllers/nvidia-nim/index.ts index ce99c14abc8..54bc4a3f6cd 100644 --- a/packages/server/src/controllers/nvidia-nim/index.ts +++ b/packages/server/src/controllers/nvidia-nim/index.ts @@ -1,7 +1,7 @@ import axios from 'axios' -import { Request, Response, NextFunction } from 'express' +import { NextFunction, Request, Response } from 'express' -const { NimContainerManager } = require('nim-container-manager') +const { NimContainerManager } = require('flowise-nim-container-manager') const getToken = async (req: Request, res: 
Response, next: NextFunction) => { try { @@ -55,7 +55,13 @@ const startContainer = async (req: Request, res: Response, next: NextFunction) = try { const imageTag = req.body.imageTag const apiKey = req.body.apiKey - await NimContainerManager.startContainer(imageTag, apiKey) + const hostPort = req.body.hostPort + const nimRelaxMemConstraints = parseInt(req.body.nimRelaxMemConstraints) + // Validate nimRelaxMemConstraints + if (isNaN(nimRelaxMemConstraints) || (nimRelaxMemConstraints !== 0 && nimRelaxMemConstraints !== 1)) { + return res.status(400).send('nimRelaxMemConstraints must be 0 or 1') + } + await NimContainerManager.startContainer(imageTag, apiKey, hostPort, nimRelaxMemConstraints) return res.send(`Starting container ${imageTag}`) } catch (error) { next(error) @@ -79,17 +85,51 @@ const getImage = async (req: Request, res: Response, next: NextFunction) => { const getContainer = async (req: Request, res: Response, next: NextFunction) => { try { const imageTag = req.body.imageTag + const port = req.body.port + + // First check if the image exists const images = await NimContainerManager.userImageLibrary() const image = images.find((img: any) => img.tag === imageTag) if (!image) { return res.status(404).send(`Image ${imageTag} not found`) } - if (!image.container) { - return res.status(404).send(`Container of ${imageTag} not found`) + + const containers = await NimContainerManager.listRunningContainers() + const portInUse = containers.find((cont: any) => cont.port === port) + if (portInUse) { + const isModelContainer = portInUse.image === image.tag + if (isModelContainer) { + portInUse.image = image.name + return res.json(portInUse) + } else { + return res.status(409).send({ + message: `Port ${port} is already in use by another container`, + container: portInUse + }) + } } - const container = image.container - container.image = image.name - return res.json(container) + + // If no container found with matching port, return 404 + return 
res.status(404).send(`Container of ${imageTag} with port ${port} not found`) + } catch (error) { + next(error) + } +} + +const listRunningContainers = async (req: Request, res: Response, next: NextFunction) => { + try { + const containers = await NimContainerManager.listRunningContainers() + return res.json(containers) + } catch (error) { + next(error) + } +} + +const stopContainer = async (req: Request, res: Response, next: NextFunction) => { + try { + const containerId = req.body.containerId + const containerInfo = await NimContainerManager.stopContainer(containerId) + return res.json(containerInfo) } catch (error) { next(error) } @@ -102,5 +142,7 @@ export default { pullImage, startContainer, getImage, - getContainer + getContainer, + listRunningContainers, + stopContainer } diff --git a/packages/server/src/metrics/OpenTelemetry.ts b/packages/server/src/metrics/OpenTelemetry.ts index 7686225db83..a9a3d9c4f1c 100644 --- a/packages/server/src/metrics/OpenTelemetry.ts +++ b/packages/server/src/metrics/OpenTelemetry.ts @@ -6,6 +6,9 @@ import { diag, DiagLogLevel, DiagConsoleLogger, Attributes, Counter } from '@ope import { getVersion } from 'flowise-components' import express from 'express' +// Create a static map to track created metrics and prevent duplicates +const createdMetrics = new Map() + export class OpenTelemetry implements IMetricsProvider { private app: express.Application private resource: Resource @@ -30,6 +33,9 @@ export class OpenTelemetry implements IMetricsProvider { if (process.env.METRICS_OPEN_TELEMETRY_DEBUG === 'true') { diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG) } + + // Clear metrics tracking on new instance + createdMetrics.clear() } public getName(): string { @@ -37,121 +43,215 @@ export class OpenTelemetry implements IMetricsProvider { } async initializeCounters(): Promise { - // Define the resource with the service name for trace grouping - const flowiseVersion = await getVersion() - - this.resource = new Resource({ - 
[ATTR_SERVICE_NAME]: process.env.METRICS_SERVICE_NAME || 'FlowiseAI', - [ATTR_SERVICE_VERSION]: flowiseVersion.version // Version as a label - }) - - const metricProtocol = process.env.METRICS_OPEN_TELEMETRY_PROTOCOL || 'http' // Default to 'http' - // Conditionally import the correct OTLP exporters based on protocol - let OTLPMetricExporter - if (metricProtocol === 'http') { - OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-http').OTLPMetricExporter - } else if (metricProtocol === 'grpc') { - OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-grpc').OTLPMetricExporter - } else if (metricProtocol === 'proto') { - OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-proto').OTLPMetricExporter - } else { - console.error('Invalid METRICS_OPEN_TELEMETRY_PROTOCOL specified. Please set it to "http", "grpc", or "proto".') - process.exit(1) // Exit if invalid protocol type is specified - } + try { + // Define the resource with the service name for trace grouping + const flowiseVersion = await getVersion() - this.otlpMetricExporter = new OTLPMetricExporter({ - url: process.env.METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT // OTLP endpoint for metrics - }) - - this.metricReader = new PeriodicExportingMetricReader({ - exporter: this.otlpMetricExporter, - exportIntervalMillis: 5000 // Export metrics every 5 seconds - }) - this.meterProvider = new MeterProvider({ resource: this.resource, readers: [this.metricReader] }) - - const meter = this.meterProvider.getMeter('flowise-metrics') - // look at the FLOWISE_COUNTER enum in Interface.Metrics.ts and get all values - // for each counter in the enum, create a new promClient.Counter and add it to the registry - const enumEntries = Object.entries(FLOWISE_METRIC_COUNTERS) - enumEntries.forEach(([name, value]) => { - // derive proper counter name from the enum value (chatflow_created = Chatflow Created) - const properCounterName: string = name.replace(/_/g, ' ').replace(/\b\w/g, (l) => 
l.toUpperCase()) - this.counters.set( - value, - meter.createCounter(value, { - description: properCounterName - }) - ) - }) - - // in addition to the enum counters, add a few more custom counters - - const versionGuage = meter.createGauge('flowise_version', { - description: 'Flowise version' - }) - // remove the last dot from the version string, e.g. 2.1.3 -> 2.13 (guage needs a number - float) - const formattedVersion = flowiseVersion.version.replace(/\.(\d+)$/, '$1') - versionGuage.record(parseFloat(formattedVersion)) - - // Counter for HTTP requests with method, path, and status as labels - this.httpRequestCounter = meter.createCounter('http_requests_total', { - description: 'Counts the number of HTTP requests received' - }) - - // Histogram to measure HTTP request duration in milliseconds - this.httpRequestDuration = meter.createHistogram('http_request_duration_ms', { - description: 'Records the duration of HTTP requests in ms' - }) + this.resource = new Resource({ + [ATTR_SERVICE_NAME]: process.env.METRICS_SERVICE_NAME || 'FlowiseAI', + [ATTR_SERVICE_VERSION]: flowiseVersion.version // Version as a label + }) + + const metricProtocol = process.env.METRICS_OPEN_TELEMETRY_PROTOCOL || 'http' // Default to 'http' + // Conditionally import the correct OTLP exporters based on protocol + let OTLPMetricExporter + if (metricProtocol === 'http') { + OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-http').OTLPMetricExporter + } else if (metricProtocol === 'grpc') { + OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-grpc').OTLPMetricExporter + } else if (metricProtocol === 'proto') { + OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-proto').OTLPMetricExporter + } else { + console.error('Invalid METRICS_OPEN_TELEMETRY_PROTOCOL specified. 
Please set it to "http", "grpc", or "proto".') + process.exit(1) // Exit if invalid protocol type is specified + } + + // Handle any existing metric exporter + if (this.otlpMetricExporter) { + try { + await this.otlpMetricExporter.shutdown() + } catch (error) { + // Ignore shutdown errors + } + } + + this.otlpMetricExporter = new OTLPMetricExporter({ + url: process.env.METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT // OTLP endpoint for metrics + }) + + // Clean up any existing metric reader + if (this.metricReader) { + try { + await this.metricReader.shutdown() + } catch (error) { + // Ignore shutdown errors + } + } + + this.metricReader = new PeriodicExportingMetricReader({ + exporter: this.otlpMetricExporter, + exportIntervalMillis: 5000 // Export metrics every 5 seconds + }) + + // Clean up any existing meter provider + if (this.meterProvider) { + try { + await this.meterProvider.shutdown() + } catch (error) { + // Ignore shutdown errors + } + } + + this.meterProvider = new MeterProvider({ resource: this.resource, readers: [this.metricReader] }) + + const meter = this.meterProvider.getMeter('flowise-metrics') + // look at the FLOWISE_COUNTER enum in Interface.Metrics.ts and get all values + // for each counter in the enum, create a new promClient.Counter and add it to the registry + const enumEntries = Object.entries(FLOWISE_METRIC_COUNTERS) + enumEntries.forEach(([name, value]) => { + try { + // Check if we've already created this metric + if (!createdMetrics.has(value)) { + // derive proper counter name from the enum value (chatflow_created = Chatflow Created) + const properCounterName: string = name.replace(/_/g, ' ').replace(/\b\w/g, (l) => l.toUpperCase()) + this.counters.set( + value, + meter.createCounter(value, { + description: properCounterName + }) + ) + createdMetrics.set(value, true) + } + } catch (error) { + // Log error but continue with other metrics + console.error(`Error creating metric ${value}:`, error) + } + }) + + try { + // Add version gauge if not 
already created + if (!createdMetrics.has('flowise_version')) { + const versionGuage = meter.createGauge('flowise_version', { + description: 'Flowise version' + }) + // remove the last dot from the version string, e.g. 2.1.3 -> 2.13 (gauge needs a number - float) + const formattedVersion = flowiseVersion.version.replace(/\.(\d+)$/, '$1') + versionGuage.record(parseFloat(formattedVersion)) + createdMetrics.set('flowise_version', true) + } + } catch (error) { + console.error('Error creating version gauge:', error) + } + + try { + // HTTP requests counter + if (!createdMetrics.has('http_requests_total')) { + this.httpRequestCounter = meter.createCounter('http_requests_total', { + description: 'Counts the number of HTTP requests received' + }) + createdMetrics.set('http_requests_total', true) + } + } catch (error) { + console.error('Error creating HTTP request counter:', error) + } + + try { + // HTTP request duration histogram + if (!createdMetrics.has('http_request_duration_ms')) { + this.httpRequestDuration = meter.createHistogram('http_request_duration_ms', { + description: 'Records the duration of HTTP requests in ms' + }) + createdMetrics.set('http_request_duration_ms', true) + } + } catch (error) { + console.error('Error creating HTTP request duration histogram:', error) + } + + await this.setupMetricsEndpoint() + } catch (error) { + console.error('Error initializing OpenTelemetry metrics:', error) + // Don't throw - allow app to continue without metrics + } } // Function to record HTTP request duration private recordHttpRequestDuration(durationMs: number, method: string, path: string, status: number) { - this.httpRequestDuration.record(durationMs, { - method, - path, - status: status.toString() - }) + try { + if (this.httpRequestDuration) { + this.httpRequestDuration.record(durationMs, { + method, + path, + status: status.toString() + }) + } + } catch (error) { + // Log error but don't crash the application + console.error('Error recording HTTP request 
duration:', error) + } } // Function to record HTTP requests with specific labels private recordHttpRequest(method: string, path: string, status: number) { - this.httpRequestCounter.add(1, { - method, - path, - status: status.toString() - }) + try { + if (this.httpRequestCounter) { + this.httpRequestCounter.add(1, { + method, + path, + status: status.toString() + }) + } + } catch (error) { + // Log error but don't crash the application + console.error('Error recording HTTP request:', error) + } } async setupMetricsEndpoint(): Promise { - // Graceful shutdown for telemetry data flushing - process.on('SIGTERM', async () => { - await this.metricReader.shutdown() - await this.meterProvider.shutdown() - }) - - // Runs before each requests - this.app.use((req, res, next) => { - res.locals.startEpoch = Date.now() - next() - }) - - // Runs after each requests - this.app.use((req, res, next) => { - res.on('finish', async () => { - if (res.locals.startEpoch) { - const responseTimeInMs = Date.now() - res.locals.startEpoch - this.recordHttpRequest(req.method, req.path, res.statusCode) - this.recordHttpRequestDuration(responseTimeInMs, req.method, req.path, res.statusCode) + try { + // Graceful shutdown for telemetry data flushing + process.on('SIGTERM', async () => { + try { + if (this.metricReader) await this.metricReader.shutdown() + if (this.meterProvider) await this.meterProvider.shutdown() + } catch (error) { + console.error('Error during metrics shutdown:', error) } }) - next() - }) + + // Runs before each requests + this.app.use((req, res, next) => { + res.locals.startEpoch = Date.now() + next() + }) + + // Runs after each requests + this.app.use((req, res, next) => { + res.on('finish', async () => { + try { + if (res.locals.startEpoch) { + const responseTimeInMs = Date.now() - res.locals.startEpoch + this.recordHttpRequest(req.method, req.path, res.statusCode) + this.recordHttpRequestDuration(responseTimeInMs, req.method, req.path, res.statusCode) + } + } catch (error) 
{ + console.error('Error in metrics middleware:', error) + } + }) + next() + }) + } catch (error) { + console.error('Error setting up metrics endpoint:', error) + } } async incrementCounter(counter: string, payload: any): Promise { - // Increment OpenTelemetry counter with the payload - if (this.counters.has(counter)) { - ;(this.counters.get(counter) as Counter).add(1, payload) + try { + // Increment OpenTelemetry counter with the payload + if (this.counters.has(counter)) { + ;(this.counters.get(counter) as Counter).add(1, payload) + } + } catch (error) { + console.error(`Error incrementing counter ${counter}:`, error) } } } diff --git a/packages/server/src/metrics/Prometheus.ts b/packages/server/src/metrics/Prometheus.ts index 15eaafeac86..56b4da3ffc6 100644 --- a/packages/server/src/metrics/Prometheus.ts +++ b/packages/server/src/metrics/Prometheus.ts @@ -12,6 +12,9 @@ export class Prometheus implements IMetricsProvider { constructor(app: express.Application) { this.app = app + // Clear any existing default registry metrics to avoid conflicts + promClient.register.clear() + // Create a separate registry for our metrics this.register = new promClient.Registry() } @@ -27,48 +30,87 @@ export class Prometheus implements IMetricsProvider { // look at the FLOWISE_COUNTER enum in Interface.Metrics.ts and get all values // for each counter in the enum, create a new promClient.Counter and add it to the registry - this.counters = new Map>() + this.counters = new Map | promClient.Gauge | promClient.Histogram>() const enumEntries = Object.entries(FLOWISE_METRIC_COUNTERS) enumEntries.forEach(([name, value]) => { // derive proper counter name from the enum value (chatflow_created = Chatflow Created) const properCounterName: string = name.replace(/_/g, ' ').replace(/\b\w/g, (l) => l.toUpperCase()) - this.counters.set( - value, - new promClient.Counter({ - name: value, - help: `Total number of ${properCounterName}`, - labelNames: ['status'] - }) - ) + try { + this.counters.set( 
+ value, + new promClient.Counter({ + name: value, + help: `Total number of ${properCounterName}`, + labelNames: ['status'], + registers: [this.register] // Explicitly set the registry + }) + ) + } catch (error) { + // If metric already exists, get it from the registry instead + const existingMetrics = this.register.getSingleMetric(value) + if (existingMetrics) { + this.counters.set(value, existingMetrics as promClient.Counter) + } + } }) // in addition to the enum counters, add a few more custom counters // version, http_request_duration_ms, http_requests_total - const versionGaugeCounter = new promClient.Gauge({ - name: 'flowise_version_info', - help: 'Flowise version info.', - labelNames: ['version'] - }) + try { + const versionGaugeCounter = new promClient.Gauge({ + name: 'flowise_version_info', + help: 'Flowise version info.', + labelNames: ['version'], + registers: [this.register] // Explicitly set the registry + }) - const { version } = await getVersion() - versionGaugeCounter.set({ version: 'v' + version }, 1) - this.counters.set('flowise_version', versionGaugeCounter) + const { version } = await getVersion() + versionGaugeCounter.set({ version: 'v' + version }, 1) + this.counters.set('flowise_version', versionGaugeCounter) + } catch (error) { + // If metric already exists, get it from the registry + const existingMetric = this.register.getSingleMetric('flowise_version') + if (existingMetric) { + this.counters.set('flowise_version', existingMetric as promClient.Gauge) + } + } - this.httpRequestDurationMicroseconds = new promClient.Histogram({ - name: 'http_request_duration_ms', - help: 'Duration of HTTP requests in ms', - labelNames: ['method', 'route', 'code'], - buckets: [1, 5, 15, 50, 100, 200, 300, 400, 500] // buckets for response time from 0.1ms to 500ms - }) - this.counters.set('http_request_duration_ms', this.httpRequestDurationMicroseconds) + try { + this.httpRequestDurationMicroseconds = new promClient.Histogram({ + name: 
'http_request_duration_ms', + help: 'Duration of HTTP requests in ms', + labelNames: ['method', 'route', 'code'], + buckets: [1, 5, 15, 50, 100, 200, 300, 400, 500], // buckets for response time from 0.1ms to 500ms + registers: [this.register] // Explicitly set the registry + }) + this.counters.set('http_request_duration_ms', this.httpRequestDurationMicroseconds) + } catch (error) { + // If metric already exists, get it from the registry + const existingMetric = this.register.getSingleMetric('http_request_duration_ms') + if (existingMetric) { + this.httpRequestDurationMicroseconds = existingMetric as Histogram + this.counters.set('http_request_duration_ms', this.httpRequestDurationMicroseconds) + } + } - this.requestCounter = new Counter({ - name: 'http_requests_total', - help: 'Total number of HTTP requests', - labelNames: ['method', 'path', 'status'] - }) - this.counters.set('http_requests_total', this.requestCounter) + try { + this.requestCounter = new Counter({ + name: 'http_requests_total', + help: 'Total number of HTTP requests', + labelNames: ['method', 'path', 'status'], + registers: [this.register] // Explicitly set the registry + }) + this.counters.set('http_requests_total', this.requestCounter) + } catch (error) { + // If metric already exists, get it from the registry + const existingMetric = this.register.getSingleMetric('http_requests_total') + if (existingMetric) { + this.requestCounter = existingMetric as Counter + this.counters.set('http_requests_total', this.requestCounter) + } + } + // Only register metrics that aren't already in the registry this.registerMetrics() await this.setupMetricsEndpoint() } @@ -111,12 +153,28 @@ export class Prometheus implements IMetricsProvider { private registerMetrics() { if (process.env.METRICS_INCLUDE_NODE_METRICS !== 'false') { + // Clear any existing default metrics to avoid conflicts + promClient.register.clear() // enable default metrics like CPU usage, memory usage, etc. 
- promClient.collectDefaultMetrics({ register: this.register }) + // and ensure they're only registered with our custom registry + promClient.collectDefaultMetrics({ + register: this.register, + prefix: 'flowise_' // Add a prefix to avoid conflicts + }) } - // Add our custom metrics to the registry + + // Add only the custom metrics that haven't been registered yet for (const counter of this.counters.values()) { - this.register.registerMetric(counter) + try { + // Type assertion to access the name property + const metricName = (counter as any).name + if (!this.register.getSingleMetric(metricName)) { + this.register.registerMetric(counter) + } + } catch (error) { + // If we can't register the metric, it probably already exists + // Just continue with the next one + } } } } diff --git a/packages/server/src/middlewares/errors/index.ts b/packages/server/src/middlewares/errors/index.ts index 2f649843991..88b3dd80cb7 100644 --- a/packages/server/src/middlewares/errors/index.ts +++ b/packages/server/src/middlewares/errors/index.ts @@ -5,6 +5,8 @@ import { InternalFlowiseError } from '../../errors/internalFlowiseError' // we need eslint because we have to pass next arg for the error middleware // eslint-disable-next-line async function errorHandlerMiddleware(err: InternalFlowiseError, req: Request, res: Response, next: NextFunction) { + if (err.message.includes('401 Incorrect API key provided')) + err.message = '401 Invalid model key or Incorrect local model configuration.' 
let displayedError = { statusCode: err.statusCode || StatusCodes.INTERNAL_SERVER_ERROR, success: false, diff --git a/packages/server/src/queue/BaseQueue.ts b/packages/server/src/queue/BaseQueue.ts index 0c3003ea633..d3bf18d29e3 100644 --- a/packages/server/src/queue/BaseQueue.ts +++ b/packages/server/src/queue/BaseQueue.ts @@ -1,4 +1,4 @@ -import { Queue, Worker, Job, QueueEvents, RedisOptions } from 'bullmq' +import { Queue, Worker, Job, QueueEvents, RedisOptions, KeepJobs } from 'bullmq' import { v4 as uuidv4 } from 'uuid' import logger from '../utils/logger' @@ -6,6 +6,8 @@ const QUEUE_REDIS_EVENT_STREAM_MAX_LEN = process.env.QUEUE_REDIS_EVENT_STREAM_MA ? parseInt(process.env.QUEUE_REDIS_EVENT_STREAM_MAX_LEN) : 10000 const WORKER_CONCURRENCY = process.env.WORKER_CONCURRENCY ? parseInt(process.env.WORKER_CONCURRENCY) : 100000 +const REMOVE_ON_AGE = process.env.REMOVE_ON_AGE ? parseInt(process.env.REMOVE_ON_AGE) : -1 +const REMOVE_ON_COUNT = process.env.REMOVE_ON_COUNT ? parseInt(process.env.REMOVE_ON_COUNT) : -1 export abstract class BaseQueue { protected queue: Queue @@ -34,7 +36,24 @@ export abstract class BaseQueue { public async addJob(jobData: any): Promise { const jobId = jobData.id || uuidv4() - return await this.queue.add(jobId, jobData, { removeOnFail: true }) + + let removeOnFail: number | boolean | KeepJobs | undefined = true + let removeOnComplete: number | boolean | KeepJobs | undefined = undefined + + // Only override removal options if age or count is specified + if (REMOVE_ON_AGE !== -1 || REMOVE_ON_COUNT !== -1) { + const keepJobObj: KeepJobs = {} + if (REMOVE_ON_AGE !== -1) { + keepJobObj.age = REMOVE_ON_AGE + } + if (REMOVE_ON_COUNT !== -1) { + keepJobObj.count = REMOVE_ON_COUNT + } + removeOnFail = keepJobObj + removeOnComplete = keepJobObj + } + + return await this.queue.add(jobId, jobData, { removeOnFail, removeOnComplete }) } public createWorker(concurrency: number = WORKER_CONCURRENCY): Worker { diff --git 
a/packages/server/src/routes/nvidia-nim/index.ts b/packages/server/src/routes/nvidia-nim/index.ts index 9f695db4c63..473b57156e9 100644 --- a/packages/server/src/routes/nvidia-nim/index.ts +++ b/packages/server/src/routes/nvidia-nim/index.ts @@ -6,8 +6,10 @@ const router = express.Router() router.get('/preload', nimController.preload) router.get('/get-token', nimController.getToken) router.get('/download-installer', nimController.downloadInstaller) +router.get('/list-running-containers', nimController.listRunningContainers) router.post('/pull-image', nimController.pullImage) router.post('/start-container', nimController.startContainer) +router.post('/stop-container', nimController.stopContainer) router.post('/get-image', nimController.getImage) router.post('/get-container', nimController.getContainer) diff --git a/packages/server/src/services/assistants/index.ts b/packages/server/src/services/assistants/index.ts index 0681376bc53..a88f5e0019a 100644 --- a/packages/server/src/services/assistants/index.ts +++ b/packages/server/src/services/assistants/index.ts @@ -16,6 +16,7 @@ import { ICommonObject } from 'flowise-components' import logger from '../../utils/logger' import { ASSISTANT_PROMPT_GENERATOR } from '../../utils/prompt' import { INPUT_PARAMS_TYPE } from '../../utils/constants' +import { validate } from 'uuid' const createAssistant = async (requestBody: any): Promise => { try { @@ -339,6 +340,12 @@ const updateAssistant = async (assistantId: string, requestBody: any): Promise[], queryRunner?: QueryRunner): Promise => { try { + for (const data of newAssistants) { + if (data.id && !validate(data.id)) { + throw new InternalFlowiseError(StatusCodes.PRECONDITION_FAILED, `Error: importAssistants - invalid id!`) + } + } + const appServer = getRunningExpressApp() const repository = queryRunner ? 
queryRunner.manager.getRepository(Assistant) : appServer.AppDataSource.getRepository(Assistant) diff --git a/packages/server/src/services/chat-messages/index.ts b/packages/server/src/services/chat-messages/index.ts index dc3a9690dfb..20f0184feec 100644 --- a/packages/server/src/services/chat-messages/index.ts +++ b/packages/server/src/services/chat-messages/index.ts @@ -1,15 +1,15 @@ -import { DeleteResult, FindOptionsWhere } from 'typeorm' +import { removeFilesFromStorage } from 'flowise-components' import { StatusCodes } from 'http-status-codes' +import { DeleteResult, FindOptionsWhere } from 'typeorm' +import { ChatMessage } from '../../database/entities/ChatMessage' +import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' +import { InternalFlowiseError } from '../../errors/internalFlowiseError' +import { getErrorMessage } from '../../errors/utils' import { ChatMessageRatingType, ChatType, IChatMessage, MODE } from '../../Interface' -import { utilGetChatMessage } from '../../utils/getChatMessage' import { utilAddChatMessage } from '../../utils/addChatMesage' +import { utilGetChatMessage } from '../../utils/getChatMessage' import { getRunningExpressApp } from '../../utils/getRunningExpressApp' -import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' -import { removeFilesFromStorage } from 'flowise-components' import logger from '../../utils/logger' -import { ChatMessage } from '../../database/entities/ChatMessage' -import { InternalFlowiseError } from '../../errors/internalFlowiseError' -import { getErrorMessage } from '../../errors/utils' // Add chatmessages for chatflowid const createChatMessage = async (chatMessage: Partial) => { @@ -178,11 +178,23 @@ const abortChatMessage = async (chatId: string, chatflowid: string) => { } } +async function getAllMessages(): Promise { + const appServer = getRunningExpressApp() + return await appServer.AppDataSource.getRepository(ChatMessage).find() +} + +async function 
getAllMessagesFeedback(): Promise { + const appServer = getRunningExpressApp() + return await appServer.AppDataSource.getRepository(ChatMessageFeedback).find() +} + export default { createChatMessage, getAllChatMessages, getAllInternalChatMessages, removeAllChatMessages, removeChatMessagesByMessageIds, - abortChatMessage + abortChatMessage, + getAllMessages, + getAllMessagesFeedback } diff --git a/packages/server/src/services/chatflows/index.ts b/packages/server/src/services/chatflows/index.ts index 2fc1adc1822..6bfc8362388 100644 --- a/packages/server/src/services/chatflows/index.ts +++ b/packages/server/src/services/chatflows/index.ts @@ -1,6 +1,8 @@ import { ICommonObject, removeFolderFromStorage } from 'flowise-components' import { StatusCodes } from 'http-status-codes' +import { QueryRunner } from 'typeorm' import { ChatflowType, IReactFlowObject } from '../../Interface' +import { FLOWISE_COUNTER_STATUS, FLOWISE_METRIC_COUNTERS } from '../../Interface.Metrics' import { ChatFlow } from '../../database/entities/ChatFlow' import { ChatMessage } from '../../database/entities/ChatMessage' import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' @@ -13,8 +15,7 @@ import { containsBase64File, updateFlowDataWithFilePaths } from '../../utils/fil import { getRunningExpressApp } from '../../utils/getRunningExpressApp' import { utilGetUploadsConfig } from '../../utils/getUploadsConfig' import logger from '../../utils/logger' -import { FLOWISE_METRIC_COUNTERS, FLOWISE_COUNTER_STATUS } from '../../Interface.Metrics' -import { QueryRunner } from 'typeorm' +import { validate } from 'uuid' // Check if chatflow valid for streaming const checkIfChatflowIsValidForStreaming = async (chatflowId: string): Promise => { @@ -120,6 +121,8 @@ const getAllChatflows = async (type?: ChatflowType): Promise => { const dbResponse = await appServer.AppDataSource.getRepository(ChatFlow).find() if (type === 'MULTIAGENT') { return dbResponse.filter((chatflow) => 
chatflow.type === 'MULTIAGENT') + } else if (type === 'ASSISTANT') { + return dbResponse.filter((chatflow) => chatflow.type === 'ASSISTANT') } else if (type === 'CHATFLOW') { // fetch all chatflows that are not agentflow return dbResponse.filter((chatflow) => chatflow.type === 'CHATFLOW' || !chatflow.type) @@ -218,6 +221,12 @@ const saveChatflow = async (newChatFlow: ChatFlow): Promise => { const importChatflows = async (newChatflows: Partial[], queryRunner?: QueryRunner): Promise => { try { + for (const data of newChatflows) { + if (data.id && !validate(data.id)) { + throw new InternalFlowiseError(StatusCodes.PRECONDITION_FAILED, `Error: importChatflows - invalid id!`) + } + } + const appServer = getRunningExpressApp() const repository = queryRunner ? queryRunner.manager.getRepository(ChatFlow) : appServer.AppDataSource.getRepository(ChatFlow) diff --git a/packages/server/src/services/documentstore/index.ts b/packages/server/src/services/documentstore/index.ts index 9826af68bb8..adea69baed6 100644 --- a/packages/server/src/services/documentstore/index.ts +++ b/packages/server/src/services/documentstore/index.ts @@ -740,7 +740,7 @@ export const processLoader = async ({ appDataSource, componentNodes, data, docLo return getDocumentStoreFileChunks(appDataSource, data.storeId as string, docLoaderId) } -const processLoaderMiddleware = async (data: IDocumentStoreLoaderForPreview, docLoaderId: string) => { +const processLoaderMiddleware = async (data: IDocumentStoreLoaderForPreview, docLoaderId: string, isInternalRequest = false) => { try { const appServer = getRunningExpressApp() const appDataSource = appServer.AppDataSource @@ -761,6 +761,12 @@ const processLoaderMiddleware = async (data: IDocumentStoreLoaderForPreview, doc const job = await upsertQueue.addJob(omit(executeData, OMIT_QUEUE_JOB_DATA)) logger.debug(`[server]: Job added to queue: ${job.id}`) + if (isInternalRequest) { + return { + jobId: job.id + } + } + const queueEvents = upsertQueue.getQueueEvents() 
const result = await job.waitUntilFinished(queueEvents) diff --git a/packages/server/src/services/export-import/index.ts b/packages/server/src/services/export-import/index.ts index c113476a34f..90317d8fd72 100644 --- a/packages/server/src/services/export-import/index.ts +++ b/packages/server/src/services/export-import/index.ts @@ -1,40 +1,72 @@ import { StatusCodes } from 'http-status-codes' +import { In, QueryRunner } from 'typeorm' +import { v4 as uuidv4 } from 'uuid' +import { Assistant } from '../../database/entities/Assistant' import { ChatFlow } from '../../database/entities/ChatFlow' +import { ChatMessage } from '../../database/entities/ChatMessage' +import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' +import { CustomTemplate } from '../../database/entities/CustomTemplate' +import { DocumentStore } from '../../database/entities/DocumentStore' +import { DocumentStoreFileChunk } from '../../database/entities/DocumentStoreFileChunk' import { Tool } from '../../database/entities/Tool' import { Variable } from '../../database/entities/Variable' -import { Assistant } from '../../database/entities/Assistant' import { InternalFlowiseError } from '../../errors/internalFlowiseError' import { getErrorMessage } from '../../errors/utils' import { getRunningExpressApp } from '../../utils/getRunningExpressApp' +import assistantService from '../assistants' +import chatMessagesService from '../chat-messages' import chatflowService from '../chatflows' +import documenStoreService from '../documentstore' +import marketplacesService from '../marketplaces' import toolsService from '../tools' import variableService from '../variables' -import assistantService from '../assistants' type ExportInput = { - tool: boolean - chatflow: boolean agentflow: boolean + assistantCustom: boolean + assistantOpenAI: boolean + assistantAzure: boolean + chatflow: boolean + chat_message: boolean + chat_feedback: boolean + custom_template: boolean + document_store: 
boolean + tool: boolean variable: boolean - assistant: boolean } type ExportData = { - Tool: Tool[] - ChatFlow: ChatFlow[] AgentFlow: ChatFlow[] + AssistantCustom: Assistant[] + AssistantFlow: ChatFlow[] + AssistantOpenAI: Assistant[] + AssistantAzure: Assistant[] + ChatFlow: ChatFlow[] + ChatMessage: ChatMessage[] + ChatMessageFeedback: ChatMessageFeedback[] + CustomTemplate: CustomTemplate[] + DocumentStore: DocumentStore[] + DocumentStoreFileChunk: DocumentStoreFileChunk[] + Tool: Tool[] Variable: Variable[] - Assistant: Assistant[] } const convertExportInput = (body: any): ExportInput => { try { if (!body || typeof body !== 'object') throw new Error('Invalid ExportInput object in request body') - if (body.tool && typeof body.tool !== 'boolean') throw new Error('Invalid tool property in ExportInput object') - if (body.chatflow && typeof body.chatflow !== 'boolean') throw new Error('Invalid chatflow property in ExportInput object') if (body.agentflow && typeof body.agentflow !== 'boolean') throw new Error('Invalid agentflow property in ExportInput object') - if (body.variable && typeof body.variable !== 'boolean') throw new Error('Invalid variable property in ExportInput object') if (body.assistant && typeof body.assistant !== 'boolean') throw new Error('Invalid assistant property in ExportInput object') + if (body.chatflow && typeof body.chatflow !== 'boolean') throw new Error('Invalid chatflow property in ExportInput object') + if (body.chat_message && typeof body.chat_message !== 'boolean') + throw new Error('Invalid chat_message property in ExportInput object') + if (body.chat_feedback && typeof body.chat_feedback !== 'boolean') + throw new Error('Invalid chat_feedback property in ExportInput object') + if (body.custom_template && typeof body.custom_template !== 'boolean') + throw new Error('Invalid custom_template property in ExportInput object') + if (body.document_store && typeof body.document_store !== 'boolean') + throw new Error('Invalid document_store 
property in ExportInput object') + if (body.tool && typeof body.tool !== 'boolean') throw new Error('Invalid tool property in ExportInput object') + if (body.variable && typeof body.variable !== 'boolean') throw new Error('Invalid variable property in ExportInput object') return body as ExportInput } catch (error) { throw new InternalFlowiseError( @@ -47,31 +79,49 @@ const convertExportInput = (body: any): ExportInput => { const FileDefaultName = 'ExportData.json' const exportData = async (exportInput: ExportInput): Promise<{ FileDefaultName: string } & ExportData> => { try { - // step 1 - get all Tool - let allTool: Tool[] = [] - if (exportInput.tool === true) allTool = await toolsService.getAllTools() + let AgentFlow: ChatFlow[] = exportInput.agentflow === true ? await chatflowService.getAllChatflows('MULTIAGENT') : [] + + let AssistantCustom: Assistant[] = exportInput.assistantCustom === true ? await assistantService.getAllAssistants('CUSTOM') : [] + let AssistantFlow: ChatFlow[] = exportInput.assistantCustom === true ? await chatflowService.getAllChatflows('ASSISTANT') : [] + + let AssistantOpenAI: Assistant[] = exportInput.assistantOpenAI === true ? await assistantService.getAllAssistants('OPENAI') : [] + + let AssistantAzure: Assistant[] = exportInput.assistantAzure === true ? await assistantService.getAllAssistants('AZURE') : [] + + let ChatFlow: ChatFlow[] = exportInput.chatflow === true ? await chatflowService.getAllChatflows('CHATFLOW') : [] + + let ChatMessage: ChatMessage[] = exportInput.chat_message === true ? await chatMessagesService.getAllMessages() : [] + + let ChatMessageFeedback: ChatMessageFeedback[] = + exportInput.chat_feedback === true ? await chatMessagesService.getAllMessagesFeedback() : [] - // step 2 - get all ChatFlow - let allChatflow: ChatFlow[] = [] - if (exportInput.chatflow === true) allChatflow = await chatflowService.getAllChatflows('CHATFLOW') + let CustomTemplate: CustomTemplate[] = exportInput.custom_template === true ? 
await marketplacesService.getAllCustomTemplates() : [] + CustomTemplate = CustomTemplate.map((customTemplate) => ({ ...customTemplate, usecases: JSON.stringify(customTemplate.usecases) })) - // step 3 - get all MultiAgent - let allMultiAgent: ChatFlow[] = [] - if (exportInput.agentflow === true) allMultiAgent = await chatflowService.getAllChatflows('MULTIAGENT') + let DocumentStore: DocumentStore[] = exportInput.document_store === true ? await documenStoreService.getAllDocumentStores() : [] - let allVars: Variable[] = [] - if (exportInput.variable === true) allVars = await variableService.getAllVariables() + let DocumentStoreFileChunk: DocumentStoreFileChunk[] = + exportInput.document_store === true ? await documenStoreService.getAllDocumentFileChunks() : [] - let allAssistants: Assistant[] = [] - if (exportInput.assistant === true) allAssistants = await assistantService.getAllAssistants() + let Tool: Tool[] = exportInput.tool === true ? await toolsService.getAllTools() : [] + + let Variable: Variable[] = exportInput.variable === true ? 
await variableService.getAllVariables() : [] return { FileDefaultName, - Tool: allTool, - ChatFlow: allChatflow, - AgentFlow: allMultiAgent, - Variable: allVars, - Assistant: allAssistants + AgentFlow, + AssistantCustom, + AssistantFlow, + AssistantOpenAI, + AssistantAzure, + ChatFlow, + ChatMessage, + ChatMessageFeedback, + CustomTemplate, + DocumentStore, + DocumentStoreFileChunk, + Tool, + Variable } } catch (error) { throw new InternalFlowiseError( @@ -81,28 +131,348 @@ const exportData = async (exportInput: ExportInput): Promise<{ FileDefaultName: } } +async function replaceDuplicateIdsForChatFlow(queryRunner: QueryRunner, originalData: ExportData, chatflows: ChatFlow[]) { + try { + const ids = chatflows.map((chatflow) => chatflow.id) + const records = await queryRunner.manager.find(ChatFlow, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForChatflow - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForAssistant(queryRunner: QueryRunner, originalData: ExportData, assistants: Assistant[]) { + try { + const ids = assistants.map((assistant) => assistant.id) + const records = await queryRunner.manager.find(Assistant, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForAssistant - ${getErrorMessage(error)}` + ) + } +} + +async 
function replaceDuplicateIdsForChatMessage(queryRunner: QueryRunner, originalData: ExportData, chatMessages: ChatMessage[]) { + try { + const chatmessageChatflowIds = chatMessages.map((chatMessage) => { + return { id: chatMessage.chatflowid, qty: 0 } + }) + const originalDataChatflowIds = originalData.ChatFlow.map((chatflow) => chatflow.id) + chatmessageChatflowIds.forEach((item) => { + if (originalDataChatflowIds.includes(item.id)) { + item.qty += 1 + } + }) + const databaseChatflowIds = await ( + await queryRunner.manager.find(ChatFlow, { + where: { id: In(chatmessageChatflowIds.map((chatmessageChatflowId) => chatmessageChatflowId.id)) } + }) + ).map((chatflow) => chatflow.id) + chatmessageChatflowIds.forEach((item) => { + if (databaseChatflowIds.includes(item.id)) { + item.qty += 1 + } + }) + + const missingChatflowIds = chatmessageChatflowIds.filter((item) => item.qty === 0).map((item) => item.id) + if (missingChatflowIds.length > 0) { + chatMessages = chatMessages.filter((chatMessage) => !missingChatflowIds.includes(chatMessage.chatflowid)) + originalData.ChatMessage = chatMessages + } + + const ids = chatMessages.map((chatMessage) => chatMessage.id) + const records = await queryRunner.manager.find(ChatMessage, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForChatMessage - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForChatMessageFeedback( + queryRunner: QueryRunner, + originalData: ExportData, + chatMessageFeedbacks: ChatMessageFeedback[] +) { + try { + const feedbackChatflowIds = chatMessageFeedbacks.map((feedback) => { + return { id: feedback.chatflowid, qty: 0 } + }) + 
const originalDataChatflowIds = originalData.ChatFlow.map((chatflow) => chatflow.id) + feedbackChatflowIds.forEach((item) => { + if (originalDataChatflowIds.includes(item.id)) { + item.qty += 1 + } + }) + const databaseChatflowIds = await ( + await queryRunner.manager.find(ChatFlow, { + where: { id: In(feedbackChatflowIds.map((feedbackChatflowId) => feedbackChatflowId.id)) } + }) + ).map((chatflow) => chatflow.id) + feedbackChatflowIds.forEach((item) => { + if (databaseChatflowIds.includes(item.id)) { + item.qty += 1 + } + }) + + const feedbackMessageIds = chatMessageFeedbacks.map((feedback) => { + return { id: feedback.messageId, qty: 0 } + }) + const originalDataMessageIds = originalData.ChatMessage.map((chatMessage) => chatMessage.id) + feedbackMessageIds.forEach((item) => { + if (originalDataMessageIds.includes(item.id)) { + item.qty += 1 + } + }) + const databaseMessageIds = await ( + await queryRunner.manager.find(ChatMessage, { + where: { id: In(feedbackMessageIds.map((feedbackMessageId) => feedbackMessageId.id)) } + }) + ).map((chatMessage) => chatMessage.id) + feedbackMessageIds.forEach((item) => { + if (databaseMessageIds.includes(item.id)) { + item.qty += 1 + } + }) + + const missingChatflowIds = feedbackChatflowIds.filter((item) => item.qty === 0).map((item) => item.id) + const missingMessageIds = feedbackMessageIds.filter((item) => item.qty === 0).map((item) => item.id) + + if (missingChatflowIds.length > 0 || missingMessageIds.length > 0) { + chatMessageFeedbacks = chatMessageFeedbacks.filter( + (feedback) => !missingChatflowIds.includes(feedback.chatflowid) && !missingMessageIds.includes(feedback.messageId) + ) + originalData.ChatMessageFeedback = chatMessageFeedbacks + } + + const ids = chatMessageFeedbacks.map((chatMessageFeedback) => chatMessageFeedback.id) + const records = await queryRunner.manager.find(ChatMessageFeedback, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId 
= record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForChatMessageFeedback - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForCustomTemplate(queryRunner: QueryRunner, originalData: ExportData, customTemplates: CustomTemplate[]) { + try { + const ids = customTemplates.map((customTemplate) => customTemplate.id) + const records = await queryRunner.manager.find(CustomTemplate, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForCustomTemplate - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForDocumentStore(queryRunner: QueryRunner, originalData: ExportData, documentStores: DocumentStore[]) { + try { + const ids = documentStores.map((documentStore) => documentStore.id) + const records = await queryRunner.manager.find(DocumentStore, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForDocumentStore - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForDocumentStoreFileChunk( + queryRunner: QueryRunner, + originalData: ExportData, + 
documentStoreFileChunks: DocumentStoreFileChunk[] +) { + try { + const ids = documentStoreFileChunks.map((documentStoreFileChunk) => documentStoreFileChunk.id) + const records = await queryRunner.manager.find(DocumentStoreFileChunk, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForDocumentStoreFileChunk - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForTool(queryRunner: QueryRunner, originalData: ExportData, tools: Tool[]) { + try { + const ids = tools.map((tool) => tool.id) + const records = await queryRunner.manager.find(Tool, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForTool - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForVariable(queryRunner: QueryRunner, originalData: ExportData, variables: Variable[]) { + try { + const ids = variables.map((variable) => variable.id) + const records = await queryRunner.manager.find(Variable, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + 
`Error: exportImportService.replaceDuplicateIdsForVariable - ${getErrorMessage(error)}` + ) + } +} + +function reduceSpaceForChatflowFlowData(chatflows: ChatFlow[]) { + return chatflows.map((chatflow) => { + return { ...chatflow, flowData: JSON.stringify(JSON.parse(chatflow.flowData)) } + }) +} + const importData = async (importData: ExportData) => { + let queryRunner try { - const appServer = getRunningExpressApp() - const queryRunner = appServer.AppDataSource.createQueryRunner() + queryRunner = getRunningExpressApp().AppDataSource.createQueryRunner() + await queryRunner.connect() try { + if (importData.AgentFlow.length > 0) { + importData.AgentFlow = reduceSpaceForChatflowFlowData(importData.AgentFlow) + importData = await replaceDuplicateIdsForChatFlow(queryRunner, importData, importData.AgentFlow) + } + if (importData.AssistantCustom.length > 0) + importData = await replaceDuplicateIdsForAssistant(queryRunner, importData, importData.AssistantCustom) + if (importData.AssistantFlow.length > 0) { + importData.AssistantFlow = reduceSpaceForChatflowFlowData(importData.AssistantFlow) + importData = await replaceDuplicateIdsForChatFlow(queryRunner, importData, importData.AssistantFlow) + } + if (importData.AssistantOpenAI.length > 0) + importData = await replaceDuplicateIdsForAssistant(queryRunner, importData, importData.AssistantOpenAI) + if (importData.AssistantAzure.length > 0) + importData = await replaceDuplicateIdsForAssistant(queryRunner, importData, importData.AssistantAzure) + if (importData.ChatFlow.length > 0) { + importData.ChatFlow = reduceSpaceForChatflowFlowData(importData.ChatFlow) + importData = await replaceDuplicateIdsForChatFlow(queryRunner, importData, importData.ChatFlow) + } + if (importData.ChatMessage.length > 0) + importData = await replaceDuplicateIdsForChatMessage(queryRunner, importData, importData.ChatMessage) + if (importData.ChatMessageFeedback.length > 0) + importData = await replaceDuplicateIdsForChatMessageFeedback(queryRunner, 
importData, importData.ChatMessageFeedback) + if (importData.CustomTemplate.length > 0) + importData = await replaceDuplicateIdsForCustomTemplate(queryRunner, importData, importData.CustomTemplate) + if (importData.DocumentStore.length > 0) + importData = await replaceDuplicateIdsForDocumentStore(queryRunner, importData, importData.DocumentStore) + if (importData.DocumentStoreFileChunk.length > 0) + importData = await replaceDuplicateIdsForDocumentStoreFileChunk(queryRunner, importData, importData.DocumentStoreFileChunk) + if (importData.Tool.length > 0) importData = await replaceDuplicateIdsForTool(queryRunner, importData, importData.Tool) + if (importData.Variable.length > 0) + importData = await replaceDuplicateIdsForVariable(queryRunner, importData, importData.Variable) + await queryRunner.startTransaction() - if (importData.Tool.length > 0) await toolsService.importTools(importData.Tool, queryRunner) - if (importData.ChatFlow.length > 0) await chatflowService.importChatflows(importData.ChatFlow, queryRunner) - if (importData.AgentFlow.length > 0) await chatflowService.importChatflows(importData.AgentFlow, queryRunner) - if (importData.Variable.length > 0) await variableService.importVariables(importData.Variable, queryRunner) - if (importData.Assistant.length > 0) await assistantService.importAssistants(importData.Assistant, queryRunner) + if (importData.AgentFlow.length > 0) await queryRunner.manager.save(ChatFlow, importData.AgentFlow) + if (importData.AssistantFlow.length > 0) await queryRunner.manager.save(ChatFlow, importData.AssistantFlow) + if (importData.AssistantCustom.length > 0) await queryRunner.manager.save(Assistant, importData.AssistantCustom) + if (importData.AssistantOpenAI.length > 0) await queryRunner.manager.save(Assistant, importData.AssistantOpenAI) + if (importData.AssistantAzure.length > 0) await queryRunner.manager.save(Assistant, importData.AssistantAzure) + if (importData.ChatFlow.length > 0) await queryRunner.manager.save(ChatFlow, 
importData.ChatFlow) + if (importData.ChatMessage.length > 0) await queryRunner.manager.save(ChatMessage, importData.ChatMessage) + if (importData.ChatMessageFeedback.length > 0) + await queryRunner.manager.save(ChatMessageFeedback, importData.ChatMessageFeedback) + if (importData.CustomTemplate.length > 0) await queryRunner.manager.save(CustomTemplate, importData.CustomTemplate) + if (importData.DocumentStore.length > 0) await queryRunner.manager.save(DocumentStore, importData.DocumentStore) + if (importData.DocumentStoreFileChunk.length > 0) + await queryRunner.manager.save(DocumentStoreFileChunk, importData.DocumentStoreFileChunk) + if (importData.Tool.length > 0) await queryRunner.manager.save(Tool, importData.Tool) + if (importData.Variable.length > 0) await queryRunner.manager.save(Variable, importData.Variable) await queryRunner.commitTransaction() } catch (error) { - await queryRunner.rollbackTransaction() + if (queryRunner && queryRunner.isTransactionActive) await queryRunner.rollbackTransaction() + throw error } finally { - if (!queryRunner.isReleased) { - await queryRunner.release() - } + if (queryRunner && !queryRunner.isReleased) await queryRunner.release() } } catch (error) { throw new InternalFlowiseError( diff --git a/packages/server/src/services/feedback/validation.ts b/packages/server/src/services/feedback/validation.ts new file mode 100644 index 00000000000..03db24ec92b --- /dev/null +++ b/packages/server/src/services/feedback/validation.ts @@ -0,0 +1,127 @@ +import { StatusCodes } from 'http-status-codes' +import { IChatMessageFeedback } from '../../Interface' +import { InternalFlowiseError } from '../../errors/internalFlowiseError' +import { getRunningExpressApp } from '../../utils/getRunningExpressApp' +import { ChatMessage } from '../../database/entities/ChatMessage' +import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' + +/** + * Validates that the message ID exists + * @param {string} messageId + */ +export const 
validateMessageExists = async (messageId: string): Promise<ChatMessage> => { + const appServer = getRunningExpressApp() + const message = await appServer.AppDataSource.getRepository(ChatMessage).findOne({ + where: { id: messageId } + }) + + if (!message) { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Message with ID ${messageId} not found`) + } + + return message +} + +/** + * Validates that the feedback ID exists + * @param {string} feedbackId + */ +export const validateFeedbackExists = async (feedbackId: string): Promise<ChatMessageFeedback> => { + const appServer = getRunningExpressApp() + const feedbackExists = await appServer.AppDataSource.getRepository(ChatMessageFeedback).findOne({ + where: { id: feedbackId } + }) + + if (!feedbackExists) { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Feedback with ID ${feedbackId} not found`) + } + + return feedbackExists +} + +/** + * Validates a feedback object for creation + * @param {Partial<IChatMessageFeedback>} feedback + */ +export const validateFeedbackForCreation = async (feedback: Partial<IChatMessageFeedback>): Promise<Partial<IChatMessageFeedback>> => { + // If messageId is provided, validate it exists and get the message + let message: ChatMessage | null = null + if (feedback.messageId) { + message = await validateMessageExists(feedback.messageId) + } else { + throw new InternalFlowiseError(StatusCodes.BAD_REQUEST, 'Message ID is required') + } + + // If chatId is provided, validate it matches the message's chatId + if (feedback.chatId) { + if (message.chatId !== feedback.chatId) { + throw new InternalFlowiseError( + StatusCodes.BAD_REQUEST, + `Inconsistent chat ID: message with ID ${message.id} does not belong to chat with ID ${feedback.chatId}` + ) + } + } else { + // If not provided, use the message's chatId + feedback.chatId = message.chatId + } + + // If chatflowid is provided, validate it matches the message's chatflowid + if (feedback.chatflowid) { + if (message.chatflowid !== feedback.chatflowid) { + throw new InternalFlowiseError( + StatusCodes.BAD_REQUEST, + `Inconsistent chatflow 
ID: message with ID ${message.id} does not belong to chatflow with ID ${feedback.chatflowid}` + ) + } + } else { + // If not provided, use the message's chatflowid + feedback.chatflowid = message.chatflowid + } + + return feedback +} + +/** + * Validates a feedback object for update + * @param {string} feedbackId + * @param {Partial<IChatMessageFeedback>} feedback + */ +export const validateFeedbackForUpdate = async ( + feedbackId: string, + feedback: Partial<IChatMessageFeedback> +): Promise<Partial<IChatMessageFeedback>> => { + // First validate the feedback exists + const existingFeedback = await validateFeedbackExists(feedbackId) + + feedback.messageId = feedback.messageId ?? existingFeedback.messageId + feedback.chatId = feedback.chatId ?? existingFeedback.chatId + feedback.chatflowid = feedback.chatflowid ?? existingFeedback.chatflowid + + // If messageId is provided, validate it exists and get the message + let message: ChatMessage | null = null + if (feedback.messageId) { + message = await validateMessageExists(feedback.messageId) + } + + // If chatId is provided and we have a message, validate it matches the message's chatId + if (feedback.chatId) { + if (message?.chatId !== feedback.chatId) { + throw new InternalFlowiseError( + StatusCodes.BAD_REQUEST, + `Inconsistent chat ID: message with ID ${message?.id} does not belong to chat with ID ${feedback.chatId}` + ) + } + } + + // If chatflowid is provided and we have a message, validate it matches the message's chatflowid + if (feedback.chatflowid && message) { + if (message?.chatflowid !== feedback.chatflowid) { + throw new InternalFlowiseError( + StatusCodes.BAD_REQUEST, + `Inconsistent chatflow ID: message with ID ${message?.id} does not belong to chatflow with ID ${feedback.chatflowid}` + ) + } + } + + return feedback +} diff --git a/packages/server/src/services/tools/index.ts b/packages/server/src/services/tools/index.ts index 9ba60d8b3cb..0dbf69b7ff1 100644 --- a/packages/server/src/services/tools/index.ts +++ b/packages/server/src/services/tools/index.ts @@ -6,6 +6,7 @@ 
import { getAppVersion } from '../../utils' import { getRunningExpressApp } from '../../utils/getRunningExpressApp' import { FLOWISE_METRIC_COUNTERS, FLOWISE_COUNTER_STATUS } from '../../Interface.Metrics' import { QueryRunner } from 'typeorm' +import { validate } from 'uuid' const createTool = async (requestBody: any): Promise => { try { @@ -84,6 +85,12 @@ const updateTool = async (toolId: string, toolBody: any): Promise => { const importTools = async (newTools: Partial[], queryRunner?: QueryRunner) => { try { + for (const data of newTools) { + if (data.id && !validate(data.id)) { + throw new InternalFlowiseError(StatusCodes.PRECONDITION_FAILED, `Error: importTools - invalid id!`) + } + } + const appServer = getRunningExpressApp() const repository = queryRunner ? queryRunner.manager.getRepository(Tool) : appServer.AppDataSource.getRepository(Tool) diff --git a/packages/server/src/services/variables/index.ts b/packages/server/src/services/variables/index.ts index a01d5b3dc4c..d06e8c6c778 100644 --- a/packages/server/src/services/variables/index.ts +++ b/packages/server/src/services/variables/index.ts @@ -4,6 +4,7 @@ import { Variable } from '../../database/entities/Variable' import { InternalFlowiseError } from '../../errors/internalFlowiseError' import { getErrorMessage } from '../../errors/utils' import { QueryRunner } from 'typeorm' +import { validate } from 'uuid' const createVariable = async (newVariable: Variable) => { try { @@ -76,6 +77,12 @@ const updateVariable = async (variable: Variable, updatedVariable: Variable) => const importVariables = async (newVariables: Partial[], queryRunner?: QueryRunner): Promise => { try { + for (const data of newVariables) { + if (data.id && !validate(data.id)) { + throw new InternalFlowiseError(StatusCodes.PRECONDITION_FAILED, `Error: importVariables - invalid id!`) + } + } + const appServer = getRunningExpressApp() const repository = queryRunner ? 
queryRunner.manager.getRepository(Variable) : appServer.AppDataSource.getRepository(Variable) diff --git a/packages/server/src/utils/SSEStreamer.ts b/packages/server/src/utils/SSEStreamer.ts index a5327bad1f0..6240d0a04c4 100644 --- a/packages/server/src/utils/SSEStreamer.ts +++ b/packages/server/src/utils/SSEStreamer.ts @@ -166,6 +166,7 @@ export class SSEStreamer implements IServerSideEventStreamer { } streamErrorEvent(chatId: string, msg: string) { + if (msg.includes('401 Incorrect API key provided')) msg = '401 Invalid model key or Incorrect local model configuration.' const client = this.clients[chatId] if (client) { const clientResponse = { diff --git a/packages/server/src/utils/buildChatflow.ts b/packages/server/src/utils/buildChatflow.ts index 9c321011548..0332cd6b128 100644 --- a/packages/server/src/utils/buildChatflow.ts +++ b/packages/server/src/utils/buildChatflow.ts @@ -14,7 +14,8 @@ import { mapMimeTypeToInputField, mapExtToInputField, getFileFromUpload, - removeSpecificFileFromUpload + removeSpecificFileFromUpload, + handleEscapeCharacters } from 'flowise-components' import { StatusCodes } from 'http-status-codes' import { @@ -244,7 +245,7 @@ export const executeFlow = async ({ ...incomingInput } - const question = incomingInput.question || '' // Ensure question is never undefined + let question = incomingInput.question || '' // Ensure question is never undefined let overrideConfig = incomingInput.overrideConfig ?? {} const uploads = incomingInput.uploads const prependMessages = incomingInput.history ?? 
[] @@ -308,6 +309,7 @@ export const executeFlow = async ({ logger.debug(`Speech to text result: ${speechToTextResult}`) if (speechToTextResult) { incomingInput.question = speechToTextResult + question = speechToTextResult } } } @@ -664,6 +666,7 @@ export const executeFlow = async ({ const postProcessingFunction = JSON.parse(chatflowConfig?.postProcessing?.customFunction) const nodeInstanceFilePath = componentNodes['customFunction'].filePath as string const nodeModule = await import(nodeInstanceFilePath) + //set the outputs.output to EndingNode to prevent json escaping of content... const nodeData = { inputs: { javascriptFunction: postProcessingFunction }, outputs: { output: 'output' } @@ -680,7 +683,13 @@ export const executeFlow = async ({ } const customFuncNodeInstance = new nodeModule.nodeClass() let moderatedResponse = await customFuncNodeInstance.init(nodeData, question, options) - result.text = moderatedResponse + if (typeof moderatedResponse === 'string') { + result.text = handleEscapeCharacters(moderatedResponse, true) + } else if (typeof moderatedResponse === 'object') { + result.text = '```json\n' + JSON.stringify(moderatedResponse, null, 2) + '\n```' + } else { + result.text = moderatedResponse + } resultText = result.text } catch (e) { logger.log('[server]: Post Processing Error:', e) diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index 3aa371500d5..b54e117dba4 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -43,6 +43,7 @@ import { randomBytes } from 'crypto' import { AES, enc } from 'crypto-js' import multer from 'multer' import multerS3 from 'multer-s3' +import MulterGoogleCloudStorage from 'multer-cloud-storage' import { ChatFlow } from '../database/entities/ChatFlow' import { ChatMessage } from '../database/entities/ChatMessage' import { Credential } from '../database/entities/Credential' @@ -59,7 +60,6 @@ import { StatusCodes } from 'http-status-codes' import { 
CreateSecretCommand, GetSecretValueCommand, - PutSecretValueCommand, SecretsManagerClient, SecretsManagerClientConfig } from '@aws-sdk/client-secrets-manager' @@ -1394,6 +1394,29 @@ export const getEncryptionKey = async (): Promise => { if (process.env.FLOWISE_SECRETKEY_OVERWRITE !== undefined && process.env.FLOWISE_SECRETKEY_OVERWRITE !== '') { return process.env.FLOWISE_SECRETKEY_OVERWRITE } + if (USE_AWS_SECRETS_MANAGER && secretsManagerClient) { + const secretId = process.env.SECRETKEY_AWS_NAME || 'FlowiseEncryptionKey' + try { + const command = new GetSecretValueCommand({ SecretId: secretId }) + const response = await secretsManagerClient.send(command) + + if (response.SecretString) { + return response.SecretString + } + } catch (error: any) { + if (error.name === 'ResourceNotFoundException') { + // Secret doesn't exist, create it + const newKey = generateEncryptKey() + const createCommand = new CreateSecretCommand({ + Name: secretId, + SecretString: newKey + }) + await secretsManagerClient.send(createCommand) + return newKey + } + throw error + } + } try { return await fs.promises.readFile(getEncryptionKeyPath(), 'utf8') } catch (error) { @@ -1412,39 +1435,7 @@ export const getEncryptionKey = async (): Promise => { * @returns {Promise} */ export const encryptCredentialData = async (plainDataObj: ICredentialDataDecrypted): Promise => { - if (USE_AWS_SECRETS_MANAGER && secretsManagerClient) { - const secretName = `FlowiseCredential_${randomBytes(12).toString('hex')}` - - logger.info(`[server]: Upserting AWS Secret: ${secretName}`) - - const secretString = JSON.stringify({ ...plainDataObj }) - - try { - // Try to update the secret if it exists - const putCommand = new PutSecretValueCommand({ - SecretId: secretName, - SecretString: secretString - }) - await secretsManagerClient.send(putCommand) - } catch (error: any) { - if (error.name === 'ResourceNotFoundException') { - // Secret doesn't exist, so create it - const createCommand = new CreateSecretCommand({ - 
Name: secretName, - SecretString: secretString - }) - await secretsManagerClient.send(createCommand) - } else { - // Rethrow any other errors - throw error - } - } - return secretName - } - const encryptKey = await getEncryptionKey() - - // Fallback to existing code return AES.encrypt(JSON.stringify(plainDataObj), encryptKey).toString() } @@ -1465,14 +1456,20 @@ export const decryptCredentialData = async ( if (USE_AWS_SECRETS_MANAGER && secretsManagerClient) { try { logger.info(`[server]: Reading AWS Secret: ${encryptedData}`) - const command = new GetSecretValueCommand({ SecretId: encryptedData }) - const response = await secretsManagerClient.send(command) + if (encryptedData.startsWith('FlowiseCredential_')) { + const command = new GetSecretValueCommand({ SecretId: encryptedData }) + const response = await secretsManagerClient.send(command) - if (response.SecretString) { - const secretObj = JSON.parse(response.SecretString) - decryptedDataStr = JSON.stringify(secretObj) + if (response.SecretString) { + const secretObj = JSON.parse(response.SecretString) + decryptedDataStr = JSON.stringify(secretObj) + } else { + throw new Error('Failed to retrieve secret value.') + } } else { - throw new Error('Failed to retrieve secret value.') + const encryptKey = await getEncryptionKey() + const decryptedData = AES.decrypt(encryptedData, encryptKey) + decryptedDataStr = decryptedData.toString(enc.Utf8) } } catch (error) { console.error(error) @@ -1803,6 +1800,16 @@ export const getMulterStorage = () => { }) }) return upload + } else if (storageType === 'gcs') { + return multer({ + storage: new MulterGoogleCloudStorage({ + projectId: process.env.GOOGLE_CLOUD_STORAGE_PROJ_ID, + bucket: process.env.GOOGLE_CLOUD_STORAGE_BUCKET_NAME, + keyFilename: process.env.GOOGLE_CLOUD_STORAGE_CREDENTIAL, + uniformBucketLevelAccess: Boolean(process.env.GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS) ?? 
true, + destination: `uploads/${getOrgId()}` + }) + }) } else { return multer({ dest: getUploadPath() }) } diff --git a/packages/server/src/utils/logger.ts b/packages/server/src/utils/logger.ts index c49670aa05a..7ad5b58a461 100644 --- a/packages/server/src/utils/logger.ts +++ b/packages/server/src/utils/logger.ts @@ -5,6 +5,7 @@ import config from './config' // should be replaced by node-config or similar import { createLogger, transports, format } from 'winston' import { NextFunction, Request, Response } from 'express' import { S3ClientConfig } from '@aws-sdk/client-s3' +import { LoggingWinston } from '@google-cloud/logging-winston' const { S3StreamLogger } = require('s3-streamlogger') @@ -13,6 +14,11 @@ const { combine, timestamp, printf, errors } = format let s3ServerStream: any let s3ErrorStream: any let s3ServerReqStream: any + +let gcsServerStream: any +let gcsErrorStream: any +let gcsServerReqStream: any + if (process.env.STORAGE_TYPE === 's3') { const accessKeyId = process.env.S3_STORAGE_ACCESS_KEY_ID const secretAccessKey = process.env.S3_STORAGE_SECRET_ACCESS_KEY @@ -60,6 +66,29 @@ if (process.env.STORAGE_TYPE === 's3') { }) } +if (process.env.STORAGE_TYPE === 'gcs') { + const config = { + projectId: process.env.GOOGLE_CLOUD_STORAGE_PROJ_ID, + keyFilename: process.env.GOOGLE_CLOUD_STORAGE_CREDENTIAL, + defaultCallback: (err: any) => { + if (err) { + console.error('Error logging to GCS: ' + err) + } + } + } + gcsServerStream = new LoggingWinston({ + ...config, + logName: 'server' + }) + gcsErrorStream = new LoggingWinston({ + ...config, + logName: 'error' + }) + gcsServerReqStream = new LoggingWinston({ + ...config, + logName: 'requests' + }) +} // expect the log dir be relative to the projects root const logDir = config.logging.dir @@ -101,7 +130,8 @@ const logger = createLogger({ stream: s3ServerStream }) ] - : []) + : []), + ...(process.env.STORAGE_TYPE === 'gcs' ? 
[gcsServerStream] : []) ], exceptionHandlers: [ ...(!process.env.STORAGE_TYPE || process.env.STORAGE_TYPE === 'local' @@ -117,7 +147,8 @@ const logger = createLogger({ stream: s3ErrorStream }) ] - : []) + : []), + ...(process.env.STORAGE_TYPE === 'gcs' ? [gcsErrorStream] : []) ], rejectionHandlers: [ ...(!process.env.STORAGE_TYPE || process.env.STORAGE_TYPE === 'local' @@ -133,12 +164,13 @@ const logger = createLogger({ stream: s3ErrorStream }) ] - : []) + : []), + ...(process.env.STORAGE_TYPE === 'gcs' ? [gcsErrorStream] : []) ] }) export function expressRequestLogger(req: Request, res: Response, next: NextFunction): void { - const unwantedLogURLs = ['/api/v1/node-icon/', '/api/v1/components-credentials-icon/'] + const unwantedLogURLs = ['/api/v1/node-icon/', '/api/v1/components-credentials-icon/', '/api/v1/ping'] if (/\/api\/v1\//i.test(req.url) && !unwantedLogURLs.some((url) => new RegExp(url, 'i').test(req.url))) { const fileLogger = createLogger({ format: combine(timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), format.json(), errors({ stack: true })), @@ -168,7 +200,8 @@ export function expressRequestLogger(req: Request, res: Response, next: NextFunc stream: s3ServerReqStream }) ] - : []) + : []), + ...(process.env.STORAGE_TYPE === 'gcs' ? [gcsServerReqStream] : []) ] }) diff --git a/packages/ui/package.json b/packages/ui/package.json index 05b46b94cf7..4826ab494c5 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "flowise-ui", - "version": "2.2.7-patch.1", + "version": "2.2.8", "license": "SEE LICENSE IN LICENSE.md", "homepage": "https://flowiseai.com", "author": { diff --git a/packages/ui/src/ErrorBoundary.jsx b/packages/ui/src/ErrorBoundary.jsx index bbe4fafe3b1..9745013fa00 100644 --- a/packages/ui/src/ErrorBoundary.jsx +++ b/packages/ui/src/ErrorBoundary.jsx @@ -16,7 +16,7 @@ const ErrorBoundary = ({ error }) => { Oh snap! - The following error occured when loading this page. 
+ The following error occurred when loading this page. @@ -27,7 +27,7 @@ const ErrorBoundary = ({ error }) => { > -
+                        
                             {`Status: ${error.response.status}`}
                             
{error.response.data.message} diff --git a/packages/ui/src/assets/images/opik.png b/packages/ui/src/assets/images/opik.png new file mode 100644 index 00000000000..20de0c39d47 Binary files /dev/null and b/packages/ui/src/assets/images/opik.png differ diff --git a/packages/ui/src/layout/MainLayout/Header/ProfileSection/index.jsx b/packages/ui/src/layout/MainLayout/Header/ProfileSection/index.jsx index 5caec1463cb..866087b2b9b 100644 --- a/packages/ui/src/layout/MainLayout/Header/ProfileSection/index.jsx +++ b/packages/ui/src/layout/MainLayout/Header/ProfileSection/index.jsx @@ -3,8 +3,8 @@ import { exportData, stringify } from '@/utils/exportImport' import useNotifier from '@/utils/useNotifier' import PropTypes from 'prop-types' import { useEffect, useRef, useState } from 'react' -import { useDispatch, useSelector } from 'react-redux' import { createPortal } from 'react-dom' +import { useDispatch, useSelector } from 'react-redux' // material-ui import { @@ -12,22 +12,22 @@ import { Box, Button, ButtonBase, + Checkbox, ClickAwayListener, + Dialog, + DialogActions, + DialogContent, + DialogTitle, Divider, + FormControlLabel, List, ListItemButton, ListItemIcon, ListItemText, Paper, Popper, - Typography, - Dialog, - DialogTitle, - DialogContent, Stack, - FormControlLabel, - Checkbox, - DialogActions + Typography } from '@mui/material' import { useTheme } from '@mui/material/styles' @@ -40,9 +40,9 @@ import AboutDialog from '@/ui-component/dialog/AboutDialog' import Transitions from '@/ui-component/extended/Transitions' // assets +import ExportingGIF from '@/assets/images/Exporting.gif' import { IconFileExport, IconFileUpload, IconInfoCircle, IconLogout, IconSettings, IconX } from '@tabler/icons-react' import './index.css' -import ExportingGIF from '@/assets/images/Exporting.gif' //API import exportImportApi from '@/api/exportimport' @@ -52,12 +52,24 @@ import useApi from '@/hooks/useApi' import { getErrorMessage } from '@/utils/errorHandler' import { useNavigate } from 
'react-router-dom' -const dataToExport = ['Chatflows', 'Agentflows', 'Tools', 'Variables', 'Assistants'] +const dataToExport = [ + 'Agentflows', + 'Assistants Custom', + 'Assistants OpenAI', + 'Assistants Azure', + 'Chatflows', + 'Chat Messages', + 'Chat Feedbacks', + 'Custom Templates', + 'Document Stores', + 'Tools', + 'Variables' +] const ExportDialog = ({ show, onCancel, onExport }) => { const portalElement = document.getElementById('portal') - const [selectedData, setSelectedData] = useState(['Chatflows', 'Agentflows', 'Tools', 'Variables', 'Assistants']) + const [selectedData, setSelectedData] = useState(dataToExport) const [isExporting, setIsExporting] = useState(false) useEffect(() => { @@ -243,11 +255,17 @@ const ProfileSection = ({ username, handleLogout }) => { const onExport = (data) => { const body = {} - if (data.includes('Chatflows')) body.chatflow = true if (data.includes('Agentflows')) body.agentflow = true + if (data.includes('Assistants Custom')) body.assistantCustom = true + if (data.includes('Assistants OpenAI')) body.assistantOpenAI = true + if (data.includes('Assistants Azure')) body.assistantAzure = true + if (data.includes('Chatflows')) body.chatflow = true + if (data.includes('Chat Messages')) body.chat_message = true + if (data.includes('Chat Feedbacks')) body.chat_feedback = true + if (data.includes('Custom Templates')) body.custom_template = true + if (data.includes('Document Stores')) body.document_store = true if (data.includes('Tools')) body.tool = true if (data.includes('Variables')) body.variable = true - if (data.includes('Assistants')) body.assistant = true exportAllApi.request(body) } diff --git a/packages/ui/src/ui-component/cards/FollowUpPromptsCard.jsx b/packages/ui/src/ui-component/cards/FollowUpPromptsCard.jsx index 75aceb85b75..0ad96fb3626 100644 --- a/packages/ui/src/ui-component/cards/FollowUpPromptsCard.jsx +++ b/packages/ui/src/ui-component/cards/FollowUpPromptsCard.jsx @@ -12,25 +12,26 @@ const FollowUpPromptsCard = 
({ isGrid, followUpPrompts, sx, onPromptClick }) => className={'button-container'} sx={{ width: '100%', maxWidth: isGrid ? 'inherit' : '400px', p: 1.5, display: 'flex', gap: 1, ...sx }} > - {followUpPrompts.map((fp, index) => ( - onPromptClick(fp, e)} - sx={{ - backgroundColor: 'transparent', - border: '1px solid', - boxShadow: '0px 2px 1px -1px rgba(0,0,0,0.2)', - color: '#2196f3', - transition: 'all 300ms cubic-bezier(0.4, 0, 0.2, 1) 0ms', - '&:hover': { - backgroundColor: customization.isDarkMode ? 'rgba(0, 0, 0, 0.12)' : 'rgba(0, 0, 0, 0.05)', - border: '1px solid' - } - }} - /> - ))} + {Array.isArray(followUpPrompts) && + followUpPrompts.map((fp, index) => ( + onPromptClick(fp, e)} + sx={{ + backgroundColor: 'transparent', + border: '1px solid', + boxShadow: '0px 2px 1px -1px rgba(0,0,0,0.2)', + color: '#2196f3', + transition: 'all 300ms cubic-bezier(0.4, 0, 0.2, 1) 0ms', + '&:hover': { + backgroundColor: customization.isDarkMode ? 'rgba(0, 0, 0, 0.12)' : 'rgba(0, 0, 0, 0.05)', + border: '1px solid' + } + }} + /> + ))} ) } diff --git a/packages/ui/src/ui-component/dialog/NvidiaNIMDialog.jsx b/packages/ui/src/ui-component/dialog/NvidiaNIMDialog.jsx index aca39c2bbb7..a0f7b9e1dd7 100644 --- a/packages/ui/src/ui-component/dialog/NvidiaNIMDialog.jsx +++ b/packages/ui/src/ui-component/dialog/NvidiaNIMDialog.jsx @@ -1,34 +1,39 @@ -import { useState, useEffect } from 'react' -import { createPortal } from 'react-dom' -import axios from 'axios' -import PropTypes from 'prop-types' import { - Dialog, - DialogTitle, - DialogContent, - DialogActions, Button, CircularProgress, - Stepper, + Dialog, + DialogActions, + DialogContent, + DialogTitle, + FormControl, + InputLabel, + MenuItem, + Select, Step, StepLabel, - Select, - MenuItem, - FormControl, - InputLabel + Stepper, + TextField } from '@mui/material' +import axios from 'axios' +import PropTypes from 'prop-types' +import { useEffect, useState } from 'react' +import { createPortal } from 'react-dom' const 
NvidiaNIMDialog = ({ open, onClose, onComplete }) => { const portalElement = document.getElementById('portal') const modelOptions = { - 'nv-mistralai/mistral-nemo-12b-instruct:latest': { - label: 'Mistral Nemo 12B Instruct', - licenseUrl: 'https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nv-mistralai/containers/mistral-nemo-12b-instruct' - }, - 'meta/llama-3.1-8b-instruct-rtx:latest': { + 'nvcr.io/nim/meta/llama-3.1-8b-instruct:1.8.0-RTX': { label: 'Llama 3.1 8B Instruct', licenseUrl: 'https://catalog.ngc.nvidia.com/orgs/nim/teams/meta/containers/llama-3.1-8b-instruct' + }, + 'nvcr.io/nim/deepseek-ai/deepseek-r1-distill-llama-8b:1.8.0-RTX': { + label: 'DeepSeek R1 Distill Llama 8B', + licenseUrl: 'https://catalog.ngc.nvidia.com/orgs/nim/teams/deepseek-ai/containers/deepseek-r1-distill-llama-8b' + }, + 'nvcr.io/nim/nv-mistralai/mistral-nemo-12b-instruct:1.8.0-rtx': { + label: 'Mistral Nemo 12B Instruct', + licenseUrl: 'https://catalog.ngc.nvidia.com/orgs/nim/teams/nv-mistralai/containers/mistral-nemo-12b-instruct' } } @@ -36,6 +41,10 @@ const NvidiaNIMDialog = ({ open, onClose, onComplete }) => { const [loading, setLoading] = useState(false) const [imageTag, setImageTag] = useState('') const [pollInterval, setPollInterval] = useState(null) + const [nimRelaxMemConstraints, setNimRelaxMemConstraints] = useState('0') + const [hostPort, setHostPort] = useState('8080') + const [showContainerConfirm, setShowContainerConfirm] = useState(false) + const [existingContainer, setExistingContainer] = useState(null) const steps = ['Download Installer', 'Pull Image', 'Start Container'] @@ -137,34 +146,63 @@ const NvidiaNIMDialog = ({ open, onClose, onComplete }) => { try { setLoading(true) try { - const containerResponse = await axios.post('/api/v1/nvidia-nim/get-container', { imageTag }) - if (containerResponse.data && containerResponse.data && containerResponse.data.status === 'running') { - // wait additional 10 seconds for container to be ready - await new Promise((resolve) => 
setTimeout(resolve, 10000)) + const containerResponse = await axios.post('/api/v1/nvidia-nim/get-container', { + imageTag, + port: parseInt(hostPort) + }) + if (containerResponse.data) { + setExistingContainer(containerResponse.data) + setShowContainerConfirm(true) setLoading(false) - onComplete(containerResponse.data) - onClose() return } } catch (err) { + // Handle port in use by non-model container + if (err.response?.status === 409) { + alert(`Port ${hostPort} is already in use by another container. Please choose a different port.`) + setLoading(false) + return + } // Continue if container not found if (err.response?.status !== 404) { throw err } } + // No container found with this port, proceed with starting new container + await startNewContainer() + } catch (err) { + let errorData = err.message + if (typeof err === 'string') { + errorData = err + } else if (err.response?.data) { + errorData = err.response.data.message + } + alert('Failed to check container status: ' + errorData) + setLoading(false) + } + } + + const startNewContainer = async () => { + try { + setLoading(true) const tokenResponse = await axios.get('/api/v1/nvidia-nim/get-token') const apiKey = tokenResponse.data.access_token await axios.post('/api/v1/nvidia-nim/start-container', { imageTag, - apiKey + apiKey, + nimRelaxMemConstraints: parseInt(nimRelaxMemConstraints), + hostPort: parseInt(hostPort) }) // Start polling for container status const interval = setInterval(async () => { try { - const containerResponse = await axios.post('/api/v1/nvidia-nim/get-container', { imageTag }) + const containerResponse = await axios.post('/api/v1/nvidia-nim/get-container', { + imageTag, + port: parseInt(hostPort) + }) if (containerResponse.data) { clearInterval(interval) setLoading(false) @@ -194,12 +232,59 @@ const NvidiaNIMDialog = ({ open, onClose, onComplete }) => { } } + const handleUseExistingContainer = async () => { + try { + setLoading(true) + // Start polling for container status + const interval 
= setInterval(async () => { + try { + const containerResponse = await axios.post('/api/v1/nvidia-nim/get-container', { + imageTag, + port: parseInt(hostPort) + }) + if (containerResponse.data) { + clearInterval(interval) + setLoading(false) + onComplete(containerResponse.data) + onClose() + } + } catch (err) { + // Continue polling if container not found + if (err.response?.status !== 404) { + clearInterval(interval) + alert('Failed to check container status: ' + err.message) + setLoading(false) + } + } + }, 5000) + + setPollInterval(interval) + } catch (err) { + let errorData = err.message + if (typeof err === 'string') { + errorData = err + } else if (err.response?.data) { + errorData = err.response.data.message + } + alert('Failed to check container status: ' + errorData) + setLoading(false) + } + } + const handleNext = () => { if (activeStep === 1 && !imageTag) { alert('Please enter an image tag') return } + if (activeStep === 2) { + const port = parseInt(hostPort) + if (isNaN(port) || port < 1 || port > 65535) { + alert('Please enter a valid port number between 1 and 65535') + return + } + } + switch (activeStep) { case 0: preload() @@ -234,86 +319,150 @@ const NvidiaNIMDialog = ({ open, onClose, onComplete }) => { }, [open]) const component = open ? ( - - NIM Setup - - - {steps.map((label) => ( - - {label} - - ))} - + <> + + NIM Setup + + + {steps.map((label) => ( + + {label} + + ))} + - {activeStep === 0 && ( -
-

- Would you like to download the NIM installer? Click Next if it has been installed -

- {loading && } -
- )} + {activeStep === 0 && ( +
+

+ Would you like to download the NIM installer? Click Next if it has been installed +

+ {loading && } +
+ )} - {activeStep === 1 && ( -
- - Model - - - {imageTag && ( - - )} - {loading && ( -
-
- -

Pulling image...

-
- )} -
- )} + {activeStep === 1 && ( +
+ + Model + + + {imageTag && ( + + )} + {loading && ( +
+
+ +

Pulling image...

+
+ )} +
+ )} - {activeStep === 2 && ( + {activeStep === 2 && ( +
+ {loading ? ( + <> +
+ +

Starting container...

+ + ) : ( + <> + + Relax Memory Constraints + + + setHostPort(e.target.value)} + inputProps={{ min: 1, max: 65535 }} + sx={{ mt: 2 }} + /> +

Click Next to start the container.

+ + )} +
+ )} + + + + {activeStep === 0 && ( + + )} + + +
+ setShowContainerConfirm(false)}> + Container Already Exists + +

A container for this image already exists:

- {loading ? ( - <> -
- -

Starting container...

- - ) : ( -

Image is ready! Click Next to start the container.

- )} +

+ Name: {existingContainer?.name || 'N/A'} +

+

+ Status: {existingContainer?.status || 'N/A'} +

- )} - - - - {activeStep === 0 && ( - + - )} - - -
+ +
+ ) : null return createPortal(component, portalElement) diff --git a/packages/ui/src/ui-component/extended/AnalyseFlow.jsx b/packages/ui/src/ui-component/extended/AnalyseFlow.jsx index d9001368e43..de162e51a34 100644 --- a/packages/ui/src/ui-component/extended/AnalyseFlow.jsx +++ b/packages/ui/src/ui-component/extended/AnalyseFlow.jsx @@ -30,6 +30,7 @@ import lunarySVG from '@/assets/images/lunary.svg' import langwatchSVG from '@/assets/images/langwatch.svg' import arizePNG from '@/assets/images/arize.png' import phoenixPNG from '@/assets/images/phoenix.png' +import opikPNG from '@/assets/images/opik.png' // store import useNotifier from '@/utils/useNotifier' @@ -188,6 +189,33 @@ const analyticProviders = [ optional: true } ] + }, + { + label: 'Opik', + name: 'opik', + icon: opikPNG, + url: 'https://www.comet.com/opik', + inputs: [ + { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['opikApi'] + }, + { + label: 'Project Name', + name: 'opikProjectName', + type: 'string', + description: 'Name of your Opik project', + placeholder: 'default' + }, + { + label: 'On/Off', + name: 'status', + type: 'boolean', + optional: true + } + ] } ] diff --git a/packages/ui/src/ui-component/extended/FileUpload.jsx b/packages/ui/src/ui-component/extended/FileUpload.jsx index d6fc8f02aba..34a6f3e509d 100644 --- a/packages/ui/src/ui-component/extended/FileUpload.jsx +++ b/packages/ui/src/ui-component/extended/FileUpload.jsx @@ -5,7 +5,7 @@ import { enqueueSnackbar as enqueueSnackbarAction, closeSnackbar as closeSnackba import parser from 'html-react-parser' // material-ui -import { Button, Box } from '@mui/material' +import { Button, Box, Typography } from '@mui/material' import { IconX, IconBulb } from '@tabler/icons-react' // Project import @@ -22,6 +22,18 @@ const message = `Uploaded files will be parsed as strings and sent to the LLM. I
Refer
docs for more details.` +const availableFileTypes = [ + { name: 'CSS', ext: 'text/css' }, + { name: 'CSV', ext: 'text/csv' }, + { name: 'HTML', ext: 'text/html' }, + { name: 'JSON', ext: 'application/json' }, + { name: 'Markdown', ext: 'text/markdown' }, + { name: 'PDF', ext: 'application/pdf' }, + { name: 'SQL', ext: 'application/sql' }, + { name: 'Text File', ext: 'text/plain' }, + { name: 'XML', ext: 'application/xml' } +] + const FileUpload = ({ dialogProps }) => { const dispatch = useDispatch() @@ -31,16 +43,27 @@ const FileUpload = ({ dialogProps }) => { const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) const [fullFileUpload, setFullFileUpload] = useState(false) + const [allowedFileTypes, setAllowedFileTypes] = useState([]) const [chatbotConfig, setChatbotConfig] = useState({}) const handleChange = (value) => { setFullFileUpload(value) } + const handleAllowedFileTypesChange = (event) => { + const { checked, value } = event.target + if (checked) { + setAllowedFileTypes((prev) => [...prev, value]) + } else { + setAllowedFileTypes((prev) => prev.filter((item) => item !== value)) + } + } + const onSave = async () => { try { const value = { - status: fullFileUpload + status: fullFileUpload, + allowedUploadFileTypes: allowedFileTypes.join(',') } chatbotConfig.fullFileUpload = value @@ -82,6 +105,9 @@ const FileUpload = ({ dialogProps }) => { } useEffect(() => { + /* backward compatibility - by default, allow all */ + const allowedFileTypes = availableFileTypes.map((fileType) => fileType.ext) + setAllowedFileTypes(allowedFileTypes) if (dialogProps.chatflow) { if (dialogProps.chatflow.chatbotConfig) { try { @@ -90,6 +116,10 @@ const FileUpload = ({ dialogProps }) => { if (chatbotConfig.fullFileUpload) { setFullFileUpload(chatbotConfig.fullFileUpload.status) } + if (chatbotConfig.fullFileUpload?.allowedUploadFileTypes) { + const allowedFileTypes = chatbotConfig.fullFileUpload.allowedUploadFileTypes.split(',') + 
setAllowedFileTypes(allowedFileTypes) + } } catch (e) { setChatbotConfig({}) } @@ -135,8 +165,44 @@ const FileUpload = ({ dialogProps }) => { - {/* TODO: Allow selection of allowed file types*/} - + + Allow Uploads of Type +
+ {availableFileTypes.map((fileType) => ( +
+ + +
+ ))} +
+ Save diff --git a/packages/ui/src/ui-component/extended/OverrideConfig.jsx b/packages/ui/src/ui-component/extended/OverrideConfig.jsx index 5f398fe01d6..f3c6041dfc7 100644 --- a/packages/ui/src/ui-component/extended/OverrideConfig.jsx +++ b/packages/ui/src/ui-component/extended/OverrideConfig.jsx @@ -116,25 +116,27 @@ const OverrideConfig = ({ dialogProps }) => { } const formatObj = () => { - const obj = { - overrideConfig: { status: overrideConfigStatus } + let apiConfig = JSON.parse(dialogProps.chatflow.apiConfig) + if (apiConfig === null || apiConfig === undefined) { + apiConfig = {} } + let overrideConfig = { status: overrideConfigStatus } if (overrideConfigStatus) { - // loop through each key in nodeOverrides and filter out the enabled ones const filteredNodeOverrides = {} for (const key in nodeOverrides) { filteredNodeOverrides[key] = nodeOverrides[key].filter((node) => node.enabled) } - obj.overrideConfig = { - ...obj.overrideConfig, + overrideConfig = { + ...overrideConfig, nodes: filteredNodeOverrides, variables: variableOverrides.filter((node) => node.enabled) } } + apiConfig.overrideConfig = overrideConfig - return obj + return apiConfig } const onNodeOverrideToggle = (node, property, status) => { @@ -206,7 +208,7 @@ const OverrideConfig = ({ dialogProps }) => { if (!overrideConfigStatus) { setNodeOverrides(newNodeOverrides) } else { - const updatedNodeOverrides = { ...nodeOverrides } + const updatedNodeOverrides = { ...newNodeOverrides } Object.keys(updatedNodeOverrides).forEach((node) => { if (!seenNodes.has(node)) { diff --git a/packages/ui/src/ui-component/extended/RateLimit.jsx b/packages/ui/src/ui-component/extended/RateLimit.jsx index c57b20e79e2..1b3ca3b0105 100644 --- a/packages/ui/src/ui-component/extended/RateLimit.jsx +++ b/packages/ui/src/ui-component/extended/RateLimit.jsx @@ -19,7 +19,7 @@ import chatflowsApi from '@/api/chatflows' // utils import useNotifier from '@/utils/useNotifier' -const RateLimit = () => { +const RateLimit = ({ 
dialogProps }) => { const dispatch = useDispatch() const chatflow = useSelector((state) => state.canvas.chatflow) const chatflowid = chatflow.id @@ -36,9 +36,11 @@ const RateLimit = () => { const [limitMsg, setLimitMsg] = useState(apiConfig?.rateLimit?.limitMsg ?? '') const formatObj = () => { - const obj = { - rateLimit: { status: rateLimitStatus } + let apiConfig = JSON.parse(dialogProps.chatflow.apiConfig) + if (apiConfig === null || apiConfig === undefined) { + apiConfig = {} } + let obj = { status: rateLimitStatus } if (rateLimitStatus) { const rateLimitValuesBoolean = [!limitMax, !limitDuration, !limitMsg] @@ -46,16 +48,16 @@ const RateLimit = () => { if (rateLimitFilledValues.length >= 1 && rateLimitFilledValues.length <= 2) { throw new Error('Need to fill all rate limit input fields') } else if (rateLimitFilledValues.length === 3) { - obj.rateLimit = { - ...obj.rateLimit, + obj = { + ...obj, limitMax, limitDuration, limitMsg } } } - - return obj + apiConfig.rateLimit = obj + return apiConfig } const handleChange = (value) => { @@ -173,7 +175,8 @@ const RateLimit = () => { } RateLimit.propTypes = { - isSessionMemory: PropTypes.bool + isSessionMemory: PropTypes.bool, + dialogProps: PropTypes.object } export default RateLimit diff --git a/packages/ui/src/ui-component/extended/Security.jsx b/packages/ui/src/ui-component/extended/Security.jsx index b46847fcba0..57fff04babf 100644 --- a/packages/ui/src/ui-component/extended/Security.jsx +++ b/packages/ui/src/ui-component/extended/Security.jsx @@ -12,7 +12,7 @@ const Security = ({ dialogProps }) => { return ( } spacing={4}> - + diff --git a/packages/ui/src/ui-component/table/FlowListTable.jsx b/packages/ui/src/ui-component/table/FlowListTable.jsx index dea00791cb2..fad0f52a7ec 100644 --- a/packages/ui/src/ui-component/table/FlowListTable.jsx +++ b/packages/ui/src/ui-component/table/FlowListTable.jsx @@ -248,7 +248,9 @@ export const FlowListTable = ({ data, images, isLoading, filterFunction, updateF )} - 
{moment(row.updatedDate).format('MMMM Do, YYYY')} + + {moment(row.updatedDate).format('MMMM Do, YYYY HH:mm:ss')} + { id: assistant.id, details: assistant.details, credential: assistant.credential, - iconSrc: assistant.iconSrc + iconSrc: assistant.iconSrc, + type: assistant.type } }) } catch (error) { @@ -76,11 +77,19 @@ export const stringify = (object) => { export const exportData = (exportAllData) => { try { return { - Tool: sanitizeTool(exportAllData.Tool), - ChatFlow: sanitizeChatflow(exportAllData.ChatFlow), AgentFlow: sanitizeChatflow(exportAllData.AgentFlow), - Variable: sanitizeVariable(exportAllData.Variable), - Assistant: sanitizeAssistant(exportAllData.Assistant) + AssistantFlow: sanitizeChatflow(exportAllData.AssistantFlow), + AssistantCustom: sanitizeAssistant(exportAllData.AssistantCustom), + AssistantOpenAI: sanitizeAssistant(exportAllData.AssistantOpenAI), + AssistantAzure: sanitizeAssistant(exportAllData.AssistantAzure), + ChatFlow: sanitizeChatflow(exportAllData.ChatFlow), + ChatMessage: exportAllData.ChatMessage, + ChatMessageFeedback: exportAllData.ChatMessageFeedback, + CustomTemplate: exportAllData.CustomTemplate, + DocumentStore: exportAllData.DocumentStore, + DocumentStoreFileChunk: exportAllData.DocumentStoreFileChunk, + Tool: sanitizeTool(exportAllData.Tool), + Variable: sanitizeVariable(exportAllData.Variable) } } catch (error) { throw new Error(`exportImport.exportData ${getErrorMessage(error)}`) diff --git a/packages/ui/src/views/assistants/openai/AssistantDialog.jsx b/packages/ui/src/views/assistants/openai/AssistantDialog.jsx index 3157efa6016..cd6722ef91b 100644 --- a/packages/ui/src/views/assistants/openai/AssistantDialog.jsx +++ b/packages/ui/src/views/assistants/openai/AssistantDialog.jsx @@ -46,6 +46,18 @@ import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from '@/store/actions' import { maxScroll } from '@/store/constant' const assistantAvailableModels = [ + { + label: 'gpt-4.1', + name: 'gpt-4.1' + }, + { + label: 
'gpt-4.1-mini', + name: 'gpt-4.1-mini' + }, + { + label: 'gpt-4.1-nano', + name: 'gpt-4.1-nano' + }, { label: 'gpt-4.5-preview', name: 'gpt-4.5-preview' diff --git a/packages/ui/src/views/canvas/NodeInputHandler.jsx b/packages/ui/src/views/canvas/NodeInputHandler.jsx index b21592f0ef4..613666b4c82 100644 --- a/packages/ui/src/views/canvas/NodeInputHandler.jsx +++ b/packages/ui/src/views/canvas/NodeInputHandler.jsx @@ -1,46 +1,46 @@ import PropTypes from 'prop-types' -import { Handle, Position, useUpdateNodeInternals } from 'reactflow' -import { useEffect, useRef, useState, useContext } from 'react' +import { useContext, useEffect, useRef, useState } from 'react' import { useSelector } from 'react-redux' +import { Handle, Position, useUpdateNodeInternals } from 'reactflow' // material-ui -import { useTheme, styled } from '@mui/material/styles' -import { Popper, Box, Typography, Tooltip, IconButton, Button, TextField } from '@mui/material' -import { useGridApiContext } from '@mui/x-data-grid' -import IconAutoFixHigh from '@mui/icons-material/AutoFixHigh' -import { tooltipClasses } from '@mui/material/Tooltip' -import { IconArrowsMaximize, IconEdit, IconAlertTriangle, IconBulb, IconRefresh } from '@tabler/icons-react' import { Tabs } from '@mui/base/Tabs' +import IconAutoFixHigh from '@mui/icons-material/AutoFixHigh' +import { Box, Button, IconButton, Popper, TextField, Tooltip, Typography } from '@mui/material' import Autocomplete, { autocompleteClasses } from '@mui/material/Autocomplete' +import { styled, useTheme } from '@mui/material/styles' +import { tooltipClasses } from '@mui/material/Tooltip' +import { useGridApiContext } from '@mui/x-data-grid' +import { IconAlertTriangle, IconArrowsMaximize, IconBulb, IconEdit, IconRefresh } from '@tabler/icons-react' // project import +import { flowContext } from '@/store/context/ReactFlowContext' +import ConditionDialog from '@/ui-component/dialog/ConditionDialog' +import ExpandTextDialog from 
'@/ui-component/dialog/ExpandTextDialog' +import FormatPromptValuesDialog from '@/ui-component/dialog/FormatPromptValuesDialog' +import InputHintDialog from '@/ui-component/dialog/InputHintDialog' +import ManageScrapedLinksDialog from '@/ui-component/dialog/ManageScrapedLinksDialog' +import NvidiaNIMDialog from '@/ui-component/dialog/NvidiaNIMDialog' +import PromptLangsmithHubDialog from '@/ui-component/dialog/PromptLangsmithHubDialog' +import { AsyncDropdown } from '@/ui-component/dropdown/AsyncDropdown' import { Dropdown } from '@/ui-component/dropdown/Dropdown' import { MultiDropdown } from '@/ui-component/dropdown/MultiDropdown' -import { AsyncDropdown } from '@/ui-component/dropdown/AsyncDropdown' -import { Input } from '@/ui-component/input/Input' -import { DataGrid } from '@/ui-component/grid/DataGrid' +import { CodeEditor } from '@/ui-component/editor/CodeEditor' import { File } from '@/ui-component/file/File' -import { SwitchInput } from '@/ui-component/switch/Switch' -import { flowContext } from '@/store/context/ReactFlowContext' +import { DataGrid } from '@/ui-component/grid/DataGrid' +import { Input } from '@/ui-component/input/Input' import { JsonEditorInput } from '@/ui-component/json/JsonEditor' -import { TooltipWithParser } from '@/ui-component/tooltip/TooltipWithParser' -import { CodeEditor } from '@/ui-component/editor/CodeEditor' +import { SwitchInput } from '@/ui-component/switch/Switch' +import { Tab } from '@/ui-component/tabs/Tab' import { TabPanel } from '@/ui-component/tabs/TabPanel' import { TabsList } from '@/ui-component/tabs/TabsList' -import { Tab } from '@/ui-component/tabs/Tab' -import ToolDialog from '@/views/tools/ToolDialog' +import { TooltipWithParser } from '@/ui-component/tooltip/TooltipWithParser' import AssistantDialog from '@/views/assistants/openai/AssistantDialog' -import FormatPromptValuesDialog from '@/ui-component/dialog/FormatPromptValuesDialog' -import ExpandTextDialog from '@/ui-component/dialog/ExpandTextDialog' 
-import ConditionDialog from '@/ui-component/dialog/ConditionDialog' -import PromptLangsmithHubDialog from '@/ui-component/dialog/PromptLangsmithHubDialog' -import ManageScrapedLinksDialog from '@/ui-component/dialog/ManageScrapedLinksDialog' +import ToolDialog from '@/views/tools/ToolDialog' import CredentialInputHandler from './CredentialInputHandler' -import InputHintDialog from '@/ui-component/dialog/InputHintDialog' -import NvidiaNIMDialog from '@/ui-component/dialog/NvidiaNIMDialog' // utils -import { getInputVariables, getCustomConditionOutputs, isValidConnection, getAvailableNodesForVariable } from '@/utils/genericHelper' +import { getAvailableNodesForVariable, getCustomConditionOutputs, getInputVariables, isValidConnection } from '@/utils/genericHelper' // const import { FLOWISE_CREDENTIAL_ID } from '@/store/constant' @@ -537,7 +537,7 @@ const NodeInputHandler = ({ > )} - {data.name === 'chatNvidiaNIM' && inputParam.name === 'modelName' && ( + {data.name === 'Chat NVIDIA NIM' && inputParam.name === 'modelName' && ( <>