packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts
@@ -1,9 +1,10 @@
-import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
+import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
import { AzureChatOpenAI } from './FlowiseAzureChatOpenAI'
+import { OpenAI as OpenAIClient } from 'openai'

const serverCredentialsExists =
!!process.env.AZURE_OPENAI_API_KEY &&
@@ -26,7 +27,7 @@ class AzureChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'Azure ChatOpenAI'
this.name = 'azureChatOpenAI'
-this.version = 7.0
+this.version = 7.1
this.type = 'AzureChatOpenAI'
this.icon = 'Azure.svg'
this.category = 'Chat Models'
@@ -154,6 +155,15 @@ class AzureChatOpenAI_ChatModels implements INode {
optional: false,
additionalParams: true
},
+{
+label: 'Reasoning',
+description: 'Whether the model supports reasoning. Only applicable for reasoning models.',
+name: 'reasoning',
+type: 'boolean',
+default: false,
+optional: true,
+additionalParams: true
+},
{
label: 'Reasoning Effort',
description: 'Constrains effort on reasoning for reasoning models. Only applicable for o1 and o3 models.',
@@ -173,9 +183,34 @@
name: 'high'
}
],
-default: 'medium',
-optional: false,
-additionalParams: true
+additionalParams: true,
+show: {
+reasoning: true
+}
+},
+{
+label: 'Reasoning Summary',
+description: `A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process`,
+name: 'reasoningSummary',
+type: 'options',
+options: [
+{
+label: 'Auto',
+name: 'auto'
+},
+{
+label: 'Concise',
+name: 'concise'
+},
+{
+label: 'Detailed',
+name: 'detailed'
+}
+],
+additionalParams: true,
+show: {
+reasoning: true
+}
}
]
}
@@ -199,7 +234,8 @@ class AzureChatOpenAI_ChatModels implements INode {
const topP = nodeData.inputs?.topP as string
const basePath = nodeData.inputs?.basepath as string
const baseOptions = nodeData.inputs?.baseOptions
-const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort
+const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort | null
+const reasoningSummary = nodeData.inputs?.reasoningSummary as 'auto' | 'concise' | 'detailed' | null

const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
@@ -240,8 +276,15 @@ class AzureChatOpenAI_ChatModels implements INode {
if (modelName === 'o3-mini' || modelName.includes('o1')) {
delete obj.temperature
}
-if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
-obj.reasoningEffort = reasoningEffort
+if (modelName.includes('o1') || modelName.includes('o3')) {
+const reasoning: OpenAIClient.Reasoning = {}
+if (reasoningEffort) {
+reasoning.effort = reasoningEffort
+}
+if (reasoningSummary) {
+reasoning.summary = reasoningSummary
+}
+obj.reasoning = reasoning
}

const multiModalOption: IMultiModalOption = {
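Note on the hunks above: the node now imports OpenAIClient directly from the openai package and assembles a single reasoning object instead of assigning reasoningEffort directly. A minimal sketch of the new mapping, assuming the openai SDK's Reasoning type ({ effort?, summary? }); the model name and input values below are hypothetical stand-ins for the node's inputs:

import { OpenAI as OpenAIClient } from 'openai'

// Hypothetical stand-ins for nodeData.inputs
const modelName = 'o3-mini'
const reasoningEffort = 'medium' as OpenAIClient.Chat.ChatCompletionReasoningEffort | null
const reasoningSummary = 'auto' as 'auto' | 'concise' | 'detailed' | null

const obj: { reasoning?: OpenAIClient.Reasoning } = {}

// Same guard as the diff: only o1/o3 model names get a reasoning object
if (modelName.includes('o1') || modelName.includes('o3')) {
    const reasoning: OpenAIClient.Reasoning = {}
    if (reasoningEffort) reasoning.effort = reasoningEffort
    if (reasoningSummary) reasoning.summary = reasoningSummary
    obj.reasoning = reasoning
}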
packages/components/nodes/chatmodels/AzureChatOpenAI/FlowiseAzureChatOpenAI.ts
@@ -6,6 +6,7 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
configuredModel: string
configuredMaxToken?: number
multiModalOption: IMultiModalOption
+builtInTools: Record<string, any>[] = []
id: string

constructor(
@@ -27,7 +28,7 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
}

revertToOriginalModel(): void {
-this.modelName = this.configuredModel
+this.model = this.configuredModel
this.maxTokens = this.configuredMaxToken
}

@@ -38,4 +39,8 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
setVisionModel(): void {
// pass
}

+addBuiltInTools(builtInTool: Record<string, any>): void {
+this.builtInTools.push(builtInTool)
+}
}
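The wrapper above replaces the deprecated modelName setter with model and adds a builtInTools accumulator. A self-contained mirror of the new method, for illustration only; the tool payload shape is hypothetical, not defined by this PR:

// Minimal mirror of the accumulator added to the Flowise wrapper
class ToolHolder {
    builtInTools: Record<string, any>[] = []

    addBuiltInTools(builtInTool: Record<string, any>): void {
        // Each call appends one tool definition; nothing is deduplicated
        this.builtInTools.push(builtInTool)
    }
}

const holder = new ToolHolder()
holder.addBuiltInTools({ type: 'web_search_preview' }) // hypothetical payload
console.log(holder.builtInTools) // [ { type: 'web_search_preview' } ]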
packages/components/nodes/chatmodels/ChatCerebras/ChatCerebras.ts
@@ -136,7 +136,8 @@ class ChatCerebras_ChatModels implements INode {

const obj: ChatOpenAIFields = {
temperature: parseFloat(temperature),
-modelName,
+model: modelName,
+apiKey: cerebrasAIApiKey,
openAIApiKey: cerebrasAIApiKey,
streaming: streaming ?? true
}
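The Cerebras hunk shows a pattern that repeats in the ChatLitellm, ChatLocalAI, and ChatNvdiaNIM hunks below: populate the newer model/apiKey fields while still filling the deprecated modelName/openAIApiKey aliases. A condensed sketch, assuming @langchain/openai accepts both spellings during the deprecation window; the key value is hypothetical:

import { ChatOpenAIFields } from '@langchain/openai'

const cerebrasAIApiKey = 'csk-...' // hypothetical credential

const fields: ChatOpenAIFields = {
    model: 'llama3.1-8b',           // preferred over the deprecated `modelName`
    apiKey: cerebrasAIApiKey,       // preferred over the deprecated `openAIApiKey`
    openAIApiKey: cerebrasAIApiKey, // kept so older readers of this field keep working
    streaming: true
}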
packages/components/nodes/chatmodels/ChatFireworks/ChatFireworks.ts
@@ -1,7 +1,7 @@
import { BaseCache } from '@langchain/core/caches'
-import { ChatFireworks } from '@langchain/community/chat_models/fireworks'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { ChatFireworks, ChatFireworksParams } from './core'

class ChatFireworks_ChatModels implements INode {
label: string
@@ -41,8 +41,8 @@
label: 'Model',
name: 'modelName',
type: 'string',
-default: 'accounts/fireworks/models/llama-v2-13b-chat',
-placeholder: 'accounts/fireworks/models/llama-v2-13b-chat'
+default: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
+placeholder: 'accounts/fireworks/models/llama-v3p1-8b-instruct'
},
{
label: 'Temperature',
@@ -71,9 +71,8 @@
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const fireworksApiKey = getCredentialParam('fireworksApiKey', credentialData, nodeData)

-const obj: Partial<ChatFireworks> = {
+const obj: ChatFireworksParams = {
fireworksApiKey,
-model: modelName,
modelName,
temperature: temperature ? parseFloat(temperature) : undefined,
streaming: streaming ?? true
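With the import switched to the local core module, the node types its config as ChatFireworksParams. A condensed sketch of the construction path, with hypothetical values standing in for nodeData.inputs:

import { ChatFireworks, ChatFireworksParams } from './core'

// Hypothetical stand-ins for nodeData.inputs
const modelName = 'accounts/fireworks/models/llama-v3p1-8b-instruct'
const fireworksApiKey = 'fw-...'

const obj: ChatFireworksParams = {
    fireworksApiKey,
    modelName, // the vendored class maps this onto `model`, falling back to llama-v3p1-8b-instruct
    temperature: 0.7,
    streaming: true
}

const model = new ChatFireworks(obj)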
packages/components/nodes/chatmodels/ChatFireworks/core.ts (new file, 126 additions)
@@ -0,0 +1,126 @@
import type { BaseChatModelParams, LangSmithParams } from '@langchain/core/language_models/chat_models'
import {
type OpenAIClient,
type ChatOpenAICallOptions,
type OpenAIChatInput,
type OpenAICoreRequestOptions,
ChatOpenAICompletions
} from '@langchain/openai'

import { getEnvironmentVariable } from '@langchain/core/utils/env'

type FireworksUnsupportedArgs = 'frequencyPenalty' | 'presencePenalty' | 'logitBias' | 'functions'

type FireworksUnsupportedCallOptions = 'functions' | 'function_call'

export type ChatFireworksCallOptions = Partial<Omit<ChatOpenAICallOptions, FireworksUnsupportedCallOptions>>

export type ChatFireworksParams = Partial<Omit<OpenAIChatInput, 'openAIApiKey' | FireworksUnsupportedArgs>> &
BaseChatModelParams & {
/**
* Prefer `apiKey`
*/
fireworksApiKey?: string
/**
* The Fireworks API key to use.
*/
apiKey?: string
}

export class ChatFireworks extends ChatOpenAICompletions<ChatFireworksCallOptions> {
static lc_name() {
return 'ChatFireworks'
}

_llmType() {
return 'fireworks'
}

get lc_secrets(): { [key: string]: string } | undefined {
return {
fireworksApiKey: 'FIREWORKS_API_KEY',
apiKey: 'FIREWORKS_API_KEY'
}
}

lc_serializable = true

fireworksApiKey?: string

apiKey?: string

constructor(fields?: ChatFireworksParams) {
const fireworksApiKey = fields?.apiKey || fields?.fireworksApiKey || getEnvironmentVariable('FIREWORKS_API_KEY')

if (!fireworksApiKey) {
throw new Error(
`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`
)
}

super({
...fields,
model: fields?.model || fields?.modelName || 'accounts/fireworks/models/llama-v3p1-8b-instruct',
apiKey: fireworksApiKey,
configuration: {
baseURL: 'https://api.fireworks.ai/inference/v1'
},
streamUsage: false
})

this.fireworksApiKey = fireworksApiKey
this.apiKey = fireworksApiKey
}

getLsParams(options: any): LangSmithParams {
const params = super.getLsParams(options)
params.ls_provider = 'fireworks'
return params
}

toJSON() {
const result = super.toJSON()

if ('kwargs' in result && typeof result.kwargs === 'object' && result.kwargs != null) {
delete result.kwargs.openai_api_key
delete result.kwargs.configuration
}

return result
}

// eslint-disable-next-line
async completionWithRetry(
request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
options?: OpenAICoreRequestOptions
): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>

// eslint-disable-next-line
async completionWithRetry(
request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
options?: OpenAICoreRequestOptions
): Promise<OpenAIClient.Chat.Completions.ChatCompletion>

/**
* Calls the Fireworks API with retry logic in case of failures.
* @param request The request to send to the Fireworks API.
* @param options Optional configuration for the API call.
* @returns The response from the Fireworks API.
*/
// eslint-disable-next-line
async completionWithRetry(
request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
options?: OpenAICoreRequestOptions
): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk> | OpenAIClient.Chat.Completions.ChatCompletion> {
delete request.frequency_penalty
delete request.presence_penalty
delete request.logit_bias
delete request.functions

if (request.stream === true) {
return super.completionWithRetry(request, options)
}

return super.completionWithRetry(request, options)
}
}
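A short usage sketch for the vendored class above. Per the constructor, the key resolves from apiKey, then fireworksApiKey, then the FIREWORKS_API_KEY environment variable, and completionWithRetry strips the parameters Fireworks does not support (frequency_penalty, presence_penalty, logit_bias, functions) before delegating:

import { ChatFireworks } from './core'

async function demo(): Promise<void> {
    // Assumes FIREWORKS_API_KEY is set; otherwise the constructor throws
    const chat = new ChatFireworks({
        model: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
        temperature: 0.7
    })

    const res = await chat.invoke('Say hello in one sentence.')
    console.log(res.content)
}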
packages/components/nodes/chatmodels/ChatLitellm/ChatLitellm.ts
@@ -124,7 +124,10 @@ class ChatLitellm_ChatModels implements INode {
if (topP) obj.topP = parseFloat(topP)
if (timeout) obj.timeout = parseInt(timeout, 10)
if (cache) obj.cache = cache
-if (apiKey) obj.openAIApiKey = apiKey
+if (apiKey) {
+obj.openAIApiKey = apiKey
+obj.apiKey = apiKey
+}

const model = new ChatOpenAI(obj)

packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts
@@ -111,14 +111,18 @@ class ChatLocalAI_ChatModels implements INode {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: 'sk-',
+apiKey: 'sk-',
streaming: streaming ?? true
}

if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (timeout) obj.timeout = parseInt(timeout, 10)
if (cache) obj.cache = cache
-if (localAIApiKey) obj.openAIApiKey = localAIApiKey
+if (localAIApiKey) {
+obj.openAIApiKey = localAIApiKey
+obj.apiKey = localAIApiKey
+}
if (basePath) obj.configuration = { baseURL: basePath }

const model = new ChatOpenAI(obj)
packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts
@@ -137,6 +137,7 @@ class ChatNvdiaNIM_ChatModels implements INode {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: nvidiaNIMApiKey ?? 'sk-',
+apiKey: nvidiaNIMApiKey ?? 'sk-',
streaming: streaming ?? true
}
