Skip to content

Commit 89da79a

Browse files
committed
移除OpenAIFunctionCall
1 parent ac3b8d4 commit 89da79a

File tree

4 files changed

+16
-235
lines changed

4 files changed

+16
-235
lines changed

config.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
from typing import Any, Optional
77

88
# Client Configuration
9-
CLI_VERSION = "0.1.5" # Match current gemini-cli version
109

1110
# 需要自动封禁的错误码 (默认值,可通过环境变量或配置覆盖)
1211
AUTO_BAN_ERROR_CODES = [401, 403]

src/models.py

Lines changed: 0 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -13,34 +13,12 @@ class ModelList(BaseModel):
1313
object: str = "list"
1414
data: List[Model]
1515

16-
# OpenAI Function Calling Models
17-
class OpenAIFunctionCall(BaseModel):
18-
name: str
19-
arguments: str
20-
21-
class OpenAIToolCall(BaseModel):
22-
id: str
23-
type: str = "function"
24-
function: OpenAIFunctionCall
25-
26-
class OpenAIFunction(BaseModel):
27-
name: str
28-
description: str
29-
parameters: Dict[str, Any]
30-
strict: Optional[bool] = None
31-
32-
class OpenAITool(BaseModel):
33-
type: str = "function"
34-
function: OpenAIFunction
35-
3616
# OpenAI Models
3717
class OpenAIChatMessage(BaseModel):
3818
role: str
3919
content: Union[str, List[Dict[str, Any]], None] = None
4020
reasoning_content: Optional[str] = None
4121
name: Optional[str] = None
42-
tool_calls: Optional[List[OpenAIToolCall]] = None
43-
tool_call_id: Optional[str] = None
4422

4523
class OpenAIChatCompletionRequest(BaseModel):
4624
model: str
@@ -57,8 +35,6 @@ class OpenAIChatCompletionRequest(BaseModel):
5735
response_format: Optional[Dict[str, Any]] = None
5836
top_k: Optional[int] = Field(None, ge=1)
5937
enable_anti_truncation: Optional[bool] = False
60-
tools: Optional[List[OpenAITool]] = None
61-
tool_choice: Optional[Union[str, Dict[str, Any]]] = None
6238

6339
class Config:
6440
extra = "allow" # Allow additional fields not explicitly defined
@@ -163,38 +139,6 @@ class GeminiResponse(BaseModel):
163139
usageMetadata: Optional[GeminiUsageMetadata] = None
164140
modelVersion: Optional[str] = None
165141

166-
# Universal Request Model (auto-detects format)
167-
class UniversalChatRequest(BaseModel):
168-
"""通用聊天请求模型,可以自动检测和处理不同格式"""
169-
model: str
170-
# OpenAI 风格字段
171-
messages: Optional[List[OpenAIChatMessage]] = None
172-
stream: Optional[bool] = False
173-
temperature: Optional[float] = None
174-
top_p: Optional[float] = None
175-
max_tokens: Optional[int] = None
176-
stop: Optional[Union[str, List[str]]] = None
177-
frequency_penalty: Optional[float] = None
178-
presence_penalty: Optional[float] = None
179-
n: Optional[int] = None
180-
seed: Optional[int] = None
181-
response_format: Optional[Dict[str, Any]] = None
182-
top_k: Optional[int] = None
183-
184-
# Gemini 风格字段
185-
contents: Optional[List[GeminiContent]] = None
186-
systemInstruction: Optional[GeminiSystemInstruction] = None
187-
generationConfig: Optional[GeminiGenerationConfig] = None
188-
safetySettings: Optional[List[GeminiSafetySetting]] = None
189-
tools: Optional[List[Dict[str, Any]]] = None
190-
toolConfig: Optional[Dict[str, Any]] = None
191-
192-
# 通用字段
193-
enable_anti_truncation: Optional[bool] = False
194-
195-
class Config:
196-
extra = "allow"
197-
198142
# Error Models
199143
class APIError(BaseModel):
200144
message: str

src/openai_transfer.py

Lines changed: 14 additions & 147 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
OpenAI Transfer Module - Handles conversion between OpenAI and Gemini API formats
33
被openai-router调用,负责OpenAI格式与Gemini格式的双向转换
44
"""
5-
import json
65
import time
76
import uuid
87
from typing import Dict, Any
@@ -67,51 +66,8 @@ async def openai_request_to_gemini_payload(openai_request: ChatCompletionRequest
6766
if role == "assistant":
6867
role = "model"
6968

70-
# 处理工具调用响应消息(role为tool)
71-
if role == "tool" and message.tool_call_id and message.content:
72-
# 工具调用结果消息转换为Gemini格式
73-
# 注意:这里我们需要从message.name中获取函数名,如果没有则跳过
74-
function_name = getattr(message, 'name', None)
75-
if not function_name:
76-
log.error(f"Tool response message missing function name for tool_call_id: {message.tool_call_id}")
77-
log.error("This will cause a 400 error from Gemini API: 'please ensure that the number of function response parts is equal to the number of function call parts'")
78-
log.error("Solution: Include 'name' field in tool messages matching the original function name")
79-
continue
80-
81-
parts = [{
82-
"functionResponse": {
83-
"name": function_name,
84-
"response": {"result": message.content}
85-
}
86-
}]
87-
contents.append({"role": "function", "parts": parts})
88-
log.debug(f"Added tool response to contents: function={function_name}, tool_call_id={message.tool_call_id}")
89-
continue
90-
91-
# 处理工具调用(从assistant消息中的tool_calls)
92-
if hasattr(message, 'tool_calls') and message.tool_calls:
93-
# 如果有工具调用,优先处理工具调用,忽略文本内容
94-
tool_parts = []
95-
for tool_call in message.tool_calls:
96-
if tool_call.type == "function":
97-
try:
98-
args = json.loads(tool_call.function.arguments)
99-
except json.JSONDecodeError:
100-
args = {}
101-
102-
tool_parts.append({
103-
"functionCall": {
104-
"name": tool_call.function.name,
105-
"args": args
106-
}
107-
})
108-
109-
if tool_parts:
110-
contents.append({"role": role, "parts": tool_parts})
111-
log.debug(f"Added tool calls to contents: {[tc.function.name for tc in message.tool_calls]}")
112-
113-
# 处理普通内容(只有在没有工具调用的情况下)
114-
elif isinstance(message.content, list):
69+
# 处理普通内容
70+
if isinstance(message.content, list):
11571
parts = []
11672
for part in message.content:
11773
if part.get("type") == "text":
@@ -191,77 +147,10 @@ async def openai_request_to_gemini_payload(openai_request: ChatCompletionRequest
191147
"thinkingBudget": thinking_budget,
192148
"includeThoughts": should_include_thoughts(openai_request.model)
193149
}
194-
195-
# 处理工具定义转换(OpenAI tools -> Gemini tools)
196-
if openai_request.tools:
197-
function_declarations = []
198-
for tool in openai_request.tools:
199-
if tool.type == "function":
200-
# 清理parameters中的additionalProperties字段,Gemini API不支持
201-
parameters = tool.function.parameters.copy() if tool.function.parameters else {}
202-
if "additionalProperties" in parameters:
203-
del parameters["additionalProperties"]
204-
205-
# 递归清理嵌套对象中的additionalProperties
206-
def clean_additional_properties(obj):
207-
if isinstance(obj, dict):
208-
# 删除当前层级的additionalProperties
209-
if "additionalProperties" in obj:
210-
del obj["additionalProperties"]
211-
# 递归处理嵌套的对象
212-
for key, value in obj.items():
213-
if isinstance(value, dict):
214-
clean_additional_properties(value)
215-
elif isinstance(value, list):
216-
for item in value:
217-
if isinstance(item, dict):
218-
clean_additional_properties(item)
219-
220-
clean_additional_properties(parameters)
221-
222-
function_def = {
223-
"name": tool.function.name,
224-
"description": tool.function.description,
225-
"parameters": parameters
226-
}
227-
function_declarations.append(function_def)
228-
229-
# 所有函数声明放在一个工具对象中
230-
if function_declarations:
231-
request_data["tools"] = [{"functionDeclarations": function_declarations}]
232-
233-
# 处理工具选择转换(OpenAI tool_choice -> Gemini toolConfig)
234-
if openai_request.tool_choice:
235-
if isinstance(openai_request.tool_choice, str):
236-
if openai_request.tool_choice == "auto":
237-
# Gemini默认就是auto模式,无需特殊设置
238-
pass
239-
elif openai_request.tool_choice == "required":
240-
request_data["toolConfig"] = {"functionCallingConfig": {"mode": "ANY"}}
241-
elif openai_request.tool_choice == "none":
242-
request_data["toolConfig"] = {"functionCallingConfig": {"mode": "NONE"}}
243-
elif isinstance(openai_request.tool_choice, dict) and openai_request.tool_choice.get("type") == "function":
244-
# 强制调用特定函数
245-
function_name = openai_request.tool_choice.get("function", {}).get("name")
246-
if function_name:
247-
request_data["toolConfig"] = {
248-
"functionCallingConfig": {
249-
"mode": "ANY",
250-
"allowedFunctionNames": [function_name]
251-
}
252-
}
253150

254-
# 为搜索模型添加Google Search工具(只有在没有functionDeclarations时才添加)
151+
# 为搜索模型添加Google Search工具
255152
if is_search_model(openai_request.model):
256-
if "tools" not in request_data:
257-
request_data["tools"] = []
258-
request_data["tools"].append({"googleSearch": {}})
259-
elif not any("functionDeclarations" in tool for tool in request_data["tools"]):
260-
# 只有在没有函数工具时才添加googleSearch
261-
request_data["tools"].append({"googleSearch": {}})
262-
else:
263-
# 如果已有函数工具,不添加googleSearch(Gemini API不支持混合工具类型)
264-
log.debug("Skipping googleSearch tool because functionDeclarations are present")
153+
request_data["tools"] = [{"googleSearch": {}}]
265154

266155
# 移除None值
267156
request_data = {k: v for k, v in request_data.items() if v is not None}
@@ -273,10 +162,9 @@ def clean_additional_properties(obj):
273162
}
274163

275164
def _extract_content_and_reasoning(parts: list) -> tuple:
276-
"""从Gemini响应部件中提取内容、推理内容和函数调用"""
165+
"""从Gemini响应部件中提取内容和推理内容"""
277166
content = ""
278167
reasoning_content = ""
279-
tool_calls = []
280168

281169
for part in parts:
282170
# 处理文本内容
@@ -286,35 +174,16 @@ def _extract_content_and_reasoning(parts: list) -> tuple:
286174
reasoning_content += part.get("text", "")
287175
else:
288176
content += part.get("text", "")
289-
290-
# 处理函数调用
291-
elif part.get("functionCall"):
292-
function_call = part["functionCall"]
293-
tool_call = {
294-
"id": f"call_{uuid.uuid4().hex[:8]}",
295-
"type": "function",
296-
"function": {
297-
"name": function_call.get("name", ""),
298-
"arguments": json.dumps(function_call.get("args", {}))
299-
}
300-
}
301-
tool_calls.append(tool_call)
302177

303-
return content, reasoning_content, tool_calls
178+
return content, reasoning_content
304179

305-
def _build_message_with_reasoning(role: str, content: str, reasoning_content: str, tool_calls: list = None) -> dict:
306-
"""构建包含可选推理内容和工具调用的消息对象"""
180+
def _build_message_with_reasoning(role: str, content: str, reasoning_content: str) -> dict:
181+
"""构建包含可选推理内容的消息对象"""
307182
message = {
308183
"role": role,
184+
"content": content
309185
}
310186

311-
# 如果有工具调用,设置content为None,否则设置文本内容
312-
if tool_calls:
313-
message["tool_calls"] = tool_calls
314-
message["content"] = None if not content.strip() else content
315-
else:
316-
message["content"] = content
317-
318187
# 如果有thinking tokens,添加reasoning_content
319188
if reasoning_content:
320189
message["reasoning_content"] = reasoning_content
@@ -341,12 +210,12 @@ def gemini_response_to_openai(gemini_response: Dict[str, Any], model: str) -> Di
341210
if role == "model":
342211
role = "assistant"
343212

344-
# 提取并分离thinking tokens、常规内容和工具调用
213+
# 提取并分离thinking tokens和常规内容
345214
parts = candidate.get("content", {}).get("parts", [])
346-
content, reasoning_content, tool_calls = _extract_content_and_reasoning(parts)
215+
content, reasoning_content = _extract_content_and_reasoning(parts)
347216

348217
# 构建消息对象
349-
message = _build_message_with_reasoning(role, content, reasoning_content, tool_calls)
218+
message = _build_message_with_reasoning(role, content, reasoning_content)
350219

351220
choices.append({
352221
"index": candidate.get("index", 0),
@@ -383,18 +252,16 @@ def gemini_stream_chunk_to_openai(gemini_chunk: Dict[str, Any], model: str, resp
383252
if role == "model":
384253
role = "assistant"
385254

386-
# 提取并分离thinking tokens、常规内容和工具调用
255+
# 提取并分离thinking tokens和常规内容
387256
parts = candidate.get("content", {}).get("parts", [])
388-
content, reasoning_content, tool_calls = _extract_content_and_reasoning(parts)
257+
content, reasoning_content = _extract_content_and_reasoning(parts)
389258

390259
# 构建delta对象
391260
delta = {}
392261
if content:
393262
delta["content"] = content
394263
if reasoning_content:
395264
delta["reasoning_content"] = reasoning_content
396-
if tool_calls:
397-
delta["tool_calls"] = tool_calls
398265

399266
choices.append({
400267
"index": candidate.get("index", 0),

src/utils.py

Lines changed: 2 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -1,39 +1,10 @@
11
import platform
22

3-
from config import CLI_VERSION
3+
CLI_VERSION = "0.1.5" # Match current gemini-cli version
44

55
def get_user_agent():
66
"""Generate User-Agent string matching gemini-cli format."""
77
version = CLI_VERSION
88
system = platform.system()
99
arch = platform.machine()
10-
return f"GeminiCLI/{version} ({system}; {arch})"
11-
12-
def get_platform_string():
13-
"""Generate platform string matching gemini-cli format."""
14-
system = platform.system().upper()
15-
arch = platform.machine().upper()
16-
17-
# Map to gemini-cli platform format
18-
if system == "DARWIN":
19-
if arch in ["ARM64", "AARCH64"]:
20-
return "DARWIN_ARM64"
21-
else:
22-
return "DARWIN_AMD64"
23-
elif system == "LINUX":
24-
if arch in ["ARM64", "AARCH64"]:
25-
return "LINUX_ARM64"
26-
else:
27-
return "LINUX_AMD64"
28-
elif system == "WINDOWS":
29-
return "WINDOWS_AMD64"
30-
else:
31-
return "PLATFORM_UNSPECIFIED"
32-
33-
def get_client_metadata(project_id=None):
34-
return {
35-
"ideType": "IDE_UNSPECIFIED",
36-
"platform": get_platform_string(),
37-
"pluginType": "GEMINI",
38-
"duetProject": project_id,
39-
}
10+
return f"GeminiCLI/{version} ({system}; {arch})"

0 commit comments

Comments (0)