
Commit e38f2be

jameszyao authored and SimsonW committed
feat: Add chat completion example
1 parent aa2c7f1 commit e38f2be

10 files changed: +385 -10 lines changed


examples/assistant/conversation.ipynb renamed to examples/assistant/chat_with_assistant.ipynb

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
   {
    "cell_type": "markdown",
    "source": [
-    "# Chat with Assistant\n",
+    "# TaskingAI: Chat with Assistant Example\n",
     "\n",
     "In this example, we will first create an assistant who knows the meaning of various numbers and will explain it in certain language.\n",
     "Then we will start a chat with the assistant."

examples/crud/assistant_crud.ipynb

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
   {
    "cell_type": "markdown",
    "source": [
-    "# TasingAI Assistant Module CRUD"
+    "# TasingAI Assistant Module CRUD Example"
    ],
    "metadata": {
     "collapsed": false

examples/crud/retrieval_crud.ipynb

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
   {
    "cell_type": "markdown",
    "source": [
-    "# TasingAI Retrieval Module CRUD"
+    "# TasingAI Retrieval Module CRUD Example"
    ],
    "metadata": {
     "collapsed": false

examples/crud/tool_crud.ipynb

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
   {
    "cell_type": "markdown",
    "source": [
-    "# TasingAI Tool Module CRUD"
+    "# TasingAI Tool Module CRUD Exampple"
    ],
    "metadata": {
     "collapsed": false
Lines changed: 288 additions & 0 deletions
@@ -0,0 +1,288 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [],
   "source": [
    "import taskingai\n",
    "# Load TaskingAI API Key from environment variable"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-28T12:07:45.292065Z",
     "start_time": "2023-11-28T12:07:45.248068Z"
    }
   },
   "id": "1a6bfd1682fcb23f"
  },
  {
   "cell_type": "markdown",
   "source": [
    "# TasingAI Model Inference: Chat Completion"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "657463bd357a3c3"
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "from taskingai.inference import *\n",
    "import json\n",
    "# choose an available chat_completion model from your project\n",
    "model_id = \"Gk1145Bl\""
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-28T12:07:46.170766Z",
     "start_time": "2023-11-28T12:07:46.157613Z"
    }
   },
   "id": "49abde692940b09e"
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [
    {
     "data": {
      "text/plain": "{'created_timestamp': 1701173269243,\n 'finish_reason': 'stop',\n 'message': {'content': 'Hello! How can I assist you today?',\n  'function_call': None,\n  'role': 'assistant'},\n 'object': 'ChatCompletion'}"
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# normal \n",
    "chat_completion = taskingai.inference.chat_completion(\n",
    "    model_id=model_id,\n",
    "    messages=[\n",
    "        SystemMessage(\"You are a professional assistant.\"),\n",
    "        UserMessage(\"Hi\"),\n",
    "    ]\n",
    ")\n",
    "chat_completion"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-28T12:07:49.355234Z",
     "start_time": "2023-11-28T12:07:46.700962Z"
    }
   },
   "id": "43dcc632665f0de4"
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [
    {
     "data": {
      "text/plain": "{'created_timestamp': 1701173272255,\n 'finish_reason': 'stop',\n 'message': {'content': \"Of course! Here's another joke for you: Why don't \"\n            \"skeletons fight each other? They don't have the guts!\",\n 'function_call': None,\n 'role': 'assistant'},\n 'object': 'ChatCompletion'}"
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# multi round chat completion\n",
    "chat_completion = taskingai.inference.chat_completion(\n",
    "    model_id=model_id,\n",
    "    messages=[\n",
    "        SystemMessage(\"You are a professional assistant.\"),\n",
    "        UserMessage(\"Hi\"),\n",
    "        AssistantMessage(\"Hello! How can I assist you today?\"),\n",
    "        UserMessage(\"Can you tell me a joke?\"),\n",
    "        AssistantMessage(\"Sure, here is a joke for you: Why don't scientists trust atoms? Because they make up everything!\"),\n",
    "        UserMessage(\"That's funny. Can you tell me another one?\"),\n",
    "    ]\n",
    ")\n",
    "chat_completion"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-28T12:07:52.367618Z",
     "start_time": "2023-11-28T12:07:50.109888Z"
    }
   },
   "id": "e8933bc07f4b3228"
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [
    {
     "data": {
      "text/plain": "{'created_timestamp': 1701173274744,\n 'finish_reason': 'length',\n 'message': {'content': \"Of course! Here's\",\n 'function_call': None,\n 'role': 'assistant'},\n 'object': 'ChatCompletion'}"
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# config max tokens\n",
    "chat_completion = taskingai.inference.chat_completion(\n",
    "    model_id=model_id,\n",
    "    messages=[\n",
    "        SystemMessage(\"You are a professional assistant.\"),\n",
    "        UserMessage(\"Hi\"),\n",
    "        AssistantMessage(\"Hello! How can I assist you today?\"),\n",
    "        UserMessage(\"Can you tell me a joke?\"),\n",
    "        AssistantMessage(\"Sure, here is a joke for you: Why don't scientists trust atoms? Because they make up everything!\"),\n",
    "        UserMessage(\"That's funny. Can you tell me another one?\"),\n",
    "    ],\n",
    "    configs={\n",
    "        \"max_tokens\": 5\n",
    "    }\n",
    ")\n",
    "chat_completion"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-28T12:07:54.817719Z",
     "start_time": "2023-11-28T12:07:53.137411Z"
    }
   },
   "id": "f7c1b8be2579d9e0"
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "chat_completion = {'created_timestamp': 1701173277776,\n",
      " 'finish_reason': 'function_call',\n",
      " 'message': {'content': None,\n",
      "             'function_call': {'arguments': {'a': 112, 'b': 22},\n",
      "                               'name': 'plus_a_and_b'},\n",
      "             'role': 'assistant'},\n",
      " 'object': 'ChatCompletion'}\n",
      "function name: plus_a_and_b, argument content: {\"a\": 112, \"b\": 22}\n"
     ]
    }
   ],
   "source": [
    "# function call\n",
    "function = Function(\n",
    "    name=\"plus_a_and_b\",\n",
    "    description=\"Sum up a and b and return the result\",\n",
    "    parameters={\n",
    "        \"type\": \"object\",\n",
    "        \"properties\": {\n",
    "            \"a\": {\n",
    "                \"type\": \"integer\",\n",
    "                \"description\": \"The first number\"\n",
    "            },\n",
    "            \"b\": {\n",
    "                \"type\": \"integer\",\n",
    "                \"description\": \"The second number\"\n",
    "            }\n",
    "        },\n",
    "        \"required\": [\"a\", \"b\"]\n",
    "    },\n",
    ")\n",
    "chat_completion = taskingai.inference.chat_completion(\n",
    "    model_id=model_id,\n",
    "    messages=[\n",
    "        SystemMessage(\"You are a professional assistant.\"),\n",
    "        UserMessage(\"What is the result of 112 plus 22?\"),\n",
    "    ],\n",
    "    functions=[function]\n",
    ")\n",
    "print(f\"chat_completion = {chat_completion}\")\n",
    "\n",
    "assistant_function_call_message = chat_completion.message\n",
    "fucntion_name = assistant_function_call_message.function_call.name\n",
    "argument_content = json.dumps(assistant_function_call_message.function_call.arguments)\n",
    "print(f\"function name: {fucntion_name}, argument content: {argument_content}\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-28T12:07:57.823570Z",
     "start_time": "2023-11-28T12:07:55.601317Z"
    }
   },
   "id": "2645bdc3df011e7d"
  },
  {
   "cell_type": "markdown",
   "source": [],
   "metadata": {
    "collapsed": false
   },
   "id": "ed6957f0c380ba9f"
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [
    {
     "data": {
      "text/plain": "{'created_timestamp': 1701173282280,\n 'finish_reason': 'stop',\n 'message': {'content': 'The result of 112 plus 22 is 144.',\n 'function_call': None,\n 'role': 'assistant'},\n 'object': 'ChatCompletion'}"
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# add function message\n",
    "chat_completion = taskingai.inference.chat_completion(\n",
    "    model_id=model_id,\n",
    "    messages=[\n",
    "        SystemMessage(\"You are a professional assistant.\"),\n",
    "        UserMessage(\"What is the result of 112 plus 22?\"),\n",
    "        assistant_function_call_message,\n",
    "        FunctionMessage(name=fucntion_name, content=\"144\")\n",
    "    ],\n",
    "    functions=[function]\n",
    ")\n",
    "chat_completion"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-28T12:08:02.319026Z",
     "start_time": "2023-11-28T12:08:00.109622Z"
    }
   },
   "id": "9df9a8b9eafa17d9"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
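The last two cells of the new notebook split the function-call round trip into separate requests, with the function result ("144") filled in by hand. Below is a minimal sketch of the same loop, not part of the commit: it reuses model_id and the Function object defined in the cells above, executes the requested function locally, and assumes finish_reason is exposed as an attribute, as the printed ChatCompletion objects suggest.

# Sketch (not part of this commit): run the requested function locally and
# feed its result back as a FunctionMessage. The message classes and
# chat_completion come from `from taskingai.inference import *` as in the
# notebook; model_id and function are the objects defined there.
def plus_a_and_b(a: int, b: int) -> int:
    return a + b

local_functions = {"plus_a_and_b": plus_a_and_b}

messages = [
    SystemMessage("You are a professional assistant."),
    UserMessage("What is the result of 112 plus 22?"),
]
completion = taskingai.inference.chat_completion(
    model_id=model_id,
    messages=messages,
    functions=[function],
)

if completion.finish_reason == "function_call":  # assumed attribute, per the printed output
    call = completion.message.function_call
    result = local_functions[call.name](**call.arguments)
    messages.append(completion.message)  # echo the assistant's function call back
    messages.append(FunctionMessage(name=call.name, content=str(result)))
    completion = taskingai.inference.chat_completion(
        model_id=model_id,
        messages=messages,
        functions=[function],
    )

print(completion.message.content)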
Lines changed: 46 additions & 0 deletions
@@ -0,0 +1,46 @@
import taskingai
import os
# Load TaskingAI API Key from environment variable
model_id = os.environ.get("MODEL_ID")
print(taskingai.Config.API_KEY, taskingai.Config.HOST, model_id)

from taskingai.inference import *
# choose an available chat_completion model from your project

# function call
function = Function(
    name="plus_a_and_b",
    description="Sum up a and b and return the result",
    parameters={
        "type": "object",
        "properties": {
            "a": {
                "type": "integer",
                "description": "The first number"
            },
            "b": {
                "type": "integer",
                "description": "The second number"
            }
        },
        "required": ["a", "b"]
    },
)
chat_completion = taskingai.inference.chat_completion(
    model_id=model_id,
    messages=[
        SystemMessage("You are a professional assistant."),
        UserMessage("What is the result of 112 plus 22?"),
        AssistantMessage(
            function_call=FunctionCall(name="plus_a_and_b", arguments={"a": 112, "b": 22}),
        ),
        FunctionMessage(
            name="plus_a_and_b",
            content="144"
        )
    ],
    functions=[function]
)


print(chat_completion.message)
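For reference, the fields shown in the notebook's printed ChatCompletion objects can be read off the object returned by this script; a small sketch, assuming those fields are exposed as attributes:

# Sketch: inspecting the response produced by the script above.
print(chat_completion.finish_reason)          # "stop" once the function result has been supplied
print(chat_completion.message.role)           # "assistant"
print(chat_completion.message.content)        # the model's natural-language answer
print(chat_completion.message.function_call)  # None unless another function call is requested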

examples/inference/text_embedding.py

Whitespace-only changes.

taskingai/assistant/message.py

Lines changed: 2 additions & 0 deletions
@@ -110,6 +110,7 @@ def create_user_message(
 
 def update_message(
     assistant_id: str,
+    chat_id: str,
     message_id: str,
     metadata: Dict[str, str],
 ) -> Message:
@@ -128,6 +129,7 @@ def update_message(
     )
     response: MessageUpdateResponse = api_instance.update_message(
         assistant_id=assistant_id,
+        chat_id=chat_id,
         message_id=message_id,
         body=body
     )
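With this fix, update_message identifies a message by both its chat and its id. A call against the updated signature might look like the sketch below; the IDs and metadata are placeholders, and the import path assumes update_message is re-exported from taskingai.assistant along with the other message helpers.

# Sketch: calling update_message after this commit; chat_id is now required.
from taskingai.assistant import update_message

message = update_message(
    assistant_id="YOUR_ASSISTANT_ID",  # placeholder
    chat_id="YOUR_CHAT_ID",            # newly required by this change
    message_id="YOUR_MESSAGE_ID",      # placeholder
    metadata={"reviewed": "true"},     # metadata is a Dict[str, str]
)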

taskingai/inference/__init__.py

Lines changed: 2 additions & 1 deletion
@@ -1 +1,2 @@
-# todo
+from .chat_completion import *
+from .text_embedding import *
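Replacing the placeholder with these imports is what lets the taskingai.inference.chat_completion calls in the examples above resolve. A minimal sketch of the resulting usage, with the model id as a placeholder:

# Sketch: calling chat completion through the package root after this change.
import taskingai
from taskingai.inference import SystemMessage, UserMessage

completion = taskingai.inference.chat_completion(
    model_id="YOUR_MODEL_ID",  # placeholder: a chat completion model from your project
    messages=[
        SystemMessage("You are a professional assistant."),
        UserMessage("Hi"),
    ],
)
print(completion.message.content)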

0 commit comments