messages.py
"""All user-facing messages and embeds."""
from datetime import datetime

import hikari
from oasst_shared.schemas import protocol as protocol_schema

NUMBER_EMOJIS = [":one:", ":two:", ":three:", ":four:", ":five:", ":six:", ":seven:", ":eight:", ":nine:", ":ten:"]
NL = "\n"

###
# Reusable 'components'
###
def _h1(text: str) -> str:
    return f"\n:small_blue_diamond: __**{text}**__ :small_blue_diamond:"


def _h2(text: str) -> str:
    return f"__**{text}**__"


def _h3(text: str) -> str:
    return f"__{text}__"


def _writing_prompt(text: str) -> str:
    return f":pencil: _{text}_"


def _ranking_prompt(text: str) -> str:
    return f":trophy: _{text}_"


def _label_prompt(text: str) -> str:
    return f":question: _{text}_"


def _response_prompt(text: str) -> str:
    return f":speech_balloon: _{text}_"


def _summarize_prompt(text: str) -> str:
    return f":notepad_spiral: _{text}_"


def _user(text: str | None) -> str:
    return f"""\
:person_red_hair: {_h3("User")}:{f"{NL}> **{text}**" if text is not None else ""}
"""


def _assistant(text: str | None) -> str:
    return f"""\
:robot: {_h3("Assistant")}:{f"{NL}> {text}" if text is not None else ""}
"""


def _make_ordered_list(items: list[str]) -> list[str]:
    return [f"{num} {item}" for num, item in zip(NUMBER_EMOJIS, items)]


def _ordered_list(items: list[str]) -> str:
    return "\n\n".join(_make_ordered_list(items))
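
# A quick illustration of what the ordered-list helpers produce (example values only):
#
#     _make_ordered_list(["Tell me a joke", "What is Rust?"])
#     -> [":one: Tell me a joke", ":two: What is Rust?"]
#
#     _ordered_list(["Tell me a joke", "What is Rust?"])
#     -> ":one: Tell me a joke\n\n:two: What is Rust?"
#
# Because of zip(), only the first len(NUMBER_EMOJIS) == 10 items are numbered; any
# extra items are silently dropped.
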
def _conversation(conv: protocol_schema.Conversation) -> str:
    return "\n".join([_assistant(msg.text) if msg.is_assistant else _user(msg.text) for msg in conv.messages])


def _hint(hint: str | None) -> str:
    return f"{NL}Hint: {hint}" if hint else ""


def _li(text: str) -> str:
    return f":small_blue_diamond: {text}"


###
# Messages
###
def initial_prompt_message(task: protocol_schema.InitialPromptTask) -> str:
    """Creates the message that gets sent to users when they request an `initial_prompt` task."""
    return f"""\
{_h1("INITIAL PROMPT")}
{_writing_prompt("Please provide an initial prompt to the assistant.")}
{_hint(task.hint)}
"""


def rank_initial_prompts_message(task: protocol_schema.RankInitialPromptsTask) -> str:
    """Creates the message that gets sent to users when they request a `rank_initial_prompts` task."""
    return f"""\
{_h1("RANK INITIAL PROMPTS")}
{_ordered_list(task.prompts)}
{_ranking_prompt("Reply with the numbers of the prompts from best to worst, separated by commas (example: '4,1,3,2')")}
"""


def rank_prompter_reply_message(task: protocol_schema.RankPrompterRepliesTask) -> str:
    """Creates the message that gets sent to users when they request a `rank_prompter_replies` task."""
    return f"""\
{_h1("RANK PROMPTER REPLIES")}
{_conversation(task.conversation)}
{_user(None)}
{_ordered_list(task.replies)}
{_ranking_prompt("Reply with the numbers of the replies from best to worst, separated by commas (example: '4,1,3,2')")}
"""


def rank_assistant_reply_message(task: protocol_schema.RankAssistantRepliesTask) -> str:
    """Creates the message that gets sent to users when they request a `rank_assistant_replies` task."""
    return f"""\
{_h1("RANK ASSISTANT REPLIES")}
{_conversation(task.conversation)}
{_assistant(None)}
{_ordered_list(task.replies)}
{_ranking_prompt("Reply with the numbers of the replies from best to worst, separated by commas (example: '4,1,3,2')")}
"""


def label_initial_prompt_message(task: protocol_schema.LabelInitialPromptTask) -> str:
    """Creates the message that gets sent to users when they request a `label_initial_prompt` task."""
    return f"""\
{_h1("LABEL INITIAL PROMPT")}
{task.prompt}
{_label_prompt("Reply with labels for the prompt separated by commas (example: 'profanity,misleading')")}
"""


def label_prompter_reply_message(task: protocol_schema.LabelPrompterReplyTask) -> str:
    """Creates the message that gets sent to users when they request a `label_prompter_reply` task."""
    return f"""\
{_h1("LABEL PROMPTER REPLY")}
{_conversation(task.conversation)}
{_user(None)}
{task.reply}
{_label_prompt("Reply with labels for the reply separated by commas (example: 'profanity,misleading')")}
"""


def label_assistant_reply_message(task: protocol_schema.LabelAssistantReplyTask) -> str:
    """Creates the message that gets sent to users when they request a `label_assistant_reply` task."""
    return f"""\
{_h1("LABEL ASSISTANT REPLY")}
{_conversation(task.conversation)}
{_assistant(None)}
{task.reply}
{_label_prompt("Reply with labels for the reply separated by commas (example: 'profanity,misleading')")}
"""


def prompter_reply_message(task: protocol_schema.PrompterReplyTask) -> str:
    """Creates the message that gets sent to users when they request a `prompter_reply` task."""
    return f"""\
{_h1("PROMPTER REPLY")}
{_conversation(task.conversation)}
{_hint(task.hint)}
{_response_prompt("Please provide a reply to the assistant.")}
"""


def assistant_reply_message(task: protocol_schema.AssistantReplyTask) -> str:
    """Creates the message that gets sent to users when they request an `assistant_reply` task."""
    return f"""\
{_h1("ASSISTANT REPLY")}
{_conversation(task.conversation)}
{_response_prompt("Please provide an assistant reply to the prompter.")}
"""


def confirm_text_response_message(content: str) -> str:
    """Creates the message asking the user to confirm their text response."""
    return f"""\
{_h2("CONFIRM RESPONSE")}
> {content}
"""


def confirm_ranking_response_message(content: str, items: list[str]) -> str:
    """Creates the message asking the user to confirm their ranking.

    The comma-separated ranking is mapped back onto the numbered items, e.g.
    content="2,1" with items ["A", "B"] shows ":two: B" above ":one: A".
    """
    user_rankings = [int(r) for r in content.replace(" ", "").split(",")]
    original_list = _make_ordered_list(items)
    user_ranked_list = "\n\n".join([original_list[r - 1] for r in user_rankings])
    return f"""\
{_h2("CONFIRM RESPONSE")}
{user_ranked_list}
"""


def help_message(can_manage_guild: bool, is_dev: bool) -> str:
    """The /help command message."""
    content = f"""\
{_h1("HELP")}
{_li("**`/help`**")}
Show this message.
{_li("**`/work [type]`**")}
Start a new task.
**`[type]`**:
The type of task to start. If not provided, a random task will be selected. The different types are:
:small_orange_diamond: `random`: A random task type
:small_orange_diamond: ~~`summarize_story`~~ (coming soon)
:small_orange_diamond: ~~`rate_summary`~~ (coming soon)
:small_orange_diamond: `initial_prompt`: Ask the assistant something
:small_orange_diamond: `prompter_reply`: Reply to the assistant
:small_orange_diamond: `assistant_reply`: Reply to the user
:small_orange_diamond: `rank_initial_prompts`: Rank some initial prompts
:small_orange_diamond: `rank_prompter_replies`: Rank some prompter replies
:small_orange_diamond: `rank_assistant_replies`: Rank some assistant replies
To learn how to complete tasks, run `/tutorial`.
"""
    if can_manage_guild:
        content += f"""\
{_li("**`/settings log_channel <channel>`**")}
Set the channel that the bot logs completed task messages in.
**`<channel>`**: The channel to log completed tasks in. The bot needs to be able to send messages in this channel.
{_li("**`/settings get`**")}
Get the current settings.
"""
    if is_dev:
        content += f"""\
{_li("**`/reload [plugin]`**")}
Hot-reload a plugin. Only code *inside* function bodies will be updated.
Any changes to __function signatures__, __other files__, __decorators__, or __imports__ will require a restart.
**`[plugin]`**:
The plugin to hot-reload. If no plugin is provided, all plugins are hot-reloaded.
"""
    return content


def tutorial_message() -> str:
    """The /tutorial command message."""
    # TODO: Finish message
    return f"""\
{_h1("TUTORIAL")}
"""


def confirm_label_response_message(content: str) -> str:
    """Creates the message asking the user to confirm their labels.

    Labels are lower-cased and normalized, e.g. content="Profanity, Misleading"
    is shown back as "profanity, misleading".
    """
    user_labels = content.lower().replace(" ", "").split(",")
    user_labels_str = ", ".join(user_labels)
    return f"""\
{_h2("CONFIRM RESPONSE")}
{user_labels_str}
"""


###
# Embeds
###
def task_complete_embed(task: protocol_schema.Task, mention: str) -> hikari.Embed:
    """Creates the embed announcing that `mention` completed a task."""
    return (
        hikari.Embed(
            title="Task Completion",
            description=f"`{task.type}` completed by {mention}",
            color=hikari.Color(0x00FF00),
            timestamp=datetime.now().astimezone(),
        )
        .add_field("Total Tasks", "0", inline=True)
        .add_field("Server Ranking", "0/0", inline=True)
        .add_field("Global Ranking", "0/0", inline=True)
        .set_footer(f"Task ID: {task.id}")
    )


def invalid_user_input_embed(error_message: str) -> hikari.Embed:
    """Creates the embed shown to a user when their input is invalid."""
    return hikari.Embed(
        title="Invalid User Input",
        description=error_message,
        color=hikari.Color(0xFF0000),
        timestamp=datetime.now().astimezone(),
    )


def plain_embed(text: str) -> hikari.Embed:
    """Creates a minimal embed containing only `text`."""
    return hikari.Embed(color=0x36393F, description=text)
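
# Rough usage sketch (illustrative only; `task`, `ctx`, and `log_channel` are assumed
# names, and the actual command handlers live in the bot's plugin modules rather than here):
#
#     text = initial_prompt_message(task)   # task: protocol_schema.InitialPromptTask
#     await ctx.respond(text)               # show the task to the user
#     ...
#     await log_channel.send(embed=task_complete_embed(task, ctx.author.mention))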