model.py
# Copyright (c) 2022 Cohere Inc. and its affiliates.
#
# Licensed under the MIT License (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License in the LICENSE file at the top
# level of this repository.
import os

import cohere
import numpy as np

from qa.util import pretty_print

_DATA_DIRNAME = os.path.join(os.path.dirname(__file__), "prompt_data")


def get_contextual_search_query(history, co, model="xlarge", verbosity=0):
"""Adds message history context to user query."""
prompt_path = os.path.join(_DATA_DIRNAME, "get_contextual_search_query.prompt")
with open(prompt_path) as f:
prompt = f.read() + f"{history}\n-"
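    # Sample several candidate rewrites; the most likely one is selected below.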
prediction = co.generate(
model=model,
prompt=prompt,
max_tokens=50,
temperature=0.75,
k=0,
p=0.75,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=["\n"],
return_likelihoods="GENERATION",
num_generations=4,
)
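    # Keep the candidate generation with the highest reported likelihood.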
likelihood = [g.likelihood for g in prediction.generations]
result = prediction.generations[np.argmax(likelihood)].text
if verbosity:
pretty_print("OKGREEN", "contextual question prompt: " + prompt)
pretty_print("OKCYAN", "contextual question: " + result)
return result.strip()


def get_sample_answer(question, co, model="xlarge"):
"""Return a sample answer to a question based on the model's training data."""
prompt_path = os.path.join(_DATA_DIRNAME, "get_sample_answer.prompt")
with open(prompt_path) as f:
prompt = f.read() + f"{question}\nAnswer:"
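    # Closed-book answer: the model responds from its training data, without retrieval.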
    response = co.generate(
        model=model,
        prompt=prompt,
        max_tokens=50,
        temperature=0.8,
        k=0,
        p=0.7,
        stop_sequences=["--"],
    )
return response.generations[0].text
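

# A minimal usage sketch, not part of the original module: it assumes a valid
# Cohere API key in the COHERE_API_KEY environment variable, and the history
# and question strings below are purely hypothetical examples.
if __name__ == "__main__":
    co = cohere.Client(os.environ["COHERE_API_KEY"])
    chat_history = "User: What does Cohere offer?\nBot: Large language model APIs."
    # Rewrite the follow-up question so it is self-contained for search.
    query = get_contextual_search_query(
        chat_history + "\nUser: Which models?", co, verbosity=1
    )
    # Produce a closed-book sample answer for the rewritten query.
    print(get_sample_answer(query, co))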