
Commit 0da5fff

limit history to ~3000 tokens
.. and some small fixes for the last update
1 parent 51dc4e7 commit 0da5fff

File tree: 4 files changed (+41 / -7 lines)


history.py

Lines changed: 33 additions & 1 deletion
@@ -1,3 +1,11 @@
+import tiktoken
+
+def num_tokens_from_string(string: str) -> int:
+    """Returns the number of tokens in a text string."""
+    encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
+    return len(encoding.encode(string))
+
+
 class ResultHistory:
     def __init__(self):
         self.data = []
@@ -8,6 +16,30 @@ def append(self, cmd, result):
             "result": result
         })
 
-    def dump(self):
+    def get_full_history(self):
         return self.data
 
+    # only retrieve recent parts. We need this as prompts only allow
+    # for a maximum token length. We currently do this in a quite stupid
+    # way which could be optimized in the future
+    def get_history(self, limit=3072):
+        result = []
+        rest = limit
+
+        for itm in reversed(self.data):
+            size_cmd = num_tokens_from_string(itm["cmd"])
+            size_result = num_tokens_from_string(itm["result"])
+            size = size_cmd + size_result
+
+            if size <= rest:
+                result.append(itm)
+                rest -= size
+            else:
+                # if there's a bit of space left, fill it up with part of the last item
+                if (rest - size_cmd) >= 200:
+                    result.append({
+                        "cmd": itm["cmd"],
+                        "result": itm["result"][:(rest - size_cmd - 2)] + ".."
+                    })
+                return list(reversed(result))
+        return list(reversed(result))
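
For context, a minimal sketch of how the new token-limited history could be used (the commands, results and the small limit below are made up for illustration; get_history() keeps the most recent entries that still fit into the token budget and returns them oldest-first):

    from history import ResultHistory

    # record a few command/result pairs, as the agent loop would
    cmd_history = ResultHistory()
    cmd_history.append("whoami", "lowpriv")
    cmd_history.append("uname -a", "Linux target 5.15.0-69-generic x86_64 GNU/Linux")
    cmd_history.append("sudo -l", "User lowpriv is not allowed to run sudo on target.")

    # get_full_history() still returns everything
    print(cmd_history.get_full_history())

    # get_history() walks the entries from newest to oldest and stops
    # once the token budget is exhausted
    print(cmd_history.get_history(limit=64))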

prompt_helper.py

Lines changed: 5 additions & 5 deletions
@@ -7,20 +7,20 @@
 from llms.openai import get_openai_response
 
 log = logging.getLogger()
-filename = datetime.now().strftime('logs/run_%H_%M_%d_%m_%Y.log')
+filename = datetime.now().strftime('logs/run_%Y%m%d%m-%H%M.log')
 log.addHandler(logging.FileHandler(filename))
 
-def output_log(self, kind, msg):
+def output_log(kind, msg):
     print("[" + Fore.RED + kind + Style.RESET_ALL +"]: " + msg)
-    self.log.warning("[" + kind + "] " + msg)
+    log.warning("[" + kind + "] " + msg)
 
 # helper for generating and executing LLM prompts from a template
 def create_and_ask_prompt(template_file, log_prefix, **params):
     global logs
 
     template = Template(filename='templates/' + template_file)
     prompt = template.render(**params)
-    logs.warning(log_prefix + "-prompt", prompt)
+    output_log(log_prefix + "-prompt", prompt)
     result = get_openai_response(prompt)
-    logs.warning(log_prefix + "-answer", result)
+    output_log(log_prefix + "-answer", result)
     return result
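
As a quick illustration, the fixed helper is now a plain module-level function (the kind and message values below are invented):

    # prints "[next-cmd-prompt]: ..." with a colored tag and also writes it to the run log file
    output_log("next-cmd-prompt", "which linux command should be tried next?")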

requirements.txt

Lines changed: 2 additions & 0 deletions
@@ -20,7 +20,9 @@ paramiko==3.1.0
 pycparser==2.21
 PyNaCl==1.5.0
 python-dotenv==1.0.0
+regex==2023.3.23
 requests==2.28.2
+tiktoken==0.3.3
 tqdm==4.65.0
 urllib3==1.26.15
 yarl==1.9.2

wintermute.py

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
 
 while True:
 
-    next_cmd = create_and_ask_prompt('gpt_query.txt', "next-cmd", user=initial_user, history=cmd_history.dump())
+    next_cmd = create_and_ask_prompt('gpt_query.txt', "next-cmd", user=initial_user, history=cmd_history.get_history())
 
     # disable this for now, it's tragic because the AI won't tell me why it had chosen something
     # create_and_ask_prompt("why.txt", "why", user=initial_user, history=cmd_history.dump(), next_cmd=next_cmd)
