From 7cb2bdd862825b952eccd6108e55172cc3a187e7 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 18 Jun 2024 11:11:45 -0700 Subject: [PATCH 01/22] chore: print runtimemanager progress to callProgress --- go.mod | 2 +- go.sum | 4 +-- pkg/engine/engine.go | 1 + pkg/monitor/display.go | 30 +++++++---------- pkg/mvl/log.go | 20 ++++++++++++ pkg/openai/client.go | 2 ++ pkg/repos/get.go | 46 ++++++++++++++++++++++---- pkg/repos/git/git.go | 6 ++-- pkg/repos/runtimes/golang/golang.go | 6 ++-- pkg/repos/runtimes/node/node.go | 4 +-- pkg/repos/runtimes/python/python.go | 6 ++-- pkg/runner/runner.go | 9 ++++-- pkg/runner/runtimemanager.go | 50 +++++++++++++++++++++++++++++ 13 files changed, 146 insertions(+), 40 deletions(-) create mode 100644 pkg/runner/runtimemanager.go diff --git a/go.mod b/go.mod index f3b7eb22..c57862eb 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 - github.com/gptscript-ai/tui v0.0.0-20240614062633-985091711b0a + github.com/gptscript-ai/tui v0.0.0-20240618175050-a1d627a00cff github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 diff --git a/go.sum b/go.sum index bbaa5899..ab056ef8 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf037 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/go-gptscript v0.0.0-20240613214812-8111c2b02d71 h1:WehkkausLuXI91ePpIVrzZ6eBmfFIU/HfNsSA1CHiwo= github.com/gptscript-ai/go-gptscript v0.0.0-20240613214812-8111c2b02d71/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240614062633-985091711b0a h1:LFsEDiIAx0Rq0V6aOMlRjXMMIfkA3uEhqqqjoggLlDQ= -github.com/gptscript-ai/tui v0.0.0-20240614062633-985091711b0a/go.mod h1:ZlyM+BRiD6mV04w+Xw2mXP1VKGEUbn8BvwrosWlplUo= +github.com/gptscript-ai/tui v0.0.0-20240618175050-a1d627a00cff h1:mjcUKZ4hHVpT8EkyIsxGpU608BTNko0EovpsY0fGfvU= +github.com/gptscript-ai/tui v0.0.0-20240618175050-a1d627a00cff/go.mod h1:ZlyM+BRiD6mV04w+Xw2mXP1VKGEUbn8BvwrosWlplUo= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index c94b236a..2a4297d7 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -21,6 +21,7 @@ type Model interface { type RuntimeManager interface { GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) + EnsureCredentialHelpers(ctx context.Context) error SetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig, env []string) error } diff --git a/pkg/monitor/display.go b/pkg/monitor/display.go index 36c7a35d..167498c7 100644 --- a/pkg/monitor/display.go +++ b/pkg/monitor/display.go @@ -20,25 +20,22 @@ import ( ) type Options struct { - DisplayProgress bool `usage:"-"` - DumpState string `usage:"Dump the internal execution state to a file"` - DebugMessages bool `usage:"Enable logging of chat completion calls"` + DumpState string `usage:"Dump the internal execution state to a file"` + 
DebugMessages bool `usage:"Enable logging of chat completion calls"` } func Complete(opts ...Options) (result Options) { for _, opt := range opts { result.DumpState = types.FirstSet(opt.DumpState, result.DumpState) - result.DisplayProgress = types.FirstSet(opt.DisplayProgress, result.DisplayProgress) result.DebugMessages = types.FirstSet(opt.DebugMessages, result.DebugMessages) } return } type Console struct { - dumpState string - displayProgress bool - printMessages bool - callLock sync.Mutex + dumpState string + printMessages bool + callLock sync.Mutex } var ( @@ -47,7 +44,7 @@ var ( func (c *Console) Start(_ context.Context, prg *types.Program, _ []string, input string) (runner.Monitor, error) { id := counter.Next() - mon := newDisplay(c.dumpState, c.displayProgress, c.printMessages) + mon := newDisplay(c.dumpState, c.printMessages) mon.callLock = &c.callLock mon.dump.ID = fmt.Sprint(id) mon.dump.Program = prg @@ -315,23 +312,20 @@ func (d *display) Stop(output string, err error) { func NewConsole(opts ...Options) *Console { opt := Complete(opts...) return &Console{ - dumpState: opt.DumpState, - displayProgress: opt.DisplayProgress, - printMessages: opt.DebugMessages, + dumpState: opt.DumpState, + printMessages: opt.DebugMessages, } } -func newDisplay(dumpState string, progress, printMessages bool) *display { +func newDisplay(dumpState string, printMessages bool) *display { display := &display{ dumpState: dumpState, callIDMap: make(map[string]string), printMessages: printMessages, } - if progress { - display.livePrinter = &livePrinter{ - lastContent: map[string]string{}, - callIDMap: display.callIDMap, - } + display.livePrinter = &livePrinter{ + lastContent: map[string]string{}, + callIDMap: display.callIDMap, } return display } diff --git a/pkg/mvl/log.go b/pkg/mvl/log.go index 6aa7c491..015fa199 100644 --- a/pkg/mvl/log.go +++ b/pkg/mvl/log.go @@ -1,6 +1,7 @@ package mvl import ( + "context" "encoding/json" "fmt" "io" @@ -156,6 +157,25 @@ func (l *Logger) Fields(kv ...any) *Logger { } } +type InfoLogger interface { + Infof(msg string, args ...any) +} + +type infoKey struct{} + +func WithInfo(ctx context.Context, logger InfoLogger) context.Context { + return context.WithValue(ctx, infoKey{}, logger) +} + +func (l *Logger) InfofCtx(ctx context.Context, msg string, args ...any) { + il, ok := ctx.Value(infoKey{}).(InfoLogger) + if ok { + il.Infof(msg, args...) + return + } + l.log.WithFields(l.fields).Infof(msg, args...) +} + func (l *Logger) Infof(msg string, args ...any) { l.log.WithFields(l.fields).Infof(msg, args...) 
} diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 27a6317c..8ed4014b 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -437,6 +437,8 @@ func appendMessage(msg types.CompletionMessage, response openai.ChatCompletionSt if tc.ToolCall.Function.Name != tool.Function.Name { tc.ToolCall.Function.Name += tool.Function.Name } + // OpenAI like to sometimes add this prefix for no good reason + tc.ToolCall.Function.Name = strings.TrimPrefix(tc.ToolCall.Function.Name, "namespace.") tc.ToolCall.Function.Arguments += tool.Function.Arguments msg.Content[idx] = tc diff --git a/pkg/repos/get.go b/pkg/repos/get.go index 291ec038..68b2e427 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "strings" + "sync" "github.com/BurntSushi/locker" "github.com/gptscript-ai/gptscript/pkg/config" @@ -43,11 +44,19 @@ func (n noopRuntime) Setup(_ context.Context, _, _ string, _ []string) ([]string } type Manager struct { - storageDir string - gitDir string - runtimeDir string - credHelperDirs credentials.CredentialHelperDirs - runtimes []Runtime + storageDir string + gitDir string + runtimeDir string + credHelperDirs credentials.CredentialHelperDirs + runtimes []Runtime + credHelperConfig *credHelperConfig +} + +type credHelperConfig struct { + lock sync.Mutex + initialized bool + cliCfg *config.CLIConfig + env []string } func New(cacheDir string, runtimes ...Runtime) *Manager { @@ -61,7 +70,32 @@ func New(cacheDir string, runtimes ...Runtime) *Manager { } } -func (m *Manager) SetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig, env []string) error { +func (m *Manager) EnsureCredentialHelpers(ctx context.Context) error { + if m.credHelperConfig == nil { + return nil + } + m.credHelperConfig.lock.Lock() + defer m.credHelperConfig.lock.Unlock() + + if !m.credHelperConfig.initialized { + if err := m.deferredSetUpCredentialHelpers(ctx, m.credHelperConfig.cliCfg, m.credHelperConfig.env); err != nil { + return err + } + m.credHelperConfig.initialized = true + } + + return nil +} + +func (m *Manager) SetUpCredentialHelpers(_ context.Context, cliCfg *config.CLIConfig, env []string) error { + m.credHelperConfig = &credHelperConfig{ + cliCfg: cliCfg, + env: env, + } + return nil +} + +func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig, env []string) error { var ( helperName = cliCfg.CredentialsStore suffix string diff --git a/pkg/repos/git/git.go b/pkg/repos/git/git.go index bf0f1341..978f3a6d 100644 --- a/pkg/repos/git/git.go +++ b/pkg/repos/git/git.go @@ -33,7 +33,7 @@ func Checkout(ctx context.Context, base, repo, commit, toDir string) error { return err } - log.Infof("Checking out %s to %s", commit, toDir) + log.InfofCtx(ctx, "Checking out %s to %s", commit, toDir) return gitWorktreeAdd(ctx, gitDir(base, repo), toDir, commit) } @@ -46,11 +46,11 @@ func Fetch(ctx context.Context, base, repo, commit string) error { if found, err := exists(gitDir); err != nil { return err } else if !found { - log.Infof("Cloning %s", repo) + log.InfofCtx(ctx, "Cloning %s", repo) if err := cloneBare(ctx, repo, gitDir); err != nil { return err } } - log.Infof("Fetching %s at %s", commit, repo) + log.InfofCtx(ctx, "Fetching %s at %s", commit, repo) return fetchCommit(ctx, gitDir, commit) } diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index acf66ab1..28300439 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -68,7 +68,7 @@ func (r 
*Runtime) BuildCredentialHelper(ctx context.Context, helperName string, } newEnv := runtimeEnv.AppendPath(env, binPath) - log.Infof("Building credential helper %s", helperName) + log.InfofCtx(ctx, "Building credential helper %s", helperName) cmd := debugcmd.New(ctx, filepath.Join(binPath, "go"), "build", "-buildvcs=false", "-o", filepath.Join(credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix), @@ -103,7 +103,7 @@ func stripGo(env []string) (result []string) { } func (r *Runtime) runBuild(ctx context.Context, toolSource, binDir string, env []string) error { - log.Infof("Running go build in %s", toolSource) + log.InfofCtx(ctx, "Running go build in %s", toolSource) cmd := debugcmd.New(ctx, filepath.Join(binDir, "go"), "build", "-buildvcs=false", "-o", artifactName()) cmd.Env = stripGo(env) cmd.Dir = toolSource @@ -134,7 +134,7 @@ func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { return "", err } - log.Infof("Downloading Go %s", r.Version) + log.InfofCtx(ctx, "Downloading Go %s", r.Version) tmp := target + ".download" defer os.RemoveAll(tmp) diff --git a/pkg/repos/runtimes/node/node.go b/pkg/repos/runtimes/node/node.go index 9280d3b0..575e3b23 100644 --- a/pkg/repos/runtimes/node/node.go +++ b/pkg/repos/runtimes/node/node.go @@ -101,7 +101,7 @@ func (r *Runtime) getReleaseAndDigest() (string, string, error) { } func (r *Runtime) runNPM(ctx context.Context, toolSource, binDir string, env []string) error { - log.Infof("Running npm in %s", toolSource) + log.InfofCtx(ctx, "Running npm in %s", toolSource) cmd := debugcmd.New(ctx, filepath.Join(binDir, "npm"), "install") cmd.Env = env cmd.Dir = toolSource @@ -141,7 +141,7 @@ func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { return "", err } - log.Infof("Downloading Node %s.x", r.Version) + log.InfofCtx(ctx, "Downloading Node %s.x", r.Version) tmp := target + ".download" defer os.RemoveAll(tmp) diff --git a/pkg/repos/runtimes/python/python.go b/pkg/repos/runtimes/python/python.go index 72de5457..a5268f31 100644 --- a/pkg/repos/runtimes/python/python.go +++ b/pkg/repos/runtimes/python/python.go @@ -76,7 +76,7 @@ func uvBin(binDir string) string { } func (r *Runtime) installVenv(ctx context.Context, binDir, venvPath string) error { - log.Infof("Creating virtualenv in %s", venvPath) + log.InfofCtx(ctx, "Creating virtualenv in %s", venvPath) cmd := debugcmd.New(ctx, uvBin(binDir), "venv", "-p", pythonCmd(binDir), venvPath) return cmd.Run() } @@ -171,7 +171,7 @@ func (r *Runtime) getReleaseAndDigest() (string, string, error) { } func (r *Runtime) runPip(ctx context.Context, toolSource, binDir string, env []string) error { - log.Infof("Running pip in %s", toolSource) + log.InfofCtx(ctx, "Running pip in %s", toolSource) for _, req := range []string{"requirements-gptscript.txt", "requirements.txt"} { reqFile := filepath.Join(toolSource, req) if s, err := os.Stat(reqFile); err == nil && !s.IsDir() { @@ -203,7 +203,7 @@ func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { return "", err } - log.Infof("Downloading Python %s.x", r.Version) + log.InfofCtx(ctx, "Downloading Python %s.x", r.Version) tmp := target + ".download" defer os.RemoveAll(tmp) diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 06ccf941..92dc36e3 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -421,7 +421,7 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en e := engine.Engine{ Model: r.c, - RuntimeManager: r.runtimeManager, + 
RuntimeManager: runtimeWithLogger(callCtx, monitor, r.runtimeManager), Progress: progress, Env: env, } @@ -602,7 +602,7 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s e := engine.Engine{ Model: r.c, - RuntimeManager: r.runtimeManager, + RuntimeManager: runtimeWithLogger(callCtx, monitor, r.runtimeManager), Progress: progress, Env: env, } @@ -836,6 +836,11 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env exists bool ) + rm := runtimeWithLogger(callCtx, monitor, r.runtimeManager) + if err := rm.EnsureCredentialHelpers(callCtx.Ctx); err != nil { + return nil, fmt.Errorf("failed to setup credential helpers: %w", err) + } + // Only try to look up the cred if the tool is on GitHub or has an alias. // If it is a GitHub tool and has an alias, the alias overrides the tool name, so we use it as the credential name. if isGitHubTool(toolName) && credentialAlias == "" { diff --git a/pkg/runner/runtimemanager.go b/pkg/runner/runtimemanager.go new file mode 100644 index 00000000..e1c5a4c6 --- /dev/null +++ b/pkg/runner/runtimemanager.go @@ -0,0 +1,50 @@ +package runner + +import ( + "context" + "fmt" + "time" + + "github.com/gptscript-ai/gptscript/pkg/config" + "github.com/gptscript-ai/gptscript/pkg/engine" + "github.com/gptscript-ai/gptscript/pkg/mvl" + "github.com/gptscript-ai/gptscript/pkg/types" +) + +func runtimeWithLogger(callCtx engine.Context, monitor Monitor, rm engine.RuntimeManager) engine.RuntimeManager { + if rm == nil { + return nil + } + return runtimeManagerLogger{ + callCtx: callCtx, + monitor: monitor, + rm: rm, + } +} + +type runtimeManagerLogger struct { + callCtx engine.Context + monitor Monitor + rm engine.RuntimeManager +} + +func (r runtimeManagerLogger) Infof(msg string, args ...any) { + r.monitor.Event(Event{ + Time: time.Now(), + Type: EventTypeCallProgress, + CallContext: r.callCtx.GetCallContext(), + Content: fmt.Sprintf(msg, args...), + }) +} + +func (r runtimeManagerLogger) GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) { + return r.rm.GetContext(mvl.WithInfo(ctx, r), tool, cmd, env) +} + +func (r runtimeManagerLogger) EnsureCredentialHelpers(ctx context.Context) error { + return r.rm.EnsureCredentialHelpers(mvl.WithInfo(ctx, r)) +} + +func (r runtimeManagerLogger) SetUpCredentialHelpers(_ context.Context, _ *config.CLIConfig, _ []string) error { + panic("not implemented") +} From e83a2155ae6b1eb891d2d00fedee0abe507878e1 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 18 Jun 2024 16:10:41 -0700 Subject: [PATCH 02/22] chore: allow nested gptscript to run sys.chat.finish --- go.mod | 2 +- go.sum | 4 +- pkg/builtin/builtin.go | 12 +---- pkg/engine/cmd.go | 27 ++++++++-- pkg/engine/engine.go | 15 ++++++ pkg/input/input.go | 10 ++++ pkg/runner/runner.go | 12 ++++- pkg/system/prompt.go | 17 +++++-- pkg/tests/runner_test.go | 20 +++++++- pkg/tests/testdata/TestAgents/call1.golden | 20 +++++++- pkg/tests/testdata/TestAgents/call2.golden | 10 +++- pkg/tests/testdata/TestAgents/call3.golden | 20 +++++++- pkg/tests/testdata/TestAgents/step1.golden | 50 +++++++++++++++++-- pkg/tests/testdata/TestAsterick/call1.golden | 14 ++---- .../testdata/TestContextSubChat/call1.golden | 10 +++- .../testdata/TestContextSubChat/call4.golden | 10 +++- .../testdata/TestContextSubChat/call6.golden | 10 +++- .../testdata/TestContextSubChat/call9.golden | 10 +++- .../testdata/TestContextSubChat/step1.golden | 10 +++- .../testdata/TestContextSubChat/step3.golden | 10 +++- 
.../testdata/TestDualSubChat/call1.golden | 20 +++++++- .../testdata/TestDualSubChat/call7.golden | 20 +++++++- .../testdata/TestDualSubChat/step1.golden | 20 +++++++- .../testdata/TestDualSubChat/step2.golden | 20 +++++++- .../testdata/TestDualSubChat/step3.golden | 20 +++++++- pkg/tests/testdata/TestExport/call1.golden | 21 +++----- pkg/tests/testdata/TestExport/call3.golden | 21 +++----- .../testdata/TestExportContext/call1.golden | 7 +-- pkg/tests/testdata/TestSubChat/call1.golden | 10 +++- pkg/tests/testdata/TestToolAs/call1.golden | 14 ++---- pkg/types/tool.go | 2 + 31 files changed, 364 insertions(+), 104 deletions(-) diff --git a/go.mod b/go.mod index c57862eb..f903e27e 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 - github.com/gptscript-ai/tui v0.0.0-20240618175050-a1d627a00cff + github.com/gptscript-ai/tui v0.0.0-20240618230843-2b5961f3341b github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 diff --git a/go.sum b/go.sum index ab056ef8..1ab5607d 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf037 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/go-gptscript v0.0.0-20240613214812-8111c2b02d71 h1:WehkkausLuXI91ePpIVrzZ6eBmfFIU/HfNsSA1CHiwo= github.com/gptscript-ai/go-gptscript v0.0.0-20240613214812-8111c2b02d71/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240618175050-a1d627a00cff h1:mjcUKZ4hHVpT8EkyIsxGpU608BTNko0EovpsY0fGfvU= -github.com/gptscript-ai/tui v0.0.0-20240618175050-a1d627a00cff/go.mod h1:ZlyM+BRiD6mV04w+Xw2mXP1VKGEUbn8BvwrosWlplUo= +github.com/gptscript-ai/tui v0.0.0-20240618230843-2b5961f3341b h1:OJfmpDQ/6ffz5P4UdJJEd5xeqo2dfWnsg1YZLDqJWYo= +github.com/gptscript-ai/tui v0.0.0-20240618230843-2b5961f3341b/go.mod h1:ZlyM+BRiD6mV04w+Xw2mXP1VKGEUbn8BvwrosWlplUo= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index a4aeb74e..989f523c 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -593,14 +593,6 @@ func SysGetenv(_ context.Context, env []string, input string) (string, error) { return value, nil } -type ErrChatFinish struct { - Message string -} - -func (e *ErrChatFinish) Error() string { - return fmt.Sprintf("CHAT FINISH: %s", e.Message) -} - func invalidArgument(input string, err error) string { return fmt.Sprintf("Failed to parse arguments %s: %v", input, err) } @@ -640,11 +632,11 @@ func SysChatFinish(_ context.Context, _ []string, input string) (string, error) Message string `json:"return,omitempty"` } if err := json.Unmarshal([]byte(input), ¶ms); err != nil { - return "", &ErrChatFinish{ + return "", &engine.ErrChatFinish{ Message: input, } } - return "", &ErrChatFinish{ + return "", &engine.ErrChatFinish{ Message: params.Message, } } diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 48b2d2b8..cb88c1d6 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -25,6 
+25,24 @@ var requiredFileExtensions = map[string]string{ "powershell": "*.ps1", } +type outputWriter struct { + id string + progress chan<- types.CompletionStatus + buf bytes.Buffer +} + +func (o *outputWriter) Write(p []byte) (n int, err error) { + o.buf.Write(p) + o.progress <- types.CompletionStatus{ + CompletionID: o.id, + PartialResponse: &types.CompletionMessage{ + Role: types.CompletionMessageRoleTypeAssistant, + Content: types.Text(o.buf.String()), + }, + } + return len(p), nil +} + func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCategory ToolCategory) (cmdOut string, cmdErr error) { id := counter.Next() @@ -74,7 +92,10 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate output := &bytes.Buffer{} all := &bytes.Buffer{} cmd.Stderr = io.MultiWriter(all, os.Stderr) - cmd.Stdout = io.MultiWriter(all, output) + cmd.Stdout = io.MultiWriter(all, output, &outputWriter{ + id: id, + progress: e.Progress, + }) if err := cmd.Run(); err != nil { if toolCategory == NoCategory { @@ -85,7 +106,7 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate return "", fmt.Errorf("ERROR: %s: %w", all, err) } - return output.String(), nil + return output.String(), IsChatFinishMessage(output.String()) } func (e *Engine) getRuntimeEnv(ctx context.Context, tool types.Tool, cmd, env []string) ([]string, error) { @@ -161,7 +182,7 @@ func appendInputAsEnv(env []string, input string) []string { } } - env = appendEnv(env, "GPTSCRIPT_INPUT", input) + env = appendEnv(env, "GPTSCRIPT_INPUT_CONTENT", input) return env } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 2a4297d7..2dd17b1f 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -106,6 +106,21 @@ type InputContext struct { Content string `json:"content,omitempty"` } +type ErrChatFinish struct { + Message string +} + +func (e *ErrChatFinish) Error() string { + return fmt.Sprintf("CHAT FINISH: %s", e.Message) +} + +func IsChatFinishMessage(msg string) error { + if msg, ok := strings.CutPrefix(msg, "CHAT FINISH: "); ok { + return &ErrChatFinish{Message: msg} + } + return nil +} + func (c *Context) ParentID() string { if c.Parent == nil { return "" diff --git a/pkg/input/input.go b/pkg/input/input.go index 80bb49ee..0037fa5e 100644 --- a/pkg/input/input.go +++ b/pkg/input/input.go @@ -4,9 +4,11 @@ import ( "fmt" "io" "os" + "path/filepath" "strings" "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/types" ) func FromArgs(args []string) string { @@ -31,6 +33,14 @@ func FromFile(file string) (string, error) { } return string(data), nil } else if file != "" { + if s, err := os.Stat(file); err == nil && s.IsDir() { + for _, ext := range types.DefaultFiles { + if _, err := os.Stat(filepath.Join(file, ext)); err == nil { + file = filepath.Join(file, ext) + break + } + } + } log.Debugf("reading file %s", file) data, err := os.ReadFile(file) if err != nil { diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 92dc36e3..fbb4622c 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -131,6 +131,16 @@ type ChatState interface{} func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Program, env []string, input string) (resp ChatResponse, err error) { var state *State + defer func() { + if finish := (*engine.ErrChatFinish)(nil); errors.As(err, &finish) { + resp = ChatResponse{ + Done: true, + Content: err.Error(), + } + err = nil + } + }() + if prevState != nil { switch v := 
prevState.(type) { case *State: @@ -568,7 +578,7 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s ) state, callResults, err = r.subCalls(callCtx, monitor, env, state, callCtx.ToolCategory) - if errMessage := (*builtin.ErrChatFinish)(nil); errors.As(err, &errMessage) && callCtx.Tool.Chat { + if errMessage := (*engine.ErrChatFinish)(nil); errors.As(err, &errMessage) && callCtx.Tool.Chat { return &State{ Result: &errMessage.Message, }, nil diff --git a/pkg/system/prompt.go b/pkg/system/prompt.go index 72858c93..98b2535a 100644 --- a/pkg/system/prompt.go +++ b/pkg/system/prompt.go @@ -24,19 +24,30 @@ You don't move to the next step until you have a result. // DefaultPromptParameter is used as the key in a json map to indication that we really wanted // to just send pure text but the interface required JSON (as that is the fundamental interface of tools in OpenAI) -var DefaultPromptParameter = "defaultPromptParameter" +var DefaultPromptParameter = "prompt" var DefaultToolSchema = openapi3.Schema{ Type: &openapi3.Types{"object"}, Properties: openapi3.Schemas{ DefaultPromptParameter: &openapi3.SchemaRef{ Value: &openapi3.Schema{ - Description: "Prompt to send to the tool or assistant. This may be instructions or question.", + Description: "Prompt to send to the tool. This may be an instruction or question.", + Type: &openapi3.Types{"string"}, + }, + }, + }, +} + +var DefaultChatSchema = openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{ + DefaultPromptParameter: &openapi3.SchemaRef{ + Value: &openapi3.Schema{ + Description: "Prompt to send to the assistant. This may be an instruction or question.", Type: &openapi3.Types{"string"}, }, }, }, - Required: []string{DefaultPromptParameter}, } func init() { diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 9470662c..fb698a0e 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -319,7 +319,15 @@ func TestSubChat(t *testing.T) { "function": { "toolID": "testdata/TestSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], @@ -435,7 +443,15 @@ func TestSubChat(t *testing.T) { "function": { "toolID": "testdata/TestSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestAgents/call1.golden b/pkg/tests/testdata/TestAgents/call1.golden index a4c9d565..c0465ac7 100644 --- a/pkg/tests/testdata/TestAgents/call1.golden +++ b/pkg/tests/testdata/TestAgents/call1.golden @@ -6,14 +6,30 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestAgents/call2.golden b/pkg/tests/testdata/TestAgents/call2.golden index b7f53512..a4b53537 100644 --- a/pkg/tests/testdata/TestAgents/call2.golden +++ b/pkg/tests/testdata/TestAgents/call2.golden @@ -6,7 +6,15 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestAgents/call3.golden b/pkg/tests/testdata/TestAgents/call3.golden index 5b6b3b87..4a001215 100644 --- a/pkg/tests/testdata/TestAgents/call3.golden +++ b/pkg/tests/testdata/TestAgents/call3.golden @@ -6,14 +6,30 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestAgents/test.gpt:agent3", "name": "agent3", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestAgents/step1.golden b/pkg/tests/testdata/TestAgents/step1.golden index 59f11e22..423069b5 100644 --- a/pkg/tests/testdata/TestAgents/step1.golden +++ b/pkg/tests/testdata/TestAgents/step1.golden @@ -14,14 +14,30 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], @@ -92,7 +108,15 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], @@ -154,14 +178,30 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestAgents/test.gpt:agent3", "name": "agent3", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestAsterick/call1.golden b/pkg/tests/testdata/TestAsterick/call1.golden index c530c3ba..d6d56df9 100644 --- a/pkg/tests/testdata/TestAsterick/call1.golden +++ b/pkg/tests/testdata/TestAsterick/call1.golden @@ -7,14 +7,11 @@ "name": "afoo", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -25,14 +22,11 @@ "name": "a", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/tests/testdata/TestContextSubChat/call1.golden b/pkg/tests/testdata/TestContextSubChat/call1.golden index f976d1c8..09da7a95 100644 --- a/pkg/tests/testdata/TestContextSubChat/call1.golden +++ b/pkg/tests/testdata/TestContextSubChat/call1.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/call4.golden b/pkg/tests/testdata/TestContextSubChat/call4.golden index 4fb1d2bb..abe24599 100644 --- a/pkg/tests/testdata/TestContextSubChat/call4.golden +++ b/pkg/tests/testdata/TestContextSubChat/call4.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/call6.golden b/pkg/tests/testdata/TestContextSubChat/call6.golden index f976d1c8..09da7a95 100644 --- a/pkg/tests/testdata/TestContextSubChat/call6.golden +++ b/pkg/tests/testdata/TestContextSubChat/call6.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/call9.golden b/pkg/tests/testdata/TestContextSubChat/call9.golden index 0cb1f56a..37f67e56 100644 --- a/pkg/tests/testdata/TestContextSubChat/call9.golden +++ b/pkg/tests/testdata/TestContextSubChat/call9.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/step1.golden b/pkg/tests/testdata/TestContextSubChat/step1.golden index 1a675b28..464efb36 100644 --- a/pkg/tests/testdata/TestContextSubChat/step1.golden +++ b/pkg/tests/testdata/TestContextSubChat/step1.golden @@ -13,7 +13,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/step3.golden b/pkg/tests/testdata/TestContextSubChat/step3.golden index d57035d5..3c365c54 100644 --- a/pkg/tests/testdata/TestContextSubChat/step3.golden +++ b/pkg/tests/testdata/TestContextSubChat/step3.golden @@ -54,7 +54,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/call1.golden b/pkg/tests/testdata/TestDualSubChat/call1.golden index a0c21ccc..267690fe 100644 --- a/pkg/tests/testdata/TestDualSubChat/call1.golden +++ b/pkg/tests/testdata/TestDualSubChat/call1.golden @@ -5,14 +5,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/call7.golden b/pkg/tests/testdata/TestDualSubChat/call7.golden index 60842c68..5f1d227a 100644 --- a/pkg/tests/testdata/TestDualSubChat/call7.golden +++ b/pkg/tests/testdata/TestDualSubChat/call7.golden @@ -5,14 +5,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/step1.golden b/pkg/tests/testdata/TestDualSubChat/step1.golden index 421bb069..46932939 100644 --- a/pkg/tests/testdata/TestDualSubChat/step1.golden +++ b/pkg/tests/testdata/TestDualSubChat/step1.golden @@ -13,14 +13,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/step2.golden b/pkg/tests/testdata/TestDualSubChat/step2.golden index 82f1a7cd..2e1511c7 100644 --- a/pkg/tests/testdata/TestDualSubChat/step2.golden +++ b/pkg/tests/testdata/TestDualSubChat/step2.golden @@ -13,14 +13,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/step3.golden b/pkg/tests/testdata/TestDualSubChat/step3.golden index ab2dc7c3..d21249c3 100644 --- a/pkg/tests/testdata/TestDualSubChat/step3.golden +++ b/pkg/tests/testdata/TestDualSubChat/step3.golden @@ -13,14 +13,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestExport/call1.golden b/pkg/tests/testdata/TestExport/call1.golden index bc35465f..8b360d8a 100644 --- a/pkg/tests/testdata/TestExport/call1.golden +++ b/pkg/tests/testdata/TestExport/call1.golden @@ -7,14 +7,11 @@ "name": "frommain", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -25,14 +22,11 @@ "name": "parentLocal", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -43,14 +37,11 @@ "name": "transient", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. 
This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/tests/testdata/TestExport/call3.golden b/pkg/tests/testdata/TestExport/call3.golden index 3b76c8af..a653821a 100644 --- a/pkg/tests/testdata/TestExport/call3.golden +++ b/pkg/tests/testdata/TestExport/call3.golden @@ -7,14 +7,11 @@ "name": "frommain", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -25,14 +22,11 @@ "name": "parentLocal", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -43,14 +37,11 @@ "name": "transient", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/tests/testdata/TestExportContext/call1.golden b/pkg/tests/testdata/TestExportContext/call1.golden index 5476a745..2e1d2421 100644 --- a/pkg/tests/testdata/TestExportContext/call1.golden +++ b/pkg/tests/testdata/TestExportContext/call1.golden @@ -7,14 +7,11 @@ "name": "subtool", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/tests/testdata/TestSubChat/call1.golden b/pkg/tests/testdata/TestSubChat/call1.golden index dc161663..53104a4e 100644 --- a/pkg/tests/testdata/TestSubChat/call1.golden +++ b/pkg/tests/testdata/TestSubChat/call1.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestToolAs/call1.golden b/pkg/tests/testdata/TestToolAs/call1.golden index e7ec18b5..519f07b8 100644 --- a/pkg/tests/testdata/TestToolAs/call1.golden +++ b/pkg/tests/testdata/TestToolAs/call1.golden @@ -7,14 +7,11 @@ "name": "local", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -25,14 +22,11 @@ "name": "remote", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. 
This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/types/tool.go b/pkg/types/tool.go index fe137e6a..6c016d82 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -601,6 +601,8 @@ func toolRefsToCompletionTools(completionTools []ToolReference, prg Program) (re args := subTool.Parameters.Arguments if args == nil && !subTool.IsCommand() && !subTool.Chat { args = &system.DefaultToolSchema + } else if args == nil && !subTool.IsCommand() { + args = &system.DefaultChatSchema } if subTool.Instructions == "" { From 837736aa6a9b5b954aa070004aec679481e0e143 Mon Sep 17 00:00:00 2001 From: Craig Jellick Date: Tue, 18 Jun 2024 17:34:22 -0700 Subject: [PATCH 03/22] chore: unpin ui version post-release Signed-off-by: Craig Jellick --- pkg/cli/gptscript.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index d17afe45..658682be 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -329,7 +329,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // If the user is trying to launch the chat-builder UI, then set up the tool and options here. if r.UI { - args = append([]string{env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui@v0.8.3")}, args...) + args = append([]string{env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui@v2")}, args...) // If args has more than one element, then the user has provided a file. if len(args) > 1 { From 85047e1160611ec522e76ab3aefb31f3ba094049 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 19 Jun 2024 08:45:50 -0700 Subject: [PATCH 04/22] bug: fix regression in changing GPTSCRIPT_INPUT env var --- pkg/cli/gptscript.go | 17 +++++++++-------- pkg/engine/cmd.go | 2 +- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 658682be..fbac48a0 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -45,14 +45,15 @@ type GPTScript struct { CacheOptions OpenAIOptions DisplayOptions - Color *bool `usage:"Use color in output (default true)" default:"true"` - Confirm bool `usage:"Prompt before running potentially dangerous commands"` - Debug bool `usage:"Enable debug logging"` - NoTrunc bool `usage:"Do not truncate long log messages"` - Quiet *bool `usage:"No output logging (set --quiet=false to force on even when there is no TTY)" short:"q"` - Output string `usage:"Save output to a file, or - for stdout" short:"o"` - EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\\\.\\pipe\\my-pipe)" name:"events-stream-to"` - Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f"` + Color *bool `usage:"Use color in output (default true)" default:"true"` + Confirm bool `usage:"Prompt before running potentially dangerous commands"` + Debug bool `usage:"Enable debug logging"` + NoTrunc bool `usage:"Do not truncate long log messages"` + Quiet *bool `usage:"No output logging (set --quiet=false to force on even when there is no TTY)" short:"q"` + Output string `usage:"Save output to a file, or - for stdout" short:"o"` + EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\\\.\\pipe\\my-pipe)" name:"events-stream-to"` + // Input should not be using GPTSCRIPT_INPUT env var because that is the same value that is set in tool executions + Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"` SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` ListModels bool `usage:"List the models available and exit" local:"true"` diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index cb88c1d6..4a697c69 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -182,7 +182,7 @@ func appendInputAsEnv(env []string, input string) []string { } } - env = appendEnv(env, "GPTSCRIPT_INPUT_CONTENT", input) + env = appendEnv(env, "GPTSCRIPT_INPUT", input) return env } From 641c682b7fd5c0c2ca476df0f146058d82c1c1c0 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Wed, 29 May 2024 18:46:25 -0400 Subject: [PATCH 05/22] test: add smoke test github workflow --- .github/workflows/smoke.yaml | 179 +++++++++++++++++++++++++++++++++++ Makefile | 5 +- 2 files changed, 183 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/smoke.yaml diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml new file mode 100644 index 00000000..447bffd1 --- /dev/null +++ b/.github/workflows/smoke.yaml @@ -0,0 +1,179 @@ +name: test + +on: + pull_request_target: + types: [opened, synchronize, reopened] + branches: + - main + push: + branches: + - main + paths-ignore: + - docs/** + workflow_dispatch: + +jobs: + check-label: + runs-on: ubuntu-22.04 + outputs: + run_smoke_tests: ${{ steps.check.outputs.run_smoke_tests }} + steps: + - name: Check if PR author is a member of the organization or has the run-smoke label + id: check + run: | + case "${{ github.event_name }}" in + push) + # Run smoke tests for push to base repo + echo "run_smoke_tests=true" >> $GITHUB_OUTPUT + exit 0 + ;; + workflow_dispatch) + # Run smoke tests for manual runs against base branch + echo "run_smoke_tests=true" >> $GITHUB_OUTPUT + exit 0 + ;; + pull_request_target) + ORG="gptscript-ai" + AUTHOR="${{ github.event.pull_request.user.login }}" + + # Check for org membership + MEMBERSHIP_RESPONSE_CODE=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/orgs/$ORG/members/$AUTHOR") + + if [ "$MEMBERSHIP_RESPONSE_CODE" -eq 204 ]; then + echo "run_smoke_tests=true" >> $GITHUB_OUTPUT + exit 0 + fi + + # Check for "run-smoke" label + LABELS=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/repos/${{ github.repository_owner }}/${{ github.event.repository.name }}/issues/${{ github.event.pull_request.number }}/labels" | jq -r '.[].name') + if echo "$LABELS" | grep -q "run-smoke"; then + # Run smoke tests for PR with the "run-smoke" label + echo "run_smoke_tests=true" >> $GITHUB_OUTPUT + exit 0 + fi + + ;; + esac + + echo "run_smoke_tests=false" >> $GITHUB_OUTPUT + + smoke-gpt-4o-2024-05-13: + needs: check-label + if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout base repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Checkout PR code if running for a PR + if: ${{ github.event_name == 'pull_request_target' }} + uses: actions/checkout@v4 + with: + fetch-depth: 1 + repository: ${{ 
github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.21" + - env: + OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: gpt-4o-2024-05-13 + name: Run smoke test for gpt-4o-2024-05-13 + run: | + echo "Running smoke test for model gpt-4o-2024-05-13" + export PATH="$(pwd)/bin:${PATH}" + make smoke + + smoke-gpt-4-turbo-2024-04-09: + needs: check-label + if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout base repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Checkout PR code if running for a PR + if: ${{ github.event_name == 'pull_request_target' }} + uses: actions/checkout@v4 + with: + fetch-depth: 1 + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.21" + - env: + OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: gpt-4-turbo-2024-04-09 + name: Run smoke test for gpt-4-turbo-2024-04-09 + run: | + echo "Running smoke test for model gpt-4-turbo-2024-04-09" + export PATH="$(pwd)/bin:${PATH}" + make smoke + + smoke-claude-3-opus-20240229: + needs: check-label + if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout base repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Checkout PR code if running for a PR + if: ${{ github.event_name == 'pull_request_target' }} + uses: actions/checkout@v4 + with: + fetch-depth: 1 + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.21" + - env: + OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta + ANTHROPIC_API_KEY: ${{ secrets.SMOKE_ANTHROPIC_API_KEY }} + name: Run smoke test for claude-3-opus-20240229 + run: | + echo "Running smoke test for model claude-3-opus-20240229" + export PATH="$(pwd)/bin:${PATH}" + make smoke + + smoke-mistral-large-2402: + needs: check-label + if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout base repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Checkout PR code if running for a PR + if: ${{ github.event_name == 'pull_request_target' }} + uses: actions/checkout@v4 + with: + fetch-depth: 1 + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.21" + - env: + OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: mistral-large-2402 from https://api.mistral.ai/v1 + GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY: ${{ secrets.SMOKE_GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY }} + name: Run smoke test for mistral-large-2402 + run: | + echo "Running smoke test for model mistral-large-2402" + export PATH="$(pwd)/bin:${PATH}" + make smoke + diff --git a/Makefile b/Makefile index 0186125d..8faafda5 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,9 @@ tidy: test: go test -v ./... +smoke: + go test -v -tags='smoke' ./pkg/tests/smoke/... + GOLANGCI_LINT_VERSION ?= v1.59.0 lint: if ! 
command -v golangci-lint &> /dev/null; then \ @@ -52,4 +55,4 @@ validate-docs: echo "Encountered dirty repo!"; \ git diff; \ exit 1 \ - ;fi \ No newline at end of file + ;fi From 825bf2843054202f9b410300a762c0d30ed45754 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Mon, 3 Jun 2024 00:45:44 -0400 Subject: [PATCH 06/22] test: add smoke test runner and initial test cases Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- Makefile | 1 + go.mod | 2 +- pkg/tests/judge/judge.go | 127 + pkg/tests/smoke/smoke_test.go | 179 + pkg/tests/smoke/testdata/.gitignore | 3 + .../Bob/claude-3-opus-20240229-expected.json | 1058 ++++++ .../Bob/gpt-4-turbo-2024-04-09-expected.json | 2905 +++++++++++++++++ .../Bob/gpt-4o-2024-05-13-expected.json | 2829 ++++++++++++++++ .../Bob/mistral-large-2402-expected.json | 2608 +++++++++++++++ pkg/tests/smoke/testdata/Bob/test.gpt | 10 + .../claude-3-opus-20240229-expected.json | 819 +++++ .../gpt-4-turbo-2024-04-09-expected.json | 1694 ++++++++++ .../gpt-4o-2024-05-13-expected.json | 1953 +++++++++++ .../mistral-large-2402-expected.json | 1692 ++++++++++ pkg/tests/smoke/testdata/BobAsShell/test.gpt | 13 + 15 files changed, 15892 insertions(+), 1 deletion(-) create mode 100644 pkg/tests/judge/judge.go create mode 100644 pkg/tests/smoke/smoke_test.go create mode 100644 pkg/tests/smoke/testdata/.gitignore create mode 100644 pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json create mode 100644 pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json create mode 100644 pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json create mode 100644 pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json create mode 100644 pkg/tests/smoke/testdata/Bob/test.gpt create mode 100644 pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json create mode 100644 pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json create mode 100644 pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json create mode 100644 pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json create mode 100644 pkg/tests/smoke/testdata/BobAsShell/test.gpt diff --git a/Makefile b/Makefile index 8faafda5..0fed6344 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,7 @@ tidy: test: go test -v ./... +smoke: build smoke: go test -v -tags='smoke' ./pkg/tests/smoke/... diff --git a/go.mod b/go.mod index f903e27e..4ab2b395 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( golang.org/x/sync v0.7.0 golang.org/x/term v0.20.0 gopkg.in/yaml.v3 v3.0.1 + gotest.tools/v3 v3.5.1 sigs.k8s.io/yaml v1.4.0 ) @@ -107,6 +108,5 @@ require ( golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect golang.org/x/tools v0.20.0 // indirect - gotest.tools/v3 v3.5.1 // indirect mvdan.cc/gofumpt v0.6.0 // indirect ) diff --git a/pkg/tests/judge/judge.go b/pkg/tests/judge/judge.go new file mode 100644 index 00000000..656d77a4 --- /dev/null +++ b/pkg/tests/judge/judge.go @@ -0,0 +1,127 @@ +package judge + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/getkin/kin-openapi/openapi3gen" + openai "github.com/gptscript-ai/chat-completion-client" +) + +const instructions = `When given JSON objects that conform to the following JSONSchema: + +%s + +Determine if "actual" is equal to "expected" based on the comparison constraints described by "criteria". 
+"actual" is considered equal to "expected" if and only if the all of the constraints described by "criteria" are satisfied. + +After making a determination, respond with a JSON object that conforms to the following JSONSchema: + +{ + "name": "ruling", + "type": "object", + "properties": { + "equal": { + "type": "boolean", + "description": "Set to true if and only if actual is considered equal to expected." + }, + "reasoning": { + "type": "string", + "description": "The reasoning used to come to the determination, that points out all instances where the given criteria was violated" + } + }, + "required": [ + "equal", + "reasoning" + ] +} + +Your responses are concise and include only the json object described above. +` + +type Judge[T any] struct { + client *openai.Client + instructions string +} + +type comparison[T any] struct { + Expected T `json:"expected"` + Actual T `json:"actual"` + Criteria string `json:"criteria"` +} + +type ruling struct { + Equal bool `json:"equal"` + Reasoning string `json:"reasoning"` +} + +func New[T any](client *openai.Client) (*Judge[T], error) { + schema, err := openapi3gen.NewSchemaRefForValue( + new(comparison[T]), + nil, + openapi3gen.CreateComponentSchemas( + openapi3gen.ExportComponentSchemasOptions{ + ExportComponentSchemas: true, + ExportGenerics: false, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed to generate JSONSchema for %T: %w", new(T), err) + } + + schemaJSON, err := json.MarshalIndent(schema, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal JSONSchema for %T: %w", new(T), err) + } + + return &Judge[T]{ + client: client, + instructions: fmt.Sprintf(instructions, schemaJSON), + }, nil +} + +func (j *Judge[T]) Equal(ctx context.Context, expected, actual T, criteria string) (equal bool, reasoning string, err error) { + comparisonJSON, err := json.MarshalIndent(&comparison[T]{ + Expected: expected, + Actual: actual, + Criteria: criteria, + }, "", " ") + if err != nil { + return false, "", fmt.Errorf("failed to marshal judge testcase JSON: %w", err) + } + + request := openai.ChatCompletionRequest{ + Model: openai.GPT4o, + Temperature: new(float32), + N: 1, + ResponseFormat: &openai.ChatCompletionResponseFormat{ + Type: openai.ChatCompletionResponseFormatTypeJSONObject, + }, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleSystem, + Content: j.instructions, + }, + { + Role: openai.ChatMessageRoleUser, + Content: string(comparisonJSON), + }, + }, + } + response, err := j.client.CreateChatCompletion(ctx, request) + if err != nil { + return false, "", fmt.Errorf("failed to make judge chat completion request: %w", err) + } + + if len(response.Choices) < 1 { + return false, "", fmt.Errorf("judge chat completion request returned no choices") + } + + var equality ruling + if err := json.Unmarshal([]byte(response.Choices[0].Message.Content), &equality); err != nil { + return false, "", fmt.Errorf("failed to unmarshal judge ruling: %w", err) + } + + return equality.Equal, equality.Reasoning, nil +} diff --git a/pkg/tests/smoke/smoke_test.go b/pkg/tests/smoke/smoke_test.go new file mode 100644 index 00000000..39ec1cf5 --- /dev/null +++ b/pkg/tests/smoke/smoke_test.go @@ -0,0 +1,179 @@ +//go:build smoke + +package smoke + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + openai "github.com/gptscript-ai/chat-completion-client" + "github.com/gptscript-ai/gptscript/pkg/runner" + "github.com/gptscript-ai/gptscript/pkg/tests/judge" + 
"github.com/gptscript-ai/gptscript/pkg/types" + "github.com/samber/lo" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gotest.tools/v3/icmd" +) + +const defaultModelEnvVar = "GPTSCRIPT_DEFAULT_MODEL" + +func TestSmoke(t *testing.T) { + client := openai.NewClient(os.Getenv("OPENAI_API_KEY")) + smokeJudge, err := judge.New[[]event](client) + require.NoError(t, err, "error initializing smoke test judge") + + for _, tc := range getTestcases(t) { + t.Run(tc.name, func(t *testing.T) { + cmd := icmd.Command( + "gptscript", + "--color=false", + "--disable-cache", + "--events-stream-to", + tc.actualEventsFile, + "--default-model", + tc.defaultModel, + tc.gptFile, + ) + + result := icmd.RunCmd(cmd) + defer func() { + t.Helper() + assert.NoError(t, os.Remove(tc.actualEventsFile)) + }() + + require.NoError(t, result.Error, "stderr: %q", result.Stderr()) + require.Zero(t, result.ExitCode) + + var ( + actualEvents = getActualEvents(t, tc.actualEventsFile) + expectedEvents = make([]event, 0) + ) + f, err := os.Open(tc.expectedEventsFile) + if os.IsNotExist(err) { + // No expected events found, store the results of the latest call as the golden file for future tests runs + f, err := os.Create(tc.expectedEventsFile) + require.NoError(t, err) + defer f.Close() + + encoder := json.NewEncoder(f) + encoder.SetIndent("", " ") + require.NoError(t, encoder.Encode(actualEvents)) + t.Skipf("Generated initial golden file %q, skipping test", tc.expectedEventsFile) + } else { + require.NoError(t, err) + defer f.Close() + + decoder := json.NewDecoder(f) + require.NoError(t, decoder.Decode(&expectedEvents)) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + equal, reasoning, err := smokeJudge.Equal( + ctx, + expectedEvents, + actualEvents, + `The field values of the elements of expected and actual must be roughly equivalent. 
+Ignore variations in timestamps, IDs, and verbiage when determining equivalence.`, + ) + require.NoError(t, err, "error getting judge ruling on output") + require.True(t, equal, reasoning) + t.Logf("reasoning: %q", reasoning) + }) + } +} + +type testcase struct { + name string + dir string + gptFile string + defaultModel string + modelName string + env []string + actualEventsFile string + expectedEventsFile string +} + +func getTestcases(t *testing.T) []testcase { + t.Helper() + + defaultModel := os.Getenv(defaultModelEnvVar) + modelName := strings.Split(defaultModel, " ")[0] + + var testcases []testcase + for _, d := range lo.Must(os.ReadDir("testdata")) { + if !d.IsDir() { + continue + } + var ( + dirName = d.Name() + dir = filepath.Join("testdata", dirName) + ) + + files, err := os.ReadDir(dir) + require.NoError(t, err, "failed to get testdata dir %q", dir) + + for _, f := range files { + if f.IsDir() || filepath.Ext(f.Name()) != ".gpt" { + continue + } + + testcases = append(testcases, testcase{ + name: dirName, + dir: dir, + gptFile: filepath.Join(dir, f.Name()), + defaultModel: defaultModel, + modelName: modelName, + expectedEventsFile: filepath.Join(dir, fmt.Sprintf("%s-expected.json", modelName)), + actualEventsFile: filepath.Join(dir, fmt.Sprintf("%s.json", modelName)), + }) + + // Only take the first .gpt file in each testcase directory + break + } + } + + return testcases +} + +type event struct { + runner.Event + ChatRequest *openai.ChatCompletionRequest `json:"chatRequest,omitempty"` + ChatResponse *types.CompletionMessage `json:"chatResponse,omitempty"` +} + +func getActualEvents(t *testing.T, eventsFile string) []event { + t.Helper() + + f, err := os.Open(eventsFile) + require.NoError(t, err) + defer f.Close() + + var ( + events []event + scanner = bufio.NewScanner(f) + ) + for scanner.Scan() { + line := scanner.Text() + // Skip blank lines + if strings.TrimSpace(line) == "" { + continue + } + + var e event + require.NoError(t, json.Unmarshal([]byte(line), &e)) + events = append(events, e) + } + + require.NoError(t, scanner.Err()) + + return events +} diff --git a/pkg/tests/smoke/testdata/.gitignore b/pkg/tests/smoke/testdata/.gitignore new file mode 100644 index 00000000..292de8e7 --- /dev/null +++ b/pkg/tests/smoke/testdata/.gitignore @@ -0,0 +1,3 @@ +# Ignore intermediate event stream JSON files +*-events.json + diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json new file mode 100644 index 00000000..fd9c27b4 --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json @@ -0,0 +1,1058 @@ +[ + { + "time": "2024-06-18T11:31:24.183335-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-18T11:31:24.183735-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": 
"2024-06-18T11:31:25.851994-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-18T11:31:25.852381-04:00", + "callContext": { + "id": "1718724686", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/claude3-anthropic-provider/credential" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + { + "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + }, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-18T11:31:25.852533-04:00", + "callContext": { + "id": "1718724687", + "tool": { + "name": "claude3-anthropic-credential", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "instructions": "#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/credential.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential", + "localTools": { + "claude3-anthropic-credential": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "credential", + "Name": "tool.gpt", + "Revision": "3b9b9365d469c4c702291f1764537ab5c226c2e0" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential" + }, + "inputContext": null, + "toolCategory": "credential", + "toolName": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "parentID": "1718724686", + "displayText": "Running claude3-anthropic-credential 
from github.com/gptscript-ai/claude3-anthropic-provider/credential" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-18T11:31:25.855621-04:00", + "callContext": { + "id": "1718724687", + "tool": { + "name": "claude3-anthropic-credential", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "instructions": "#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/credential.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential", + "localTools": { + "claude3-anthropic-credential": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "credential", + "Name": "tool.gpt", + "Revision": "3b9b9365d469c4c702291f1764537ab5c226c2e0" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential" + }, + "inputContext": null, + "toolCategory": "credential", + "toolName": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "parentID": "1718724686", + "displayText": "Running claude3-anthropic-credential from github.com/gptscript-ai/claude3-anthropic-provider/credential" + }, + "type": "callChat", + "chatCompletionId": "1718724688", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-06-18T11:31:25.948224-04:00", + "callContext": { + "id": "1718724687", + "tool": { + "name": "claude3-anthropic-credential", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "instructions": "#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/credential.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential", + "localTools": { + "claude3-anthropic-credential": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "credential", + "Name": "tool.gpt", + "Revision": "3b9b9365d469c4c702291f1764537ab5c226c2e0" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential" + }, + "inputContext": null, + "toolCategory": "credential", + "toolName": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "parentID": "1718724686", + "displayText": "Running claude3-anthropic-credential from github.com/gptscript-ai/claude3-anthropic-provider/credential" + }, + "type": "callChat", + "chatCompletionId": "1718724688", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + 
{ + "time": "2024-06-18T11:31:25.948393-04:00", + "callContext": { + "id": "1718724687", + "tool": { + "name": "claude3-anthropic-credential", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "instructions": "#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/credential.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential", + "localTools": { + "claude3-anthropic-credential": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "credential", + "Name": "tool.gpt", + "Revision": "3b9b9365d469c4c702291f1764537ab5c226c2e0" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential" + }, + "inputContext": null, + "toolCategory": "credential", + "toolName": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "parentID": "1718724686", + "displayText": "Running claude3-anthropic-credential from github.com/gptscript-ai/claude3-anthropic-provider/credential" + }, + "type": "callFinish", + "usage": {} + }, + { + "time": "2024-06-18T11:31:26.96565-04:00", + "callContext": { + "id": "1718724686", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/claude3-anthropic-provider/credential" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + { + "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + }, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + 
"type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:11059" + }, + { + "time": "2024-06-18T11:31:26.965764-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-06-18T11:31:26.965828-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718724689", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "Ask Bob how he is doing and let me know exactly what he said." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-18T11:31:26.966331-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718724689", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-18T11:31:35.503953-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718724689", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how he is doing\"}" + }, + { + "time": "2024-06-18T11:31:35.505033-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718724689", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how he is doing\"}" + } + } + } + ], + "usage": {} + } + }, + { + "time": "2024-06-18T11:31:35.505135-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolSubCalls": { + "toolu_01NJaY1g8kxqQkA4wbFTGtSX": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\": \"how he is doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-18T11:31:35.505164-04:00", + "callContext": { + "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + 
"bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724685" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"how he is doing\"}" + }, + { + "time": "2024-06-18T11:31:35.666461-04:00", + "callContext": { + "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724685" + }, + "type": "callChat", + "chatCompletionId": "1718724690", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + "content": "{\"question\": \"how he is doing\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-06-18T11:31:35.666692-04:00", + "callContext": { + "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724685" + }, + "type": "callProgress", + "chatCompletionId": "1718724690", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-18T11:31:38.650768-04:00", + "callContext": { + "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724685" + }, + "type": "callProgress", + "chatCompletionId": "1718724690", + "usage": {}, + "content": "Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-18T11:31:38.65205-04:00", + "callContext": { + "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724685" + }, + "type": "callChat", + "chatCompletionId": "1718724690", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!" + } + ], + "usage": {} + } + }, + { + "time": "2024-06-18T11:31:38.652106-04:00", + "callContext": { + "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724685" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-18T11:31:38.652143-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-18T11:31:38.847656-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718724691", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "Ask Bob how he is doing and let me know exactly what he said." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how he is doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-18T11:31:38.847937-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718724691", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-18T11:31:43.784059-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718724691", + "usage": {}, + "content": "Bob said exactly: \"Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-18T11:31:43.785727-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718724691", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob said exactly: \"Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": {} + } + }, + { + "time": "2024-06-18T11:31:43.785811-04:00", + "callContext": { + "id": "1718724685", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob said exactly: \"Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-18T11:31:43.785827-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json new file mode 100644 index 00000000..d3dd346b --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json @@ -0,0 +1,2905 @@ +[ + { + "time": "2024-06-06T20:22:29.810249-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-06T20:22:29.810505-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he 
said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-06T20:22:30.00145-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719751", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:22:30.004971-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-06T20:22:30.810075-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-06T20:22:30.810238-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-06T20:22:30.844325-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-06T20:22:30.844508-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-06T20:22:30.895166-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": 
"testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + }, + { + "time": "2024-06-06T20:22:30.895349-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + }, + { + "time": "2024-06-06T20:22:30.945392-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + }, + { + "time": "2024-06-06T20:22:30.945641-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + }, + { + "time": "2024-06-06T20:22:30.965686-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + 
"chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:30.965791-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:30.966161-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:30.966215-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719751", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:30.968104-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719751", + "usage": { + "promptTokens": 143, + "completionTokens": 18, + "totalTokens": 161 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "function": { + "name": 
"bob", + "arguments": "{\"question\":\"How are you doing?\"}" + } + } + } + ], + "usage": { + "promptTokens": 143, + "completionTokens": 18, + "totalTokens": 161 + } + } + }, + { + "time": "2024-06-06T20:22:30.968398-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolSubCalls": { + "call_IfovJOlHh0TrPkSiric0ZYDO": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\":\"How are you doing?\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-06T20:22:30.968468-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:31.152693-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callChat", + "chatCompletionId": "1717719752", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + 
"content": "{\"question\":\"How are you doing?\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-06-06T20:22:31.152842-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-06T20:22:31.639639-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for" + }, + { + "time": "2024-06-06T20:22:31.639789-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for" + }, + { + "time": "2024-06-06T20:22:31.639837-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": 
"testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-06T20:22:31.639868-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-06T20:22:31.935618-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How" + }, + { + "time": "2024-06-06T20:22:31.935952-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How" + }, + { + "time": "2024-06-06T20:22:31.94189-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": 
"gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing" + }, + { + "time": "2024-06-06T20:22:31.942075-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing" + }, + { + "time": "2024-06-06T20:22:31.942142-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing" + }, + { + "time": "2024-06-06T20:22:31.942199-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + 
"inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I" + }, + { + "time": "2024-06-06T20:22:31.942311-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm" + }, + { + "time": "2024-06-06T20:22:31.942374-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm" + }, + { + "time": "2024-06-06T20:22:31.942437-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing" + }, + { + "time": "2024-06-06T20:22:31.94249-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask 
Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow" + }, + { + "time": "2024-06-06T20:22:31.94254-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:22:31.942616-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:22:31.977094-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + 
"parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:22:31.977257-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:22:31.977342-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-06T20:22:31.977401-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-06T20:22:31.977493-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callProgress", + "chatCompletionId": "1717719752", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-06T20:22:31.977695-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callChat", + "chatCompletionId": "1717719752", + "usage": { + "promptTokens": 124, + "completionTokens": 18, + "totalTokens": 142 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + } + ], + "usage": { + "promptTokens": 124, + "completionTokens": 18, + "totalTokens": 142 + } + } + }, + { + "time": "2024-06-06T20:22:31.977777-04:00", + "callContext": { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719750" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-06T20:22:31.977832-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-06T20:22:32.175015-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719753", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"How are you doing?\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "call_IfovJOlHh0TrPkSiric0ZYDO" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:22:32.175386-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-06T20:22:32.857803-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-06T20:22:32.858022-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said" + }, + { + "time": "2024-06-06T20:22:32.858079-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said" + }, + { + "time": "2024-06-06T20:22:32.858182-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"" + }, + { + "time": "2024-06-06T20:22:32.858219-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": 
{ + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"" + }, + { + "time": "2024-06-06T20:22:32.858301-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks" + }, + { + "time": "2024-06-06T20:22:32.882645-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking" + }, + { + "time": "2024-06-06T20:22:32.882856-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking" + }, + { + "time": "2024-06-06T20:22:33.022865-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How" + }, + { + "time": "2024-06-06T20:22:33.023013-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask 
Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are" + }, + { + "time": "2024-06-06T20:22:33.023086-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are" + }, + { + "time": "2024-06-06T20:22:33.054293-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?'," + }, + { + "time": "2024-06-06T20:22:33.054407-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?'," + }, + { + "time": "2024-06-06T20:22:33.054428-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": 
"testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?'," + }, + { + "time": "2024-06-06T20:22:33.054454-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I" + }, + { + "time": "2024-06-06T20:22:33.054499-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing" + }, + { + "time": "2024-06-06T20:22:33.054584-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great" + }, + { + "time": "2024-06-06T20:22:33.0546-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great" + }, + { + "time": "2024-06-06T20:22:33.097034-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": 
"gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:22:33.097195-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:22:33.138931-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:33.139013-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:33.139056-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + 
"toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:33.139073-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:33.139194-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719753", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:33.141011-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719753", + "usage": { + "promptTokens": 186, + "completionTokens": 23, + "totalTokens": 209 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": { + "promptTokens": 186, + "completionTokens": 23, + "totalTokens": 209 + } + } + }, + { + "time": "2024-06-06T20:22:33.141051-04:00", + "callContext": { + "id": "1717719750", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he 
said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:33.141083-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json new file mode 100644 index 00000000..a3524b8c --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json @@ -0,0 +1,2829 @@ +[ + { + "time": "2024-06-06T20:21:47.098949-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-06T20:21:47.099193-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-06T20:21:47.306808-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719709", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:21:47.307426-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-06T20:21:47.801272-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-06T20:21:47.802783-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question" + }, + { + "time": "2024-06-06T20:21:47.802866-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-06T20:21:47.802893-04:00", + "callContext": { + "id": "1717719708", + "tool": { + 
"modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-06T20:21:47.802911-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How" + }, + { + "time": "2024-06-06T20:21:47.802929-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + }, + { + "time": "2024-06-06T20:21:47.809391-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + }, + { + "time": "2024-06-06T20:21:47.809508-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": 
"testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + }, + { + "time": "2024-06-06T20:21:47.821939-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:47.822028-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:47.862117-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:47.862256-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719709", + "usage": {}, + "content": 
"\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:47.864138-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719709", + "usage": { + "promptTokens": 139, + "completionTokens": 18, + "totalTokens": 157 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "function": { + "name": "bob", + "arguments": "{\"question\":\"How are you doing?\"}" + } + } + } + ], + "usage": { + "promptTokens": 139, + "completionTokens": 18, + "totalTokens": 157 + } + } + }, + { + "time": "2024-06-06T20:21:47.864426-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolSubCalls": { + "call_rziWxxpDmGb1tzIullRJT6zj": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\":\"How are you doing?\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-06T20:21:47.864526-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:48.045651-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": 
"testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callChat", + "chatCompletionId": "1717719710", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + "content": "{\"question\":\"How are you doing?\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-06-06T20:21:48.045992-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-06T20:21:48.378921-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-06T20:21:48.379124-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-06T20:21:48.380438-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-06T20:21:48.380536-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + 
"location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-06T20:21:48.486384-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How" + }, + { + "time": "2024-06-06T20:21:48.486644-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How" + }, + { + "time": "2024-06-06T20:21:48.500327-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you" + }, + { + "time": "2024-06-06T20:21:48.500442-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": 
"string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing" + }, + { + "time": "2024-06-06T20:21:48.500489-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing" + }, + { + "time": "2024-06-06T20:21:48.500526-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm" + }, + { + "time": "2024-06-06T20:21:48.500599-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks 
for asking \"How are you doing?\", I'm doing" + }, + { + "time": "2024-06-06T20:21:48.500634-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great" + }, + { + "time": "2024-06-06T20:21:48.500672-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great" + }, + { + "time": "2024-06-06T20:21:48.521814-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:21:48.521956-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking 
\"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:21:48.597924-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:21:48.598155-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-06T20:21:48.598237-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-06T20:21:48.598304-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-06T20:21:48.598371-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callProgress", + "chatCompletionId": "1717719710", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-06T20:21:48.598692-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callChat", + "chatCompletionId": "1717719710", + "usage": { + "promptTokens": 122, + "completionTokens": 17, + "totalTokens": 139 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + } + ], + "usage": { + "promptTokens": 122, + "completionTokens": 17, + "totalTokens": 139 + } + } + }, + { + "time": "2024-06-06T20:21:48.598808-04:00", + "callContext": { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719708" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-06T20:21:48.59889-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-06T20:21:48.789457-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719711", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"How are you doing?\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "call_rziWxxpDmGb1tzIullRJT6zj" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:21:48.789942-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-06T20:21:49.135224-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-06T20:21:49.135428-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-06T20:21:49.163995-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said" + }, + { + "time": "2024-06-06T20:21:49.164256-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said:" + }, + { + "time": "2024-06-06T20:21:49.192967-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": 
"testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks" + }, + { + "time": "2024-06-06T20:21:49.193161-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks" + }, + { + "time": "2024-06-06T20:21:49.235659-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking" + }, + { + "time": "2024-06-06T20:21:49.235818-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking" + }, + { + "time": "2024-06-06T20:21:49.322521-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How" + }, + { + "time": "2024-06-06T20:21:49.322702-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me 
know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are" + }, + { + "time": "2024-06-06T20:21:49.322768-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are" + }, + { + "time": "2024-06-06T20:21:49.35099-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?'," + }, + { + "time": "2024-06-06T20:21:49.35118-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?'," + }, + { + "time": "2024-06-06T20:21:49.351215-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + 
"type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?'," + }, + { + "time": "2024-06-06T20:21:49.351246-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm" + }, + { + "time": "2024-06-06T20:21:49.351271-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing" + }, + { + "time": "2024-06-06T20:21:49.35129-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great" + }, + { + "time": "2024-06-06T20:21:49.382283-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow" + }, + { + "time": "2024-06-06T20:21:49.382442-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + 
"bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:21:49.418198-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:21:49.41831-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:21:49.418924-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:21:49.419001-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" 
+ } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:21:49.419101-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:21:49.42086-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719711", + "usage": { + "promptTokens": 181, + "completionTokens": 22, + "totalTokens": 203 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": { + "promptTokens": 181, + "completionTokens": 22, + "totalTokens": 203 + } + } + }, + { + "time": "2024-06-06T20:21:49.42093-04:00", + "callContext": { + "id": "1717719708", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:21:49.42097-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json new file mode 100644 index 00000000..a7f0ce37 --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json @@ -0,0 +1,2608 @@ +[ + { + "time": 
"2024-06-06T20:57:45.68913-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-06T20:57:45.689454-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-06T20:57:45.875198-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717721867", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + "messages": [ + { + "role": "system", + "content": "Ask Bob how he is doing and let me know exactly what he said." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:57:45.875466-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721867", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-06T20:57:46.081212-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721867", + "usage": {} + }, + { + "time": "2024-06-06T20:57:46.831234-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721867", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:57:46.832764-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717721867", + "usage": { + "promptTokens": 86, + "completionTokens": 23, + "totalTokens": 109 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "7sPqe5ERc", + "function": { + "name": "bob", + "arguments": "{\"question\": \"How are you doing?\"}" + } + } + } + ], + "usage": { + "promptTokens": 86, + "completionTokens": 23, + "totalTokens": 109 + } + } + }, + { + "time": "2024-06-06T20:57:46.833114-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolSubCalls": { + "7sPqe5ERc": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\": \"How are you doing?\"}" + 
} + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-06T20:57:46.833185-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:57:47.015577-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callChat", + "chatCompletionId": "1717721868", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + "messages": [ + { + "role": "system", + "content": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + "content": "{\"question\": \"How are you doing?\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-06-06T20:57:47.015912-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-06T20:57:47.354264-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {} + }, + { + "time": "2024-06-06T20:57:47.403644-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-06T20:57:47.429265-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for" + }, + { + "time": "2024-06-06T20:57:47.444354-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + 
}, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-06T20:57:47.470971-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"" + }, + { + "time": "2024-06-06T20:57:47.492704-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How" + }, + { + "time": "2024-06-06T20:57:47.508504-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are" + }, + { + "time": "2024-06-06T20:57:47.529197-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + 
"question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you" + }, + { + "time": "2024-06-06T20:57:47.547729-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing" + }, + { + "time": "2024-06-06T20:57:47.567629-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?" 
+ }, + { + "time": "2024-06-06T20:57:47.58774-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\"," + }, + { + "time": "2024-06-06T20:57:47.607371-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I" + }, + { + "time": "2024-06-06T20:57:47.627325-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'" + }, + { + "time": "2024-06-06T20:57:47.647745-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI 
tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm" + }, + { + "time": "2024-06-06T20:57:47.666556-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing" + }, + { + "time": "2024-06-06T20:57:47.686828-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great" + }, + { + "time": "2024-06-06T20:57:47.706183-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow" + }, + { + "time": 
"2024-06-06T20:57:47.726267-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:57:47.746525-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI" + }, + { + "time": "2024-06-06T20:57:47.765116-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:57:47.786832-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond 
with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-06T20:57:47.81109-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callProgress", + "chatCompletionId": "1717721868", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-06T20:57:47.812892-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callChat", + "chatCompletionId": "1717721868", + "usage": { + "promptTokens": 41, + "completionTokens": 20, + "totalTokens": 61 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": { + "promptTokens": 41, + "completionTokens": 20, + "totalTokens": 61 + } + } + }, + { + "time": "2024-06-06T20:57:47.813082-04:00", + "callContext": { + "id": "7sPqe5ERc", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721866" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-06T20:57:47.813184-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-06T20:57:47.981851-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717721869", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + "messages": [ + { + "role": "system", + "content": "Ask Bob how he is doing and let me know exactly what he said." 
+ }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "7sPqe5ERc", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"How are you doing?\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "7sPqe5ERc" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:57:47.982156-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-06T20:57:48.11789-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {} + }, + { + "time": "2024-06-06T20:57:48.189708-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-06T20:57:48.205425-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": 
"testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said" + }, + { + "time": "2024-06-06T20:57:48.227965-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said," + }, + { + "time": "2024-06-06T20:57:48.245576-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"" + }, + { + "time": "2024-06-06T20:57:48.266371-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks" + }, + { + "time": "2024-06-06T20:57:48.285452-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for" + }, + { + "time": "2024-06-06T20:57:48.306735-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": 
"mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking" + }, + { + "time": "2024-06-06T20:57:48.330511-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking '" + }, + { + "time": "2024-06-06T20:57:48.347556-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How" + }, + { + "time": "2024-06-06T20:57:48.367554-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are" + }, + { + "time": "2024-06-06T20:57:48.387523-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } 
+ ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you" + }, + { + "time": "2024-06-06T20:57:48.408257-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing" + }, + { + "time": "2024-06-06T20:57:48.428538-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?" 
+ }, + { + "time": "2024-06-06T20:57:48.448774-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?'," + }, + { + "time": "2024-06-06T20:57:48.468948-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I" + }, + { + "time": "2024-06-06T20:57:48.488223-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'" + }, + { + "time": "2024-06-06T20:57:48.508672-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm" + }, + { + "time": "2024-06-06T20:57:48.529176-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + 
"instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing" + }, + { + "time": "2024-06-06T20:57:48.550465-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great" + }, + { + "time": "2024-06-06T20:57:48.572031-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow" + }, + { + "time": "2024-06-06T20:57:48.590327-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:57:48.611919-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": 
"bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI" + }, + { + "time": "2024-06-06T20:57:48.630583-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:57:48.653492-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:57:48.665778-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721869", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:57:48.667516-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": 
"testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717721869", + "usage": { + "promptTokens": 145, + "completionTokens": 24, + "totalTokens": 169 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": { + "promptTokens": 145, + "completionTokens": 24, + "totalTokens": 169 + } + } + }, + { + "time": "2024-06-06T20:57:48.667593-04:00", + "callContext": { + "id": "1717721866", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:57:48.667619-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/test.gpt b/pkg/tests/smoke/testdata/Bob/test.gpt new file mode 100644 index 00000000..2391b615 --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/test.gpt @@ -0,0 +1,10 @@ +tools: bob + +Ask Bob how he is doing and let me know exactly what he said. + +--- +name: bob +description: I'm Bob, a friendly guy. +args: question: The question to ask Bob. + +When asked how I am doing, respond with "Thanks for asking "${question}", I'm doing great fellow friendly AI tool!" 
diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json new file mode 100644 index 00000000..326aef09 --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json @@ -0,0 +1,819 @@ +[ + { + "time": "2024-06-18T11:31:43.810792-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-18T11:31:43.810985-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-18T11:31:44.177028-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-18T11:31:44.177099-04:00", + "callContext": { + "id": "1718724705", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/claude3-anthropic-provider/credential" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + { + "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + }, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-18T11:31:45.190929-04:00", + "callContext": { + "id": "1718724705", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-opus-20240229 from 
github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/claude3-anthropic-provider/credential" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + { + "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + }, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:11109" + }, + { + "time": "2024-06-18T11:31:45.19104-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-06-18T11:31:45.191084-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718724706", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "Ask Bob how he is doing and let me know exactly what he said." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-18T11:31:45.19133-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718724706", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-18T11:31:54.917676-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718724706", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how he is doing\"}" + }, + { + "time": "2024-06-18T11:31:54.917887-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718724706", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how he is doing\"}" + } + } + } + ], + "usage": {} + } + }, + { + "time": "2024-06-18T11:31:54.917988-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he 
said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolSubCalls": { + "toolu_01F3QeAp35HdxoBSEed1gaUJ": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\": \"how he is doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-18T11:31:54.918017-04:00", + "callContext": { + "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724704", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"how he is doing\"}" + }, + { + "time": "2024-06-18T11:31:54.91845-04:00", + "callContext": { + "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724704", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1718724707", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-06-18T11:31:54.922608-04:00", + "callContext": { + "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": 
"testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724704", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1718724707", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-06-18T11:31:54.922696-04:00", + "callContext": { + "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718724704", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how he is doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-18T11:31:54.922726-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-18T11:31:55.096576-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718724708", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "Ask Bob how he is doing and let me know exactly what he said." 
+ }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how he is doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking how he is doing, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "toolu_01F3QeAp35HdxoBSEed1gaUJ" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-18T11:31:55.097061-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718724708", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-18T11:31:58.228157-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718724708", + "usage": {}, + "content": "Bob said exactly: \"Thanks for asking how he is doing, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-18T11:31:58.228613-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718724708", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob said exactly: \"Thanks for asking how he is doing, I'm doing great 
fellow friendly AI tool!\"" + } + ], + "usage": {} + } + }, + { + "time": "2024-06-18T11:31:58.228674-04:00", + "callContext": { + "id": "1718724704", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob said exactly: \"Thanks for asking how he is doing, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-18T11:31:58.228686-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json new file mode 100644 index 00000000..f81bd202 --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json @@ -0,0 +1,1694 @@ +[ + { + "time": "2024-06-06T20:22:33.171056-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-06T20:22:33.171333-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-06T20:22:33.371153-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719755", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:22:33.371528-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-06T20:22:34.086505-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question" + }, + { + "time": "2024-06-06T20:22:34.086808-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question" + }, + { + "time": "2024-06-06T20:22:34.08687-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": 
"1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question" + }, + { + "time": "2024-06-06T20:22:34.086895-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-06T20:22:34.109771-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + }, + { + "time": "2024-06-06T20:22:34.109961-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + }, + { + "time": "2024-06-06T20:22:34.154694-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + }, + { + "time": "2024-06-06T20:22:34.154833-04:00", + "callContext": { 
+ "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + }, + { + "time": "2024-06-06T20:22:34.181671-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:34.18183-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:34.181879-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:34.181915-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + 
"bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719755", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:34.182218-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719755", + "usage": { + "promptTokens": 143, + "completionTokens": 18, + "totalTokens": 161 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "function": { + "name": "bob", + "arguments": "{\"question\":\"How are you doing?\"}" + } + } + } + ], + "usage": { + "promptTokens": 143, + "completionTokens": 18, + "totalTokens": 161 + } + } + }, + { + "time": "2024-06-06T20:22:34.182557-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolSubCalls": { + "call_LJKF6bO1dztkg01aWksN4RPJ": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\":\"How are you doing?\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-06T20:22:34.182664-04:00", + "callContext": { + "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + 
"inputContext": null, + "toolName": "bob", + "parentID": "1717719754", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:22:34.184191-04:00", + "callContext": { + "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719754", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1717719756", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-06-06T20:22:34.192956-04:00", + "callContext": { + "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719754", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1717719756", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-06-06T20:22:34.193263-04:00", + "callContext": { + "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719754", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-06T20:22:34.193374-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": 
"gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-06T20:22:34.337399-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719757", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"How are you doing?\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "call_LJKF6bO1dztkg01aWksN4RPJ" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:22:34.337825-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-06T20:22:35.110166-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-06T20:22:35.110412-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-06T20:22:35.147735-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said," + }, + { + "time": "2024-06-06T20:22:35.147815-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said," + }, + { + "time": "2024-06-06T20:22:35.245256-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + 
"reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"" + }, + { + "time": "2024-06-06T20:22:35.245466-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I" + }, + { + "time": "2024-06-06T20:22:35.324279-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm" + }, + { + "time": "2024-06-06T20:22:35.324425-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm doing" + }, + { + "time": "2024-06-06T20:22:35.404608-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + 
}, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm doing great fellow" + }, + { + "time": "2024-06-06T20:22:35.404848-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:22:35.404887-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:22:35.557862-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:22:35.5581-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:22:35.558233-04:00", + "callContext": { + 
"id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:35.558308-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:35.558348-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719757", + "usage": {}, + "content": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:35.559946-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719757", + "usage": { + "promptTokens": 185, + "completionTokens": 14, + "totalTokens": 199 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": { + "promptTokens": 185, + "completionTokens": 14, + "totalTokens": 199 + } + } + }, + { + "time": 
"2024-06-06T20:22:35.56022-04:00", + "callContext": { + "id": "1717719754", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:22:35.560294-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json new file mode 100644 index 00000000..3ce00112 --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json @@ -0,0 +1,1953 @@ +[ + { + "time": "2024-06-06T20:21:49.457487-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-06T20:21:49.457771-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-06T20:21:49.67064-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719711", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:21:49.670852-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-06T20:21:50.142412-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-06T20:21:50.142691-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-06T20:21:50.164329-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": 
"\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-06T20:21:50.164636-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-06T20:21:50.182304-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + }, + { + "time": "2024-06-06T20:21:50.182411-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + }, + { + "time": "2024-06-06T20:21:50.210798-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + }, + { + "time": "2024-06-06T20:21:50.210958-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": 
"gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + }, + { + "time": "2024-06-06T20:21:50.220648-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:50.220734-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:50.220753-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:50.220768-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know 
exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719711", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:50.221021-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719711", + "usage": { + "promptTokens": 139, + "completionTokens": 18, + "totalTokens": 157 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "function": { + "name": "bob", + "arguments": "{\"question\":\"How are you doing?\"}" + } + } + } + ], + "usage": { + "promptTokens": 139, + "completionTokens": 18, + "totalTokens": 157 + } + } + }, + { + "time": "2024-06-06T20:21:50.221288-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolSubCalls": { + "call_AZtZBeP4Ofv9CbW8Tiw066Cr": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\":\"How are you doing?\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-06T20:21:50.221358-04:00", + "callContext": { + "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719710", + "displayText": "Running 
bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:21:50.22312-04:00", + "callContext": { + "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719710", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1717719712", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-06-06T20:21:50.231558-04:00", + "callContext": { + "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719710", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1717719712", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-06-06T20:21:50.232297-04:00", + "callContext": { + "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717719710", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-06T20:21:50.232391-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me 
know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-06T20:21:50.398083-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719713", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"How are you doing?\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:21:50.398429-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-06T20:21:50.873677-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-06T20:21:50.873906-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-06T20:21:50.875164-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said:" + }, + { + "time": "2024-06-06T20:21:50.875259-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"" + }, + { + "time": "2024-06-06T20:21:50.875299-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": 
"testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"" + }, + { + "time": "2024-06-06T20:21:50.875379-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for" + }, + { + "time": "2024-06-06T20:21:50.875553-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking" + }, + { + "time": "2024-06-06T20:21:50.875607-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking" + }, + { + "time": "2024-06-06T20:21:50.945695-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": 
"callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are" + }, + { + "time": "2024-06-06T20:21:50.945912-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are" + }, + { + "time": "2024-06-06T20:21:50.973355-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?," + }, + { + "time": "2024-06-06T20:21:50.973495-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?," + }, + { + "time": "2024-06-06T20:21:50.973577-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm" + }, + { + "time": "2024-06-06T20:21:50.973604-04:00", + "callContext": { + "id": "1717719710", + "tool": 
{ + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm" + }, + { + "time": "2024-06-06T20:21:50.973625-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing" + }, + { + "time": "2024-06-06T20:21:50.973646-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great" + }, + { + "time": "2024-06-06T20:21:50.994229-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:21:50.994426-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly 
what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:21:51.017952-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:21:51.018129-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:21:51.020818-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:21:51.020913-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + 
"toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:21:51.020955-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717719713", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:21:51.02285-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717719713", + "usage": { + "promptTokens": 180, + "completionTokens": 21, + "totalTokens": 201 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": { + "promptTokens": 180, + "completionTokens": 21, + "totalTokens": 201 + } + } + }, + { + "time": "2024-06-06T20:21:51.022991-04:00", + "callContext": { + "id": "1717719710", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:21:51.023016-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git 
a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json new file mode 100644 index 00000000..7eb9a414 --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json @@ -0,0 +1,1692 @@ +[ + { + "time": "2024-06-06T20:57:48.704491-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-06T20:57:48.70479-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-06T20:57:48.929935-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717721870", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + "messages": [ + { + "role": "system", + "content": "Ask Bob how he is doing and let me know exactly what he said." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:57:48.930412-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721870", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-06T20:57:49.11122-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721870", + "usage": {} + }, + { + "time": "2024-06-06T20:57:49.641536-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721870", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:57:49.642104-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717721870", + "usage": { + "promptTokens": 86, + "completionTokens": 23, + "totalTokens": 109 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "SJR7ytkCE", + "function": { + "name": "bob", + "arguments": "{\"question\": \"How are you doing?\"}" + } + } + } + ], + "usage": { + "promptTokens": 86, + "completionTokens": 23, + "totalTokens": 109 + } + } + }, + { + "time": "2024-06-06T20:57:49.642359-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": 
"testdata/BobAsShell" + }, + "inputContext": null + }, + "toolSubCalls": { + "SJR7ytkCE": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\": \"How are you doing?\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-06T20:57:49.64244-04:00", + "callContext": { + "id": "SJR7ytkCE", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721869", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"How are you doing?\"}" + }, + { + "time": "2024-06-06T20:57:49.643547-04:00", + "callContext": { + "id": "SJR7ytkCE", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721869", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1717721871", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-06-06T20:57:49.651156-04:00", + "callContext": { + "id": "SJR7ytkCE", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721869", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1717721871", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-06-06T20:57:49.651393-04:00", + "callContext": { + "id": "SJR7ytkCE", + "tool": { + "name": "bob", + "description": "I'm Bob, a 
friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1717721869", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-06T20:57:49.651461-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-06T20:57:49.851552-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717721872", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + "messages": [ + { + "role": "system", + "content": "Ask Bob how he is doing and let me know exactly what he said." 
+ }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "SJR7ytkCE", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"How are you doing?\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "SJR7ytkCE" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-06T20:57:49.851831-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-06T20:57:50.017629-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {} + }, + { + "time": "2024-06-06T20:57:50.060377-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-06T20:57:50.084824-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + 
"reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said" + }, + { + "time": "2024-06-06T20:57:50.104197-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said," + }, + { + "time": "2024-06-06T20:57:50.126794-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"" + }, + { + "time": "2024-06-06T20:57:50.147974-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks" + }, + { + "time": "2024-06-06T20:57:50.17264-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": 
"testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for" + }, + { + "time": "2024-06-06T20:57:50.194235-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking" + }, + { + "time": "2024-06-06T20:57:50.218937-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking '" + }, + { + "time": "2024-06-06T20:57:50.239766-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How" + }, + { + "time": "2024-06-06T20:57:50.261815-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": 
"1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are" + }, + { + "time": "2024-06-06T20:57:50.28776-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you" + }, + { + "time": "2024-06-06T20:57:50.307171-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing" + }, + { + "time": "2024-06-06T20:57:50.330839-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?" 
+ }, + { + "time": "2024-06-06T20:57:50.352459-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?'," + }, + { + "time": "2024-06-06T20:57:50.375291-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I" + }, + { + "time": "2024-06-06T20:57:50.398407-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'" + }, + { + "time": "2024-06-06T20:57:50.421233-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm" + }, + { + "time": "2024-06-06T20:57:50.443362-04:00", + "callContext": { 
+ "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing" + }, + { + "time": "2024-06-06T20:57:50.466617-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great" + }, + { + "time": "2024-06-06T20:57:50.488545-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow" + }, + { + "time": "2024-06-06T20:57:50.514471-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + }, + { + "time": "2024-06-06T20:57:50.533973-04:00", + "callContext": { + 
"id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI" + }, + { + "time": "2024-06-06T20:57:50.55935-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-06T20:57:50.579532-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:57:50.603725-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1717721872", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI 
tool!\"" + }, + { + "time": "2024-06-06T20:57:50.604733-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1717721872", + "usage": { + "promptTokens": 145, + "completionTokens": 24, + "totalTokens": 169 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": { + "promptTokens": 145, + "completionTokens": 24, + "totalTokens": 169 + } + } + }, + { + "time": "2024-06-06T20:57:50.604844-04:00", + "callContext": { + "id": "1717721869", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-06T20:57:50.604875-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/test.gpt b/pkg/tests/smoke/testdata/BobAsShell/test.gpt new file mode 100644 index 00000000..ca726b09 --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/test.gpt @@ -0,0 +1,13 @@ + +tools: bob + +Ask Bob how he is doing and let me know exactly what he said. + +--- +name: bob +description: I'm Bob, a friendly guy. +args: question: The question to ask Bob. + +#!/bin/bash + +echo "Thanks for asking ${question}, I'm doing great fellow friendly AI tool!" From cc5464163a3c39cb27942f751c9dadd48af23b83 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Wed, 19 Jun 2024 15:26:06 -0400 Subject: [PATCH 07/22] chore: fix smoke workflow name The current name is "test" which conflicts with the existing "test" workflow and causes the GitHub UI to get confused. 
Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .github/workflows/smoke.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index 447bffd1..5f0aee22 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -1,4 +1,4 @@ -name: test +name: smoke on: pull_request_target: From e9c2bf91a55dfb3f4ad272689aba84cdf3e3f6e8 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 20 Jun 2024 10:18:35 -0700 Subject: [PATCH 08/22] chore: add progress output for builtins specifically sys.exec --- pkg/builtin/builtin.go | 73 ++++++++++++++++++++++++++----------- pkg/builtin/builtin_test.go | 6 ++- pkg/engine/cmd.go | 26 ++++++++++++- pkg/prompt/credential.go | 3 +- pkg/prompt/prompt.go | 2 +- pkg/types/tool.go | 2 +- 6 files changed, 84 insertions(+), 28 deletions(-) diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index 989f523c..291384c3 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -1,6 +1,7 @@ package builtin import ( + "bytes" "context" "encoding/json" "errors" @@ -264,7 +265,7 @@ func Builtin(name string) (types.Tool, bool) { return SetDefaults(t), ok } -func SysFind(_ context.Context, _ []string, input string) (string, error) { +func SysFind(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var result []string var params struct { Pattern string `json:"pattern,omitempty"` @@ -305,7 +306,7 @@ func SysFind(_ context.Context, _ []string, input string) (string, error) { return strings.Join(result, "\n"), nil } -func SysExec(_ context.Context, env []string, input string) (string, error) { +func SysExec(_ context.Context, env []string, input string, progress chan<- string) (string, error) { var params struct { Command string `json:"command,omitempty"` Directory string `json:"directory,omitempty"` @@ -328,13 +329,30 @@ func SysExec(_ context.Context, env []string, input string) (string, error) { cmd = exec.Command("/bin/sh", "-c", params.Command) } + var ( + out bytes.Buffer + pw = progressWriter{ + out: progress, + } + combined = io.MultiWriter(&out, &pw) + ) cmd.Env = env cmd.Dir = params.Directory - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Sprintf("ERROR: %s\nOUTPUT:\n%s", err, out), nil + cmd.Stdout = combined + cmd.Stderr = combined + if err := cmd.Run(); err != nil { + return fmt.Sprintf("ERROR: %s\nOUTPUT:\n%s", err, &out), nil } - return string(out), nil + return out.String(), nil +} + +type progressWriter struct { + out chan<- string +} + +func (pw *progressWriter) Write(p []byte) (n int, err error) { + pw.out <- string(p) + return len(p), nil } func getWorkspaceDir(envs []string) (string, error) { @@ -347,7 +365,7 @@ func getWorkspaceDir(envs []string) (string, error) { return "", fmt.Errorf("no workspace directory found in env") } -func SysLs(_ context.Context, _ []string, input string) (string, error) { +func SysLs(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Dir string `json:"dir,omitempty"` } @@ -383,7 +401,7 @@ func SysLs(_ context.Context, _ []string, input string) (string, error) { return strings.Join(result, "\n"), nil } -func SysRead(_ context.Context, _ []string, input string) (string, error) { +func SysRead(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Filename string `json:"filename,omitempty"` } @@ -411,7 +429,7 @@ func SysRead(_ context.Context, _ []string, input 
string) (string, error) { return string(data), nil } -func SysWrite(_ context.Context, _ []string, input string) (string, error) { +func SysWrite(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Filename string `json:"filename,omitempty"` Content string `json:"content,omitempty"` @@ -443,7 +461,7 @@ func SysWrite(_ context.Context, _ []string, input string) (string, error) { return fmt.Sprintf("Wrote (%d) bytes to file %s", len(data), file), nil } -func SysAppend(_ context.Context, _ []string, input string) (string, error) { +func SysAppend(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Filename string `json:"filename,omitempty"` Content string `json:"content,omitempty"` @@ -489,7 +507,7 @@ func fixQueries(u string) string { return url.String() } -func SysHTTPGet(_ context.Context, _ []string, input string) (_ string, err error) { +func SysHTTPGet(_ context.Context, _ []string, input string, _ chan<- string) (_ string, err error) { var params struct { URL string `json:"url,omitempty"` } @@ -523,8 +541,8 @@ func SysHTTPGet(_ context.Context, _ []string, input string) (_ string, err erro return string(data), nil } -func SysHTTPHtml2Text(ctx context.Context, env []string, input string) (string, error) { - content, err := SysHTTPGet(ctx, env, input) +func SysHTTPHtml2Text(ctx context.Context, env []string, input string, progress chan<- string) (string, error) { + content, err := SysHTTPGet(ctx, env, input, progress) if err != nil { return "", err } @@ -533,7 +551,7 @@ func SysHTTPHtml2Text(ctx context.Context, env []string, input string) (string, }) } -func SysHTTPPost(ctx context.Context, _ []string, input string) (_ string, err error) { +func SysHTTPPost(ctx context.Context, _ []string, input string, _ chan<- string) (_ string, err error) { var params struct { URL string `json:"url,omitempty"` Content string `json:"content,omitempty"` @@ -569,7 +587,18 @@ func SysHTTPPost(ctx context.Context, _ []string, input string) (_ string, err e return fmt.Sprintf("Wrote %d to %s", len([]byte(params.Content)), params.URL), nil } -func SysGetenv(_ context.Context, env []string, input string) (string, error) { +func DiscardProgress() (progress chan<- string, closeFunc func()) { + ch := make(chan string) + go func() { + for range ch { + } + }() + return ch, func() { + close(ch) + } +} + +func SysGetenv(_ context.Context, env []string, input string, _ chan<- string) (string, error) { var params struct { Name string `json:"name,omitempty"` } @@ -597,7 +626,7 @@ func invalidArgument(input string, err error) string { return fmt.Sprintf("Failed to parse arguments %s: %v", input, err) } -func SysChatHistory(ctx context.Context, _ []string, _ string) (string, error) { +func SysChatHistory(ctx context.Context, _ []string, _ string, _ chan<- string) (string, error) { engineContext, _ := engine.FromContext(ctx) data, err := json.Marshal(engine.ChatHistory{ @@ -627,7 +656,7 @@ func writeHistory(ctx *engine.Context) (result []engine.ChatHistoryCall) { return } -func SysChatFinish(_ context.Context, _ []string, input string) (string, error) { +func SysChatFinish(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Message string `json:"return,omitempty"` } @@ -641,7 +670,7 @@ func SysChatFinish(_ context.Context, _ []string, input string) (string, error) } } -func SysAbort(_ context.Context, _ []string, input string) (string, error) { +func SysAbort(_ context.Context, _ 
[]string, input string, _ chan<- string) (string, error) { var params struct { Message string `json:"message,omitempty"` } @@ -651,7 +680,7 @@ func SysAbort(_ context.Context, _ []string, input string) (string, error) { return "", fmt.Errorf("ABORT: %s", params.Message) } -func SysRemove(_ context.Context, _ []string, input string) (string, error) { +func SysRemove(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Location string `json:"location,omitempty"` } @@ -670,7 +699,7 @@ func SysRemove(_ context.Context, _ []string, input string) (string, error) { return fmt.Sprintf("Removed file: %s", params.Location), nil } -func SysStat(_ context.Context, _ []string, input string) (string, error) { +func SysStat(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Filepath string `json:"filepath,omitempty"` } @@ -690,7 +719,7 @@ func SysStat(_ context.Context, _ []string, input string) (string, error) { return fmt.Sprintf("%s %s mode: %s, size: %d bytes, modtime: %s", title, params.Filepath, stat.Mode().String(), stat.Size(), stat.ModTime().String()), nil } -func SysDownload(_ context.Context, env []string, input string) (_ string, err error) { +func SysDownload(_ context.Context, env []string, input string, _ chan<- string) (_ string, err error) { var params struct { URL string `json:"url,omitempty"` Location string `json:"location,omitempty"` @@ -763,6 +792,6 @@ func SysDownload(_ context.Context, env []string, input string) (_ string, err e return fmt.Sprintf("Downloaded %s to %s", params.URL, params.Location), nil } -func SysTimeNow(context.Context, []string, string) (string, error) { +func SysTimeNow(context.Context, []string, string, chan<- string) (string, error) { return time.Now().Format(time.RFC3339), nil } diff --git a/pkg/builtin/builtin_test.go b/pkg/builtin/builtin_test.go index 313b9718..c12a68f6 100644 --- a/pkg/builtin/builtin_test.go +++ b/pkg/builtin/builtin_test.go @@ -10,15 +10,17 @@ import ( ) func TestSysGetenv(t *testing.T) { + p, c := DiscardProgress() + defer c() v, err := SysGetenv(context.Background(), []string{ "MAGIC=VALUE", - }, `{"name":"MAGIC"}`) + }, `{"name":"MAGIC"}`, nil) require.NoError(t, err) autogold.Expect("VALUE").Equal(t, v) v, err = SysGetenv(context.Background(), []string{ "MAGIC=VALUE", - }, `{"name":"MAGIC2"}`) + }, `{"name":"MAGIC2"}`, p) require.NoError(t, err) autogold.Expect("MAGIC2 is not set or has no value").Equal(t, v) } diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 4a697c69..3707205e 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -12,6 +12,7 @@ import ( "runtime" "sort" "strings" + "sync" "github.com/google/shlex" "github.com/gptscript-ai/gptscript/pkg/counter" @@ -64,7 +65,30 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate "input": input, }, } - return tool.BuiltinFunc(ctx.WrappedContext(), e.Env, input) + + var ( + progress = make(chan string) + wg sync.WaitGroup + ) + wg.Add(1) + defer wg.Wait() + defer close(progress) + go func() { + defer wg.Done() + buf := strings.Builder{} + for line := range progress { + buf.WriteString(line) + e.Progress <- types.CompletionStatus{ + CompletionID: id, + PartialResponse: &types.CompletionMessage{ + Role: types.CompletionMessageRoleTypeAssistant, + Content: types.Text(buf.String()), + }, + } + } + }() + + return tool.BuiltinFunc(ctx.WrappedContext(), e.Env, input, progress) } var instructions []string diff --git a/pkg/prompt/credential.go 
b/pkg/prompt/credential.go index a8bf6f76..9202ed49 100644 --- a/pkg/prompt/credential.go +++ b/pkg/prompt/credential.go @@ -18,7 +18,8 @@ func GetModelProviderCredential(ctx context.Context, credStore credentials.Crede if exists { k = cred.Env[env] } else { - result, err := SysPrompt(ctx, envs, fmt.Sprintf(`{"message":"%s","fields":"key","sensitive":"true"}`, message)) + // we know progress isn't used so pass as nil + result, err := SysPrompt(ctx, envs, fmt.Sprintf(`{"message":"%s","fields":"key","sensitive":"true"}`, message), nil) if err != nil { return "", err } diff --git a/pkg/prompt/prompt.go b/pkg/prompt/prompt.go index 6cf8febd..4a9550a3 100644 --- a/pkg/prompt/prompt.go +++ b/pkg/prompt/prompt.go @@ -48,7 +48,7 @@ func sysPromptHTTP(ctx context.Context, envs []string, url string, prompt types. return string(data), err } -func SysPrompt(ctx context.Context, envs []string, input string) (_ string, err error) { +func SysPrompt(ctx context.Context, envs []string, input string, _ chan<- string) (_ string, err error) { var params struct { Message string `json:"message,omitempty"` Fields string `json:"fields,omitempty"` diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 6c016d82..9468a04a 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -117,7 +117,7 @@ func (p Program) SetBlocking() Program { return p } -type BuiltinFunc func(ctx context.Context, env []string, input string) (string, error) +type BuiltinFunc func(ctx context.Context, env []string, input string, progress chan<- string) (string, error) type Parameters struct { Name string `json:"name,omitempty"` From 1500872d087497358b5044df765a5c668b3d5ab7 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Thu, 20 Jun 2024 17:22:27 -0400 Subject: [PATCH 09/22] fix: smoke test flakes Perform some prompt engineering on the smoke test judge and test cases to make result comparison more reliable. This will stop the smoke tests from flaking. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/tests/judge/judge.go | 33 +- pkg/tests/smoke/smoke_test.go | 9 +- .../Bob/claude-3-opus-20240229-expected.json | 351 ++--- .../Bob/gpt-4-turbo-2024-04-09-expected.json | 1203 +++++------------ .../Bob/gpt-4o-2024-05-13-expected.json | 904 +++++-------- .../Bob/mistral-large-2402-expected.json | 770 +++++------ pkg/tests/smoke/testdata/Bob/test.gpt | 4 +- .../claude-3-opus-20240229-expected.json | 206 +-- .../gpt-4-turbo-2024-04-09-expected.json | 609 ++++----- .../gpt-4o-2024-05-13-expected.json | 673 ++++----- .../mistral-large-2402-expected.json | 521 ++++--- pkg/tests/smoke/testdata/BobAsShell/test.gpt | 2 +- 12 files changed, 2021 insertions(+), 3264 deletions(-) diff --git a/pkg/tests/judge/judge.go b/pkg/tests/judge/judge.go index 656d77a4..eae12c2e 100644 --- a/pkg/tests/judge/judge.go +++ b/pkg/tests/judge/judge.go @@ -9,12 +9,15 @@ import ( openai "github.com/gptscript-ai/chat-completion-client" ) -const instructions = `When given JSON objects that conform to the following JSONSchema: +const instructions = `"actual" is considered equivalent to "expected" if and only if the following rules are satisfied: %s -Determine if "actual" is equal to "expected" based on the comparison constraints described by "criteria". -"actual" is considered equal to "expected" if and only if the all of the constraints described by "criteria" are satisfied. +When given JSON objects that conform to the following JSONSchema: + +%s + +Determine if "actual" is considered equivalent to "expected". 
After making a determination, respond with a JSON object that conforms to the following JSONSchema: @@ -28,7 +31,7 @@ After making a determination, respond with a JSON object that conforms to the fo }, "reasoning": { "type": "string", - "description": "The reasoning used to come to the determination, that points out all instances where the given criteria was violated" + "description": "The reasoning used to come to the determination" } }, "required": [ @@ -41,14 +44,13 @@ Your responses are concise and include only the json object described above. ` type Judge[T any] struct { - client *openai.Client - instructions string + client *openai.Client + comparisonSchema string } type comparison[T any] struct { - Expected T `json:"expected"` - Actual T `json:"actual"` - Criteria string `json:"criteria"` + Expected T `json:"expected"` + Actual T `json:"actual"` } type ruling struct { @@ -70,14 +72,14 @@ func New[T any](client *openai.Client) (*Judge[T], error) { return nil, fmt.Errorf("failed to generate JSONSchema for %T: %w", new(T), err) } - schemaJSON, err := json.MarshalIndent(schema, "", " ") + marshaled, err := json.MarshalIndent(schema, "", " ") if err != nil { return nil, fmt.Errorf("failed to marshal JSONSchema for %T: %w", new(T), err) } return &Judge[T]{ - client: client, - instructions: fmt.Sprintf(instructions, schemaJSON), + client: client, + comparisonSchema: string(marshaled), }, nil } @@ -85,7 +87,6 @@ func (j *Judge[T]) Equal(ctx context.Context, expected, actual T, criteria strin comparisonJSON, err := json.MarshalIndent(&comparison[T]{ Expected: expected, Actual: actual, - Criteria: criteria, }, "", " ") if err != nil { return false, "", fmt.Errorf("failed to marshal judge testcase JSON: %w", err) @@ -101,7 +102,7 @@ func (j *Judge[T]) Equal(ctx context.Context, expected, actual T, criteria strin Messages: []openai.ChatCompletionMessage{ { Role: openai.ChatMessageRoleSystem, - Content: j.instructions, + Content: fmt.Sprintf(instructions, criteria, j.comparisonSchema), }, { Role: openai.ChatMessageRoleUser, @@ -111,11 +112,11 @@ func (j *Judge[T]) Equal(ctx context.Context, expected, actual T, criteria strin } response, err := j.client.CreateChatCompletion(ctx, request) if err != nil { - return false, "", fmt.Errorf("failed to make judge chat completion request: %w", err) + return false, "", fmt.Errorf("failed to create chat completion request: %w", err) } if len(response.Choices) < 1 { - return false, "", fmt.Errorf("judge chat completion request returned no choices") + return false, "", fmt.Errorf("chat completion request returned no choices") } var equality ruling diff --git a/pkg/tests/smoke/smoke_test.go b/pkg/tests/smoke/smoke_test.go index 39ec1cf5..66374ef1 100644 --- a/pkg/tests/smoke/smoke_test.go +++ b/pkg/tests/smoke/smoke_test.go @@ -81,8 +81,13 @@ func TestSmoke(t *testing.T) { ctx, expectedEvents, actualEvents, - `The field values of the elements of expected and actual must be roughly equivalent. 
-Ignore variations in timestamps, IDs, and verbiage when determining equivalence.`, + ` +- disregard differences in timestamps, generated IDs, natural language verbiage, and event order +- omit callProgress events from the comparision +- the overall stream of events and set of tools called should roughly match +- arguments passed in tool calls should be roughly the same +- the final callFinish event should be semantically similar +`, ) require.NoError(t, err, "error getting judge ruling on output") require.True(t, equal, reasoning) diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json index fd9c27b4..92fc786a 100644 --- a/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json +++ b/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-06-18T11:31:24.183335-04:00", + "time": "2024-06-20T17:10:23.193337-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-18T11:31:24.183735-04:00", + "time": "2024-06-20T17:10:23.19359-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -40,14 +40,14 @@ "usage": {} }, { - "time": "2024-06-18T11:31:25.851994-04:00", + "time": "2024-06-20T17:10:24.059514-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-18T11:31:25.852381-04:00", + "time": "2024-06-20T17:10:24.059807-04:00", "callContext": { - "id": "1718724686", + "id": "1718917825", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -91,158 +91,9 @@ "usage": {} }, { - "time": "2024-06-18T11:31:25.852533-04:00", + "time": "2024-06-20T17:10:25.074481-04:00", "callContext": { - "id": "1718724687", - "tool": { - "name": "claude3-anthropic-credential", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", - "internalPrompt": null, - "instructions": "#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/credential.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential", - "localTools": { - "claude3-anthropic-credential": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "credential", - "Name": "tool.gpt", - "Revision": "3b9b9365d469c4c702291f1764537ab5c226c2e0" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential" - }, - "inputContext": null, - "toolCategory": "credential", - "toolName": "github.com/gptscript-ai/claude3-anthropic-provider/credential", - "parentID": "1718724686", - "displayText": "Running 
claude3-anthropic-credential from github.com/gptscript-ai/claude3-anthropic-provider/credential" - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-06-18T11:31:25.855621-04:00", - "callContext": { - "id": "1718724687", - "tool": { - "name": "claude3-anthropic-credential", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", - "internalPrompt": null, - "instructions": "#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/credential.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential", - "localTools": { - "claude3-anthropic-credential": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "credential", - "Name": "tool.gpt", - "Revision": "3b9b9365d469c4c702291f1764537ab5c226c2e0" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential" - }, - "inputContext": null, - "toolCategory": "credential", - "toolName": "github.com/gptscript-ai/claude3-anthropic-provider/credential", - "parentID": "1718724686", - "displayText": "Running claude3-anthropic-credential from github.com/gptscript-ai/claude3-anthropic-provider/credential" - }, - "type": "callChat", - "chatCompletionId": "1718724688", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-06-18T11:31:25.948224-04:00", - "callContext": { - "id": "1718724687", - "tool": { - "name": "claude3-anthropic-credential", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", - "internalPrompt": null, - "instructions": "#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/credential.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential", - "localTools": { - "claude3-anthropic-credential": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "credential", - "Name": "tool.gpt", - "Revision": "3b9b9365d469c4c702291f1764537ab5c226c2e0" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential" - }, - "inputContext": null, - "toolCategory": "credential", - "toolName": "github.com/gptscript-ai/claude3-anthropic-provider/credential", - "parentID": "1718724686", - "displayText": "Running claude3-anthropic-credential from github.com/gptscript-ai/claude3-anthropic-provider/credential" - }, - "type": "callChat", - "chatCompletionId": "1718724688", - "usage": {}, - 
"chatResponse": { - "usage": {} - } - }, - { - "time": "2024-06-18T11:31:25.948393-04:00", - "callContext": { - "id": "1718724687", - "tool": { - "name": "claude3-anthropic-credential", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", - "internalPrompt": null, - "instructions": "#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/credential.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential", - "localTools": { - "claude3-anthropic-credential": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "credential", - "Name": "tool.gpt", - "Revision": "3b9b9365d469c4c702291f1764537ab5c226c2e0" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential" - }, - "inputContext": null, - "toolCategory": "credential", - "toolName": "github.com/gptscript-ai/claude3-anthropic-provider/credential", - "parentID": "1718724686", - "displayText": "Running claude3-anthropic-credential from github.com/gptscript-ai/claude3-anthropic-provider/credential" - }, - "type": "callFinish", - "usage": {} - }, - { - "time": "2024-06-18T11:31:26.96565-04:00", - "callContext": { - "id": "1718724686", + "id": "1718917825", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -284,24 +135,24 @@ }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:11059" + "content": "http://127.0.0.1:11060" }, { - "time": "2024-06-18T11:31:26.965764-04:00", + "time": "2024-06-20T17:10:25.074606-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-06-18T11:31:26.965828-04:00", + "time": "2024-06-20T17:10:25.074685-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -324,14 +175,14 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718724689", + "chatCompletionId": "1718917826", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", "messages": [ { "role": "system", - "content": "Ask Bob how he is doing and let me know exactly what he said." + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
} ], "temperature": 0, @@ -356,16 +207,16 @@ } }, { - "time": "2024-06-18T11:31:26.966331-04:00", + "time": "2024-06-20T17:10:25.075088-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -388,21 +239,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718724689", + "chatCompletionId": "1718917826", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-18T11:31:35.503953-04:00", + "time": "2024-06-20T17:10:33.389627-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -425,21 +276,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718724689", + "chatCompletionId": "1718917826", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how he is doing\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" }, { - "time": "2024-06-18T11:31:35.505033-04:00", + "time": "2024-06-20T17:10:33.389848-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -462,7 +313,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718724689", + "chatCompletionId": "1718917826", "usage": {}, "chatResponse": { "role": "assistant", @@ -470,10 +321,10 @@ { "toolCall": { "index": 0, - "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", "function": { "name": "bob", - "arguments": "{\"question\": \"how he is doing\"}" + "arguments": "{\"question\": \"how are you doing\"}" } } } @@ -482,16 +333,16 @@ } }, { - "time": "2024-06-18T11:31:35.505135-04:00", + "time": "2024-06-20T17:10:33.389967-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -514,18 +365,18 @@ "inputContext": null }, "toolSubCalls": { - "toolu_01NJaY1g8kxqQkA4wbFTGtSX": { + "toolu_01EEcv7qDLHDmGAzm15vobxM": { "toolID": "testdata/Bob/test.gpt:bob", - "input": "{\"question\": \"how he is doing\"}" + "input": "{\"question\": \"how are you doing\"}" } }, "type": "callSubCalls", "usage": {} }, { - "time": 
"2024-06-18T11:31:35.505164-04:00", + "time": "2024-06-20T17:10:33.389997-04:00", "callContext": { - "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -540,7 +391,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -554,16 +405,16 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724685" + "parentID": "1718917824" }, "type": "callStart", "usage": {}, - "content": "{\"question\": \"how he is doing\"}" + "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-06-18T11:31:35.666461-04:00", + "time": "2024-06-20T17:10:33.584228-04:00", "callContext": { - "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -578,7 +429,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -592,30 +443,30 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724685" + "parentID": "1718917824" }, "type": "callChat", - "chatCompletionId": "1718724690", + "chatCompletionId": "1718917827", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", "messages": [ { "role": "system", - "content": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + "content": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" }, { "role": "user", - "content": "{\"question\": \"how he is doing\"}" + "content": "{\"question\": \"how are you doing\"}" } ], "temperature": 0 } }, { - "time": "2024-06-18T11:31:35.666692-04:00", + "time": "2024-06-20T17:10:33.584507-04:00", "callContext": { - "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -630,7 +481,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -644,17 +495,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724685" + "parentID": "1718917824" }, "type": "callProgress", - "chatCompletionId": "1718724690", + "chatCompletionId": "1718917827", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-18T11:31:38.650768-04:00", + "time": "2024-06-20T17:10:35.540664-04:00", "callContext": { - "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -669,7 +520,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -683,17 +534,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724685" + "parentID": "1718917824" }, "type": "callProgress", - "chatCompletionId": "1718724690", + "chatCompletionId": "1718917827", "usage": {}, - "content": "Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-18T11:31:38.65205-04:00", + "time": "2024-06-20T17:10:35.540967-04:00", "callContext": { - "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -708,7 +559,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -722,25 +573,25 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724685" + "parentID": "1718917824" }, "type": "callChat", - "chatCompletionId": "1718724690", + "chatCompletionId": "1718917827", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" } ], "usage": {} } }, { - "time": "2024-06-18T11:31:38.652106-04:00", + "time": "2024-06-20T17:10:35.541005-04:00", "callContext": { - "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -755,7 +606,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -769,23 +620,23 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724685" + "parentID": "1718917824" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-18T11:31:38.652143-04:00", + "time": "2024-06-20T17:10:35.541033-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -812,16 +663,16 @@ "usage": {} }, { - "time": "2024-06-18T11:31:38.847656-04:00", + "time": "2024-06-20T17:10:35.71784-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -844,34 +695,34 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718724691", + "chatCompletionId": "1718917828", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", "messages": [ { "role": "system", - "content": "Ask Bob how he is doing and let me know exactly what he said." + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." }, { "role": "assistant", "content": "", "tool_calls": [ { - "id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX", + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", "type": "function", "function": { "name": "bob", - "arguments": "{\"question\": \"how he is doing\"}" + "arguments": "{\"question\": \"how are you doing\"}" } } ] }, { "role": "tool", - "content": "Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!", + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", "name": "bob", - "tool_call_id": "toolu_01NJaY1g8kxqQkA4wbFTGtSX" + "tool_call_id": "toolu_01EEcv7qDLHDmGAzm15vobxM" } ], "temperature": 0, @@ -896,16 +747,16 @@ } }, { - "time": "2024-06-18T11:31:38.847937-04:00", + "time": "2024-06-20T17:10:35.718216-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -928,21 +779,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718724691", + "chatCompletionId": "1718917828", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-18T11:31:43.784059-04:00", + "time": "2024-06-20T17:10:39.50448-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -965,21 +816,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718724691", + "chatCompletionId": "1718917828", "usage": {}, - "content": "Bob said exactly: \"Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-18T11:31:43.785727-04:00", + "time": "2024-06-20T17:10:39.504769-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1002,29 +853,29 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718724691", + "chatCompletionId": "1718917828", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said exactly: \"Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!\"" + "text": "Bob replied: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" } ], "usage": {} } }, { - "time": "2024-06-18T11:31:43.785811-04:00", + "time": "2024-06-20T17:10:39.504807-04:00", "callContext": { - "id": "1718724685", + "id": "1718917824", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1048,10 +899,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said exactly: \"Thanks for asking \"how he is doing\", I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-18T11:31:43.785827-04:00", + "time": "2024-06-20T17:10:39.504821-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json index d3dd346b..01745e39 100644 --- a/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json +++ b/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-06-06T20:22:29.810249-04:00", + "time": "2024-06-20T17:08:06.902669-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-06T20:22:29.810505-04:00", + "time": "2024-06-20T17:08:06.902927-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", 
"internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -40,16 +40,16 @@ "usage": {} }, { - "time": "2024-06-06T20:22:30.00145-04:00", + "time": "2024-06-20T17:08:07.292073-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -72,14 +72,14 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, "chatRequest": { "model": "gpt-4-turbo-2024-04-09", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." } ], "temperature": 0, @@ -104,16 +104,16 @@ } }, { - "time": "2024-06-06T20:22:30.004971-04:00", + "time": "2024-06-20T17:08:07.292172-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -136,21 +136,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-06T20:22:30.810075-04:00", + "time": "2024-06-20T17:08:28.052253-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -173,21 +173,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"" }, { - "time": "2024-06-06T20:22:30.810238-04:00", + "time": "2024-06-20T17:08:28.05243-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -210,21 +210,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"" }, { - "time": "2024-06-06T20:22:30.844325-04:00", + "time": "2024-06-20T17:08:28.1369-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -247,21 +247,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" }, { - "time": "2024-06-06T20:22:30.844508-04:00", + "time": "2024-06-20T17:08:28.137013-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -284,21 +284,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" }, { - "time": "2024-06-06T20:22:30.895166-04:00", + "time": "2024-06-20T17:08:28.244585-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -321,21 +321,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" }, { - "time": 
"2024-06-06T20:22:30.895349-04:00", + "time": "2024-06-20T17:08:28.244731-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -358,21 +358,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" }, { - "time": "2024-06-06T20:22:30.945392-04:00", + "time": "2024-06-20T17:08:28.32623-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -395,21 +395,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" }, { - "time": "2024-06-06T20:22:30.945641-04:00", + "time": "2024-06-20T17:08:28.326358-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -432,21 +432,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:30.965686-04:00", + "time": "2024-06-20T17:08:28.326393-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -469,21 +469,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:30.965791-04:00", + "time": "2024-06-20T17:08:28.32645-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply 
exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -506,21 +506,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:30.966161-04:00", + "time": "2024-06-20T17:08:28.326527-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -543,58 +543,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:30.966215-04:00", + "time": "2024-06-20T17:08:28.327843-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719751", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" - }, - { - "time": "2024-06-06T20:22:30.968104-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -617,11 +580,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719751", + "chatCompletionId": "1718917688", "usage": { - "promptTokens": 143, - "completionTokens": 18, - "totalTokens": 161 + "promptTokens": 142, + "completionTokens": 17, + "totalTokens": 159 }, "chatResponse": { "role": "assistant", @@ -629,32 +592,32 @@ { "toolCall": { "index": 0, - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "function": { "name": "bob", - "arguments": "{\"question\":\"How are you doing?\"}" + "arguments": "{\"question\":\"how are you doing\"}" } } } ], "usage": { - "promptTokens": 143, - "completionTokens": 18, - "totalTokens": 161 + "promptTokens": 142, + "completionTokens": 17, + "totalTokens": 159 } } }, { - "time": "2024-06-06T20:22:30.968398-04:00", + "time": "2024-06-20T17:08:28.328046-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and 
let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -677,18 +640,18 @@ "inputContext": null }, "toolSubCalls": { - "call_IfovJOlHh0TrPkSiric0ZYDO": { + "call_vsmL6EoDecm0oVmUnHIvNkaL": { "toolID": "testdata/Bob/test.gpt:bob", - "input": "{\"question\":\"How are you doing?\"}" + "input": "{\"question\":\"how are you doing\"}" } }, "type": "callSubCalls", "usage": {} }, { - "time": "2024-06-06T20:22:30.968468-04:00", + "time": "2024-06-20T17:08:28.328123-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -703,7 +666,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -717,16 +680,16 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callStart", "usage": {}, - "content": "{\"question\":\"How are you doing?\"}" + "content": "{\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:31.152693-04:00", + "time": "2024-06-20T17:08:28.53993-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -741,7 +704,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -755,30 +718,30 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callChat", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, "chatRequest": { "model": "gpt-4-turbo-2024-04-09", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" }, { "role": "user", - "content": "{\"question\":\"How are you 
doing?\"}" + "content": "{\"question\":\"how are you doing\"}" } ], "temperature": 0 } }, { - "time": "2024-06-06T20:22:31.152842-04:00", + "time": "2024-06-20T17:08:28.540154-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -793,7 +756,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -807,17 +770,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-06T20:22:31.639639-04:00", + "time": "2024-06-20T17:08:29.188341-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -832,7 +795,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -846,17 +809,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for" + "content": "Thanks" }, { - "time": "2024-06-06T20:22:31.639789-04:00", + "time": "2024-06-20T17:08:29.188493-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -871,7 +834,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -885,17 +848,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for" + "content": "Thanks" }, { - "time": "2024-06-06T20:22:31.639837-04:00", + "time": "2024-06-20T17:08:29.244545-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -910,7 +873,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", 
"id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -924,17 +887,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, "content": "Thanks for asking" }, { - "time": "2024-06-06T20:22:31.639868-04:00", + "time": "2024-06-20T17:08:29.244765-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -949,7 +912,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -963,17 +926,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, "content": "Thanks for asking" }, { - "time": "2024-06-06T20:22:31.935618-04:00", + "time": "2024-06-20T17:08:29.643951-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -988,7 +951,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1002,17 +965,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How" + "content": "Thanks for asking how are" }, { - "time": "2024-06-06T20:22:31.935952-04:00", + "time": "2024-06-20T17:08:29.644128-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1027,7 +990,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1041,17 +1004,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How" + "content": "Thanks for asking how are" }, { - "time": "2024-06-06T20:22:31.94189-04:00", + "time": "2024-06-20T17:08:29.676951-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1066,7 +1029,7 @@ 
}, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1080,17 +1043,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing" + "content": "Thanks for asking how are you doing," }, { - "time": "2024-06-06T20:22:31.942075-04:00", + "time": "2024-06-20T17:08:29.677047-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1105,7 +1068,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1119,17 +1082,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing" + "content": "Thanks for asking how are you doing," }, { - "time": "2024-06-06T20:22:31.942142-04:00", + "time": "2024-06-20T17:08:29.677123-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1144,7 +1107,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1158,17 +1121,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing" + "content": "Thanks for asking how are you doing, I" }, { - "time": "2024-06-06T20:22:31.942199-04:00", + "time": "2024-06-20T17:08:29.677156-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1183,7 +1146,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1197,17 +1160,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + 
"parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I" + "content": "Thanks for asking how are you doing, I'm" }, { - "time": "2024-06-06T20:22:31.942311-04:00", + "time": "2024-06-20T17:08:29.677184-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1222,7 +1185,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1236,17 +1199,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm" + "content": "Thanks for asking how are you doing, I'm doing" }, { - "time": "2024-06-06T20:22:31.942374-04:00", + "time": "2024-06-20T17:08:29.677251-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1261,7 +1224,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1275,17 +1238,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm" + "content": "Thanks for asking how are you doing, I'm doing great" }, { - "time": "2024-06-06T20:22:31.942437-04:00", + "time": "2024-06-20T17:08:29.677288-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1300,7 +1263,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1314,17 +1277,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing" + "content": "Thanks for asking how are you doing, I'm doing great" }, { - "time": "2024-06-06T20:22:31.94249-04:00", + "time": "2024-06-20T17:08:29.727848-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": 
"call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1339,7 +1302,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1353,17 +1316,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" }, { - "time": "2024-06-06T20:22:31.94254-04:00", + "time": "2024-06-20T17:08:29.727983-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1378,7 +1341,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1392,17 +1355,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" }, { - "time": "2024-06-06T20:22:31.942616-04:00", + "time": "2024-06-20T17:08:29.782554-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1417,7 +1380,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1431,17 +1394,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:22:31.977094-04:00", + "time": "2024-06-20T17:08:29.782738-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1456,7 +1419,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1470,17 +1433,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:31.977257-04:00", + "time": "2024-06-20T17:08:29.782806-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1495,7 +1458,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1509,17 +1472,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:31.977342-04:00", + "time": "2024-06-20T17:08:29.782839-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1534,7 +1497,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1548,17 +1511,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:22:31.977401-04:00", + "time": "2024-06-20T17:08:29.782868-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1573,7 +1536,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1587,17 +1550,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callProgress", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:31.977493-04:00", + "time": "2024-06-20T17:08:29.782988-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1612,7 +1575,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1626,72 +1589,33 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" - }, - "type": "callProgress", - "chatCompletionId": "1717719752", - "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-06T20:22:31.977695-04:00", - "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callChat", - "chatCompletionId": "1717719752", + "chatCompletionId": "1718917689", "usage": { "promptTokens": 124, - "completionTokens": 18, - "totalTokens": 142 + "completionTokens": 17, + "totalTokens": 141 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
} ], "usage": { "promptTokens": 124, - "completionTokens": 18, - "totalTokens": 142 + "completionTokens": 17, + "totalTokens": 141 } } }, { - "time": "2024-06-06T20:22:31.977777-04:00", + "time": "2024-06-20T17:08:29.783047-04:00", "callContext": { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1706,7 +1630,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1720,23 +1644,23 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719750" + "parentID": "1718917687" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:31.977832-04:00", + "time": "2024-06-20T17:08:29.783089-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1763,16 +1687,16 @@ "usage": {} }, { - "time": "2024-06-06T20:22:32.175015-04:00", + "time": "2024-06-20T17:08:29.966093-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1795,34 +1719,34 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, "chatRequest": { "model": "gpt-4-turbo-2024-04-09", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
}, { "role": "assistant", "content": "", "tool_calls": [ { - "id": "call_IfovJOlHh0TrPkSiric0ZYDO", + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", "type": "function", "function": { "name": "bob", - "arguments": "{\"question\":\"How are you doing?\"}" + "arguments": "{\"question\":\"how are you doing\"}" } } ] }, { "role": "tool", - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!", "name": "bob", - "tool_call_id": "call_IfovJOlHh0TrPkSiric0ZYDO" + "tool_call_id": "call_vsmL6EoDecm0oVmUnHIvNkaL" } ], "temperature": 0, @@ -1847,16 +1771,16 @@ } }, { - "time": "2024-06-06T20:22:32.175386-04:00", + "time": "2024-06-20T17:08:29.966449-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1879,502 +1803,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-06T20:22:32.857803-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob" - }, - { - "time": "2024-06-06T20:22:32.858022-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said" - }, - { - "time": "2024-06-06T20:22:32.858079-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - 
"chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said" - }, - { - "time": "2024-06-06T20:22:32.858182-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"" - }, - { - "time": "2024-06-06T20:22:32.858219-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"" - }, - { - "time": "2024-06-06T20:22:32.858301-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"Thanks" - }, - { - "time": "2024-06-06T20:22:32.882645-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"Thanks for asking" - }, - { - "time": "2024-06-06T20:22:32.882856-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } 
- ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"Thanks for asking" - }, - { - "time": "2024-06-06T20:22:33.022865-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'How" - }, - { - "time": "2024-06-06T20:22:33.023013-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are" - }, - { - "time": "2024-06-06T20:22:33.023086-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are" - }, - { - "time": "2024-06-06T20:22:33.054293-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?'," - }, - { - "time": 
"2024-06-06T20:22:33.054407-04:00", - "callContext": { - "id": "1717719750", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719753", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?'," - }, - { - "time": "2024-06-06T20:22:33.054428-04:00", + "time": "2024-06-20T17:08:30.668641-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2397,21 +1840,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?'," + "content": "I'm" }, { - "time": "2024-06-06T20:22:33.054454-04:00", + "time": "2024-06-20T17:08:30.668802-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2434,21 +1877,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I" + "content": "I'm" }, { - "time": "2024-06-06T20:22:33.054499-04:00", + "time": "2024-06-20T17:08:30.668957-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2471,21 +1914,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing" + "content": "I'm" }, { - "time": "2024-06-06T20:22:33.054584-04:00", + "time": "2024-06-20T17:08:30.669089-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2508,21 +1951,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": 
"1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great" + "content": "I'm doing" }, { - "time": "2024-06-06T20:22:33.0546-04:00", + "time": "2024-06-20T17:08:30.669299-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2545,21 +1988,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great" + "content": "I'm doing great fellow" }, { - "time": "2024-06-06T20:22:33.097034-04:00", + "time": "2024-06-20T17:08:30.669392-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2582,21 +2025,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + "content": "I'm doing great fellow" }, { - "time": "2024-06-06T20:22:33.097195-04:00", + "time": "2024-06-20T17:08:30.716062-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2619,21 +2062,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + "content": "I'm doing great fellow friendly AI" }, { - "time": "2024-06-06T20:22:33.138931-04:00", + "time": "2024-06-20T17:08:30.7162-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2656,21 +2099,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI" }, { - "time": "2024-06-06T20:22:33.139013-04:00", + "time": "2024-06-20T17:08:30.743098-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + 
"instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2693,21 +2136,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:33.139056-04:00", + "time": "2024-06-20T17:08:30.743401-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2730,21 +2173,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:33.139073-04:00", + "time": "2024-06-20T17:08:30.74648-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2767,21 +2210,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:33.139194-04:00", + "time": "2024-06-20T17:08:30.746568-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2804,21 +2247,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:22:33.141011-04:00", + "time": "2024-06-20T17:08:30.746778-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2841,37 +2284,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719753", + "chatCompletionId": "1718917690", "usage": { - "promptTokens": 186, - "completionTokens": 23, - "totalTokens": 209 + "promptTokens": 183, + "completionTokens": 10, + "totalTokens": 193 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "text": "I'm doing great fellow friendly AI tool!" } ], "usage": { - "promptTokens": 186, - "completionTokens": 23, - "totalTokens": 209 + "promptTokens": 183, + "completionTokens": 10, + "totalTokens": 193 } } }, { - "time": "2024-06-06T20:22:33.141051-04:00", + "time": "2024-06-20T17:08:30.746856-04:00", "callContext": { - "id": "1717719750", + "id": "1718917687", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2895,10 +2338,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:22:33.141083-04:00", + "time": "2024-06-20T17:08:30.746896-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json index a3524b8c..4d22287d 100644 --- a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json +++ b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-06-06T20:21:47.098949-04:00", + "time": "2024-06-20T16:58:11.3174-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-06T20:21:47.099193-04:00", + "time": "2024-06-20T16:58:11.317644-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -40,16 +40,16 @@ "usage": {} }, { - "time": "2024-06-06T20:21:47.306808-04:00", + "time": "2024-06-20T16:58:11.638778-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -72,14 +72,14 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, "chatRequest": { "model": "gpt-4o-2024-05-13", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." } ], "temperature": 0, @@ -104,16 +104,16 @@ } }, { - "time": "2024-06-06T20:21:47.307426-04:00", + "time": "2024-06-20T16:58:11.639016-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -136,21 +136,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-06T20:21:47.801272-04:00", + "time": "2024-06-20T16:58:12.564724-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -173,21 +173,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"" }, { - "time": "2024-06-06T20:21:47.802783-04:00", + "time": "2024-06-20T16:58:12.564911-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -210,21 +210,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question" }, { - "time": "2024-06-06T20:21:47.802866-04:00", + "time": "2024-06-20T16:58:12.564948-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -247,21 +247,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + "content": "\u003ctool call\u003e bob -\u003e {\"question" }, { - "time": "2024-06-06T20:21:47.802893-04:00", + "time": "2024-06-20T16:58:12.56497-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -284,58 +284,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" }, { - "time": "2024-06-06T20:21:47.802911-04:00", - "callContext": { - "id": "1717719708", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719709", - "usage": {}, - "content": "\u003ctool call\u003e bob 
-\u003e {\"question\":\"How" - }, - { - "time": "2024-06-06T20:21:47.802929-04:00", + "time": "2024-06-20T16:58:12.564995-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -358,21 +321,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how" }, { - "time": "2024-06-06T20:21:47.809391-04:00", + "time": "2024-06-20T16:58:12.565045-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -395,21 +358,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you" }, { - "time": "2024-06-06T20:21:47.809508-04:00", + "time": "2024-06-20T16:58:12.565071-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -432,21 +395,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you" }, { - "time": "2024-06-06T20:21:47.821939-04:00", + "time": "2024-06-20T16:58:12.565112-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -469,21 +432,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:21:47.822028-04:00", + "time": "2024-06-20T16:58:12.565137-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his 
reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -506,21 +469,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:21:47.862117-04:00", + "time": "2024-06-20T16:58:12.56516-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -543,21 +506,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:21:47.862256-04:00", + "time": "2024-06-20T16:58:12.565176-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -580,21 +543,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:21:47.864138-04:00", + "time": "2024-06-20T16:58:12.565397-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -617,11 +580,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719709", + "chatCompletionId": "1718917093", "usage": { - "promptTokens": 139, - "completionTokens": 18, - "totalTokens": 157 + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 }, "chatResponse": { "role": "assistant", @@ -629,32 +592,32 @@ { "toolCall": { "index": 0, - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "function": { "name": "bob", - "arguments": "{\"question\":\"How are you doing?\"}" + "arguments": "{\"question\":\"how are you doing\"}" } } } ], "usage": { - "promptTokens": 139, - "completionTokens": 18, - "totalTokens": 157 + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 } } }, { - "time": "2024-06-06T20:21:47.864426-04:00", + "time": "2024-06-20T16:58:12.565644-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he 
said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -677,18 +640,18 @@ "inputContext": null }, "toolSubCalls": { - "call_rziWxxpDmGb1tzIullRJT6zj": { + "call_rTx93wiIASDA8uk8XHwjVmCC": { "toolID": "testdata/Bob/test.gpt:bob", - "input": "{\"question\":\"How are you doing?\"}" + "input": "{\"question\":\"how are you doing\"}" } }, "type": "callSubCalls", "usage": {} }, { - "time": "2024-06-06T20:21:47.864526-04:00", + "time": "2024-06-20T16:58:12.565728-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -703,7 +666,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -717,16 +680,16 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callStart", "usage": {}, - "content": "{\"question\":\"How are you doing?\"}" + "content": "{\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:21:48.045651-04:00", + "time": "2024-06-20T16:58:12.72779-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -741,7 +704,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -755,30 +718,30 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callChat", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, "chatRequest": { "model": "gpt-4o-2024-05-13", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" }, { "role": "user", - "content": "{\"question\":\"How are you doing?\"}" + "content": 
"{\"question\":\"how are you doing\"}" } ], "temperature": 0 } }, { - "time": "2024-06-06T20:21:48.045992-04:00", + "time": "2024-06-20T16:58:12.728069-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -793,7 +756,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -807,17 +770,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-06T20:21:48.378921-04:00", + "time": "2024-06-20T16:58:13.077264-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -832,7 +795,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -846,17 +809,16 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", - "usage": {}, - "content": "Thanks" + "chatCompletionId": "1718917094", + "usage": {} }, { - "time": "2024-06-06T20:21:48.379124-04:00", + "time": "2024-06-20T16:58:13.077534-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -871,7 +833,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -885,17 +847,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, "content": "Thanks" }, { - "time": "2024-06-06T20:21:48.380438-04:00", + "time": "2024-06-20T16:58:13.134723-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -910,7 +872,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": 
"testdata/Bob/test.gpt:", @@ -924,17 +886,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, "content": "Thanks for asking" }, { - "time": "2024-06-06T20:21:48.380536-04:00", + "time": "2024-06-20T16:58:13.134893-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -949,7 +911,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -963,17 +925,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, "content": "Thanks for asking" }, { - "time": "2024-06-06T20:21:48.486384-04:00", + "time": "2024-06-20T16:58:13.230591-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -988,7 +950,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1002,17 +964,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How" + "content": "Thanks for asking \"how" }, { - "time": "2024-06-06T20:21:48.486644-04:00", + "time": "2024-06-20T16:58:13.230667-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1027,7 +989,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1041,17 +1003,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How" + "content": "Thanks for asking \"how" }, { - "time": "2024-06-06T20:21:48.500327-04:00", + "time": "2024-06-20T16:58:13.246344-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1066,7 +1028,7 @@ }, "type": "object" }, - "instructions": "When asked how I 
am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1080,17 +1042,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you" + "content": "Thanks for asking \"how are you" }, { - "time": "2024-06-06T20:21:48.500442-04:00", + "time": "2024-06-20T16:58:13.246468-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1105,7 +1067,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1119,17 +1081,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing" + "content": "Thanks for asking \"how are you doing" }, { - "time": "2024-06-06T20:21:48.500489-04:00", + "time": "2024-06-20T16:58:13.246531-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1144,7 +1106,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1158,17 +1120,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing" + "content": "Thanks for asking \"how are you doing" }, { - "time": "2024-06-06T20:21:48.500526-04:00", + "time": "2024-06-20T16:58:13.246592-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1183,7 +1145,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1197,17 +1159,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": 
"1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm" + "content": "Thanks for asking \"how are you doing\", I'm" }, { - "time": "2024-06-06T20:21:48.500599-04:00", + "time": "2024-06-20T16:58:13.246645-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1222,7 +1184,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1236,17 +1198,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing" + "content": "Thanks for asking \"how are you doing\", I'm doing" }, { - "time": "2024-06-06T20:21:48.500634-04:00", + "time": "2024-06-20T16:58:13.246736-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1261,7 +1223,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1275,17 +1237,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great" + "content": "Thanks for asking \"how are you doing\", I'm doing great" }, { - "time": "2024-06-06T20:21:48.500672-04:00", + "time": "2024-06-20T16:58:13.246796-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1300,7 +1262,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1314,17 +1276,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great" + "content": "Thanks for asking \"how are you doing\", I'm doing great" }, { - "time": "2024-06-06T20:21:48.521814-04:00", + "time": "2024-06-20T16:58:13.30169-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { 
"name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1339,7 +1301,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1353,17 +1315,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly" }, { - "time": "2024-06-06T20:21:48.521956-04:00", + "time": "2024-06-20T16:58:13.301837-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1378,7 +1340,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1392,17 +1354,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly" }, { - "time": "2024-06-06T20:21:48.597924-04:00", + "time": "2024-06-20T16:58:13.31565-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1417,7 +1379,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1431,17 +1393,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-06T20:21:48.598155-04:00", + "time": "2024-06-20T16:58:13.315798-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1456,7 +1418,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": 
"When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1470,17 +1432,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:21:48.598237-04:00", + "time": "2024-06-20T16:58:13.315842-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1495,7 +1457,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1509,17 +1471,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:21:48.598304-04:00", + "time": "2024-06-20T16:58:13.315879-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1534,7 +1496,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1548,17 +1510,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:21:48.598371-04:00", + "time": "2024-06-20T16:58:13.315951-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1573,7 +1535,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1587,17 +1549,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callProgress", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:21:48.598692-04:00", + "time": "2024-06-20T16:58:13.316055-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1612,7 +1574,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1626,10 +1588,10 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callChat", - "chatCompletionId": "1717719710", + "chatCompletionId": "1718917094", "usage": { "promptTokens": 122, "completionTokens": 17, @@ -1639,7 +1601,7 @@ "role": "assistant", "content": [ { - "text": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" } ], "usage": { @@ -1650,9 +1612,9 @@ } }, { - "time": "2024-06-06T20:21:48.598808-04:00", + "time": "2024-06-20T16:58:13.316115-04:00", "callContext": { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1667,7 +1629,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1681,23 +1643,23 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719708" + "parentID": "1718917092" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:21:48.59889-04:00", + "time": "2024-06-20T16:58:13.316171-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1724,16 +1686,16 @@ "usage": {} }, { - "time": "2024-06-06T20:21:48.789457-04:00", + "time": "2024-06-20T16:58:13.533625-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1756,34 +1718,34 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, "chatRequest": { "model": "gpt-4o-2024-05-13", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." }, { "role": "assistant", "content": "", "tool_calls": [ { - "id": "call_rziWxxpDmGb1tzIullRJT6zj", + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", "type": "function", "function": { "name": "bob", - "arguments": "{\"question\":\"How are you doing?\"}" + "arguments": "{\"question\":\"how are you doing\"}" } } ] }, { "role": "tool", - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!", + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", "name": "bob", - "tool_call_id": "call_rziWxxpDmGb1tzIullRJT6zj" + "tool_call_id": "call_rTx93wiIASDA8uk8XHwjVmCC" } ], "temperature": 0, @@ -1808,16 +1770,16 @@ } }, { - "time": "2024-06-06T20:21:48.789942-04:00", + "time": "2024-06-20T16:58:13.53384-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1840,58 +1802,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-06T20:21:49.135224-04:00", - "callContext": { - "id": "1717719708", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719711", - "usage": {}, - "content": "Bob" - }, - { - "time": "2024-06-06T20:21:49.135428-04:00", + "time": "2024-06-20T16:58:13.856349-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1914,132 +1839,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob" - }, - { - "time": "2024-06-06T20:21:49.163995-04:00", - "callContext": { - "id": "1717719708", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719711", - "usage": {}, - "content": "Bob said" - }, - { - "time": "2024-06-06T20:21:49.164256-04:00", - "callContext": { - "id": "1717719708", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719711", - "usage": {}, - "content": "Bob said:" - }, - { - "time": "2024-06-06T20:21:49.192967-04:00", - "callContext": { - "id": "1717719708", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": 
"callProgress", - "chatCompletionId": "1717719711", - "usage": {}, - "content": "Bob said: \"Thanks" + "content": "Thanks" }, { - "time": "2024-06-06T20:21:49.193161-04:00", + "time": "2024-06-20T16:58:13.856437-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2062,21 +1876,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks" + "content": "Thanks" }, { - "time": "2024-06-06T20:21:49.235659-04:00", + "time": "2024-06-20T16:58:13.874317-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2099,21 +1913,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking" + "content": "Thanks for asking" }, { - "time": "2024-06-06T20:21:49.235818-04:00", + "time": "2024-06-20T16:58:13.874428-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2136,21 +1950,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking" + "content": "Thanks for asking" }, { - "time": "2024-06-06T20:21:49.322521-04:00", + "time": "2024-06-20T16:58:14.060243-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2173,21 +1987,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How" + "content": "Thanks for asking \"how are" }, { - "time": "2024-06-06T20:21:49.322702-04:00", + "time": "2024-06-20T16:58:14.060366-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2210,21 +2024,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - 
"content": "Bob said: \"Thanks for asking 'How are" + "content": "Thanks for asking \"how are" }, { - "time": "2024-06-06T20:21:49.322768-04:00", + "time": "2024-06-20T16:58:14.060418-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2247,21 +2061,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are" + "content": "Thanks for asking \"how are" }, { - "time": "2024-06-06T20:21:49.35099-04:00", + "time": "2024-06-20T16:58:14.060435-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2284,21 +2098,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?'," + "content": "Thanks for asking \"how are you" }, { - "time": "2024-06-06T20:21:49.35118-04:00", + "time": "2024-06-20T16:58:14.060456-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2321,21 +2135,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?'," + "content": "Thanks for asking \"how are you doing" }, { - "time": "2024-06-06T20:21:49.351215-04:00", + "time": "2024-06-20T16:58:14.060521-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2358,21 +2172,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?'," + "content": "Thanks for asking \"how are you doing\", I'm doing" }, { - "time": "2024-06-06T20:21:49.351246-04:00", + "time": "2024-06-20T16:58:14.060555-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2395,21 +2209,21 @@ 
"inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm" + "content": "Thanks for asking \"how are you doing\", I'm doing" }, { - "time": "2024-06-06T20:21:49.351271-04:00", + "time": "2024-06-20T16:58:14.060577-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2432,21 +2246,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing" + "content": "Thanks for asking \"how are you doing\", I'm doing" }, { - "time": "2024-06-06T20:21:49.35129-04:00", + "time": "2024-06-20T16:58:14.06061-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2469,21 +2283,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" }, { - "time": "2024-06-06T20:21:49.382283-04:00", + "time": "2024-06-20T16:58:14.060626-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2506,21 +2320,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" }, { - "time": "2024-06-06T20:21:49.382442-04:00", + "time": "2024-06-20T16:58:14.060686-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2543,21 +2357,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" }, { - "time": "2024-06-06T20:21:49.418198-04:00", + "time": "2024-06-20T16:58:14.06071-04:00", "callContext": { - "id": 
"1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2580,21 +2394,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-06T20:21:49.41831-04:00", + "time": "2024-06-20T16:58:14.060727-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2617,21 +2431,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-06T20:21:49.418924-04:00", + "time": "2024-06-20T16:58:14.060743-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2654,21 +2468,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:21:49.419001-04:00", + "time": "2024-06-20T16:58:14.060788-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2691,21 +2505,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:21:49.419101-04:00", + "time": "2024-06-20T16:58:14.060806-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2728,21 +2542,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:21:49.42086-04:00", + "time": "2024-06-20T16:58:14.061001-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2765,37 +2579,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917095", "usage": { - "promptTokens": 181, - "completionTokens": 22, - "totalTokens": 203 + "promptTokens": 179, + "completionTokens": 18, + "totalTokens": 197 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" } ], "usage": { - "promptTokens": 181, - "completionTokens": 22, - "totalTokens": 203 + "promptTokens": 179, + "completionTokens": 18, + "totalTokens": 197 } } }, { - "time": "2024-06-06T20:21:49.42093-04:00", + "time": "2024-06-20T16:58:14.061031-04:00", "callContext": { - "id": "1717719708", + "id": "1718917092", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2819,10 +2633,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said: \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:21:49.42097-04:00", + "time": "2024-06-20T16:58:14.061047-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json index a7f0ce37..f7341a7b 100644 --- a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json +++ b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-06-06T20:57:45.68913-04:00", + "time": "2024-06-20T17:10:39.294578-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-06T20:57:45.689454-04:00", + "time": "2024-06-20T17:10:39.294835-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -40,16 +40,16 @@ "usage": {} }, { - "time": "2024-06-06T20:57:45.875198-04:00", + "time": "2024-06-20T17:10:39.501107-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -72,14 +72,14 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717721867", + "chatCompletionId": "1718917841", "usage": {}, "chatRequest": { "model": "mistral-large-2402", "messages": [ { "role": "system", - "content": "Ask Bob how he is doing and let me know exactly what he said." + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." } ], "temperature": 0, @@ -104,16 +104,16 @@ } }, { - "time": "2024-06-06T20:57:45.875466-04:00", + "time": "2024-06-20T17:10:39.501246-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -136,21 +136,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721867", + "chatCompletionId": "1718917841", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-06T20:57:46.081212-04:00", + "time": "2024-06-20T17:10:40.154068-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -173,20 +173,20 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721867", + "chatCompletionId": "1718917841", "usage": {} }, { - "time": "2024-06-06T20:57:46.831234-04:00", + "time": "2024-06-20T17:10:40.786117-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -209,21 +209,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721867", + "chatCompletionId": "1718917841", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" }, { - "time": "2024-06-06T20:57:46.832764-04:00", + "time": "2024-06-20T17:10:40.786643-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -246,11 +246,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717721867", + "chatCompletionId": "1718917841", "usage": { - "promptTokens": 86, + "promptTokens": 85, "completionTokens": 23, - "totalTokens": 109 + "totalTokens": 108 }, "chatResponse": { "role": "assistant", @@ -258,32 +258,32 @@ { "toolCall": { "index": 0, - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "function": { "name": "bob", - "arguments": "{\"question\": \"How are you doing?\"}" + "arguments": "{\"question\": \"how are you doing\"}" } } } ], "usage": { - "promptTokens": 86, + "promptTokens": 85, "completionTokens": 23, - "totalTokens": 109 + "totalTokens": 108 } } }, { - "time": "2024-06-06T20:57:46.833114-04:00", + "time": "2024-06-20T17:10:40.78704-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -306,18 +306,18 @@ "inputContext": null }, "toolSubCalls": { - "7sPqe5ERc": { + "WWpUyFBHH": { "toolID": "testdata/Bob/test.gpt:bob", - "input": "{\"question\": \"How are you doing?\"}" + "input": "{\"question\": \"how are you doing\"}" } }, "type": "callSubCalls", "usage": {} }, { - "time": "2024-06-06T20:57:46.833185-04:00", + "time": "2024-06-20T17:10:40.787133-04:00", "callContext": { - "id": 
"7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -332,7 +332,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -346,16 +346,16 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callStart", "usage": {}, - "content": "{\"question\": \"How are you doing?\"}" + "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-06-06T20:57:47.015577-04:00", + "time": "2024-06-20T17:10:40.94973-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -370,7 +370,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -384,30 +384,30 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callChat", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, "chatRequest": { "model": "mistral-large-2402", "messages": [ { "role": "system", - "content": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + "content": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" }, { "role": "user", - "content": "{\"question\": \"How are you doing?\"}" + "content": "{\"question\": \"how are you doing\"}" } ], "temperature": 0 } }, { - "time": "2024-06-06T20:57:47.015912-04:00", + "time": "2024-06-20T17:10:40.950046-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -422,7 +422,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -436,17 +436,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-06T20:57:47.354264-04:00", + "time": "2024-06-20T17:10:41.218814-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -461,7 +461,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -475,16 +475,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", - "usage": {} + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks" }, { - "time": "2024-06-06T20:57:47.403644-04:00", + "time": "2024-06-20T17:10:41.218971-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -499,7 +500,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -513,17 +514,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, "content": "Thanks" }, { - "time": "2024-06-06T20:57:47.429265-04:00", + "time": "2024-06-20T17:10:41.242129-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -538,7 +539,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -552,17 +553,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, "content": "Thanks for" }, { - "time": "2024-06-06T20:57:47.444354-04:00", + "time": "2024-06-20T17:10:41.278815-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -577,7 +578,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -591,17 +592,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", 
"usage": {}, "content": "Thanks for asking" }, { - "time": "2024-06-06T20:57:47.470971-04:00", + "time": "2024-06-20T17:10:41.298557-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -616,7 +617,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -630,56 +631,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, "content": "Thanks for asking \"" }, { - "time": "2024-06-06T20:57:47.492704-04:00", - "callContext": { - "id": "7sPqe5ERc", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1717721866" - }, - "type": "callProgress", - "chatCompletionId": "1717721868", - "usage": {}, - "content": "Thanks for asking \"How" - }, - { - "time": "2024-06-06T20:57:47.508504-04:00", + "time": "2024-06-20T17:10:41.329684-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -694,7 +656,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -708,17 +670,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are" + "content": "Thanks for asking \"how" }, { - "time": "2024-06-06T20:57:47.529197-04:00", + "time": "2024-06-20T17:10:41.36213-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -733,7 +695,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -747,17 +709,17 @@ }, 
"inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you" + "content": "Thanks for asking \"how are" }, { - "time": "2024-06-06T20:57:47.547729-04:00", + "time": "2024-06-20T17:10:41.404159-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -772,7 +734,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -786,17 +748,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing" + "content": "Thanks for asking \"how are you" }, { - "time": "2024-06-06T20:57:47.567629-04:00", + "time": "2024-06-20T17:10:41.443596-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -811,7 +773,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -825,17 +787,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?" 
+ "content": "Thanks for asking \"how are you doing" }, { - "time": "2024-06-06T20:57:47.58774-04:00", + "time": "2024-06-20T17:10:41.46035-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -850,7 +812,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -864,17 +826,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\"," + "content": "Thanks for asking \"how are you doing\"," }, { - "time": "2024-06-06T20:57:47.607371-04:00", + "time": "2024-06-20T17:10:41.479186-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -889,7 +851,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -903,17 +865,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I" + "content": "Thanks for asking \"how are you doing\", I" }, { - "time": "2024-06-06T20:57:47.627325-04:00", + "time": "2024-06-20T17:10:41.508921-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -928,7 +890,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -942,17 +904,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'" + "content": "Thanks for asking \"how are you doing\", I'" }, { - "time": "2024-06-06T20:57:47.647745-04:00", + "time": "2024-06-20T17:10:41.538159-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -967,7 +929,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly 
AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -981,17 +943,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm" + "content": "Thanks for asking \"how are you doing\", I'm" }, { - "time": "2024-06-06T20:57:47.666556-04:00", + "time": "2024-06-20T17:10:41.578073-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1006,7 +968,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1020,17 +982,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing" + "content": "Thanks for asking \"how are you doing\", I'm doing" }, { - "time": "2024-06-06T20:57:47.686828-04:00", + "time": "2024-06-20T17:10:41.59766-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1045,7 +1007,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1059,17 +1021,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great" + "content": "Thanks for asking \"how are you doing\", I'm doing great" }, { - "time": "2024-06-06T20:57:47.706183-04:00", + "time": "2024-06-20T17:10:41.711868-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1084,7 +1046,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1098,17 +1060,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" }, { - "time": "2024-06-06T20:57:47.726267-04:00", 
+ "time": "2024-06-20T17:10:41.712039-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1123,7 +1085,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1137,17 +1099,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" }, { - "time": "2024-06-06T20:57:47.746525-04:00", + "time": "2024-06-20T17:10:41.712067-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1162,7 +1124,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1176,17 +1138,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" }, { - "time": "2024-06-06T20:57:47.765116-04:00", + "time": "2024-06-20T17:10:41.718662-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1201,7 +1163,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1215,17 +1177,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-06T20:57:47.786832-04:00", + "time": "2024-06-20T17:10:41.748288-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1240,7 +1202,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": 
"When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1254,17 +1216,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:57:47.81109-04:00", + "time": "2024-06-20T17:10:41.78608-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1279,7 +1241,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1293,17 +1255,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callProgress", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:57:47.812892-04:00", + "time": "2024-06-20T17:10:41.786165-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1318,7 +1280,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1332,33 +1294,33 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callChat", - "chatCompletionId": "1717721868", + "chatCompletionId": "1718917842", "usage": { "promptTokens": 41, - "completionTokens": 20, - "totalTokens": 61 + "completionTokens": 19, + "totalTokens": 60 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
} ], "usage": { "promptTokens": 41, - "completionTokens": 20, - "totalTokens": 61 + "completionTokens": 19, + "totalTokens": 60 } } }, { - "time": "2024-06-06T20:57:47.813082-04:00", + "time": "2024-06-20T17:10:41.7862-04:00", "callContext": { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -1373,7 +1335,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -1387,23 +1349,23 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721866" + "parentID": "1718917840" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:57:47.813184-04:00", + "time": "2024-06-20T17:10:41.78624-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1430,16 +1392,16 @@ "usage": {} }, { - "time": "2024-06-06T20:57:47.981851-04:00", + "time": "2024-06-20T17:10:41.957577-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1462,34 +1424,34 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, "chatRequest": { "model": "mistral-large-2402", "messages": [ { "role": "system", - "content": "Ask Bob how he is doing and let me know exactly what he said." + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
}, { "role": "assistant", "content": "", "tool_calls": [ { - "id": "7sPqe5ERc", + "id": "WWpUyFBHH", "type": "function", "function": { "name": "bob", - "arguments": "{\"question\": \"How are you doing?\"}" + "arguments": "{\"question\": \"how are you doing\"}" } } ] }, { "role": "tool", - "content": "Thanks for asking \"How are you doing?\", I'm doing great fellow friendly AI tool!", + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", "name": "bob", - "tool_call_id": "7sPqe5ERc" + "tool_call_id": "WWpUyFBHH" } ], "temperature": 0, @@ -1514,16 +1476,16 @@ } }, { - "time": "2024-06-06T20:57:47.982156-04:00", + "time": "2024-06-20T17:10:41.957749-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1546,21 +1508,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-06T20:57:48.11789-04:00", + "time": "2024-06-20T17:10:42.455931-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1583,57 +1545,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", - "usage": {} - }, - { - "time": "2024-06-06T20:57:48.189708-04:00", - "callContext": { - "id": "1717721866", - "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, "content": "Bob" }, { - "time": "2024-06-06T20:57:48.205425-04:00", + "time": "2024-06-20T17:10:42.45599-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1656,21 +1582,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said" + "content": "Bob replied" }, { - "time": "2024-06-06T20:57:48.227965-04:00", + "time": "2024-06-20T17:10:42.456033-04:00", 
"callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1693,21 +1619,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said," + "content": "Bob replied" }, { - "time": "2024-06-06T20:57:48.245576-04:00", + "time": "2024-06-20T17:10:42.475053-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1730,21 +1656,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"" + "content": "Bob replied," }, { - "time": "2024-06-06T20:57:48.266371-04:00", + "time": "2024-06-20T17:10:42.534667-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1767,21 +1693,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks" + "content": "Bob replied, \"" }, { - "time": "2024-06-06T20:57:48.285452-04:00", + "time": "2024-06-20T17:10:42.594649-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1804,21 +1730,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for" + "content": "Bob replied, \"Thanks" }, { - "time": "2024-06-06T20:57:48.306735-04:00", + "time": "2024-06-20T17:10:42.653279-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1841,21 +1767,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking" + "content": "Bob replied, \"Thanks for" }, { - "time": 
"2024-06-06T20:57:48.330511-04:00", + "time": "2024-06-20T17:10:42.760299-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1878,21 +1804,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking '" + "content": "Bob replied, \"Thanks for asking" }, { - "time": "2024-06-06T20:57:48.347556-04:00", + "time": "2024-06-20T17:10:42.774637-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1915,21 +1841,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How" + "content": "Bob replied, \"Thanks for asking '" }, { - "time": "2024-06-06T20:57:48.367554-04:00", + "time": "2024-06-20T17:10:42.835456-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1952,21 +1878,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are" + "content": "Bob replied, \"Thanks for asking 'how" }, { - "time": "2024-06-06T20:57:48.387523-04:00", + "time": "2024-06-20T17:10:42.889942-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -1989,21 +1915,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you" + "content": "Bob replied, \"Thanks for asking 'how are" }, { - "time": "2024-06-06T20:57:48.408257-04:00", + "time": "2024-06-20T17:10:42.951997-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2026,21 +1952,21 @@ 
"inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing" + "content": "Bob replied, \"Thanks for asking 'how are you" }, { - "time": "2024-06-06T20:57:48.428538-04:00", + "time": "2024-06-20T17:10:43.009024-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2063,21 +1989,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?" + "content": "Bob replied, \"Thanks for asking 'how are you doing" }, { - "time": "2024-06-06T20:57:48.448774-04:00", + "time": "2024-06-20T17:10:43.072963-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2100,21 +2026,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?'," + "content": "Bob replied, \"Thanks for asking 'how are you doing'," }, { - "time": "2024-06-06T20:57:48.468948-04:00", + "time": "2024-06-20T17:10:43.129687-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2137,21 +2063,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I" }, { - "time": "2024-06-06T20:57:48.488223-04:00", + "time": "2024-06-20T17:10:43.168387-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2174,21 +2100,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'" }, { - "time": "2024-06-06T20:57:48.508672-04:00", + "time": "2024-06-20T17:10:43.286476-04:00", "callContext": { - 
"id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2211,21 +2137,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm" }, { - "time": "2024-06-06T20:57:48.529176-04:00", + "time": "2024-06-20T17:10:43.286589-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2248,21 +2174,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great" }, { - "time": "2024-06-06T20:57:48.550465-04:00", + "time": "2024-06-20T17:10:43.286632-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2285,21 +2211,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great" }, { - "time": "2024-06-06T20:57:48.572031-04:00", + "time": "2024-06-20T17:10:43.323232-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2322,21 +2248,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow" }, { - "time": "2024-06-06T20:57:48.590327-04:00", + "time": "2024-06-20T17:10:43.3614-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob 
\"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2359,21 +2285,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly" }, { - "time": "2024-06-06T20:57:48.611919-04:00", + "time": "2024-06-20T17:10:43.402971-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2396,21 +2322,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI" }, { - "time": "2024-06-06T20:57:48.630583-04:00", + "time": "2024-06-20T17:10:43.442811-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2433,21 +2359,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-06T20:57:48.653492-04:00", + "time": "2024-06-20T17:10:43.477932-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2470,21 +2396,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-06T20:57:48.665778-04:00", + "time": "2024-06-20T17:10:43.524916-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": 
"testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2507,21 +2433,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-06T20:57:48.667516-04:00", + "time": "2024-06-20T17:10:43.525171-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2544,37 +2470,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717721869", + "chatCompletionId": "1718917843", "usage": { - "promptTokens": 145, - "completionTokens": 24, - "totalTokens": 169 + "promptTokens": 143, + "completionTokens": 23, + "totalTokens": 166 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "text": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" } ], "usage": { - "promptTokens": 145, - "completionTokens": 24, - "totalTokens": 169 + "promptTokens": 143, + "completionTokens": 23, + "totalTokens": 166 } } }, { - "time": "2024-06-06T20:57:48.667593-04:00", + "time": "2024-06-20T17:10:43.525284-04:00", "callContext": { - "id": "1717721866", + "id": "1718917840", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -2598,10 +2524,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-06T20:57:48.667619-04:00", + "time": "2024-06-20T17:10:43.525327-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/Bob/test.gpt b/pkg/tests/smoke/testdata/Bob/test.gpt index 2391b615..fe8ffb62 100644 --- a/pkg/tests/smoke/testdata/Bob/test.gpt +++ b/pkg/tests/smoke/testdata/Bob/test.gpt @@ -1,10 +1,10 @@ tools: bob -Ask Bob how he is doing and let me know exactly what he said. +Ask Bob "how are you doing" and repeat his reply exactly. --- name: bob description: I'm Bob, a friendly guy. args: question: The question to ask Bob. -When asked how I am doing, respond with "Thanks for asking "${question}", I'm doing great fellow friendly AI tool!" +When asked how I am doing, respond with exactly "Thanks for asking "${question}", I'm doing great fellow friendly AI tool!" 
diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json index 326aef09..7b0477fc 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-06-18T11:31:43.810792-04:00", + "time": "2024-06-20T17:10:39.522006-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-18T11:31:43.810985-04:00", + "time": "2024-06-20T17:10:39.522174-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -40,14 +40,14 @@ "usage": {} }, { - "time": "2024-06-18T11:31:44.177028-04:00", + "time": "2024-06-20T17:10:39.975716-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-18T11:31:44.177099-04:00", + "time": "2024-06-20T17:10:39.975965-04:00", "callContext": { - "id": "1718724705", + "id": "1718917841", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -91,9 +91,9 @@ "usage": {} }, { - "time": "2024-06-18T11:31:45.190929-04:00", + "time": "2024-06-20T17:10:40.990696-04:00", "callContext": { - "id": "1718724705", + "id": "1718917841", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -135,24 +135,24 @@ }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:11109" + "content": "http://127.0.0.1:11124" }, { - "time": "2024-06-18T11:31:45.19104-04:00", + "time": "2024-06-20T17:10:40.990787-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-06-18T11:31:45.191084-04:00", + "time": "2024-06-20T17:10:40.990853-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -175,14 +175,14 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718724706", + "chatCompletionId": "1718917842", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", "messages": [ { "role": "system", - "content": "Ask Bob how he is doing and let me know exactly what he said." + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
} ], "temperature": 0, @@ -207,16 +207,16 @@ } }, { - "time": "2024-06-18T11:31:45.19133-04:00", + "time": "2024-06-20T17:10:40.991247-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -239,21 +239,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718724706", + "chatCompletionId": "1718917842", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-18T11:31:54.917676-04:00", + "time": "2024-06-20T17:10:49.116225-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -276,21 +276,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718724706", + "chatCompletionId": "1718917842", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how he is doing\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" }, { - "time": "2024-06-18T11:31:54.917887-04:00", + "time": "2024-06-20T17:10:49.116466-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -313,7 +313,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718724706", + "chatCompletionId": "1718917842", "usage": {}, "chatResponse": { "role": "assistant", @@ -321,10 +321,10 @@ { "toolCall": { "index": 0, - "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", "function": { "name": "bob", - "arguments": "{\"question\": \"how he is doing\"}" + "arguments": "{\"question\": \"how are you doing\"}" } } } @@ -333,16 +333,16 @@ } }, { - "time": "2024-06-18T11:31:54.917988-04:00", + "time": "2024-06-20T17:10:49.116557-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -365,18 +365,18 @@ "inputContext": null }, "toolSubCalls": { - "toolu_01F3QeAp35HdxoBSEed1gaUJ": { + "toolu_01BzEjSj1HhTi52c1N9mS1jK": { "toolID": "testdata/BobAsShell/test.gpt:bob", - "input": "{\"question\": \"how he is doing\"}" + "input": "{\"question\": \"how are you doing\"}" } }, "type": 
"callSubCalls", "usage": {} }, { - "time": "2024-06-18T11:31:54.918017-04:00", + "time": "2024-06-20T17:10:49.116583-04:00", "callContext": { - "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -405,17 +405,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724704", + "parentID": "1718917840", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", "usage": {}, - "content": "{\"question\": \"how he is doing\"}" + "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-06-18T11:31:54.91845-04:00", + "time": "2024-06-20T17:10:49.116924-04:00", "callContext": { - "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -444,11 +444,11 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724704", + "parentID": "1718917840", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1718724707", + "chatCompletionId": "1718917843", "usage": {}, "chatRequest": { "model": "", @@ -456,9 +456,9 @@ } }, { - "time": "2024-06-18T11:31:54.922608-04:00", + "time": "2024-06-20T17:10:49.119266-04:00", "callContext": { - "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -487,20 +487,60 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724704", + "parentID": "1718917840", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T17:10:49.119489-04:00", + "callContext": { + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1718724707", + "chatCompletionId": "1718917843", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-06-18T11:31:54.922696-04:00", + "time": "2024-06-20T17:10:49.119539-04:00", "callContext": { - "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -529,24 +569,24 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1718724704", + "parentID": "1718917840", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking how he is doing, I'm doing great 
fellow friendly AI tool!\n" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-06-18T11:31:54.922726-04:00", + "time": "2024-06-20T17:10:49.119572-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -573,16 +613,16 @@ "usage": {} }, { - "time": "2024-06-18T11:31:55.096576-04:00", + "time": "2024-06-20T17:10:49.298305-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -605,34 +645,34 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718724708", + "chatCompletionId": "1718917844", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", "messages": [ { "role": "system", - "content": "Ask Bob how he is doing and let me know exactly what he said." + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." }, { "role": "assistant", "content": "", "tool_calls": [ { - "id": "toolu_01F3QeAp35HdxoBSEed1gaUJ", + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", "type": "function", "function": { "name": "bob", - "arguments": "{\"question\": \"how he is doing\"}" + "arguments": "{\"question\": \"how are you doing\"}" } } ] }, { "role": "tool", - "content": "Thanks for asking how he is doing, I'm doing great fellow friendly AI tool!\n", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", "name": "bob", - "tool_call_id": "toolu_01F3QeAp35HdxoBSEed1gaUJ" + "tool_call_id": "toolu_01BzEjSj1HhTi52c1N9mS1jK" } ], "temperature": 0, @@ -657,16 +697,16 @@ } }, { - "time": "2024-06-18T11:31:55.097061-04:00", + "time": "2024-06-20T17:10:49.298759-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -689,21 +729,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718724708", + "chatCompletionId": "1718917844", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-18T11:31:58.228157-04:00", + "time": "2024-06-20T17:10:51.580939-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -726,21 +766,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718724708", + "chatCompletionId": "1718917844", "usage": {}, - "content": "Bob said exactly: \"Thanks for asking how he is doing, I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-18T11:31:58.228613-04:00", + "time": "2024-06-20T17:10:51.581258-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -763,29 +803,29 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718724708", + "chatCompletionId": "1718917844", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said exactly: \"Thanks for asking how he is doing, I'm doing great fellow friendly AI tool!\"" + "text": "Bob replied: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" } ], "usage": {} } }, { - "time": "2024-06-18T11:31:58.228674-04:00", + "time": "2024-06-20T17:10:51.581281-04:00", "callContext": { - "id": "1718724704", + "id": "1718917840", "tool": { "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -809,10 +849,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said exactly: \"Thanks for asking how he is doing, I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-18T11:31:58.228686-04:00", + "time": "2024-06-20T17:10:51.581291-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json index f81bd202..5b2409f6 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-06-06T20:22:33.171056-04:00", + "time": "2024-06-20T17:08:30.778302-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-06T20:22:33.171333-04:00", + "time": "2024-06-20T17:08:30.778582-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": 
"gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -40,16 +40,16 @@ "usage": {} }, { - "time": "2024-06-06T20:22:33.371153-04:00", + "time": "2024-06-20T17:08:30.981266-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -72,14 +72,14 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, "chatRequest": { "model": "gpt-4-turbo-2024-04-09", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." } ], "temperature": 0, @@ -104,16 +104,16 @@ } }, { - "time": "2024-06-06T20:22:33.371528-04:00", + "time": "2024-06-20T17:08:30.981391-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -136,21 +136,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-06T20:22:34.086505-04:00", + "time": "2024-06-20T17:08:32.232987-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -173,21 +173,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question" + "content": "\u003ctool call\u003e bob -\u003e {\"" }, { - "time": "2024-06-06T20:22:34.086808-04:00", + "time": "2024-06-20T17:08:32.233265-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -210,21 +210,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question" + "content": "\u003ctool call\u003e bob -\u003e {\"" }, { - "time": "2024-06-06T20:22:34.08687-04:00", + "time": "2024-06-20T17:08:32.344744-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -247,58 +247,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question" - }, - { - "time": "2024-06-06T20:22:34.086895-04:00", - "callContext": { - "id": "1717719754", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" }, { - "time": "2024-06-06T20:22:34.109771-04:00", + "time": "2024-06-20T17:08:32.344882-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -321,21 +284,21 @@ "inputContext": null 
}, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" }, { - "time": "2024-06-06T20:22:34.109961-04:00", + "time": "2024-06-20T17:08:32.361676-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -358,21 +321,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" }, { - "time": "2024-06-06T20:22:34.154694-04:00", + "time": "2024-06-20T17:08:32.361793-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -395,21 +358,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" }, { - "time": "2024-06-06T20:22:34.154833-04:00", + "time": "2024-06-20T17:08:32.440498-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -432,21 +395,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" }, { - "time": "2024-06-06T20:22:34.181671-04:00", + "time": "2024-06-20T17:08:32.440743-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -469,21 +432,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:34.18183-04:00", + "time": "2024-06-20T17:08:32.440798-04:00", "callContext": { - "id": 
"1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -506,21 +469,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:34.181879-04:00", + "time": "2024-06-20T17:08:32.440836-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -543,21 +506,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:34.181915-04:00", + "time": "2024-06-20T17:08:32.440873-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -580,21 +543,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:34.182218-04:00", + "time": "2024-06-20T17:08:32.441115-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -617,11 +580,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719755", + "chatCompletionId": "1718917712", "usage": { - "promptTokens": 143, - "completionTokens": 18, - "totalTokens": 161 + "promptTokens": 142, + "completionTokens": 17, + "totalTokens": 159 }, "chatResponse": { "role": "assistant", @@ -629,32 +592,32 @@ { "toolCall": { "index": 0, - "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", "function": { "name": "bob", - "arguments": "{\"question\":\"How are you doing?\"}" + "arguments": "{\"question\":\"how are you doing\"}" } } } ], "usage": { - "promptTokens": 143, - "completionTokens": 18, - "totalTokens": 161 + "promptTokens": 142, + "completionTokens": 17, + "totalTokens": 159 } } }, { - 
"time": "2024-06-06T20:22:34.182557-04:00", + "time": "2024-06-20T17:08:32.441462-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -677,18 +640,18 @@ "inputContext": null }, "toolSubCalls": { - "call_LJKF6bO1dztkg01aWksN4RPJ": { + "call_slFgd2P2lMxXQoyrPbm2YsrQ": { "toolID": "testdata/BobAsShell/test.gpt:bob", - "input": "{\"question\":\"How are you doing?\"}" + "input": "{\"question\":\"how are you doing\"}" } }, "type": "callSubCalls", "usage": {} }, { - "time": "2024-06-06T20:22:34.182664-04:00", + "time": "2024-06-20T17:08:32.441542-04:00", "callContext": { - "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -717,17 +680,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719754", + "parentID": "1718917711", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", "usage": {}, - "content": "{\"question\":\"How are you doing?\"}" + "content": "{\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:22:34.184191-04:00", + "time": "2024-06-20T17:08:32.442736-04:00", "callContext": { - "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -756,11 +719,11 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719754", + "parentID": "1718917711", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1717719756", + "chatCompletionId": "1718917713", "usage": {}, "chatRequest": { "model": "", @@ -768,9 +731,49 @@ } }, { - "time": "2024-06-06T20:22:34.192956-04:00", + "time": "2024-06-20T17:08:32.448288-04:00", + "callContext": { + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917711", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callProgress", + "chatCompletionId": "1718917713", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T17:08:32.448728-04:00", "callContext": { - "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -799,20 +802,20 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719754", + "parentID": "1718917711", "displayText": "Running bob from 
testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1717719756", + "chatCompletionId": "1718917713", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-06-06T20:22:34.193263-04:00", + "time": "2024-06-20T17:08:32.448906-04:00", "callContext": { - "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -841,24 +844,24 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719754", + "parentID": "1718917711", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-06-06T20:22:34.193374-04:00", + "time": "2024-06-20T17:08:32.448977-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -885,16 +888,16 @@ "usage": {} }, { - "time": "2024-06-06T20:22:34.337399-04:00", + "time": "2024-06-20T17:08:32.624086-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -917,34 +920,34 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, "chatRequest": { "model": "gpt-4-turbo-2024-04-09", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
}, { "role": "assistant", "content": "", "tool_calls": [ { - "id": "call_LJKF6bO1dztkg01aWksN4RPJ", + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", "type": "function", "function": { "name": "bob", - "arguments": "{\"question\":\"How are you doing?\"}" + "arguments": "{\"question\":\"how are you doing\"}" } } ] }, { "role": "tool", - "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", "name": "bob", - "tool_call_id": "call_LJKF6bO1dztkg01aWksN4RPJ" + "tool_call_id": "call_slFgd2P2lMxXQoyrPbm2YsrQ" } ], "temperature": 0, @@ -969,16 +972,16 @@ } }, { - "time": "2024-06-06T20:22:34.337825-04:00", + "time": "2024-06-20T17:08:32.624367-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1001,169 +1004,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-06T20:22:35.110166-04:00", - "callContext": { - "id": "1717719754", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719757", - "usage": {}, - "content": "Bob" - }, - { - "time": "2024-06-06T20:22:35.110412-04:00", - "callContext": { - "id": "1717719754", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719757", - "usage": {}, - "content": "Bob" - }, - { - "time": "2024-06-06T20:22:35.147735-04:00", - "callContext": { - "id": "1717719754", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - 
"lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719757", - "usage": {}, - "content": "Bob said," - }, - { - "time": "2024-06-06T20:22:35.147815-04:00", - "callContext": { - "id": "1717719754", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719757", - "usage": {}, - "content": "Bob said," - }, - { - "time": "2024-06-06T20:22:35.245256-04:00", + "time": "2024-06-20T17:08:33.020025-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1186,21 +1041,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"" + "content": "I" }, { - "time": "2024-06-06T20:22:35.245466-04:00", + "time": "2024-06-20T17:08:33.020187-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1223,21 +1078,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I" + "content": "I" }, { - "time": "2024-06-06T20:22:35.324279-04:00", + "time": "2024-06-20T17:08:33.09047-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1260,21 +1115,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm" + "content": "I'm doing" }, { - "time": "2024-06-06T20:22:35.324425-04:00", + "time": "2024-06-20T17:08:33.090722-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1297,21 +1152,21 @@ 
"inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm doing" + "content": "I'm doing" }, { - "time": "2024-06-06T20:22:35.404608-04:00", + "time": "2024-06-20T17:08:33.150983-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1334,21 +1189,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm doing great fellow" + "content": "I'm doing great fellow" }, { - "time": "2024-06-06T20:22:35.404848-04:00", + "time": "2024-06-20T17:08:33.151128-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1371,21 +1226,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm doing great fellow friendly" + "content": "I'm doing great fellow" }, { - "time": "2024-06-06T20:22:35.404887-04:00", + "time": "2024-06-20T17:08:33.26424-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1408,21 +1263,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm doing great fellow friendly" + "content": "I'm doing great fellow friendly AI" }, { - "time": "2024-06-06T20:22:35.557862-04:00", + "time": "2024-06-20T17:08:33.264352-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1445,21 +1300,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm doing great fellow friendly AI tool" + "content": "I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-06T20:22:35.5581-04:00", + "time": "2024-06-20T17:08:33.264393-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his 
reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1482,21 +1337,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm doing great fellow friendly AI tool" + "content": "I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-06T20:22:35.558233-04:00", + "time": "2024-06-20T17:08:33.264427-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1519,21 +1374,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:35.558308-04:00", + "time": "2024-06-20T17:08:33.264492-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1556,21 +1411,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:35.558348-04:00", + "time": "2024-06-20T17:08:33.264575-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1593,21 +1448,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": {}, - "content": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:22:35.559946-04:00", + "time": "2024-06-20T17:08:33.264897-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1630,37 +1485,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719757", + "chatCompletionId": "1718917714", "usage": { - "promptTokens": 185, - "completionTokens": 14, - "totalTokens": 199 + "promptTokens": 183, + "completionTokens": 10, + "totalTokens": 193 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + "text": "I'm doing great fellow friendly AI tool!" } ], "usage": { - "promptTokens": 185, - "completionTokens": 14, - "totalTokens": 199 + "promptTokens": 183, + "completionTokens": 10, + "totalTokens": 193 } } }, { - "time": "2024-06-06T20:22:35.56022-04:00", + "time": "2024-06-20T17:08:33.264985-04:00", "callContext": { - "id": "1717719754", + "id": "1718917711", "tool": { "modelName": "gpt-4-turbo-2024-04-09", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1684,10 +1539,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said, \"I'm doing great fellow friendly AI tool!\"" + "content": "I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:22:35.560294-04:00", + "time": "2024-06-20T17:08:33.265021-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json index 3ce00112..f61edd7d 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-06-06T20:21:49.457487-04:00", + "time": "2024-06-20T16:58:14.093283-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-06T20:21:49.457771-04:00", + "time": "2024-06-20T16:58:14.093568-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -40,16 +40,16 @@ "usage": {} }, { - "time": "2024-06-06T20:21:49.67064-04:00", + "time": "2024-06-20T16:58:14.31069-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -72,14 +72,14 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, "chatRequest": { "model": 
"gpt-4o-2024-05-13", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." } ], "temperature": 0, @@ -104,16 +104,16 @@ } }, { - "time": "2024-06-06T20:21:49.670852-04:00", + "time": "2024-06-20T16:58:14.311071-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -136,21 +136,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-06T20:21:50.142412-04:00", + "time": "2024-06-20T16:58:14.807492-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -173,21 +173,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"" }, { - "time": "2024-06-06T20:21:50.142691-04:00", + "time": "2024-06-20T16:58:14.807779-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -210,21 +210,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"" }, { - "time": "2024-06-06T20:21:50.164329-04:00", + "time": "2024-06-20T16:58:14.832551-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -247,21 
+247,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" }, { - "time": "2024-06-06T20:21:50.164636-04:00", + "time": "2024-06-20T16:58:14.832684-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -284,21 +284,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" }, { - "time": "2024-06-06T20:21:50.182304-04:00", + "time": "2024-06-20T16:58:14.865368-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -321,21 +321,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" }, { - "time": "2024-06-06T20:21:50.182411-04:00", + "time": "2024-06-20T16:58:14.865484-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -358,21 +358,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" }, { - "time": "2024-06-06T20:21:50.210798-04:00", + "time": "2024-06-20T16:58:14.899511-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -395,21 +395,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" }, { - "time": "2024-06-06T20:21:50.210958-04:00", + "time": "2024-06-20T16:58:14.899668-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know 
exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -432,21 +432,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" }, { - "time": "2024-06-06T20:21:50.220648-04:00", + "time": "2024-06-20T16:58:14.900883-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -469,21 +469,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:21:50.220734-04:00", + "time": "2024-06-20T16:58:14.900938-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -506,21 +506,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:21:50.220753-04:00", + "time": "2024-06-20T16:58:14.900969-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -543,58 +543,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:21:50.220768-04:00", + "time": "2024-06-20T16:58:14.901222-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - 
"lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719711", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"How are you doing?\"}" - }, - { - "time": "2024-06-06T20:21:50.221021-04:00", - "callContext": { - "id": "1717719710", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -617,11 +580,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719711", + "chatCompletionId": "1718917096", "usage": { - "promptTokens": 139, - "completionTokens": 18, - "totalTokens": 157 + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 }, "chatResponse": { "role": "assistant", @@ -629,32 +592,32 @@ { "toolCall": { "index": 0, - "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", "function": { "name": "bob", - "arguments": "{\"question\":\"How are you doing?\"}" + "arguments": "{\"question\":\"how are you doing\"}" } } } ], "usage": { - "promptTokens": 139, - "completionTokens": 18, - "totalTokens": 157 + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 } } }, { - "time": "2024-06-06T20:21:50.221288-04:00", + "time": "2024-06-20T16:58:14.901521-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -677,18 +640,18 @@ "inputContext": null }, "toolSubCalls": { - "call_AZtZBeP4Ofv9CbW8Tiw066Cr": { + "call_PGLxooO6eBPt3eSEBCMkuWYN": { "toolID": "testdata/BobAsShell/test.gpt:bob", - "input": "{\"question\":\"How are you doing?\"}" + "input": "{\"question\":\"how are you doing\"}" } }, "type": "callSubCalls", "usage": {} }, { - "time": "2024-06-06T20:21:50.221358-04:00", + "time": "2024-06-20T16:58:14.901599-04:00", "callContext": { - "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -717,17 +680,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719710", + "parentID": "1718917095", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", "usage": {}, - "content": "{\"question\":\"How are you doing?\"}" + "content": "{\"question\":\"how are you doing\"}" }, { - "time": "2024-06-06T20:21:50.22312-04:00", + "time": "2024-06-20T16:58:14.90268-04:00", "callContext": { - "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -756,11 +719,11 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719710", + "parentID": "1718917095", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1717719712", + "chatCompletionId": "1718917097", "usage": {}, "chatRequest": { "model": "", @@ -768,9 +731,49 @@ } }, { - "time": "2024-06-06T20:21:50.231558-04:00", + "time": "2024-06-20T16:58:14.908538-04:00", + 
"callContext": { + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917095", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callProgress", + "chatCompletionId": "1718917097", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T16:58:14.908991-04:00", "callContext": { - "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -799,20 +802,20 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719710", + "parentID": "1718917095", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1717719712", + "chatCompletionId": "1718917097", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-06-06T20:21:50.232297-04:00", + "time": "2024-06-20T16:58:14.909126-04:00", "callContext": { - "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -841,24 +844,24 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717719710", + "parentID": "1718917095", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-06-06T20:21:50.232391-04:00", + "time": "2024-06-20T16:58:14.909293-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -885,16 +888,16 @@ "usage": {} }, { - "time": "2024-06-06T20:21:50.398083-04:00", + "time": "2024-06-20T16:58:15.10962-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -917,34 +920,34 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, "chatRequest": { "model": "gpt-4o-2024-05-13", "messages": [ { "role": "system", - "content": "\nYou are task oriented system.\nYou receive input 
from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob how he is doing and let me know exactly what he said." + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." }, { "role": "assistant", "content": "", "tool_calls": [ { - "id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr", + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", "type": "function", "function": { "name": "bob", - "arguments": "{\"question\":\"How are you doing?\"}" + "arguments": "{\"question\":\"how are you doing\"}" } } ] }, { "role": "tool", - "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", "name": "bob", - "tool_call_id": "call_AZtZBeP4Ofv9CbW8Tiw066Cr" + "tool_call_id": "call_PGLxooO6eBPt3eSEBCMkuWYN" } ], "temperature": 0, @@ -969,16 +972,16 @@ } }, { - "time": "2024-06-06T20:21:50.398429-04:00", + "time": "2024-06-20T16:58:15.110087-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1001,169 +1004,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-06T20:21:50.873677-04:00", - "callContext": { - "id": "1717719710", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719713", - "usage": {}, - "content": "Bob" - }, - { - "time": "2024-06-06T20:21:50.873906-04:00", - "callContext": { - "id": "1717719710", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719713", - "usage": {}, - "content": "Bob" - }, - { - "time": "2024-06-06T20:21:50.875164-04:00", - "callContext": { - "id": "1717719710", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719713", - "usage": {}, - "content": "Bob said:" - }, - { - "time": "2024-06-06T20:21:50.875259-04:00", - "callContext": { - "id": "1717719710", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717719713", - "usage": {}, - "content": "Bob said: \"" - }, - { - "time": "2024-06-06T20:21:50.875299-04:00", + "time": "2024-06-20T16:58:15.629792-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and 
repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1186,21 +1041,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"" + "content": "Thanks" }, { - "time": "2024-06-06T20:21:50.875379-04:00", + "time": "2024-06-20T16:58:15.629968-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1223,21 +1078,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for" + "content": "Thanks" }, { - "time": "2024-06-06T20:21:50.875553-04:00", + "time": "2024-06-20T16:58:15.749206-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1260,21 +1115,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking" + "content": "Thanks for" }, { - "time": "2024-06-06T20:21:50.875607-04:00", + "time": "2024-06-20T16:58:15.749324-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1297,21 +1152,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking" + "content": "Thanks for asking" }, { - "time": "2024-06-06T20:21:50.945695-04:00", + "time": "2024-06-20T16:58:15.780062-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1334,21 +1189,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are" + "content": "Thanks for asking how are" }, { - "time": "2024-06-06T20:21:50.945912-04:00", + "time": "2024-06-20T16:58:15.780171-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": 
"testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1371,21 +1226,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are" + "content": "Thanks for asking how are you" }, { - "time": "2024-06-06T20:21:50.973355-04:00", + "time": "2024-06-20T16:58:15.780293-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1408,21 +1263,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?," + "content": "Thanks for asking how are you doing" }, { - "time": "2024-06-06T20:21:50.973495-04:00", + "time": "2024-06-20T16:58:15.780335-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1445,21 +1300,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?," + "content": "Thanks for asking how are you doing" }, { - "time": "2024-06-06T20:21:50.973577-04:00", + "time": "2024-06-20T16:58:15.780406-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1482,21 +1337,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm" + "content": "Thanks for asking how are you doing, I'm" }, { - "time": "2024-06-06T20:21:50.973604-04:00", + "time": "2024-06-20T16:58:15.780445-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1519,21 +1374,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm" + "content": "Thanks for asking how are you doing, I'm" }, { - "time": "2024-06-06T20:21:50.973625-04:00", + "time": "2024-06-20T16:58:15.780477-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, 
"tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1556,21 +1411,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing" + "content": "Thanks for asking how are you doing, I'm doing" }, { - "time": "2024-06-06T20:21:50.973646-04:00", + "time": "2024-06-20T16:58:15.780511-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1593,21 +1448,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great" + "content": "Thanks for asking how are you doing, I'm doing great" }, { - "time": "2024-06-06T20:21:50.994229-04:00", + "time": "2024-06-20T16:58:15.816742-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1630,21 +1485,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" }, { - "time": "2024-06-06T20:21:50.994426-04:00", + "time": "2024-06-20T16:58:15.816889-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1667,21 +1522,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" }, { - "time": "2024-06-06T20:21:51.017952-04:00", + "time": "2024-06-20T16:58:15.859699-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1704,21 +1559,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": 
"1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-06T20:21:51.018129-04:00", + "time": "2024-06-20T16:58:15.859764-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1741,21 +1596,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:21:51.020818-04:00", + "time": "2024-06-20T16:58:15.859784-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1778,21 +1633,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:21:51.020913-04:00", + "time": "2024-06-20T16:58:15.859841-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1815,21 +1670,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-06T20:21:51.020955-04:00", + "time": "2024-06-20T16:58:15.85986-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1852,21 +1707,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:21:51.02285-04:00", + "time": "2024-06-20T16:58:15.860819-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1889,37 +1744,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717719713", + "chatCompletionId": "1718917098", "usage": { - "promptTokens": 180, - "completionTokens": 21, - "totalTokens": 201 + "promptTokens": 178, + "completionTokens": 17, + "totalTokens": 195 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" } ], "usage": { - "promptTokens": 180, - "completionTokens": 21, - "totalTokens": 201 + "promptTokens": 178, + "completionTokens": 17, + "totalTokens": 195 } } }, { - "time": "2024-06-06T20:21:51.022991-04:00", + "time": "2024-06-20T16:58:15.860872-04:00", "callContext": { - "id": "1717719710", + "id": "1718917095", "tool": { "modelName": "gpt-4o-2024-05-13", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1943,10 +1798,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said: \"Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-06T20:21:51.023016-04:00", + "time": "2024-06-20T16:58:15.860919-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json index 7eb9a414..b3e54dc4 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-06-06T20:57:48.704491-04:00", + "time": "2024-06-20T17:10:43.55694-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-06T20:57:48.70479-04:00", + "time": "2024-06-20T17:10:43.557263-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -40,16 +40,16 @@ "usage": {} }, { - "time": "2024-06-06T20:57:48.929935-04:00", + "time": "2024-06-20T17:10:43.805494-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -72,14 +72,14 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717721870", + "chatCompletionId": "1718917845", "usage": {}, "chatRequest": { "model": "mistral-large-2402", "messages": [ { "role": "system", - "content": "Ask Bob how he is doing and let me know exactly what he said." + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." } ], "temperature": 0, @@ -104,16 +104,16 @@ } }, { - "time": "2024-06-06T20:57:48.930412-04:00", + "time": "2024-06-20T17:10:43.805682-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -136,21 +136,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721870", + "chatCompletionId": "1718917845", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-06T20:57:49.11122-04:00", + "time": "2024-06-20T17:10:44.078279-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -173,20 +173,20 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721870", + "chatCompletionId": "1718917845", "usage": {} }, { - "time": "2024-06-06T20:57:49.641536-04:00", + "time": "2024-06-20T17:10:44.913816-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -209,21 +209,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721870", + "chatCompletionId": "1718917845", "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"How are you doing?\"}" + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" }, { - "time": "2024-06-06T20:57:49.642104-04:00", + "time": "2024-06-20T17:10:44.914845-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -246,11 +246,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717721870", + "chatCompletionId": "1718917845", "usage": { - "promptTokens": 86, + "promptTokens": 85, "completionTokens": 23, - "totalTokens": 109 + "totalTokens": 108 }, "chatResponse": { "role": "assistant", @@ -258,32 +258,32 @@ { "toolCall": { "index": 0, - "id": "SJR7ytkCE", + "id": "UPthtEfla", "function": { "name": "bob", - "arguments": "{\"question\": \"How are you doing?\"}" + "arguments": "{\"question\": \"how are you doing\"}" } } } ], "usage": { - "promptTokens": 86, + "promptTokens": 85, "completionTokens": 23, - "totalTokens": 109 + "totalTokens": 108 } } }, { - "time": "2024-06-06T20:57:49.642359-04:00", + "time": "2024-06-20T17:10:44.915086-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -306,18 +306,18 @@ "inputContext": null }, "toolSubCalls": { - "SJR7ytkCE": { + "UPthtEfla": { "toolID": "testdata/BobAsShell/test.gpt:bob", - "input": "{\"question\": \"How are you doing?\"}" + "input": "{\"question\": \"how are you doing\"}" } }, "type": "callSubCalls", "usage": {} }, { - "time": "2024-06-06T20:57:49.64244-04:00", + "time": 
"2024-06-20T17:10:44.915147-04:00", "callContext": { - "id": "SJR7ytkCE", + "id": "UPthtEfla", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -346,17 +346,17 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721869", + "parentID": "1718917844", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", "usage": {}, - "content": "{\"question\": \"How are you doing?\"}" + "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-06-06T20:57:49.643547-04:00", + "time": "2024-06-20T17:10:44.916248-04:00", "callContext": { - "id": "SJR7ytkCE", + "id": "UPthtEfla", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -385,11 +385,11 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721869", + "parentID": "1718917844", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1717721871", + "chatCompletionId": "1718917846", "usage": {}, "chatRequest": { "model": "", @@ -397,9 +397,9 @@ } }, { - "time": "2024-06-06T20:57:49.651156-04:00", + "time": "2024-06-20T17:10:44.92245-04:00", "callContext": { - "id": "SJR7ytkCE", + "id": "UPthtEfla", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -428,20 +428,60 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721869", + "parentID": "1718917844", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callProgress", + "chatCompletionId": "1718917846", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T17:10:44.922905-04:00", + "callContext": { + "id": "UPthtEfla", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917844", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1717721871", + "chatCompletionId": "1718917846", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-06-06T20:57:49.651393-04:00", + "time": "2024-06-20T17:10:44.922998-04:00", "callContext": { - "id": "SJR7ytkCE", + "id": "UPthtEfla", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -470,24 +510,24 @@ }, "inputContext": null, "toolName": "bob", - "parentID": "1717721869", + "parentID": "1718917844", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-06-06T20:57:49.651461-04:00", + "time": "2024-06-20T17:10:44.92306-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": 
"mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -514,16 +554,16 @@ "usage": {} }, { - "time": "2024-06-06T20:57:49.851552-04:00", + "time": "2024-06-20T17:10:45.091313-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -546,34 +586,34 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, "chatRequest": { "model": "mistral-large-2402", "messages": [ { "role": "system", - "content": "Ask Bob how he is doing and let me know exactly what he said." + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." }, { "role": "assistant", "content": "", "tool_calls": [ { - "id": "SJR7ytkCE", + "id": "UPthtEfla", "type": "function", "function": { "name": "bob", - "arguments": "{\"question\": \"How are you doing?\"}" + "arguments": "{\"question\": \"how are you doing\"}" } } ] }, { "role": "tool", - "content": "Thanks for asking How are you doing?, I'm doing great fellow friendly AI tool!\n", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", "name": "bob", - "tool_call_id": "SJR7ytkCE" + "tool_call_id": "UPthtEfla" } ], "temperature": 0, @@ -598,16 +638,16 @@ } }, { - "time": "2024-06-06T20:57:49.851831-04:00", + "time": "2024-06-20T17:10:45.091762-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -630,21 +670,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-06T20:57:50.017629-04:00", + "time": "2024-06-20T17:10:45.427766-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -667,94 +707,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", - "usage": {} - }, - { - "time": "2024-06-06T20:57:50.060377-04:00", - "callContext": { - "id": "1717721869", - "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, "content": "Bob" }, { - "time": "2024-06-06T20:57:50.084824-04:00", - "callContext": { - "id": "1717721869", - "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1717721872", - "usage": {}, - "content": "Bob said" - }, - { - "time": "2024-06-06T20:57:50.104197-04:00", + "time": "2024-06-20T17:10:45.427886-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -777,21 +744,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said," + "content": "Bob replied" }, { - "time": "2024-06-06T20:57:50.126794-04:00", + "time": "2024-06-20T17:10:45.427938-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -814,21 +781,21 @@ 
"inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"" + "content": "Bob replied," }, { - "time": "2024-06-06T20:57:50.147974-04:00", + "time": "2024-06-20T17:10:45.427998-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -851,21 +818,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks" + "content": "Bob replied, \"" }, { - "time": "2024-06-06T20:57:50.17264-04:00", + "time": "2024-06-20T17:10:45.428017-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -888,21 +855,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for" + "content": "Bob replied, \"" }, { - "time": "2024-06-06T20:57:50.194235-04:00", + "time": "2024-06-20T17:10:45.428046-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -925,21 +892,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking" + "content": "Bob replied, \"Thanks" }, { - "time": "2024-06-06T20:57:50.218937-04:00", + "time": "2024-06-20T17:10:45.434307-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -962,21 +929,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking '" + "content": "Bob replied, \"Thanks for" }, { - "time": "2024-06-06T20:57:50.239766-04:00", + "time": "2024-06-20T17:10:45.462196-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and 
repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -999,21 +966,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How" + "content": "Bob replied, \"Thanks for asking" }, { - "time": "2024-06-06T20:57:50.261815-04:00", + "time": "2024-06-20T17:10:45.496748-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1036,21 +1003,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are" + "content": "Bob replied, \"Thanks for asking how" }, { - "time": "2024-06-06T20:57:50.28776-04:00", + "time": "2024-06-20T17:10:45.516868-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1073,21 +1040,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you" + "content": "Bob replied, \"Thanks for asking how are" }, { - "time": "2024-06-06T20:57:50.307171-04:00", + "time": "2024-06-20T17:10:45.545808-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1110,21 +1077,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing" + "content": "Bob replied, \"Thanks for asking how are you" }, { - "time": "2024-06-06T20:57:50.330839-04:00", + "time": "2024-06-20T17:10:45.572091-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1147,21 +1114,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?" 
+ "content": "Bob replied, \"Thanks for asking how are you doing" }, { - "time": "2024-06-06T20:57:50.352459-04:00", + "time": "2024-06-20T17:10:45.602668-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1184,21 +1151,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?'," + "content": "Bob replied, \"Thanks for asking how are you doing," }, { - "time": "2024-06-06T20:57:50.375291-04:00", + "time": "2024-06-20T17:10:45.627618-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1221,21 +1188,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I" + "content": "Bob replied, \"Thanks for asking how are you doing, I" }, { - "time": "2024-06-06T20:57:50.398407-04:00", + "time": "2024-06-20T17:10:45.659445-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1258,21 +1225,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'" + "content": "Bob replied, \"Thanks for asking how are you doing, I'" }, { - "time": "2024-06-06T20:57:50.421233-04:00", + "time": "2024-06-20T17:10:45.682109-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1295,21 +1262,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm" }, { - "time": "2024-06-06T20:57:50.443362-04:00", + "time": "2024-06-20T17:10:45.710817-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], 
- "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1332,21 +1299,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing" }, { - "time": "2024-06-06T20:57:50.466617-04:00", + "time": "2024-06-20T17:10:45.743363-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1369,21 +1336,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great" }, { - "time": "2024-06-06T20:57:50.488545-04:00", + "time": "2024-06-20T17:10:45.766321-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1406,21 +1373,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow" }, { - "time": "2024-06-06T20:57:50.514471-04:00", + "time": "2024-06-20T17:10:45.801585-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1443,21 +1410,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly" }, { - "time": "2024-06-06T20:57:50.533973-04:00", + "time": "2024-06-20T17:10:46.066262-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": 
"testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1480,21 +1447,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI" }, { - "time": "2024-06-06T20:57:50.55935-04:00", + "time": "2024-06-20T17:10:46.093598-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1517,21 +1484,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-06T20:57:50.579532-04:00", + "time": "2024-06-20T17:10:46.118737-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1554,21 +1521,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-06T20:57:50.603725-04:00", + "time": "2024-06-20T17:10:46.146712-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1591,21 +1558,21 @@ "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-06T20:57:50.604733-04:00", + "time": "2024-06-20T17:10:46.146789-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", 
"toolMapping": { "bob": [ @@ -1628,37 +1595,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1717721872", + "chatCompletionId": "1718917847", "usage": { - "promptTokens": 145, - "completionTokens": 24, - "totalTokens": 169 + "promptTokens": 144, + "completionTokens": 22, + "totalTokens": 166 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "text": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" } ], "usage": { - "promptTokens": 145, - "completionTokens": 24, - "totalTokens": 169 + "promptTokens": 144, + "completionTokens": 22, + "totalTokens": 166 } } }, { - "time": "2024-06-06T20:57:50.604844-04:00", + "time": "2024-06-20T17:10:46.146827-04:00", "callContext": { - "id": "1717721869", + "id": "1718917844", "tool": { "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob how he is doing and let me know exactly what he said.", + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -1682,10 +1649,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said, \"Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!\"" + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-06T20:57:50.604875-04:00", + "time": "2024-06-20T17:10:46.146844-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/test.gpt b/pkg/tests/smoke/testdata/BobAsShell/test.gpt index ca726b09..f04920bf 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/test.gpt +++ b/pkg/tests/smoke/testdata/BobAsShell/test.gpt @@ -1,7 +1,7 @@ tools: bob -Ask Bob how he is doing and let me know exactly what he said. +Ask Bob "how are you doing" and repeat his reply exactly. --- name: bob From 02cbed0004f2072b17e79a672ac577b5417a72f3 Mon Sep 17 00:00:00 2001 From: Craig Jellick Date: Thu, 20 Jun 2024 12:09:40 -0700 Subject: [PATCH 10/22] docs: add FAQ about caching Signed-off-by: Craig Jellick --- docs/docs/09-faqs.md | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/docs/docs/09-faqs.md b/docs/docs/09-faqs.md index a06e5818..c26418e9 100644 --- a/docs/docs/09-faqs.md +++ b/docs/docs/09-faqs.md @@ -1,11 +1,31 @@ # FAQs -#### I don't have Homebrew, how can I install GPTScript? +### I don't have Homebrew, how can I install GPTScript? On MacOS and Linux, you can alternatively install via: `curl https://get.gptscript.ai/install.sh | sh` On all supported systems, you download and install the archive for your platform and architecture from the [releases page](https://github.com/gptscript-ai/gptscript/releases). -#### Does GPTScript have an SDK or API I can program against? +### Does GPTScript have an SDK or API I can program against? Currently, there are three SDKs being maintained: [Python](https://github.com/gptscript-ai/py-gptscript), [Node](https://github.com/gptscript-ai/node-gptscript), and [Go](https://github.com/gptscript-ai/go-gptscript). They are currently under development and are being iterated on relatively rapidly. The READMEs in each repository contain the most up-to-date documentation for the functionality of each. + +### I see there's a --disable-cache flag. 
How does caching work in GPTScript?
+
+GPTScript leverages caching to speed up execution and reduce LLM costs. There are two areas cached by GPTScript:
+- Git commit hash lookups for tools
+- LLM responses
+
+Caching is enabled for both of these by default. It can be disabled via the `--disable-cache` flag. Below is an explanation of how these areas behave when caching is enabled and disabled.
+
+#### Git commit hash lookups for tools
+
+When a remote tool or context is included in your script (like so: `Tools: github.com/gptscript-ai/browser`) and then invoked during script execution, GPTScript will pull the Git repo for that tool and build it. The tool’s repo and build will be stored in your system’s cache directory (at [$XDG_CACHE_HOME](https://pkg.go.dev/os#UserCacheDir)/gptscript/repos). Subsequent invocations of the tool leverage that cache. When the cache is enabled, GPTScript will only check for a newer version of the tool once an hour; if an hour hasn’t passed since the last check, it will just use the one it has. If this is the first invocation and the tool doesn’t yet exist in the cache, it will be pulled and built as normal.
+
+When the cache is disabled, GPTScript will check that it has the latest version of the tool (meaning the latest git commit for the repo) on every single invocation of the tool. If GPTScript determines it already has the latest version, that build will be used as-is. In other words, disabling the cache DOES NOT force GPTScript to rebuild the tool, it only forces GPTScript to always check if it has the latest version.
+
+#### LLM responses
+
+With regards to LLM responses, when the cache is enabled GPTScript will cache the LLM’s response to a chat completion request. Each response is stored as a gob-encoded file in $XDG_CACHE_HOME/gptscript, where the file name is a hash of the chat completion request.
+
+It is important to note that all [messages in chat completion request](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) are used to generate the hash that is used as the file name. This means that every message between user and LLM affects the cache lookup. So, when using GPTScript in chat mode, it is very unlikely you’ll receive a cached LLM response. Conversely, non-chat GPTScript automations are much more likely to be consistent and thus make use of cached LLM responses.
\ No newline at end of file
From 9aa0a8ab5eb809e2de759f265cd037da9889d0f5 Mon Sep 17 00:00:00 2001 From: Craig Jellick Date: Thu, 20 Jun 2024 12:55:06 -0700 Subject: [PATCH 11/22] docs: update generated cli docs Signed-off-by: Craig Jellick --- docs/docs/04-command-line-reference/gptscript.md | 2 +- docs/docs/04-command-line-reference/gptscript_eval.md | 2 +- docs/docs/04-command-line-reference/gptscript_fmt.md | 2 +- docs/docs/04-command-line-reference/gptscript_parse.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md index 8c5d1459..4da342fb 100644 --- a/docs/docs/04-command-line-reference/gptscript.md +++ b/docs/docs/04-command-line-reference/gptscript.md @@ -30,7 +30,7 @@ gptscript [flags] PROGRAM_FILE [INPUT...]
--force-chat Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT) --force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL) -h, --help help for gptscript - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) --list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS) --list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS) --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) diff --git a/docs/docs/04-command-line-reference/gptscript_eval.md b/docs/docs/04-command-line-reference/gptscript_eval.md index 94710662..4c485c7c 100644 --- a/docs/docs/04-command-line-reference/gptscript_eval.md +++ b/docs/docs/04-command-line-reference/gptscript_eval.md @@ -38,7 +38,7 @@ gptscript eval [flags] --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) diff --git a/docs/docs/04-command-line-reference/gptscript_fmt.md b/docs/docs/04-command-line-reference/gptscript_fmt.md index c4e37856..511132d9 100644 --- a/docs/docs/04-command-line-reference/gptscript_fmt.md +++ b/docs/docs/04-command-line-reference/gptscript_fmt.md @@ -32,7 +32,7 @@ gptscript fmt [flags] --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) diff --git a/docs/docs/04-command-line-reference/gptscript_parse.md b/docs/docs/04-command-line-reference/gptscript_parse.md index d2322a48..3dde0073 100644 --- a/docs/docs/04-command-line-reference/gptscript_parse.md +++ b/docs/docs/04-command-line-reference/gptscript_parse.md @@ -32,7 +32,7 @@ gptscript parse [flags] --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) From 748ce21c3c13019e5953edfeea6e40347298eaeb Mon Sep 17 00:00:00 2001 From: Craig Jellick Date: Thu, 20 Jun 2024 12:56:13 -0700 Subject: [PATCH 12/22] chore: run docs validation on all prs The docs validation was restricted to prs where the docs directory was changed, but this misses the primary use case where the cli help was updated without the corresponding docs page being updated. Signed-off-by: Craig Jellick Update docs/docs/09-faqs.md Co-authored-by: Nick Hale <4175918+njhale@users.noreply.github.com> Signed-off-by: Craig Jellick --- .github/workflows/validate-docs.yaml | 6 +----- docs/docs/09-faqs.md | 1 + 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/.github/workflows/validate-docs.yaml b/.github/workflows/validate-docs.yaml index 82ca4bf4..b017af94 100644 --- a/.github/workflows/validate-docs.yaml +++ b/.github/workflows/validate-docs.yaml @@ -1,13 +1,9 @@ name: Validate docs build on: push: - paths: - - docs/** branches: - main pull_request: - paths: - - docs/** branches: - main @@ -23,4 +19,4 @@ jobs: cache: false go-version: "1.22" - run: make init-docs - - run: make validate-docs \ No newline at end of file + - run: make validate-docs diff --git a/docs/docs/09-faqs.md b/docs/docs/09-faqs.md index c26418e9..8e3f91d8 100644 --- a/docs/docs/09-faqs.md +++ b/docs/docs/09-faqs.md @@ -1,6 +1,7 @@ # FAQs ### I don't have Homebrew, how can I install GPTScript? + On MacOS and Linux, you can alternatively install via: `curl https://get.gptscript.ai/install.sh | sh` On all supported systems, you download and install the archive for your platform and architecture from the [releases page](https://github.com/gptscript-ai/gptscript/releases). ### Does GPTScript have an SDK or API I can program against? Currently, there are three SDKs being maintained: [Python](https://github.com/gptscript-ai/py-gptscript), [Node](https://github.com/gptscript-ai/node-gptscript), and [Go](https://github.com/gptscript-ai/go-gptscript). They are currently under development and are being iterated on relatively rapidly. The READMEs in each repository contain the most up-to-date documentation for the functionality of each. From 7f17683135acf5f38e9bbddec5e969aac23b18aa Mon Sep 17 00:00:00 2001 From: Craig Jellick Date: Thu, 20 Jun 2024 15:25:10 -0700 Subject: [PATCH 13/22] docs: add FAQ for workspace Signed-off-by: Craig Jellick --- docs/docs/09-faqs.md | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/docs/docs/09-faqs.md b/docs/docs/09-faqs.md index 8e3f91d8..b849e1d0 100644 --- a/docs/docs/09-faqs.md +++ b/docs/docs/09-faqs.md @@ -29,4 +29,27 @@ When the cache is disabled, GPTScript will check that it has the latest version With regards to LLM responses, when the cache is enabled GPTScript will cache the LLM’s response to a chat completion request. Each response is stored as a gob-encoded file in $XDG_CACHE_HOME/gptscript, where the file name is a hash of the chat completion request. -It is important to note that all [messages in chat completion request](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) are used to generate the hash that is used as the file name. This means that every message between user and LLM affects the cache lookup. So, when using GPTScript in chat mode, it is very unlikely you’ll receive a cached LLM response. Conversely, non-chat GPTScript automations are much more likely to be consistent and thus make use of cached LLM responses.
\ No newline at end of file
+It is important to note that all [messages in chat completion request](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) are used to generate the hash that is used as the file name. This means that every message between user and LLM affects the cache lookup. So, when using GPTScript in chat mode, it is very unlikely you’ll receive a cached LLM response. Conversely, non-chat GPTScript automations are much more likely to be consistent and thus make use of cached LLM responses.
+
+### I see there's a --workspace flag. How do I make use of that?
+
+Every invocation of GPTScript has a workspace directory available to it. By default, this directory is a one-off temp directory, but you can override this and explicitly set a workspace using the `--workspace` flag, like so:
+```
+gptscript --workspace . my-script.gpt
+```
+In the above example, the user’s current directory (denoted by `.`) will be set as the workspace. Both absolute and relative paths are supported.
+
+Regardless of whether it is set implicitly or explicitly, the workspace is then made available to the script execution as the `GPTSCRIPT_WORKSPACE_DIR` environment variable.
+
+:::info
+GPTScript does not force scripts or tools to write to, read from, or otherwise use the workspace. The tools must decide to make use of the workspace environment variable.
+:::
+
+To make prompt-based tools workspace aware, you can add our workspace context, like so:
+```
+Context: github.com/gptscript-ai/context/workspace
+```
+This tells the LLM (by way of a [system message](https://platform.openai.com/docs/guides/text-generation/chat-completions-api)) what the workspace directory is, what its initial contents are, and that if it decides to create a file or directory, it should do so in the workspace directory. This will not, however, have any impact on code-based tools (i.e. python, bash, or go tools). Such tools will have the `GPTSCRIPT_WORKSPACE_DIR` environment variable available to them, but they must be written in such a way that they make use of it.
+
+This context also automatically shares the `sys.ls`, `sys.read`, and `sys.write` tools with the tool that is using it as a context. This is because if a tool intends to interact with the workspace, it minimally needs these tools.
+ From 7f5bfb6adeebfb5fcd572a8b89acf9594091d668 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 20 Jun 2024 13:49:21 -0700 Subject: [PATCH 14/22] feat: add input filters --- pkg/engine/cmd.go | 3 +- pkg/engine/engine.go | 3 +- pkg/openai/client.go | 9 +- pkg/parser/parser.go | 11 ++- pkg/parser/parser_test.go | 24 +++++ pkg/runner/input.go | 27 ++++++ pkg/runner/runner.go | 34 +++++--- pkg/tests/runner_test.go | 25 ++++++ .../testdata/TestInput/call1-resp.golden | 9 ++ pkg/tests/testdata/TestInput/call1.golden | 24 +++++ .../testdata/TestInput/call2-resp.golden | 9 ++ pkg/tests/testdata/TestInput/call2.golden | 42 +++++++++ pkg/tests/testdata/TestInput/step1.golden | 47 ++++++++++ pkg/tests/testdata/TestInput/step2.golden | 65 ++++++++++++++ pkg/tests/testdata/TestInput/test.gpt | 23 +++++ pkg/types/set.go | 2 +- pkg/types/tool.go | 87 ++++++++++++++----- pkg/types/tool_test.go | 46 +++++----- 18 files changed, 428 insertions(+), 62 deletions(-) create mode 100644 pkg/runner/input.go create mode 100644 pkg/tests/testdata/TestInput/call1-resp.golden create mode 100644 pkg/tests/testdata/TestInput/call1.golden create mode 100644 pkg/tests/testdata/TestInput/call2-resp.golden create mode 100644 pkg/tests/testdata/TestInput/call2.golden create mode 100644 pkg/tests/testdata/TestInput/step1.golden create mode 100644 pkg/tests/testdata/TestInput/step2.golden create mode 100644 pkg/tests/testdata/TestInput/test.gpt diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 3707205e..e0ea5e3a 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -185,6 +185,8 @@ func appendInputAsEnv(env []string, input string) []string { dec := json.NewDecoder(bytes.NewReader([]byte(input))) dec.UseNumber() + env = appendEnv(env, "GPTSCRIPT_INPUT", input) + if err := json.Unmarshal([]byte(input), &data); err != nil { // ignore invalid JSON return env @@ -206,7 +208,6 @@ func appendInputAsEnv(env []string, input string) []string { } } - env = appendEnv(env, "GPTSCRIPT_INPUT", input) return env } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 2dd17b1f..9dd8c741 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -98,6 +98,7 @@ const ( ProviderToolCategory ToolCategory = "provider" CredentialToolCategory ToolCategory = "credential" ContextToolCategory ToolCategory = "context" + InputToolCategory ToolCategory = "input" NoCategory ToolCategory = "" ) @@ -180,7 +181,7 @@ func NewContext(ctx context.Context, prg *types.Program, input string) Context { return callCtx } -func (c *Context) SubCall(ctx context.Context, input, toolID, callID string, toolCategory ToolCategory) (Context, error) { +func (c *Context) SubCallContext(ctx context.Context, input, toolID, callID string, toolCategory ToolCategory) (Context, error) { tool, ok := c.Program.ToolSet[toolID] if !ok { return Context{}, fmt.Errorf("failed to file tool for id [%s]", toolID) diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 8ed4014b..42ff83bc 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -2,7 +2,6 @@ package openai import ( "context" - "fmt" "io" "log/slog" "os" @@ -16,6 +15,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/counter" "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/hash" + "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/prompt" "github.com/gptscript-ai/gptscript/pkg/system" "github.com/gptscript-ai/gptscript/pkg/types" @@ -29,6 +29,7 @@ const ( var ( key = os.Getenv("OPENAI_API_KEY") url = 
os.Getenv("OPENAI_BASE_URL") + log = mvl.Package() ) type InvalidAuthError struct{} @@ -305,7 +306,11 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques } if len(msgs) == 0 { - return nil, fmt.Errorf("invalid request, no messages to send to LLM") + log.Errorf("invalid request, no messages to send to LLM") + return &types.CompletionMessage{ + Role: types.CompletionMessageRoleTypeAssistant, + Content: types.Text(""), + }, nil } request := openai.ChatCompletionRequest{ diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index cf2f2cf9..e72dc32f 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -105,6 +105,10 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { tool.Parameters.Export = append(tool.Parameters.Export, csv(value)...) case "tool", "tools": tool.Parameters.Tools = append(tool.Parameters.Tools, csv(value)...) + case "inputfilter", "inputfilters": + tool.Parameters.InputFilters = append(tool.Parameters.InputFilters, csv(value)...) + case "shareinputfilter", "shareinputfilters": + tool.Parameters.ExportInputFilters = append(tool.Parameters.ExportInputFilters, csv(value)...) case "agent", "agents": tool.Parameters.Agents = append(tool.Parameters.Agents, csv(value)...) case "globaltool", "globaltools": @@ -183,10 +187,13 @@ type context struct { func (c *context) finish(tools *[]Node) { c.tool.Instructions = strings.TrimSpace(strings.Join(c.instructions, "")) - if c.tool.Instructions != "" || c.tool.Parameters.Name != "" || - len(c.tool.Export) > 0 || len(c.tool.Tools) > 0 || + if c.tool.Instructions != "" || + c.tool.Parameters.Name != "" || + len(c.tool.Export) > 0 || + len(c.tool.Tools) > 0 || c.tool.GlobalModelName != "" || len(c.tool.GlobalTools) > 0 || + len(c.tool.ExportInputFilters) > 0 || c.tool.Chat { *tools = append(*tools, Node{ ToolNode: &ToolNode{ diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index 9cdb593e..fb5a3ab7 100644 --- a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -191,3 +191,27 @@ name: bad }, }}).Equal(t, out) } + +func TestParseInput(t *testing.T) { + input := ` +input filters: input +share input filters: shared +` + out, err := Parse(strings.NewReader(input)) + require.NoError(t, err) + autogold.Expect(Document{Nodes: []Node{ + {ToolNode: &ToolNode{ + Tool: types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + InputFilters: []string{ + "input", + }, + ExportInputFilters: []string{"shared"}, + }, + }, + Source: types.ToolSource{LineNo: 1}, + }, + }}, + }}).Equal(t, out) +} diff --git a/pkg/runner/input.go b/pkg/runner/input.go new file mode 100644 index 00000000..0d8cb7f0 --- /dev/null +++ b/pkg/runner/input.go @@ -0,0 +1,27 @@ +package runner + +import ( + "fmt" + + "github.com/gptscript-ai/gptscript/pkg/engine" +) + +func (r *Runner) handleInput(callCtx engine.Context, monitor Monitor, env []string, input string) (string, error) { + inputToolRefs, err := callCtx.Tool.GetInputFilterTools(*callCtx.Program) + if err != nil { + return "", err + } + + for _, inputToolRef := range inputToolRefs { + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, inputToolRef.ToolID, input, "", engine.InputToolCategory) + if err != nil { + return "", err + } + if res.Result == nil { + return "", fmt.Errorf("invalid state: input tool [%s] can not result in a chat continuation", inputToolRef.Reference) + } + input = *res.Result + } + + return input, nil +} diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index fbb4622c..ddb4b102 100644 --- 
a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -408,6 +408,11 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en Content: input, }) + input, err := r.handleInput(callCtx, monitor, env, input) + if err != nil { + return nil, err + } + if len(callCtx.Tool.Credentials) > 0 { var err error env, err = r.handleCredentials(callCtx, monitor, env) @@ -417,7 +422,6 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en } var ( - err error newState *State ) callCtx.InputContext, newState, err = r.getContext(callCtx, state, monitor, env, input) @@ -446,7 +450,10 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en } if !authResp.Accept { - msg := fmt.Sprintf("[AUTHORIZATION ERROR]: %s", authResp.Message) + msg := authResp.Message + if msg == "" { + msg = "Tool call request has been denied" + } return &State{ Continuation: &engine.Return{ Result: &msg, @@ -631,8 +638,12 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s } if state.ResumeInput != nil { + input, err := r.handleInput(callCtx, monitor, env, *state.ResumeInput) + if err != nil { + return state, err + } engineResults = append(engineResults, engine.CallResult{ - User: *state.ResumeInput, + User: input, }) } @@ -689,16 +700,22 @@ func streamProgress(callCtx *engine.Context, monitor Monitor) (chan<- types.Comp } func (r *Runner) subCall(ctx context.Context, parentContext engine.Context, monitor Monitor, env []string, toolID, input, callID string, toolCategory engine.ToolCategory) (*State, error) { - callCtx, err := parentContext.SubCall(ctx, input, toolID, callID, toolCategory) + callCtx, err := parentContext.SubCallContext(ctx, input, toolID, callID, toolCategory) if err != nil { return nil, err } + if toolCategory == engine.ContextToolCategory && callCtx.Tool.IsNoop() { + return &State{ + Result: new(string), + }, nil + } + return r.call(callCtx, monitor, env, input) } func (r *Runner) subCallResume(ctx context.Context, parentContext engine.Context, monitor Monitor, env []string, toolID, callID string, state *State, toolCategory engine.ToolCategory) (*State, error) { - callCtx, err := parentContext.SubCall(ctx, "", toolID, callID, toolCategory) + callCtx, err := parentContext.SubCallContext(ctx, "", toolID, callID, toolCategory) if err != nil { return nil, err } @@ -882,12 +899,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env input = string(inputBytes) } - subCtx, err := callCtx.SubCall(callCtx.Ctx, input, credToolRefs[0].ToolID, "", engine.CredentialToolCategory) // leaving callID as "" will cause it to be set by the engine - if err != nil { - return nil, fmt.Errorf("failed to create subcall context for tool %s: %w", credToolName, err) - } - - res, err := r.call(subCtx, monitor, env, input) + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, credToolRefs[0].ToolID, input, "", engine.CredentialToolCategory) if err != nil { return nil, fmt.Errorf("failed to run credential tool %s: %w", credToolName, err) } diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index fb698a0e..ffb30d8f 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -822,3 +822,28 @@ func TestAgents(t *testing.T) { autogold.Expect("TEST RESULT CALL: 4").Equal(t, resp.Content) autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) } + +func TestInput(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + } + + r := tester.NewRunner(t) + + 
prg, err := r.Load("") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "You're stupid") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 1").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) + + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "You're ugly") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 2").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step2")) +} diff --git a/pkg/tests/testdata/TestInput/call1-resp.golden b/pkg/tests/testdata/TestInput/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestInput/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestInput/call1.golden b/pkg/tests/testdata/TestInput/call1.golden new file mode 100644 index 00000000..8f9b629c --- /dev/null +++ b/pkg/tests/testdata/TestInput/call1.golden @@ -0,0 +1,24 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're stupid!\n ha ha ha\n" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestInput/call2-resp.golden b/pkg/tests/testdata/TestInput/call2-resp.golden new file mode 100644 index 00000000..997ca1b9 --- /dev/null +++ b/pkg/tests/testdata/TestInput/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestInput/call2.golden b/pkg/tests/testdata/TestInput/call2.golden new file mode 100644 index 00000000..8da96da8 --- /dev/null +++ b/pkg/tests/testdata/TestInput/call2.golden @@ -0,0 +1,42 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're stupid!\n ha ha ha\n" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're ugly!\n ha ha ha\n" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestInput/step1.golden b/pkg/tests/testdata/TestInput/step1.golden new file mode 100644 index 00000000..a617d41a --- /dev/null +++ b/pkg/tests/testdata/TestInput/step1.golden @@ -0,0 +1,47 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 1", + "toolID": "testdata/TestInput/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "No, You're stupid!\n ha ha ha\n", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're stupid!\n ha ha ha\n" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + } + ] + } + }, + "result": "TEST RESULT CALL: 1" + }, + "continuationToolID": "testdata/TestInput/test.gpt:" + } +}` diff --git 
a/pkg/tests/testdata/TestInput/step2.golden b/pkg/tests/testdata/TestInput/step2.golden new file mode 100644 index 00000000..e085edb2 --- /dev/null +++ b/pkg/tests/testdata/TestInput/step2.golden @@ -0,0 +1,65 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 2", + "toolID": "testdata/TestInput/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "No, You're stupid!\n ha ha ha\n", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're stupid!\n ha ha ha\n" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're ugly!\n ha ha ha\n" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} + } + ] + } + }, + "result": "TEST RESULT CALL: 2" + }, + "continuationToolID": "testdata/TestInput/test.gpt:" + } +}` diff --git a/pkg/tests/testdata/TestInput/test.gpt b/pkg/tests/testdata/TestInput/test.gpt new file mode 100644 index 00000000..bcb85a43 --- /dev/null +++ b/pkg/tests/testdata/TestInput/test.gpt @@ -0,0 +1,23 @@ +input filter: taunt +context: exporter +chat: true + +Tool body + +--- +name: taunt +args: foo: this is useless +#!/bin/bash + +echo "No, ${GPTSCRIPT_INPUT}!" + +--- +name: exporter +share input filters: taunt2 + +--- +name: taunt2 +args: foo: this is useless + +#!/bin/bash +echo "${GPTSCRIPT_INPUT} ha ha ha" \ No newline at end of file diff --git a/pkg/types/set.go b/pkg/types/set.go index 9ff6b22e..230e112b 100644 --- a/pkg/types/set.go +++ b/pkg/types/set.go @@ -29,7 +29,7 @@ func (t *toolRefSet) HasTool(toolID string) bool { } func (t *toolRefSet) AddAll(values []ToolReference, err error) { - if t.err != nil { + if err != nil { t.err = err } for _, v := range values { diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 9468a04a..cc84264d 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -120,26 +120,28 @@ func (p Program) SetBlocking() Program { type BuiltinFunc func(ctx context.Context, env []string, input string, progress chan<- string) (string, error) type Parameters struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - MaxTokens int `json:"maxTokens,omitempty"` - ModelName string `json:"modelName,omitempty"` - ModelProvider bool `json:"modelProvider,omitempty"` - JSONResponse bool `json:"jsonResponse,omitempty"` - Chat bool `json:"chat,omitempty"` - Temperature *float32 `json:"temperature,omitempty"` - Cache *bool `json:"cache,omitempty"` - InternalPrompt *bool `json:"internalPrompt"` - Arguments *openapi3.Schema `json:"arguments,omitempty"` - Tools []string `json:"tools,omitempty"` - GlobalTools []string `json:"globalTools,omitempty"` - GlobalModelName string `json:"globalModelName,omitempty"` - Context []string `json:"context,omitempty"` - ExportContext []string `json:"exportContext,omitempty"` - Export []string `json:"export,omitempty"` - Agents []string `json:"agents,omitempty"` - Credentials []string `json:"credentials,omitempty"` - Blocking bool `json:"-"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + MaxTokens int `json:"maxTokens,omitempty"` + ModelName string `json:"modelName,omitempty"` + ModelProvider bool `json:"modelProvider,omitempty"` + 
JSONResponse bool `json:"jsonResponse,omitempty"` + Chat bool `json:"chat,omitempty"` + Temperature *float32 `json:"temperature,omitempty"` + Cache *bool `json:"cache,omitempty"` + InternalPrompt *bool `json:"internalPrompt"` + Arguments *openapi3.Schema `json:"arguments,omitempty"` + Tools []string `json:"tools,omitempty"` + GlobalTools []string `json:"globalTools,omitempty"` + GlobalModelName string `json:"globalModelName,omitempty"` + Context []string `json:"context,omitempty"` + ExportContext []string `json:"exportContext,omitempty"` + Export []string `json:"export,omitempty"` + Agents []string `json:"agents,omitempty"` + Credentials []string `json:"credentials,omitempty"` + InputFilters []string `json:"inputFilters,omitempty"` + ExportInputFilters []string `json:"exportInputFilters,omitempty"` + Blocking bool `json:"-"` } func (p Parameters) ToolRefNames() []string { @@ -149,7 +151,9 @@ func (p Parameters) ToolRefNames() []string { p.Export, p.ExportContext, p.Context, - p.Credentials) + p.Credentials, + p.InputFilters, + p.ExportInputFilters) } type ToolDef struct { @@ -379,11 +383,17 @@ func (t ToolDef) String() string { if len(t.Parameters.Export) != 0 { _, _ = fmt.Fprintf(buf, "Share Tools: %s\n", strings.Join(t.Parameters.Export, ", ")) } + if len(t.Parameters.Context) != 0 { + _, _ = fmt.Fprintf(buf, "Context: %s\n", strings.Join(t.Parameters.Context, ", ")) + } if len(t.Parameters.ExportContext) != 0 { _, _ = fmt.Fprintf(buf, "Share Context: %s\n", strings.Join(t.Parameters.ExportContext, ", ")) } - if len(t.Parameters.Context) != 0 { - _, _ = fmt.Fprintf(buf, "Context: %s\n", strings.Join(t.Parameters.Context, ", ")) + if len(t.Parameters.InputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Input Filters: %s\n", strings.Join(t.Parameters.InputFilters, ", ")) + } + if len(t.Parameters.ExportInputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Share Input Filters: %s\n", strings.Join(t.Parameters.ExportInputFilters, ", ")) } if t.Parameters.MaxTokens != 0 { _, _ = fmt.Fprintf(buf, "Max Tokens: %d\n", t.Parameters.MaxTokens) @@ -469,6 +479,8 @@ func (t Tool) GetExportedTools(prg Program) ([]ToolReference, error) { return result.List() } +// GetContextTools returns all tools that are in the context of the tool including all the +// contexts that are exported by the context tools. This will recurse all exports. 
func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { result := &toolRefSet{} @@ -485,6 +497,31 @@ func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { return result.List() } +func (t Tool) GetInputFilterTools(program Program) ([]ToolReference, error) { + result := &toolRefSet{} + + inputFilterRefs, err := t.GetToolRefsFromNames(t.InputFilters) + if err != nil { + return nil, err + } + + for _, inputFilterRef := range inputFilterRefs { + result.Add(inputFilterRef) + } + + contextRefs, err := t.GetContextTools(program) + if err != nil { + return nil, err + } + + for _, contextRef := range contextRefs { + contextTool := program.ToolSet[contextRef.ToolID] + result.AddAll(contextTool.GetToolRefsFromNames(contextTool.ExportInputFilters)) + } + + return result.List() +} + func (t Tool) GetAgentGroup(agentGroup []ToolReference, toolID string) (result []ToolReference, _ error) { newAgentGroup := toolRefSet{} if err := t.addAgents(&newAgentGroup); err != nil { @@ -659,6 +696,10 @@ func (t Tool) GetInterpreter() string { return fields[0] } +func (t Tool) IsNoop() bool { + return t.Instructions == "" +} + func (t Tool) IsCommand() bool { return strings.HasPrefix(t.Instructions, CommandPrefix) } diff --git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index f1edc340..6e3d98d3 100644 --- a/pkg/types/tool_test.go +++ b/pkg/types/tool_test.go @@ -9,26 +9,28 @@ import ( func TestToolDef_String(t *testing.T) { tool := ToolDef{ Parameters: Parameters{ - Name: "Tool Sample", - Description: "This is a sample tool", - MaxTokens: 1024, - ModelName: "ModelSample", - ModelProvider: true, - JSONResponse: true, - Chat: true, - Temperature: float32Ptr(0.8), - Cache: boolPtr(true), - InternalPrompt: boolPtr(true), - Arguments: ObjectSchema("arg1", "desc1", "arg2", "desc2"), - Tools: []string{"Tool1", "Tool2"}, - GlobalTools: []string{"GlobalTool1", "GlobalTool2"}, - GlobalModelName: "GlobalModelSample", - Context: []string{"Context1", "Context2"}, - ExportContext: []string{"ExportContext1", "ExportContext2"}, - Export: []string{"Export1", "Export2"}, - Agents: []string{"Agent1", "Agent2"}, - Credentials: []string{"Credential1", "Credential2"}, - Blocking: true, + Name: "Tool Sample", + Description: "This is a sample tool", + MaxTokens: 1024, + ModelName: "ModelSample", + ModelProvider: true, + JSONResponse: true, + Chat: true, + Temperature: float32Ptr(0.8), + Cache: boolPtr(true), + InternalPrompt: boolPtr(true), + Arguments: ObjectSchema("arg1", "desc1", "arg2", "desc2"), + Tools: []string{"Tool1", "Tool2"}, + GlobalTools: []string{"GlobalTool1", "GlobalTool2"}, + GlobalModelName: "GlobalModelSample", + Context: []string{"Context1", "Context2"}, + ExportContext: []string{"ExportContext1", "ExportContext2"}, + Export: []string{"Export1", "Export2"}, + Agents: []string{"Agent1", "Agent2"}, + Credentials: []string{"Credential1", "Credential2"}, + Blocking: true, + InputFilters: []string{"Filter1", "Filter2"}, + ExportInputFilters: []string{"SharedFilter1", "SharedFilter2"}, }, Instructions: "This is a sample instruction", } @@ -40,8 +42,10 @@ Description: This is a sample tool Agents: Agent1, Agent2 Tools: Tool1, Tool2 Share Tools: Export1, Export2 -Share Context: ExportContext1, ExportContext2 Context: Context1, Context2 +Share Context: ExportContext1, ExportContext2 +Input Filters: Filter1, Filter2 +Share Input Filters: SharedFilter1, SharedFilter2 Max Tokens: 1024 Model: ModelSample Model Provider: true From 19bd1766615137bdc332e9ffd5e232532f2a8134 Mon Sep 17 00:00:00 2001 From: 
Darren Shepherd Date: Thu, 20 Jun 2024 16:47:25 -0700 Subject: [PATCH 15/22] feat: add sys.context to introspect your current tools and agents --- pkg/builtin/builtin.go | 37 ++++++++--- pkg/embedded/embed.go | 4 +- pkg/engine/engine.go | 13 +++- pkg/runner/runner.go | 6 +- pkg/tests/runner_test.go | 31 +++++++++ .../testdata/TestSysContext/call1-resp.golden | 9 +++ .../testdata/TestSysContext/call1.golden | 41 ++++++++++++ .../testdata/TestSysContext/call1.golden.bkp | 41 ++++++++++++ .../testdata/TestSysContext/context.json | 1 + pkg/tests/testdata/TestSysContext/file.gpt | 3 + .../testdata/TestSysContext/step1.golden | 64 +++++++++++++++++++ pkg/tests/testdata/TestSysContext/test.go | 22 +++++++ pkg/tests/testdata/TestSysContext/test.gpt | 14 ++++ pkg/types/tool.go | 44 +++++++++---- pkg/types/toolstring.go | 2 +- 15 files changed, 301 insertions(+), 31 deletions(-) create mode 100644 pkg/tests/testdata/TestSysContext/call1-resp.golden create mode 100644 pkg/tests/testdata/TestSysContext/call1.golden create mode 100644 pkg/tests/testdata/TestSysContext/call1.golden.bkp create mode 100644 pkg/tests/testdata/TestSysContext/context.json create mode 100644 pkg/tests/testdata/TestSysContext/file.gpt create mode 100644 pkg/tests/testdata/TestSysContext/step1.golden create mode 100644 pkg/tests/testdata/TestSysContext/test.go create mode 100644 pkg/tests/testdata/TestSysContext/test.gpt diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index 291384c3..3e05f05c 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -32,6 +32,7 @@ var SafeTools = map[string]struct{}{ "sys.echo": {}, "sys.prompt": {}, "sys.time.now": {}, + "sys.context": {}, } var tools = map[string]types.Tool{ @@ -228,16 +229,15 @@ var tools = map[string]types.Tool{ BuiltinFunc: SysChatHistory, }, }, -} - -func SysProgram() *types.Program { - result := &types.Program{ - ToolSet: types.ToolSet{}, - } - for _, tool := range ListTools() { - result.ToolSet[tool.ID] = tool - } - return result + "sys.context": { + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Description: "Retrieves the current internal GPTScript tool call context information", + Arguments: types.ObjectSchema(), + }, + BuiltinFunc: SysContext, + }, + }, } func ListTools() (result []types.Tool) { @@ -626,6 +626,23 @@ func invalidArgument(input string, err error) string { return fmt.Sprintf("Failed to parse arguments %s: %v", input, err) } +func SysContext(ctx context.Context, _ []string, _ string, _ chan<- string) (string, error) { + engineContext, _ := engine.FromContext(ctx) + + callContext := *engineContext.GetCallContext() + callContext.ID = "" + callContext.ParentID = "" + data, err := json.Marshal(map[string]any{ + "program": engineContext.Program, + "call": callContext, + }) + if err != nil { + return invalidArgument("", err), nil + } + + return string(data), nil +} + func SysChatHistory(ctx context.Context, _ []string, _ string, _ chan<- string) (string, error) { engineContext, _ := engine.FromContext(ctx) diff --git a/pkg/embedded/embed.go b/pkg/embedded/embed.go index de12bec3..7ea7edb9 100644 --- a/pkg/embedded/embed.go +++ b/pkg/embedded/embed.go @@ -3,7 +3,6 @@ package embedded import ( "io/fs" "os" - "strings" "github.com/gptscript-ai/gptscript/internal" "github.com/gptscript-ai/gptscript/pkg/cli" @@ -22,10 +21,11 @@ func Run(opts ...Options) bool { } system.SetBinToSelf() - if len(os.Args) > 1 && strings.HasPrefix(os.Args[1], "sys.") { + if os.Getenv("GPTSCRIPT_EMBEDDED") == "true" { cli.Main() return true } + _ = 
os.Setenv("GPTSCRIPT_EMBEDDED", "true") return false } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 9dd8c741..a76a3556 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -165,7 +165,7 @@ func WithToolCategory(ctx context.Context, toolCategory ToolCategory) context.Co return context.WithValue(ctx, toolCategoryKey{}, toolCategory) } -func NewContext(ctx context.Context, prg *types.Program, input string) Context { +func NewContext(ctx context.Context, prg *types.Program, input string) (Context, error) { category, _ := ctx.Value(toolCategoryKey{}).(ToolCategory) callCtx := Context{ @@ -178,7 +178,14 @@ func NewContext(ctx context.Context, prg *types.Program, input string) Context { Program: prg, Input: input, } - return callCtx + + agentGroup, err := callCtx.Tool.GetAgents(*prg) + if err != nil { + return callCtx, err + } + + callCtx.AgentGroup = agentGroup + return callCtx, nil } func (c *Context) SubCallContext(ctx context.Context, input, toolID, callID string, toolCategory ToolCategory) (Context, error) { @@ -191,7 +198,7 @@ func (c *Context) SubCallContext(ctx context.Context, input, toolID, callID stri callID = counter.Next() } - agentGroup, err := c.Tool.GetAgentGroup(c.AgentGroup, toolID) + agentGroup, err := c.Tool.GetNextAgentGroup(*c.Program, c.AgentGroup, toolID) if err != nil { return Context{}, err } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index ddb4b102..7df697ff 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -165,7 +165,11 @@ func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Progra monitor.Stop(resp.Content, err) }() - callCtx := engine.NewContext(ctx, &prg, input) + callCtx, err := engine.NewContext(ctx, &prg, input) + if err != nil { + return resp, err + } + if state == nil || state.StartContinuation { if state != nil { state = state.WithResumeInput(&input) diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index ffb30d8f..1421e849 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -7,6 +7,7 @@ import ( "runtime" "testing" + "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/tests/tester" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/hexops/autogold/v2" @@ -847,3 +848,33 @@ func TestInput(t *testing.T) { autogold.Expect("TEST RESULT CALL: 2").Equal(t, resp.Content) autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step2")) } + +func TestSysContext(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + } + + r := tester.NewRunner(t) + + prg, err := r.Load("") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 1").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) + + data, err := os.ReadFile("testdata/TestSysContext/context.json") + require.NoError(t, err) + + context := struct { + Call engine.CallContext `json:"call"` + }{} + err = json.Unmarshal(data, &context) + require.NoError(t, err) + + require.Len(t, context.Call.AgentGroup, 1) + assert.Equal(t, context.Call.AgentGroup[0].Named, "iAmSuperman") +} diff --git a/pkg/tests/testdata/TestSysContext/call1-resp.golden b/pkg/tests/testdata/TestSysContext/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": 
"assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestSysContext/call1.golden b/pkg/tests/testdata/TestSysContext/call1.golden new file mode 100644 index 00000000..c315d381 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/call1.golden @@ -0,0 +1,41 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestSysContext/file.gpt:I am Superman Agent", + "name": "iAmSuperman", + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"agentGroup\":[{\"named\":\"iAmSuperman\",\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/file.gpt:I am Superman Agent\":{\"name\":\"I am Superman Agent\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm super\",\"id\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\",\"localTools\":{\"i am superman agent\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"},\"source\":{\"location\":\"testdata/TestSysContext/file.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"./file.gpt\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"./file.gpt\":[{\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho \\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e 
${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestSysContext/call1.golden.bkp b/pkg/tests/testdata/TestSysContext/call1.golden.bkp new file mode 100644 index 00000000..7bfb5403 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/call1.golden.bkp @@ -0,0 +1,41 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestSysContext/test.gpt:foo", + "name": "foo", + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\",\"parentID\":\"1718924874\",\"displayText\":\"Running sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"foo\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}],\"foo\":[{\"reference\":\"foo\",\"toolID\":\"testdata/TestSysContext/test.gpt:foo\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\",\"foo\":\"testdata/TestSysContext/test.gpt:foo\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho \\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e 
${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\",\"foo\":\"testdata/TestSysContext/test.gpt:foo\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":13},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:foo\":{\"name\":\"foo\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm an agent\",\"id\":\"testdata/TestSysContext/test.gpt:foo\",\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\",\"foo\":\"testdata/TestSysContext/test.gpt:foo\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestSysContext/context.json b/pkg/tests/testdata/TestSysContext/context.json new file mode 100644 index 00000000..c5608ec4 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/context.json @@ -0,0 +1 @@ +{"call":{"id":"","tool":{"name":"sys.context","description":"Retrieves the current internal GPTScript tool call context information","modelName":"gpt-4o","internalPrompt":null,"arguments":{"type":"object"},"instructions":"#!sys.context","id":"sys.context","source":{}},"agentGroup":[{"named":"iAmSuperman","reference":"./file.gpt","toolID":"testdata/TestSysContext/file.gpt:I am Superman Agent"}],"inputContext":null,"toolCategory":"context","toolName":"sys.context"},"program":{"name":"testdata/TestSysContext/test.gpt","entryToolId":"testdata/TestSysContext/test.gpt:","toolSet":{"sys.context":{"name":"sys.context","description":"Retrieves the current internal GPTScript tool call context information","modelName":"gpt-4o","internalPrompt":null,"arguments":{"type":"object"},"instructions":"#!sys.context","id":"sys.context","source":{}},"testdata/TestSysContext/file.gpt:I am Superman Agent":{"name":"I am Superman Agent","modelName":"gpt-4o","internalPrompt":null,"instructions":"I'm super","id":"testdata/TestSysContext/file.gpt:I am Superman Agent","localTools":{"i am superman agent":"testdata/TestSysContext/file.gpt:I am Superman Agent"},"source":{"location":"testdata/TestSysContext/file.gpt","lineNo":1},"workingDir":"testdata/TestSysContext"},"testdata/TestSysContext/test.gpt:":{"modelName":"gpt-4o","chat":true,"internalPrompt":null,"context":["agents"],"agents":["./file.gpt"],"instructions":"Tool body","id":"testdata/TestSysContext/test.gpt:","toolMapping":{"./file.gpt":[{"reference":"./file.gpt","toolID":"testdata/TestSysContext/file.gpt:I am Superman Agent"}],"agents":[{"reference":"agents","toolID":"testdata/TestSysContext/test.gpt:agents"}]},"localTools":{"":"testdata/TestSysContext/test.gpt:","agents":"testdata/TestSysContext/test.gpt:agents"},"source":{"location":"testdata/TestSysContext/test.gpt","lineNo":1},"workingDir":"testdata/TestSysContext"},"testdata/TestSysContext/test.gpt:agents":{"name":"agents","modelName":"gpt-4o","internalPrompt":null,"context":["sys.context"],"instructions":"#!/bin/bash\n\necho \"${GPTSCRIPT_CONTEXT}\"\necho \"${GPTSCRIPT_CONTEXT}\" \u003e 
${GPTSCRIPT_TOOL_DIR}/context.json","id":"testdata/TestSysContext/test.gpt:agents","toolMapping":{"sys.context":[{"reference":"sys.context","toolID":"sys.context"}]},"localTools":{"":"testdata/TestSysContext/test.gpt:","agents":"testdata/TestSysContext/test.gpt:agents"},"source":{"location":"testdata/TestSysContext/test.gpt","lineNo":8},"workingDir":"testdata/TestSysContext"}}}} diff --git a/pkg/tests/testdata/TestSysContext/file.gpt b/pkg/tests/testdata/TestSysContext/file.gpt new file mode 100644 index 00000000..75f63087 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/file.gpt @@ -0,0 +1,3 @@ +name: I am Superman Agent + +I'm super \ No newline at end of file diff --git a/pkg/tests/testdata/TestSysContext/step1.golden b/pkg/tests/testdata/TestSysContext/step1.golden new file mode 100644 index 00000000..26b75508 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/step1.golden @@ -0,0 +1,64 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 1", + "toolID": "testdata/TestSysContext/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestSysContext/file.gpt:I am Superman Agent", + "name": "iAmSuperman", + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"agentGroup\":[{\"named\":\"iAmSuperman\",\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/file.gpt:I am Superman Agent\":{\"name\":\"I am Superman Agent\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm super\",\"id\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\",\"localTools\":{\"i am superman agent\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"},\"source\":{\"location\":\"testdata/TestSysContext/file.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"./file.gpt\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"./file.gpt\":[{\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman 
Agent\"}],\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho \\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e ${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + } + ] + } + }, + "result": "TEST RESULT CALL: 1" + }, + "continuationToolID": "testdata/TestSysContext/test.gpt:" + } +}` diff --git a/pkg/tests/testdata/TestSysContext/test.go b/pkg/tests/testdata/TestSysContext/test.go new file mode 100644 index 00000000..462ff284 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/test.go @@ -0,0 +1,22 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/gptscript-ai/gptscript/pkg/engine" +) + +func main() { + data := struct { + Call engine.CallContext `json:"call,omitempty"` + }{} + if err := json.Unmarshal([]byte(os.Getenv("GPTSCRIPT_CONTEXT")), &data); err != nil { + panic(err) + } + + for _, agent := range data.Call.AgentGroup { + fmt.Println(agent.Reference, agent.ToolID) + } +} diff --git a/pkg/tests/testdata/TestSysContext/test.gpt b/pkg/tests/testdata/TestSysContext/test.gpt new file mode 100644 index 00000000..63e93417 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/test.gpt @@ -0,0 +1,14 @@ +context: agents +agents: ./file.gpt +chat: true + +Tool body + +--- +name: agents +context: sys.context + +#!/bin/bash + +echo "${GPTSCRIPT_CONTEXT}" +echo "${GPTSCRIPT_CONTEXT}" > ${GPTSCRIPT_TOOL_DIR}/context.json \ No newline at end of file diff --git a/pkg/types/tool.go b/pkg/types/tool.go index cc84264d..dd89c471 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -335,6 +335,30 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str return originalName, alias, args, nil } +func (t Tool) GetAgents(prg Program) (result []ToolReference, _ error) { + toolRefs, err := t.GetToolRefsFromNames(t.Agents) + if err != nil { + return nil, err + } + + // Agent Tool refs must be named + for i, toolRef := range toolRefs { + if toolRef.Named != "" { + continue + } + tool := prg.ToolSet[toolRef.ToolID] + name := tool.Name + if name == "" { + name = toolRef.Reference + } + normed := ToolNormalizer(name) + normed = strings.TrimSuffix(strings.TrimSuffix(normed, "Agent"), "Assistant") + toolRefs[i].Named = normed + } + + return toolRefs, nil +} + func (t Tool) GetToolRefsFromNames(names []string) (result []ToolReference, _ error) { for _, toolName := range names { toolRefs, ok := t.ToolMapping[toolName] @@ -522,9 +546,9 @@ func (t Tool) GetInputFilterTools(program Program) 
([]ToolReference, error) { return result.List() } -func (t Tool) GetAgentGroup(agentGroup []ToolReference, toolID string) (result []ToolReference, _ error) { +func (t Tool) GetNextAgentGroup(prg Program, agentGroup []ToolReference, toolID string) (result []ToolReference, _ error) { newAgentGroup := toolRefSet{} - if err := t.addAgents(&newAgentGroup); err != nil { + if err := t.addAgents(prg, &newAgentGroup); err != nil { return nil, err } @@ -533,15 +557,7 @@ func (t Tool) GetAgentGroup(agentGroup []ToolReference, toolID string) (result [ return newAgentGroup.List() } - existingAgentGroup := toolRefSet{} - existingAgentGroup.AddAll(agentGroup, nil) - - if existingAgentGroup.HasTool(toolID) { - return existingAgentGroup.List() - } - - // No group - return nil, nil + return agentGroup, nil } func (t Tool) GetCompletionTools(prg Program, agentGroup ...ToolReference) (result []CompletionTool, err error) { @@ -552,8 +568,8 @@ func (t Tool) GetCompletionTools(prg Program, agentGroup ...ToolReference) (resu return toolRefsToCompletionTools(refs, prg), nil } -func (t Tool) addAgents(result *toolRefSet) error { - subToolRefs, err := t.GetToolRefsFromNames(t.Parameters.Agents) +func (t Tool) addAgents(prg Program, result *toolRefSet) error { + subToolRefs, err := t.GetAgents(prg) if err != nil { return err } @@ -617,7 +633,7 @@ func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference) ([] return nil, err } - if err := t.addAgents(&result); err != nil { + if err := t.addAgents(prg, &result); err != nil { return nil, err } diff --git a/pkg/types/toolstring.go b/pkg/types/toolstring.go index 3bd6fd57..ede3401e 100644 --- a/pkg/types/toolstring.go +++ b/pkg/types/toolstring.go @@ -74,7 +74,7 @@ func ToSysDisplayString(id string, args map[string]string) (string, error) { return fmt.Sprintf("Removing `%s`", args["location"]), nil case "sys.write": return fmt.Sprintf("Writing `%s`", args["filename"]), nil - case "sys.stat", "sys.getenv", "sys.abort", "sys.chat.finish", "sys.chat.history", "sys.echo", "sys.prompt", "sys.time.now": + case "sys.context", "sys.stat", "sys.getenv", "sys.abort", "sys.chat.finish", "sys.chat.history", "sys.echo", "sys.prompt", "sys.time.now": return "", nil default: return "", fmt.Errorf("unknown tool for display string: %s", id) From a92428e57fc87c4e2a1a9d0c191125e4018c0770 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Fri, 21 Jun 2024 10:34:18 -0400 Subject: [PATCH 16/22] fix: auto run smoke tests for members The default GitHub workflow token isn't privileged enough to check org membership for PR authors. This prevents smoke tests for running automatically for org members. To fix this, switch to a PAT with the correct privileges for this check. 
Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .github/workflows/smoke.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index 5f0aee22..c213dca1 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -37,7 +37,7 @@ jobs: AUTHOR="${{ github.event.pull_request.user.login }}" # Check for org membership - MEMBERSHIP_RESPONSE_CODE=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + MEMBERSHIP_RESPONSE_CODE=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token ${{ secrets.SMOKE_GH_TOKEN }}" \ "https://api.github.com/orgs/$ORG/members/$AUTHOR") if [ "$MEMBERSHIP_RESPONSE_CODE" -eq 204 ]; then From ef449d2940ced168d9269eceafd358d3f09ba17c Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Fri, 21 Jun 2024 10:44:28 -0400 Subject: [PATCH 17/22] chore: de-stutter smoke job names Rename smoke test jobs to remove stutter; e.g. `smoke/smoke-` -> `smoke/`. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .github/workflows/smoke.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index c213dca1..abea2159 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -59,7 +59,7 @@ jobs: echo "run_smoke_tests=false" >> $GITHUB_OUTPUT - smoke-gpt-4o-2024-05-13: + gpt-4o-2024-05-13: needs: check-label if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} runs-on: ubuntu-22.04 @@ -88,7 +88,7 @@ jobs: export PATH="$(pwd)/bin:${PATH}" make smoke - smoke-gpt-4-turbo-2024-04-09: + gpt-4-turbo-2024-04-09: needs: check-label if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} runs-on: ubuntu-22.04 @@ -117,7 +117,7 @@ jobs: export PATH="$(pwd)/bin:${PATH}" make smoke - smoke-claude-3-opus-20240229: + claude-3-opus-20240229: needs: check-label if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} runs-on: ubuntu-22.04 @@ -147,7 +147,7 @@ jobs: export PATH="$(pwd)/bin:${PATH}" make smoke - smoke-mistral-large-2402: + mistral-large-2402: needs: check-label if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} runs-on: ubuntu-22.04 From a87330a36cba5d678a0ad632fce3f3449a3b3321 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 20 Jun 2024 23:01:40 -0700 Subject: [PATCH 18/22] feat: output filters --- pkg/engine/engine.go | 1 + pkg/parser/parser.go | 5 ++ pkg/parser/parser_test.go | 24 ++++++ pkg/runner/input.go | 9 ++- pkg/runner/output.go | 72 +++++++++++++++++ pkg/runner/runner.go | 6 +- pkg/tests/runner_test.go | 46 +++++++++++ pkg/tests/testdata/TestInput/test.gpt | 6 +- .../testdata/TestOutput/call1-resp.golden | 9 +++ pkg/tests/testdata/TestOutput/call1.golden | 24 ++++++ .../testdata/TestOutput/call2-resp.golden | 9 +++ pkg/tests/testdata/TestOutput/call2.golden | 42 ++++++++++ pkg/tests/testdata/TestOutput/call3.golden | 60 ++++++++++++++ pkg/tests/testdata/TestOutput/step1.golden | 47 +++++++++++ pkg/tests/testdata/TestOutput/step2.golden | 65 +++++++++++++++ pkg/tests/testdata/TestOutput/step3.golden | 6 ++ pkg/tests/testdata/TestOutput/test.gpt | 31 +++++++ pkg/types/tool.go | 81 +++++++++++++------ pkg/types/tool_test.go | 48 ++++++----- 19 files changed, 542 insertions(+), 49 deletions(-) create mode 100644 pkg/runner/output.go create mode 100644 pkg/tests/testdata/TestOutput/call1-resp.golden create mode 100644 
pkg/tests/testdata/TestOutput/call1.golden create mode 100644 pkg/tests/testdata/TestOutput/call2-resp.golden create mode 100644 pkg/tests/testdata/TestOutput/call2.golden create mode 100644 pkg/tests/testdata/TestOutput/call3.golden create mode 100644 pkg/tests/testdata/TestOutput/step1.golden create mode 100644 pkg/tests/testdata/TestOutput/step2.golden create mode 100644 pkg/tests/testdata/TestOutput/step3.golden create mode 100644 pkg/tests/testdata/TestOutput/test.gpt diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index a76a3556..bea8439a 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -99,6 +99,7 @@ const ( CredentialToolCategory ToolCategory = "credential" ContextToolCategory ToolCategory = "context" InputToolCategory ToolCategory = "input" + OutputToolCategory ToolCategory = "output" NoCategory ToolCategory = "" ) diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index e72dc32f..b998320b 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -109,6 +109,10 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { tool.Parameters.InputFilters = append(tool.Parameters.InputFilters, csv(value)...) case "shareinputfilter", "shareinputfilters": tool.Parameters.ExportInputFilters = append(tool.Parameters.ExportInputFilters, csv(value)...) + case "outputfilter", "outputfilters": + tool.Parameters.OutputFilters = append(tool.Parameters.OutputFilters, csv(value)...) + case "shareoutputfilter", "shareoutputfilters": + tool.Parameters.ExportOutputFilters = append(tool.Parameters.ExportOutputFilters, csv(value)...) case "agent", "agents": tool.Parameters.Agents = append(tool.Parameters.Agents, csv(value)...) case "globaltool", "globaltools": @@ -194,6 +198,7 @@ func (c *context) finish(tools *[]Node) { c.tool.GlobalModelName != "" || len(c.tool.GlobalTools) > 0 || len(c.tool.ExportInputFilters) > 0 || + len(c.tool.ExportOutputFilters) > 0 || c.tool.Chat { *tools = append(*tools, Node{ ToolNode: &ToolNode{ diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index fb5a3ab7..9f682efa 100644 --- a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -215,3 +215,27 @@ share input filters: shared }}, }}).Equal(t, out) } + +func TestParseOutput(t *testing.T) { + output := ` +output filters: output +share output filters: shared +` + out, err := Parse(strings.NewReader(output)) + require.NoError(t, err) + autogold.Expect(Document{Nodes: []Node{ + {ToolNode: &ToolNode{ + Tool: types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + OutputFilters: []string{ + "output", + }, + ExportOutputFilters: []string{"shared"}, + }, + }, + Source: types.ToolSource{LineNo: 1}, + }, + }}, + }}).Equal(t, out) +} diff --git a/pkg/runner/input.go b/pkg/runner/input.go index 0d8cb7f0..7d77330e 100644 --- a/pkg/runner/input.go +++ b/pkg/runner/input.go @@ -1,6 +1,7 @@ package runner import ( + "encoding/json" "fmt" "github.com/gptscript-ai/gptscript/pkg/engine" @@ -13,7 +14,13 @@ func (r *Runner) handleInput(callCtx engine.Context, monitor Monitor, env []stri } for _, inputToolRef := range inputToolRefs { - res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, inputToolRef.ToolID, input, "", engine.InputToolCategory) + inputData, err := json.Marshal(map[string]any{ + "input": input, + }) + if err != nil { + return "", fmt.Errorf("failed to marshal input: %w", err) + } + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, inputToolRef.ToolID, string(inputData), "", engine.InputToolCategory) if err != nil { return "", err } 
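With this change, an input filter tool no longer receives the caller's raw input; the runner wraps it as a JSON object of the form `{"input": "<original input>"}` before making the sub call, and whatever the filter prints becomes the new input. The sketch below is a minimal, illustrative filter under that contract (the tool names and the trivial transformation are made up for the example); declaring an `input` argument lets a command tool read the wrapped value from the `INPUT` environment variable, as the updated test data in this patch also does.
```
input filter: clean

Tool body

---
name: clean
args: input: the original input passed to the tool

#!/bin/bash

# The runner supplies {"input": "..."}; the "input" key is exposed as $INPUT.
# Whatever this script prints becomes the input seen by the calling tool.
echo "${INPUT}" | tr -d '\r'
```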
diff --git a/pkg/runner/output.go b/pkg/runner/output.go new file mode 100644 index 00000000..858d106c --- /dev/null +++ b/pkg/runner/output.go @@ -0,0 +1,72 @@ +package runner + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/gptscript-ai/gptscript/pkg/engine" +) + +func (r *Runner) handleOutput(callCtx engine.Context, monitor Monitor, env []string, state *State, retErr error) (*State, error) { + outputToolRefs, err := callCtx.Tool.GetOutputFilterTools(*callCtx.Program) + if err != nil { + return nil, err + } + + if len(outputToolRefs) == 0 { + return state, retErr + } + + var ( + continuation bool + chatFinish bool + output string + ) + + if errMessage := (*engine.ErrChatFinish)(nil); errors.As(retErr, &errMessage) && callCtx.Tool.Chat { + chatFinish = true + output = errMessage.Message + } else if retErr != nil { + return state, retErr + } else if state.Continuation != nil && state.Continuation.Result != nil { + continuation = true + output = *state.Continuation.Result + } else if state.Result != nil { + output = *state.Result + } else { + return state, nil + } + + for _, outputToolRef := range outputToolRefs { + inputData, err := json.Marshal(map[string]any{ + "output": output, + "chatFinish": chatFinish, + "continuation": continuation, + "chat": callCtx.Tool.Chat, + }) + if err != nil { + return nil, fmt.Errorf("marshaling input for output filter: %w", err) + } + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, outputToolRef.ToolID, string(inputData), "", engine.OutputToolCategory) + if err != nil { + return nil, err + } + if res.Result == nil { + return nil, fmt.Errorf("invalid state: output tool [%s] can not result in a chat continuation", outputToolRef.Reference) + } + output = *res.Result + } + + if chatFinish { + return state, &engine.ErrChatFinish{ + Message: output, + } + } else if continuation { + state.Continuation.Result = &output + } else { + state.Result = &output + } + + return state, nil +} diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 7df697ff..fb2cba0d 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -536,7 +536,11 @@ type Needed struct { Input string `json:"input,omitempty"` } -func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, state *State) (*State, error) { +func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, state *State) (retState *State, retErr error) { + defer func() { + retState, retErr = r.handleOutput(callCtx, monitor, env, retState, retErr) + }() + if state.StartContinuation { return nil, fmt.Errorf("invalid state, resume should not have StartContinuation set to true") } diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 1421e849..db185d75 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -849,6 +849,52 @@ func TestInput(t *testing.T) { autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step2")) } +func TestOutput(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + } + + r := tester.NewRunner(t) + r.RespondWith(tester.Result{ + Text: "Response 1", + }) + + prg, err := r.Load("") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "Input 1") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect(`CHAT: true CONTENT: Response 1 CONTINUATION: true FINISH: false suffix +`).Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) + + r.RespondWith(tester.Result{ 
+ Text: "Response 2", + }) + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "Input 2") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect(`CHAT: true CONTENT: Response 2 CONTINUATION: true FINISH: false suffix +`).Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step2")) + + r.RespondWith(tester.Result{ + Err: &engine.ErrChatFinish{ + Message: "Chat Done", + }, + }) + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "Input 3") + require.NoError(t, err) + r.AssertResponded(t) + assert.True(t, resp.Done) + autogold.Expect(`CHAT FINISH: CHAT: true CONTENT: Chat Done CONTINUATION: false FINISH: true suffix +`).Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step3")) +} + func TestSysContext(t *testing.T) { if runtime.GOOS == "windows" { t.Skip() diff --git a/pkg/tests/testdata/TestInput/test.gpt b/pkg/tests/testdata/TestInput/test.gpt index bcb85a43..79522d90 100644 --- a/pkg/tests/testdata/TestInput/test.gpt +++ b/pkg/tests/testdata/TestInput/test.gpt @@ -7,9 +7,10 @@ Tool body --- name: taunt args: foo: this is useless +args: input: this is used #!/bin/bash -echo "No, ${GPTSCRIPT_INPUT}!" +echo "No, ${INPUT}!" --- name: exporter @@ -18,6 +19,7 @@ share input filters: taunt2 --- name: taunt2 args: foo: this is useless +args: input: this is used #!/bin/bash -echo "${GPTSCRIPT_INPUT} ha ha ha" \ No newline at end of file +echo "${INPUT} ha ha ha" \ No newline at end of file diff --git a/pkg/tests/testdata/TestOutput/call1-resp.golden b/pkg/tests/testdata/TestOutput/call1-resp.golden new file mode 100644 index 00000000..a6c5b94a --- /dev/null +++ b/pkg/tests/testdata/TestOutput/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestOutput/call1.golden b/pkg/tests/testdata/TestOutput/call1.golden new file mode 100644 index 00000000..9430afee --- /dev/null +++ b/pkg/tests/testdata/TestOutput/call1.golden @@ -0,0 +1,24 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestOutput/call2-resp.golden b/pkg/tests/testdata/TestOutput/call2-resp.golden new file mode 100644 index 00000000..e5170fb8 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "Response 2" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestOutput/call2.golden b/pkg/tests/testdata/TestOutput/call2.golden new file mode 100644 index 00000000..32bb7039 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/call2.golden @@ -0,0 +1,42 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 2" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestOutput/call3.golden b/pkg/tests/testdata/TestOutput/call3.golden new file mode 100644 index 00000000..01aed5eb 
--- /dev/null +++ b/pkg/tests/testdata/TestOutput/call3.golden @@ -0,0 +1,60 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 2" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 2" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 3" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestOutput/step1.golden b/pkg/tests/testdata/TestOutput/step1.golden new file mode 100644 index 00000000..46f1b8e8 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/step1.golden @@ -0,0 +1,47 @@ +`{ + "done": false, + "content": "CHAT: true CONTENT: Response 1 CONTINUATION: true FINISH: false suffix\n", + "toolID": "testdata/TestOutput/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "Input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} + } + ] + } + }, + "result": "CHAT: true CONTENT: Response 1 CONTINUATION: true FINISH: false suffix\n" + }, + "continuationToolID": "testdata/TestOutput/test.gpt:" + } +}` diff --git a/pkg/tests/testdata/TestOutput/step2.golden b/pkg/tests/testdata/TestOutput/step2.golden new file mode 100644 index 00000000..d5fd89a0 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/step2.golden @@ -0,0 +1,65 @@ +`{ + "done": false, + "content": "CHAT: true CONTENT: Response 2 CONTINUATION: true FINISH: false suffix\n", + "toolID": "testdata/TestOutput/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "Input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 2" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 2" + } + ], + "usage": {} + } + ] + } + }, + "result": "CHAT: true CONTENT: Response 2 CONTINUATION: true FINISH: false suffix\n" + }, + "continuationToolID": "testdata/TestOutput/test.gpt:" + } +}` diff --git a/pkg/tests/testdata/TestOutput/step3.golden b/pkg/tests/testdata/TestOutput/step3.golden new file mode 100644 index 00000000..c4e63adc --- /dev/null +++ b/pkg/tests/testdata/TestOutput/step3.golden @@ -0,0 +1,6 @@ +`{ + "done": true, + "content": "CHAT FINISH: CHAT: true CONTENT: Chat Done CONTINUATION: false FINISH: true suffix\n", + "toolID": "", + "state": null +}` diff --git a/pkg/tests/testdata/TestOutput/test.gpt b/pkg/tests/testdata/TestOutput/test.gpt new file mode 100644 index 00000000..cc35faa0 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/test.gpt @@ -0,0 +1,31 @@ +output filter: prefix +context: context +chat: true + +Tool body + +--- +name: 
context +share output filters: suffix + +--- +name: prefix +args: chat: is it chat +args: output: the output content +args: continuation: if this is a non-terminating response +args: chatFinish: chat finish message + +#!/bin/bash + +echo CHAT: ${CHAT} +echo CONTENT: ${OUTPUT} +echo CONTINUATION: ${CONTINUATION} +echo FINISH: ${CHATFINISH} + +--- +name: suffix +args: output: the output content + +#!/bin/bash + +echo ${OUTPUT} suffix \ No newline at end of file diff --git a/pkg/types/tool.go b/pkg/types/tool.go index dd89c471..a5124796 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -120,28 +120,30 @@ func (p Program) SetBlocking() Program { type BuiltinFunc func(ctx context.Context, env []string, input string, progress chan<- string) (string, error) type Parameters struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - MaxTokens int `json:"maxTokens,omitempty"` - ModelName string `json:"modelName,omitempty"` - ModelProvider bool `json:"modelProvider,omitempty"` - JSONResponse bool `json:"jsonResponse,omitempty"` - Chat bool `json:"chat,omitempty"` - Temperature *float32 `json:"temperature,omitempty"` - Cache *bool `json:"cache,omitempty"` - InternalPrompt *bool `json:"internalPrompt"` - Arguments *openapi3.Schema `json:"arguments,omitempty"` - Tools []string `json:"tools,omitempty"` - GlobalTools []string `json:"globalTools,omitempty"` - GlobalModelName string `json:"globalModelName,omitempty"` - Context []string `json:"context,omitempty"` - ExportContext []string `json:"exportContext,omitempty"` - Export []string `json:"export,omitempty"` - Agents []string `json:"agents,omitempty"` - Credentials []string `json:"credentials,omitempty"` - InputFilters []string `json:"inputFilters,omitempty"` - ExportInputFilters []string `json:"exportInputFilters,omitempty"` - Blocking bool `json:"-"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + MaxTokens int `json:"maxTokens,omitempty"` + ModelName string `json:"modelName,omitempty"` + ModelProvider bool `json:"modelProvider,omitempty"` + JSONResponse bool `json:"jsonResponse,omitempty"` + Chat bool `json:"chat,omitempty"` + Temperature *float32 `json:"temperature,omitempty"` + Cache *bool `json:"cache,omitempty"` + InternalPrompt *bool `json:"internalPrompt"` + Arguments *openapi3.Schema `json:"arguments,omitempty"` + Tools []string `json:"tools,omitempty"` + GlobalTools []string `json:"globalTools,omitempty"` + GlobalModelName string `json:"globalModelName,omitempty"` + Context []string `json:"context,omitempty"` + ExportContext []string `json:"exportContext,omitempty"` + Export []string `json:"export,omitempty"` + Agents []string `json:"agents,omitempty"` + Credentials []string `json:"credentials,omitempty"` + InputFilters []string `json:"inputFilters,omitempty"` + ExportInputFilters []string `json:"exportInputFilters,omitempty"` + OutputFilters []string `json:"outputFilters,omitempty"` + ExportOutputFilters []string `json:"exportOutputFilters,omitempty"` + Blocking bool `json:"-"` } func (p Parameters) ToolRefNames() []string { @@ -153,7 +155,9 @@ func (p Parameters) ToolRefNames() []string { p.Context, p.Credentials, p.InputFilters, - p.ExportInputFilters) + p.ExportInputFilters, + p.OutputFilters, + p.ExportOutputFilters) } type ToolDef struct { @@ -419,6 +423,12 @@ func (t ToolDef) String() string { if len(t.Parameters.ExportInputFilters) != 0 { _, _ = fmt.Fprintf(buf, "Share Input Filters: %s\n", strings.Join(t.Parameters.ExportInputFilters, ", ")) 
} + if len(t.Parameters.OutputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Output Filters: %s\n", strings.Join(t.Parameters.OutputFilters, ", ")) + } + if len(t.Parameters.ExportOutputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Share Output Filters: %s\n", strings.Join(t.Parameters.ExportOutputFilters, ", ")) + } if t.Parameters.MaxTokens != 0 { _, _ = fmt.Fprintf(buf, "Max Tokens: %d\n", t.Parameters.MaxTokens) } @@ -521,6 +531,31 @@ func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { return result.List() } +func (t Tool) GetOutputFilterTools(program Program) ([]ToolReference, error) { + result := &toolRefSet{} + + outputFilterRefs, err := t.GetToolRefsFromNames(t.OutputFilters) + if err != nil { + return nil, err + } + + for _, outputFilterRef := range outputFilterRefs { + result.Add(outputFilterRef) + } + + contextRefs, err := t.GetContextTools(program) + if err != nil { + return nil, err + } + + for _, contextRef := range contextRefs { + contextTool := program.ToolSet[contextRef.ToolID] + result.AddAll(contextTool.GetToolRefsFromNames(contextTool.ExportOutputFilters)) + } + + return result.List() +} + func (t Tool) GetInputFilterTools(program Program) ([]ToolReference, error) { result := &toolRefSet{} diff --git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index 6e3d98d3..43af6cee 100644 --- a/pkg/types/tool_test.go +++ b/pkg/types/tool_test.go @@ -9,28 +9,30 @@ import ( func TestToolDef_String(t *testing.T) { tool := ToolDef{ Parameters: Parameters{ - Name: "Tool Sample", - Description: "This is a sample tool", - MaxTokens: 1024, - ModelName: "ModelSample", - ModelProvider: true, - JSONResponse: true, - Chat: true, - Temperature: float32Ptr(0.8), - Cache: boolPtr(true), - InternalPrompt: boolPtr(true), - Arguments: ObjectSchema("arg1", "desc1", "arg2", "desc2"), - Tools: []string{"Tool1", "Tool2"}, - GlobalTools: []string{"GlobalTool1", "GlobalTool2"}, - GlobalModelName: "GlobalModelSample", - Context: []string{"Context1", "Context2"}, - ExportContext: []string{"ExportContext1", "ExportContext2"}, - Export: []string{"Export1", "Export2"}, - Agents: []string{"Agent1", "Agent2"}, - Credentials: []string{"Credential1", "Credential2"}, - Blocking: true, - InputFilters: []string{"Filter1", "Filter2"}, - ExportInputFilters: []string{"SharedFilter1", "SharedFilter2"}, + Name: "Tool Sample", + Description: "This is a sample tool", + MaxTokens: 1024, + ModelName: "ModelSample", + ModelProvider: true, + JSONResponse: true, + Chat: true, + Temperature: float32Ptr(0.8), + Cache: boolPtr(true), + InternalPrompt: boolPtr(true), + Arguments: ObjectSchema("arg1", "desc1", "arg2", "desc2"), + Tools: []string{"Tool1", "Tool2"}, + GlobalTools: []string{"GlobalTool1", "GlobalTool2"}, + GlobalModelName: "GlobalModelSample", + Context: []string{"Context1", "Context2"}, + ExportContext: []string{"ExportContext1", "ExportContext2"}, + Export: []string{"Export1", "Export2"}, + Agents: []string{"Agent1", "Agent2"}, + Credentials: []string{"Credential1", "Credential2"}, + Blocking: true, + InputFilters: []string{"Filter1", "Filter2"}, + ExportInputFilters: []string{"SharedFilter1", "SharedFilter2"}, + OutputFilters: []string{"Filter1", "Filter2"}, + ExportOutputFilters: []string{"SharedFilter1", "SharedFilter2"}, }, Instructions: "This is a sample instruction", } @@ -46,6 +48,8 @@ Context: Context1, Context2 Share Context: ExportContext1, ExportContext2 Input Filters: Filter1, Filter2 Share Input Filters: SharedFilter1, SharedFilter2 +Output Filters: Filter1, Filter2 +Share Output Filters: 
SharedFilter1, SharedFilter2 Max Tokens: 1024 Model: ModelSample Model Provider: true From 418a00a3b79724214d53b2d5483d704140e37955 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 21 Jun 2024 16:10:35 -0700 Subject: [PATCH 19/22] bug: don't ignore default prompt in chat tools anymore --- pkg/engine/engine.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index bea8439a..2c7c729d 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -10,7 +10,6 @@ import ( "github.com/gptscript-ai/gptscript/pkg/config" gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/counter" - "github.com/gptscript-ai/gptscript/pkg/system" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/gptscript-ai/gptscript/pkg/version" ) @@ -282,11 +281,6 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { completion.Messages = addUpdateSystem(ctx, tool, completion.Messages) - if _, def := system.IsDefaultPrompt(input); tool.Chat && def { - // Ignore "default prompts" from chat - input = "" - } - if tool.Chat && input == "{}" { input = "" } From fbcacba48901b1040f7462fdadb884dc17e6bcfd Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Fri, 21 Jun 2024 11:32:32 -0400 Subject: [PATCH 20/22] fix: strip file extension from local tool paths at ui startup Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/cli/gptscript.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index fbac48a0..2b4bcdb3 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -348,7 +348,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { return fmt.Errorf("cannot determine absolute path to script %s: %v", file, err) } gptOpt.Env = append(gptOpt.Env, "SCRIPTS_PATH="+filepath.Dir(absPathToScript)) - file = filepath.Base(file) + file = strings.TrimSuffix(filepath.Base(file), ".gpt") } else { cwd, err := os.Getwd() if err != nil { From 60da900928139454bce5b4d1fe89931f197698d0 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 24 Jun 2024 14:10:01 -0400 Subject: [PATCH 21/22] enhance: update credentials framework for OAuth support (#305) Signed-off-by: Grant Linville --- pkg/cli/credential.go | 80 ++++++++++++++++++++++------------- pkg/credentials/credential.go | 47 +++++++++++++------- pkg/runner/runner.go | 45 +++++++++++--------- 3 files changed, 108 insertions(+), 64 deletions(-) diff --git a/pkg/cli/credential.go b/pkg/cli/credential.go index 5f617941..6bfb1ed6 100644 --- a/pkg/cli/credential.go +++ b/pkg/cli/credential.go @@ -6,6 +6,7 @@ import ( "sort" "strings" "text/tabwriter" + "time" cmd2 "github.com/acorn-io/cmd" "github.com/gptscript-ai/gptscript/pkg/cache" @@ -14,6 +15,11 @@ import ( "github.com/spf13/cobra" ) +const ( + expiresNever = "never" + expiresExpired = "expired" +) + type Credential struct { root *GPTScript AllContexts bool `usage:"List credentials for all contexts" local:"true"` @@ -46,6 +52,7 @@ func (c *Credential) Run(_ *cobra.Command, _ []string) error { } opts.Cache = cache.Complete(opts.Cache) + // Initialize the credential store and get all the credentials. 
store, err := credentials.NewStore(cfg, ctx, opts.Cache.CacheDir) if err != nil { return fmt.Errorf("failed to get credentials store: %w", err) @@ -56,6 +63,10 @@ func (c *Credential) Run(_ *cobra.Command, _ []string) error { return fmt.Errorf("failed to list credentials: %w", err) } + w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) + defer w.Flush() + + // Sort credentials and print column names, depending on the options. if c.AllContexts { // Sort credentials by context sort.Slice(creds, func(i, j int) bool { @@ -65,25 +76,10 @@ func (c *Credential) Run(_ *cobra.Command, _ []string) error { return creds[i].Context < creds[j].Context }) - w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) - defer w.Flush() - if c.ShowEnvVars { - _, _ = w.Write([]byte("CONTEXT\tCREDENTIAL\tENVIRONMENT VARIABLES\n")) - - for _, cred := range creds { - envVars := make([]string, 0, len(cred.Env)) - for envVar := range cred.Env { - envVars = append(envVars, envVar) - } - sort.Strings(envVars) - _, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", cred.Context, cred.ToolName, strings.Join(envVars, ", ")) - } + _, _ = w.Write([]byte("CONTEXT\tCREDENTIAL\tEXPIRES IN\tENV\n")) } else { - _, _ = w.Write([]byte("CONTEXT\tCREDENTIAL\n")) - for _, cred := range creds { - _, _ = fmt.Fprintf(w, "%s\t%s\n", cred.Context, cred.ToolName) - } + _, _ = w.Write([]byte("CONTEXT\tCREDENTIAL\tEXPIRES IN\n")) } } else { // Sort credentials by tool name @@ -92,24 +88,48 @@ func (c *Credential) Run(_ *cobra.Command, _ []string) error { }) if c.ShowEnvVars { - w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) - defer w.Flush() - _, _ = w.Write([]byte("CREDENTIAL\tENVIRONMENT VARIABLES\n")) - - for _, cred := range creds { - envVars := make([]string, 0, len(cred.Env)) - for envVar := range cred.Env { - envVars = append(envVars, envVar) - } - sort.Strings(envVars) - _, _ = fmt.Fprintf(w, "%s\t%s\n", cred.ToolName, strings.Join(envVars, ", ")) + _, _ = w.Write([]byte("CREDENTIAL\tEXPIRES IN\tENV\n")) + } else { + _, _ = w.Write([]byte("CREDENTIAL\tEXPIRES IN\n")) + } + } + + for _, cred := range creds { + expires := expiresNever + if cred.ExpiresAt != nil { + expires = expiresExpired + if !cred.IsExpired() { + expires = time.Until(*cred.ExpiresAt).Truncate(time.Second).String() } + } + + var fields []any + if c.AllContexts { + fields = []any{cred.Context, cred.ToolName, expires} } else { - for _, cred := range creds { - fmt.Println(cred.ToolName) + fields = []any{cred.ToolName, expires} + } + + if c.ShowEnvVars { + envVars := make([]string, 0, len(cred.Env)) + for envVar := range cred.Env { + envVars = append(envVars, envVar) } + sort.Strings(envVars) + fields = append(fields, strings.Join(envVars, ", ")) } + + printFields(w, fields) } return nil } + +func printFields(w *tabwriter.Writer, fields []any) { + if len(fields) == 0 { + return + } + + fmtStr := strings.Repeat("%s\t", len(fields)-1) + "%s\n" + _, _ = fmt.Fprintf(w, fmtStr, fields...) 
+} diff --git a/pkg/credentials/credential.go b/pkg/credentials/credential.go index 46f705dc..fc247b38 100644 --- a/pkg/credentials/credential.go +++ b/pkg/credentials/credential.go @@ -4,43 +4,58 @@ import ( "encoding/json" "fmt" "strings" + "time" "github.com/docker/cli/cli/config/types" ) -const ctxSeparator = "///" - type CredentialType string const ( + ctxSeparator = "///" CredentialTypeTool CredentialType = "tool" CredentialTypeModelProvider CredentialType = "modelProvider" + ExistingCredential = "GPTSCRIPT_EXISTING_CREDENTIAL" ) type Credential struct { - Context string `json:"context"` - ToolName string `json:"toolName"` - Type CredentialType `json:"type"` - Env map[string]string `json:"env"` + Context string `json:"context"` + ToolName string `json:"toolName"` + Type CredentialType `json:"type"` + Env map[string]string `json:"env"` + ExpiresAt *time.Time `json:"expiresAt"` + RefreshToken string `json:"refreshToken"` +} + +func (c Credential) IsExpired() bool { + if c.ExpiresAt == nil { + return false + } + return time.Now().After(*c.ExpiresAt) } func (c Credential) toDockerAuthConfig() (types.AuthConfig, error) { - env, err := json.Marshal(c.Env) + cred, err := json.Marshal(c) if err != nil { return types.AuthConfig{}, err } return types.AuthConfig{ Username: string(c.Type), - Password: string(env), + Password: string(cred), ServerAddress: toolNameWithCtx(c.ToolName, c.Context), }, nil } func credentialFromDockerAuthConfig(authCfg types.AuthConfig) (Credential, error) { - var env map[string]string - if err := json.Unmarshal([]byte(authCfg.Password), &env); err != nil { - return Credential{}, err + var cred Credential + if err := json.Unmarshal([]byte(authCfg.Password), &cred); err != nil || len(cred.Env) == 0 { + // Legacy: try unmarshalling into just an env map + var env map[string]string + if err := json.Unmarshal([]byte(authCfg.Password), &env); err != nil { + return Credential{}, err + } + cred.Env = env } // We used to hardcode the username as "gptscript" before CredentialType was introduced, so @@ -62,10 +77,12 @@ func credentialFromDockerAuthConfig(authCfg types.AuthConfig) (Credential, error } return Credential{ - Context: ctx, - ToolName: tool, - Type: CredentialType(credType), - Env: env, + Context: ctx, + ToolName: tool, + Type: CredentialType(credType), + Env: cred.Env, + ExpiresAt: cred.ExpiresAt, + RefreshToken: cred.RefreshToken, }, nil } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index fb2cba0d..e4794535 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -250,7 +250,7 @@ var ( EventTypeRunFinish EventType = "runFinish" ) -func getContextInput(prg *types.Program, ref types.ToolReference, input string) (string, error) { +func getToolRefInput(prg *types.Program, ref types.ToolReference, input string) (string, error) { if ref.Arg == "" { return "", nil } @@ -355,7 +355,7 @@ func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monito continue } - contextInput, err := getContextInput(callCtx.Program, toolRef, input) + contextInput, err := getToolRefInput(callCtx.Program, toolRef, input) if err != nil { return nil, nil, err } @@ -867,7 +867,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } var ( - cred *credentials.Credential + c *credentials.Credential exists bool ) @@ -879,25 +879,39 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env // Only try to look up the cred if the tool is on GitHub or has an alias. 
// If it is a GitHub tool and has an alias, the alias overrides the tool name, so we use it as the credential name. if isGitHubTool(toolName) && credentialAlias == "" { - cred, exists, err = r.credStore.Get(toolName) + c, exists, err = r.credStore.Get(toolName) if err != nil { return nil, fmt.Errorf("failed to get credentials for tool %s: %w", toolName, err) } } else if credentialAlias != "" { - cred, exists, err = r.credStore.Get(credentialAlias) + c, exists, err = r.credStore.Get(credentialAlias) if err != nil { return nil, fmt.Errorf("failed to get credentials for tool %s: %w", credentialAlias, err) } } + if c == nil { + c = &credentials.Credential{} + } + // If the credential doesn't already exist in the store, run the credential tool in order to get the value, // and save it in the store. - if !exists { + if !exists || c.IsExpired() { credToolRefs, ok := callCtx.Tool.ToolMapping[credToolName] if !ok || len(credToolRefs) != 1 { return nil, fmt.Errorf("failed to find ID for tool %s", credToolName) } + // If the existing credential is expired, we need to provide it to the cred tool through the environment. + if exists && c.IsExpired() { + credJSON, err := json.Marshal(c) + if err != nil { + return nil, fmt.Errorf("failed to marshal credential: %w", err) + } + env = append(env, fmt.Sprintf("%s=%s", credentials.ExistingCredential, string(credJSON))) + } + + // Get the input for the credential tool, if there is any. var input string if args != nil { inputBytes, err := json.Marshal(args) @@ -916,21 +930,14 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env return nil, fmt.Errorf("invalid state: credential tool [%s] can not result in a continuation", credToolName) } - var envMap struct { - Env map[string]string `json:"env"` - } - if err := json.Unmarshal([]byte(*res.Result), &envMap); err != nil { + if err := json.Unmarshal([]byte(*res.Result), &c); err != nil { return nil, fmt.Errorf("failed to unmarshal credential tool %s response: %w", credToolName, err) } - - cred = &credentials.Credential{ - Type: credentials.CredentialTypeTool, - Env: envMap.Env, - ToolName: credName, - } + c.ToolName = credName + c.Type = credentials.CredentialTypeTool isEmpty := true - for _, v := range cred.Env { + for _, v := range c.Env { if v != "" { isEmpty = false break @@ -941,7 +948,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env if (isGitHubTool(toolName) && callCtx.Program.ToolSet[credToolRefs[0].ToolID].Source.Repo != nil) || credentialAlias != "" { if isEmpty { log.Warnf("Not saving empty credential for tool %s", toolName) - } else if err := r.credStore.Add(*cred); err != nil { + } else if err := r.credStore.Add(*c); err != nil { return nil, fmt.Errorf("failed to add credential for tool %s: %w", toolName, err) } } else { @@ -949,7 +956,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } } - for k, v := range cred.Env { + for k, v := range c.Env { env = append(env, fmt.Sprintf("%s=%s", k, v)) } } From 770411a8db0d328ec53c9dfa69359c726e0197f0 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Mon, 24 Jun 2024 17:17:08 -0400 Subject: [PATCH 22/22] chore: bump ui for v0.8.4 Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/cli/gptscript.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index fbac48a0..1bce6c81 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -330,7 
+330,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // If the user is trying to launch the chat-builder UI, then set up the tool and options here. if r.UI { - args = append([]string{env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui@v2")}, args...) + args = append([]string{env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui@v0.8.4")}, args...) // If args has more than one element, then the user has provided a file. if len(args) > 1 {
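Note on the output filter feature introduced in PATCH 01/22 above: a tool listed under `output filter:` (or exported from a context tool via `share output filters:`) runs after the main tool returns, receives the output together with the chat, continuation, and chatFinish flags as args, and its stdout replaces the output (see handleOutput in pkg/runner/output.go and GetOutputFilterTools in pkg/types/tool.go). Below is a minimal sketch of such a filter, modeled on pkg/tests/testdata/TestOutput/test.gpt; the tool name "redact" and the sed command are illustrative only:

  chat: true
  output filter: redact

  Tool body

  ---
  name: redact
  args: output: the output content

  #!/bin/bash

  echo "${OUTPUT}" | sed 's/password/[redacted]/g'

As in the testdata, each declared arg is exposed to the script as an uppercased environment variable, so the filter reads the original output from ${OUTPUT}, and whatever it prints becomes the new output.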
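Note on the credentials change in PATCH 21/22 above: credentials now carry optional expiresAt and refreshToken fields, and handleCredentials re-runs the credential tool whenever the stored credential reports IsExpired, handing the previous credential JSON to the tool in GPTSCRIPT_EXISTING_CREDENTIAL. Below is a sketch of a credential tool that emits an expiring credential; the `credentials:` wiring, the tool name "login", and all token values are illustrative, while the env/expiresAt/refreshToken keys follow the credentials.Credential JSON tags in the diff:

  chat: true
  credentials: login

  Tool body

  ---
  name: login

  #!/bin/bash

  # On a refresh run, GPTSCRIPT_EXISTING_CREDENTIAL holds the expired credential JSON;
  # a real tool would use its refreshToken to obtain a new token instead of echoing a literal.
  echo '{"env":{"MY_TOKEN":"abc123"},"expiresAt":"2024-12-31T00:00:00Z","refreshToken":"r-xyz"}'

The printed JSON is unmarshalled directly into credentials.Credential, so a tool that only prints {"env": {...}} keeps working; expiresAt and refreshToken are optional.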