diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml new file mode 100644 index 00000000..abea2159 --- /dev/null +++ b/.github/workflows/smoke.yaml @@ -0,0 +1,179 @@ +name: smoke + +on: + pull_request_target: + types: [opened, synchronize, reopened] + branches: + - main + push: + branches: + - main + paths-ignore: + - docs/** + workflow_dispatch: + +jobs: + check-label: + runs-on: ubuntu-22.04 + outputs: + run_smoke_tests: ${{ steps.check.outputs.run_smoke_tests }} + steps: + - name: Check if PR author is a member of the organization or has the run-smoke label + id: check + run: | + case "${{ github.event_name }}" in + push) + # Run smoke tests for push to base repo + echo "run_smoke_tests=true" >> $GITHUB_OUTPUT + exit 0 + ;; + workflow_dispatch) + # Run smoke tests for manual runs against base branch + echo "run_smoke_tests=true" >> $GITHUB_OUTPUT + exit 0 + ;; + pull_request_target) + ORG="gptscript-ai" + AUTHOR="${{ github.event.pull_request.user.login }}" + + # Check for org membership + MEMBERSHIP_RESPONSE_CODE=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token ${{ secrets.SMOKE_GH_TOKEN }}" \ + "https://api.github.com/orgs/$ORG/members/$AUTHOR") + + if [ "$MEMBERSHIP_RESPONSE_CODE" -eq 204 ]; then + echo "run_smoke_tests=true" >> $GITHUB_OUTPUT + exit 0 + fi + + # Check for "run-smoke" label + LABELS=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/repos/${{ github.repository_owner }}/${{ github.event.repository.name }}/issues/${{ github.event.pull_request.number }}/labels" | jq -r '.[].name') + if echo "$LABELS" | grep -q "run-smoke"; then + # Run smoke tests for PR with the "run-smoke" label + echo "run_smoke_tests=true" >> $GITHUB_OUTPUT + exit 0 + fi + + ;; + esac + + echo "run_smoke_tests=false" >> $GITHUB_OUTPUT + + gpt-4o-2024-05-13: + needs: check-label + if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout base 
repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Checkout PR code if running for a PR + if: ${{ github.event_name == 'pull_request_target' }} + uses: actions/checkout@v4 + with: + fetch-depth: 1 + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.21" + - env: + OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: gpt-4o-2024-05-13 + name: Run smoke test for gpt-4o-2024-05-13 + run: | + echo "Running smoke test for model gpt-4o-2024-05-13" + export PATH="$(pwd)/bin:${PATH}" + make smoke + + gpt-4-turbo-2024-04-09: + needs: check-label + if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout base repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Checkout PR code if running for a PR + if: ${{ github.event_name == 'pull_request_target' }} + uses: actions/checkout@v4 + with: + fetch-depth: 1 + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.21" + - env: + OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: gpt-4-turbo-2024-04-09 + name: Run smoke test for gpt-4-turbo-2024-04-09 + run: | + echo "Running smoke test for model gpt-4-turbo-2024-04-09" + export PATH="$(pwd)/bin:${PATH}" + make smoke + + claude-3-opus-20240229: + needs: check-label + if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout base repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Checkout PR code if running for a PR + if: ${{ github.event_name == 'pull_request_target' }} + uses: actions/checkout@v4 + with: + fetch-depth: 1 + repository: ${{ github.event.pull_request.head.repo.full_name }} + 
ref: ${{ github.event.pull_request.head.ref }} + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.21" + - env: + OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta + ANTHROPIC_API_KEY: ${{ secrets.SMOKE_ANTHROPIC_API_KEY }} + name: Run smoke test for claude-3-opus-20240229 + run: | + echo "Running smoke test for model claude-3-opus-20240229" + export PATH="$(pwd)/bin:${PATH}" + make smoke + + mistral-large-2402: + needs: check-label + if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout base repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Checkout PR code if running for a PR + if: ${{ github.event_name == 'pull_request_target' }} + uses: actions/checkout@v4 + with: + fetch-depth: 1 + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.21" + - env: + OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: mistral-large-2402 from https://api.mistral.ai/v1 + GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY: ${{ secrets.SMOKE_GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY }} + name: Run smoke test for mistral-large-2402 + run: | + echo "Running smoke test for model mistral-large-2402" + export PATH="$(pwd)/bin:${PATH}" + make smoke + diff --git a/.github/workflows/validate-docs.yaml b/.github/workflows/validate-docs.yaml index 82ca4bf4..b017af94 100644 --- a/.github/workflows/validate-docs.yaml +++ b/.github/workflows/validate-docs.yaml @@ -1,13 +1,9 @@ name: Validate docs build on: push: - paths: - - docs/** branches: - main pull_request: - paths: - - docs/** branches: - main @@ -23,4 +19,4 @@ jobs: cache: false go-version: "1.22" - run: make init-docs - - run: make validate-docs \ No newline at end of file 
+ - run: make validate-docs diff --git a/Makefile b/Makefile index 0186125d..0fed6344 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,10 @@ tidy: test: go test -v ./... +smoke: build +smoke: + go test -v -tags='smoke' ./pkg/tests/smoke/... + GOLANGCI_LINT_VERSION ?= v1.59.0 lint: if ! command -v golangci-lint &> /dev/null; then \ @@ -52,4 +56,4 @@ validate-docs: echo "Encountered dirty repo!"; \ git diff; \ exit 1 \ - ;fi \ No newline at end of file + ;fi diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md index 8c5d1459..4da342fb 100644 --- a/docs/docs/04-command-line-reference/gptscript.md +++ b/docs/docs/04-command-line-reference/gptscript.md @@ -30,7 +30,7 @@ gptscript [flags] PROGRAM_FILE [INPUT...] --force-chat Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT) --force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL) -h, --help help for gptscript - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) --list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS) --list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS) --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) diff --git a/docs/docs/04-command-line-reference/gptscript_eval.md b/docs/docs/04-command-line-reference/gptscript_eval.md index 94710662..4c485c7c 100644 --- a/docs/docs/04-command-line-reference/gptscript_eval.md +++ b/docs/docs/04-command-line-reference/gptscript_eval.md @@ -38,7 +38,7 @@ gptscript eval [flags] --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. 
fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) diff --git a/docs/docs/04-command-line-reference/gptscript_fmt.md b/docs/docs/04-command-line-reference/gptscript_fmt.md index c4e37856..511132d9 100644 --- a/docs/docs/04-command-line-reference/gptscript_fmt.md +++ b/docs/docs/04-command-line-reference/gptscript_fmt.md @@ -32,7 +32,7 @@ gptscript fmt [flags] --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) diff --git a/docs/docs/04-command-line-reference/gptscript_parse.md b/docs/docs/04-command-line-reference/gptscript_parse.md index d2322a48..3dde0073 100644 --- a/docs/docs/04-command-line-reference/gptscript_parse.md +++ b/docs/docs/04-command-line-reference/gptscript_parse.md @@ -32,7 +32,7 @@ gptscript parse [flags] --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) diff --git a/docs/docs/09-faqs.md b/docs/docs/09-faqs.md index a06e5818..b849e1d0 100644 --- a/docs/docs/09-faqs.md +++ b/docs/docs/09-faqs.md @@ -1,11 +1,55 @@ # FAQs -#### I don't have Homebrew, how can I install GPTScript? +### I don't have Homebrew, how can I install GPTScript? + On MacOS and Linux, you can alternatively install via: `curl https://get.gptscript.ai/install.sh | sh` On all supported systems, you download and install the archive for your platform and architecture from the [releases page](https://github.com/gptscript-ai/gptscript/releases). 
-#### Does GPTScript have an SDK or API I can program against? +### Does GPTScript have an SDK or API I can program against? Currently, there are three SDKs being maintained: [Python](https://github.com/gptscript-ai/py-gptscript), [Node](https://github.com/gptscript-ai/node-gptscript), and [Go](https://github.com/gptscript-ai/go-gptscript). They are currently under development and are being iterated on relatively rapidly. The READMEs in each repository contain the most up-to-date documentation for the functionality of each. + +### I see there's a --disable-cache flag. How does caching work in GPTScript? + +GPTScript leverages caching to speed up execution and reduce LLM costs. There are two areas cached by GPTScript: +- Git commit hash lookups for tools +- LLM responses + +Caching is enabled for both of these by default. It can be disabled via the `--disable-cache` flag. Below is an explanation of how these areas behave when caching is enabled and disabled. + +#### Git commit hash lookups for tools + +When a remote tool or context is included in your script (like so: `Tools: github.com/gptscript-ai/browser`) and then invoked during script execution, GPTScript will pull the Git repo for that tool and build it. The tool’s repo and build will be stored in your system’s cache directory (at [$XDG_CACHE_HOME](https://pkg.go.dev/os#UserCacheDir)/gptscript/repos). Subsequent invocations of the tool leverage that cache. When the cache is enabled, GPTScript will only check for a newer version of the tool once an hour; if an hour hasn’t passed since the last check, it will just use the one it has. If this is the first invocation and the tool doesn’t yet exist in the cache, it will be pulled and built as normal. + +When the cache is disabled, GPTScript will check that it has the latest version of the tool (meaning the latest git commit for the repo) on every single invocation of the tool. 
If GPTScript determines it already has the latest version, that build will be used as-is. In other words, disabling the cache DOES NOT force GPTScript to rebuild the tool, it only forces GPTScript to always check if it has the latest version. + +#### LLM responses + +With regards to LLM responses, when the cache is enabled GPTScript will cache the LLM’s response to a chat completion request. Each response is stored as a gob-encoded file in $XDG_CACHE_HOME/gptscript, where the file name is a hash of the chat completion request. + +It is important to note that all [messages in chat completion request](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) are used to generate the hash that is used as the file name. This means that every message between user and LLM affects the cache lookup. So, when using GPTScript in chat mode, it is very unlikely you’ll receive a cached LLM response. Conversely, non-chat GPTScript automations are much more likely to be consistent and thus make use of cached LLM responses. + +### I see there's a --workspace flag. How do I make use of that? + +Every invocation of GPTScript has a workspace directory available to it. By default, this directory is a one-off temp directory, but you can override this and explicitly set a workspace using the `--workspace` flag, like so: +``` +gptscript --workspace . my-script.gpt +``` +In the above example, the user’s current directory (denoted by `.`) will be set as the workspace. Both absolute and relative paths are supported. + +Regardless of whether it is set implicitly or explicitly, the workspace is then made available to the script execution as the `GPTSCRIPT_WORKSPACE_DIR` environment variable. + +:::info +GPTScript does not force scripts or tools to write to, read from, or otherwise use the workspace. The tools must decide to make use of the workspace environment variable. 
+::: + +To make prompt-based tools workspace aware, you can add our workspace context, like so: +``` +Context: github.com/gptscript-ai/context/workspace +``` +This tells the LLM (by way of a [system message](https://platform.openai.com/docs/guides/text-generation/chat-completions-api)) what the workspace directory is, what its initial contents are, and that if it decides to create a file or directory, it should do so in the workspace directory. This will not, however, have any impact on code-based tools (ie python, bash, or go tools). Such tools will have the `GPTSCRIPT_WORKSPACE_DIR` environment variable available to them, but they must be written in such a way that they make use of it. + +This context also automatically shares the `sys.ls`, `sys.read`, and `sys.write` tools with the tool that is using it as a context. This is because if a tool intends to interact with the workspace, it minimally needs these tools. + diff --git a/go.mod b/go.mod index f3b7eb22..4ab2b395 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 - github.com/gptscript-ai/tui v0.0.0-20240614062633-985091711b0a + github.com/gptscript-ai/tui v0.0.0-20240618230843-2b5961f3341b github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 @@ -32,6 +32,7 @@ require ( golang.org/x/sync v0.7.0 golang.org/x/term v0.20.0 gopkg.in/yaml.v3 v3.0.1 + gotest.tools/v3 v3.5.1 sigs.k8s.io/yaml v1.4.0 ) @@ -107,6 +108,5 @@ require ( golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect golang.org/x/tools v0.20.0 // indirect - gotest.tools/v3 v3.5.1 // indirect mvdan.cc/gofumpt v0.6.0 // indirect ) diff --git a/go.sum b/go.sum index bbaa5899..1ab5607d 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ 
github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf037 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/go-gptscript v0.0.0-20240613214812-8111c2b02d71 h1:WehkkausLuXI91ePpIVrzZ6eBmfFIU/HfNsSA1CHiwo= github.com/gptscript-ai/go-gptscript v0.0.0-20240613214812-8111c2b02d71/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240614062633-985091711b0a h1:LFsEDiIAx0Rq0V6aOMlRjXMMIfkA3uEhqqqjoggLlDQ= -github.com/gptscript-ai/tui v0.0.0-20240614062633-985091711b0a/go.mod h1:ZlyM+BRiD6mV04w+Xw2mXP1VKGEUbn8BvwrosWlplUo= +github.com/gptscript-ai/tui v0.0.0-20240618230843-2b5961f3341b h1:OJfmpDQ/6ffz5P4UdJJEd5xeqo2dfWnsg1YZLDqJWYo= +github.com/gptscript-ai/tui v0.0.0-20240618230843-2b5961f3341b/go.mod h1:ZlyM+BRiD6mV04w+Xw2mXP1VKGEUbn8BvwrosWlplUo= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index a4aeb74e..3e05f05c 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -1,6 +1,7 @@ package builtin import ( + "bytes" "context" "encoding/json" "errors" @@ -31,6 +32,7 @@ var SafeTools = map[string]struct{}{ "sys.echo": {}, "sys.prompt": {}, "sys.time.now": {}, + "sys.context": {}, } var tools = map[string]types.Tool{ @@ -227,16 +229,15 @@ var tools = map[string]types.Tool{ BuiltinFunc: SysChatHistory, }, }, -} - -func SysProgram() *types.Program { - result := &types.Program{ - ToolSet: types.ToolSet{}, - } - for _, tool := range ListTools() { - result.ToolSet[tool.ID] = tool - } - return result + "sys.context": { + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Description: "Retrieves the current 
internal GPTScript tool call context information", + Arguments: types.ObjectSchema(), + }, + BuiltinFunc: SysContext, + }, + }, } func ListTools() (result []types.Tool) { @@ -264,7 +265,7 @@ func Builtin(name string) (types.Tool, bool) { return SetDefaults(t), ok } -func SysFind(_ context.Context, _ []string, input string) (string, error) { +func SysFind(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var result []string var params struct { Pattern string `json:"pattern,omitempty"` @@ -305,7 +306,7 @@ func SysFind(_ context.Context, _ []string, input string) (string, error) { return strings.Join(result, "\n"), nil } -func SysExec(_ context.Context, env []string, input string) (string, error) { +func SysExec(_ context.Context, env []string, input string, progress chan<- string) (string, error) { var params struct { Command string `json:"command,omitempty"` Directory string `json:"directory,omitempty"` @@ -328,13 +329,30 @@ func SysExec(_ context.Context, env []string, input string) (string, error) { cmd = exec.Command("/bin/sh", "-c", params.Command) } + var ( + out bytes.Buffer + pw = progressWriter{ + out: progress, + } + combined = io.MultiWriter(&out, &pw) + ) cmd.Env = env cmd.Dir = params.Directory - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Sprintf("ERROR: %s\nOUTPUT:\n%s", err, out), nil + cmd.Stdout = combined + cmd.Stderr = combined + if err := cmd.Run(); err != nil { + return fmt.Sprintf("ERROR: %s\nOUTPUT:\n%s", err, &out), nil } - return string(out), nil + return out.String(), nil +} + +type progressWriter struct { + out chan<- string +} + +func (pw *progressWriter) Write(p []byte) (n int, err error) { + pw.out <- string(p) + return len(p), nil } func getWorkspaceDir(envs []string) (string, error) { @@ -347,7 +365,7 @@ func getWorkspaceDir(envs []string) (string, error) { return "", fmt.Errorf("no workspace directory found in env") } -func SysLs(_ context.Context, _ []string, input string) (string, 
error) { +func SysLs(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Dir string `json:"dir,omitempty"` } @@ -383,7 +401,7 @@ func SysLs(_ context.Context, _ []string, input string) (string, error) { return strings.Join(result, "\n"), nil } -func SysRead(_ context.Context, _ []string, input string) (string, error) { +func SysRead(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Filename string `json:"filename,omitempty"` } @@ -411,7 +429,7 @@ func SysRead(_ context.Context, _ []string, input string) (string, error) { return string(data), nil } -func SysWrite(_ context.Context, _ []string, input string) (string, error) { +func SysWrite(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Filename string `json:"filename,omitempty"` Content string `json:"content,omitempty"` @@ -443,7 +461,7 @@ func SysWrite(_ context.Context, _ []string, input string) (string, error) { return fmt.Sprintf("Wrote (%d) bytes to file %s", len(data), file), nil } -func SysAppend(_ context.Context, _ []string, input string) (string, error) { +func SysAppend(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Filename string `json:"filename,omitempty"` Content string `json:"content,omitempty"` @@ -489,7 +507,7 @@ func fixQueries(u string) string { return url.String() } -func SysHTTPGet(_ context.Context, _ []string, input string) (_ string, err error) { +func SysHTTPGet(_ context.Context, _ []string, input string, _ chan<- string) (_ string, err error) { var params struct { URL string `json:"url,omitempty"` } @@ -523,8 +541,8 @@ func SysHTTPGet(_ context.Context, _ []string, input string) (_ string, err erro return string(data), nil } -func SysHTTPHtml2Text(ctx context.Context, env []string, input string) (string, error) { - content, err := SysHTTPGet(ctx, env, input) +func SysHTTPHtml2Text(ctx 
context.Context, env []string, input string, progress chan<- string) (string, error) { + content, err := SysHTTPGet(ctx, env, input, progress) if err != nil { return "", err } @@ -533,7 +551,7 @@ func SysHTTPHtml2Text(ctx context.Context, env []string, input string) (string, }) } -func SysHTTPPost(ctx context.Context, _ []string, input string) (_ string, err error) { +func SysHTTPPost(ctx context.Context, _ []string, input string, _ chan<- string) (_ string, err error) { var params struct { URL string `json:"url,omitempty"` Content string `json:"content,omitempty"` @@ -569,7 +587,18 @@ func SysHTTPPost(ctx context.Context, _ []string, input string) (_ string, err e return fmt.Sprintf("Wrote %d to %s", len([]byte(params.Content)), params.URL), nil } -func SysGetenv(_ context.Context, env []string, input string) (string, error) { +func DiscardProgress() (progress chan<- string, closeFunc func()) { + ch := make(chan string) + go func() { + for range ch { + } + }() + return ch, func() { + close(ch) + } +} + +func SysGetenv(_ context.Context, env []string, input string, _ chan<- string) (string, error) { var params struct { Name string `json:"name,omitempty"` } @@ -593,19 +622,28 @@ func SysGetenv(_ context.Context, env []string, input string) (string, error) { return value, nil } -type ErrChatFinish struct { - Message string +func invalidArgument(input string, err error) string { + return fmt.Sprintf("Failed to parse arguments %s: %v", input, err) } -func (e *ErrChatFinish) Error() string { - return fmt.Sprintf("CHAT FINISH: %s", e.Message) -} +func SysContext(ctx context.Context, _ []string, _ string, _ chan<- string) (string, error) { + engineContext, _ := engine.FromContext(ctx) -func invalidArgument(input string, err error) string { - return fmt.Sprintf("Failed to parse arguments %s: %v", input, err) + callContext := *engineContext.GetCallContext() + callContext.ID = "" + callContext.ParentID = "" + data, err := json.Marshal(map[string]any{ + "program": 
engineContext.Program, + "call": callContext, + }) + if err != nil { + return invalidArgument("", err), nil + } + + return string(data), nil } -func SysChatHistory(ctx context.Context, _ []string, _ string) (string, error) { +func SysChatHistory(ctx context.Context, _ []string, _ string, _ chan<- string) (string, error) { engineContext, _ := engine.FromContext(ctx) data, err := json.Marshal(engine.ChatHistory{ @@ -635,21 +673,21 @@ func writeHistory(ctx *engine.Context) (result []engine.ChatHistoryCall) { return } -func SysChatFinish(_ context.Context, _ []string, input string) (string, error) { +func SysChatFinish(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Message string `json:"return,omitempty"` } if err := json.Unmarshal([]byte(input), ¶ms); err != nil { - return "", &ErrChatFinish{ + return "", &engine.ErrChatFinish{ Message: input, } } - return "", &ErrChatFinish{ + return "", &engine.ErrChatFinish{ Message: params.Message, } } -func SysAbort(_ context.Context, _ []string, input string) (string, error) { +func SysAbort(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Message string `json:"message,omitempty"` } @@ -659,7 +697,7 @@ func SysAbort(_ context.Context, _ []string, input string) (string, error) { return "", fmt.Errorf("ABORT: %s", params.Message) } -func SysRemove(_ context.Context, _ []string, input string) (string, error) { +func SysRemove(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Location string `json:"location,omitempty"` } @@ -678,7 +716,7 @@ func SysRemove(_ context.Context, _ []string, input string) (string, error) { return fmt.Sprintf("Removed file: %s", params.Location), nil } -func SysStat(_ context.Context, _ []string, input string) (string, error) { +func SysStat(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Filepath string 
`json:"filepath,omitempty"` } @@ -698,7 +736,7 @@ func SysStat(_ context.Context, _ []string, input string) (string, error) { return fmt.Sprintf("%s %s mode: %s, size: %d bytes, modtime: %s", title, params.Filepath, stat.Mode().String(), stat.Size(), stat.ModTime().String()), nil } -func SysDownload(_ context.Context, env []string, input string) (_ string, err error) { +func SysDownload(_ context.Context, env []string, input string, _ chan<- string) (_ string, err error) { var params struct { URL string `json:"url,omitempty"` Location string `json:"location,omitempty"` @@ -771,6 +809,6 @@ func SysDownload(_ context.Context, env []string, input string) (_ string, err e return fmt.Sprintf("Downloaded %s to %s", params.URL, params.Location), nil } -func SysTimeNow(context.Context, []string, string) (string, error) { +func SysTimeNow(context.Context, []string, string, chan<- string) (string, error) { return time.Now().Format(time.RFC3339), nil } diff --git a/pkg/builtin/builtin_test.go b/pkg/builtin/builtin_test.go index 313b9718..c12a68f6 100644 --- a/pkg/builtin/builtin_test.go +++ b/pkg/builtin/builtin_test.go @@ -10,15 +10,17 @@ import ( ) func TestSysGetenv(t *testing.T) { + p, c := DiscardProgress() + defer c() v, err := SysGetenv(context.Background(), []string{ "MAGIC=VALUE", - }, `{"name":"MAGIC"}`) + }, `{"name":"MAGIC"}`, nil) require.NoError(t, err) autogold.Expect("VALUE").Equal(t, v) v, err = SysGetenv(context.Background(), []string{ "MAGIC=VALUE", - }, `{"name":"MAGIC2"}`) + }, `{"name":"MAGIC2"}`, p) require.NoError(t, err) autogold.Expect("MAGIC2 is not set or has no value").Equal(t, v) } diff --git a/pkg/cli/credential.go b/pkg/cli/credential.go index 5f617941..6bfb1ed6 100644 --- a/pkg/cli/credential.go +++ b/pkg/cli/credential.go @@ -6,6 +6,7 @@ import ( "sort" "strings" "text/tabwriter" + "time" cmd2 "github.com/acorn-io/cmd" "github.com/gptscript-ai/gptscript/pkg/cache" @@ -14,6 +15,11 @@ import ( "github.com/spf13/cobra" ) +const ( + expiresNever 
= "never" + expiresExpired = "expired" +) + type Credential struct { root *GPTScript AllContexts bool `usage:"List credentials for all contexts" local:"true"` @@ -46,6 +52,7 @@ func (c *Credential) Run(_ *cobra.Command, _ []string) error { } opts.Cache = cache.Complete(opts.Cache) + // Initialize the credential store and get all the credentials. store, err := credentials.NewStore(cfg, ctx, opts.Cache.CacheDir) if err != nil { return fmt.Errorf("failed to get credentials store: %w", err) @@ -56,6 +63,10 @@ func (c *Credential) Run(_ *cobra.Command, _ []string) error { return fmt.Errorf("failed to list credentials: %w", err) } + w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) + defer w.Flush() + + // Sort credentials and print column names, depending on the options. if c.AllContexts { // Sort credentials by context sort.Slice(creds, func(i, j int) bool { @@ -65,25 +76,10 @@ func (c *Credential) Run(_ *cobra.Command, _ []string) error { return creds[i].Context < creds[j].Context }) - w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) - defer w.Flush() - if c.ShowEnvVars { - _, _ = w.Write([]byte("CONTEXT\tCREDENTIAL\tENVIRONMENT VARIABLES\n")) - - for _, cred := range creds { - envVars := make([]string, 0, len(cred.Env)) - for envVar := range cred.Env { - envVars = append(envVars, envVar) - } - sort.Strings(envVars) - _, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", cred.Context, cred.ToolName, strings.Join(envVars, ", ")) - } + _, _ = w.Write([]byte("CONTEXT\tCREDENTIAL\tEXPIRES IN\tENV\n")) } else { - _, _ = w.Write([]byte("CONTEXT\tCREDENTIAL\n")) - for _, cred := range creds { - _, _ = fmt.Fprintf(w, "%s\t%s\n", cred.Context, cred.ToolName) - } + _, _ = w.Write([]byte("CONTEXT\tCREDENTIAL\tEXPIRES IN\n")) } } else { // Sort credentials by tool name @@ -92,24 +88,48 @@ func (c *Credential) Run(_ *cobra.Command, _ []string) error { }) if c.ShowEnvVars { - w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) - defer w.Flush() - _, _ = 
w.Write([]byte("CREDENTIAL\tENVIRONMENT VARIABLES\n")) - - for _, cred := range creds { - envVars := make([]string, 0, len(cred.Env)) - for envVar := range cred.Env { - envVars = append(envVars, envVar) - } - sort.Strings(envVars) - _, _ = fmt.Fprintf(w, "%s\t%s\n", cred.ToolName, strings.Join(envVars, ", ")) + _, _ = w.Write([]byte("CREDENTIAL\tEXPIRES IN\tENV\n")) + } else { + _, _ = w.Write([]byte("CREDENTIAL\tEXPIRES IN\n")) + } + } + + for _, cred := range creds { + expires := expiresNever + if cred.ExpiresAt != nil { + expires = expiresExpired + if !cred.IsExpired() { + expires = time.Until(*cred.ExpiresAt).Truncate(time.Second).String() } + } + + var fields []any + if c.AllContexts { + fields = []any{cred.Context, cred.ToolName, expires} } else { - for _, cred := range creds { - fmt.Println(cred.ToolName) + fields = []any{cred.ToolName, expires} + } + + if c.ShowEnvVars { + envVars := make([]string, 0, len(cred.Env)) + for envVar := range cred.Env { + envVars = append(envVars, envVar) } + sort.Strings(envVars) + fields = append(fields, strings.Join(envVars, ", ")) } + + printFields(w, fields) } return nil } + +func printFields(w *tabwriter.Writer, fields []any) { + if len(fields) == 0 { + return + } + + fmtStr := strings.Repeat("%s\t", len(fields)-1) + "%s\n" + _, _ = fmt.Fprintf(w, fmtStr, fields...) 
+} diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index d17afe45..34cf8766 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -45,14 +45,15 @@ type GPTScript struct { CacheOptions OpenAIOptions DisplayOptions - Color *bool `usage:"Use color in output (default true)" default:"true"` - Confirm bool `usage:"Prompt before running potentially dangerous commands"` - Debug bool `usage:"Enable debug logging"` - NoTrunc bool `usage:"Do not truncate long log messages"` - Quiet *bool `usage:"No output logging (set --quiet=false to force on even when there is no TTY)" short:"q"` - Output string `usage:"Save output to a file, or - for stdout" short:"o"` - EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\\\.\\pipe\\my-pipe)" name:"events-stream-to"` - Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f"` + Color *bool `usage:"Use color in output (default true)" default:"true"` + Confirm bool `usage:"Prompt before running potentially dangerous commands"` + Debug bool `usage:"Enable debug logging"` + NoTrunc bool `usage:"Do not truncate long log messages"` + Quiet *bool `usage:"No output logging (set --quiet=false to force on even when there is no TTY)" short:"q"` + Output string `usage:"Save output to a file, or - for stdout" short:"o"` + EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\\\.\\pipe\\my-pipe)" name:"events-stream-to"` + // Input should not be using GPTSCRIPT_INPUT env var because that is the same value that is set in tool executions + Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"` SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` ListModels bool `usage:"List the models available and exit" local:"true"` @@ -329,7 +330,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // If the user is trying to launch the chat-builder UI, then set up the tool and options here. if r.UI { - args = append([]string{env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui@v0.8.3")}, args...) + args = append([]string{env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui@v0.8.4")}, args...) // If args has more than one element, then the user has provided a file. 
if len(args) > 1 { @@ -347,7 +348,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { return fmt.Errorf("cannot determine absolute path to script %s: %v", file, err) } gptOpt.Env = append(gptOpt.Env, "SCRIPTS_PATH="+filepath.Dir(absPathToScript)) - file = filepath.Base(file) + file = strings.TrimSuffix(filepath.Base(file), ".gpt") } else { cwd, err := os.Getwd() if err != nil { diff --git a/pkg/credentials/credential.go b/pkg/credentials/credential.go index 46f705dc..fc247b38 100644 --- a/pkg/credentials/credential.go +++ b/pkg/credentials/credential.go @@ -4,43 +4,58 @@ import ( "encoding/json" "fmt" "strings" + "time" "github.com/docker/cli/cli/config/types" ) -const ctxSeparator = "///" - type CredentialType string const ( + ctxSeparator = "///" CredentialTypeTool CredentialType = "tool" CredentialTypeModelProvider CredentialType = "modelProvider" + ExistingCredential = "GPTSCRIPT_EXISTING_CREDENTIAL" ) type Credential struct { - Context string `json:"context"` - ToolName string `json:"toolName"` - Type CredentialType `json:"type"` - Env map[string]string `json:"env"` + Context string `json:"context"` + ToolName string `json:"toolName"` + Type CredentialType `json:"type"` + Env map[string]string `json:"env"` + ExpiresAt *time.Time `json:"expiresAt"` + RefreshToken string `json:"refreshToken"` +} + +func (c Credential) IsExpired() bool { + if c.ExpiresAt == nil { + return false + } + return time.Now().After(*c.ExpiresAt) } func (c Credential) toDockerAuthConfig() (types.AuthConfig, error) { - env, err := json.Marshal(c.Env) + cred, err := json.Marshal(c) if err != nil { return types.AuthConfig{}, err } return types.AuthConfig{ Username: string(c.Type), - Password: string(env), + Password: string(cred), ServerAddress: toolNameWithCtx(c.ToolName, c.Context), }, nil } func credentialFromDockerAuthConfig(authCfg types.AuthConfig) (Credential, error) { - var env map[string]string - if err := json.Unmarshal([]byte(authCfg.Password), &env); 
err != nil { - return Credential{}, err + var cred Credential + if err := json.Unmarshal([]byte(authCfg.Password), &cred); err != nil || len(cred.Env) == 0 { + // Legacy: try unmarshalling into just an env map + var env map[string]string + if err := json.Unmarshal([]byte(authCfg.Password), &env); err != nil { + return Credential{}, err + } + cred.Env = env } // We used to hardcode the username as "gptscript" before CredentialType was introduced, so @@ -62,10 +77,12 @@ func credentialFromDockerAuthConfig(authCfg types.AuthConfig) (Credential, error } return Credential{ - Context: ctx, - ToolName: tool, - Type: CredentialType(credType), - Env: env, + Context: ctx, + ToolName: tool, + Type: CredentialType(credType), + Env: cred.Env, + ExpiresAt: cred.ExpiresAt, + RefreshToken: cred.RefreshToken, }, nil } diff --git a/pkg/embedded/embed.go b/pkg/embedded/embed.go index de12bec3..7ea7edb9 100644 --- a/pkg/embedded/embed.go +++ b/pkg/embedded/embed.go @@ -3,7 +3,6 @@ package embedded import ( "io/fs" "os" - "strings" "github.com/gptscript-ai/gptscript/internal" "github.com/gptscript-ai/gptscript/pkg/cli" @@ -22,10 +21,11 @@ func Run(opts ...Options) bool { } system.SetBinToSelf() - if len(os.Args) > 1 && strings.HasPrefix(os.Args[1], "sys.") { + if os.Getenv("GPTSCRIPT_EMBEDDED") == "true" { cli.Main() return true } + _ = os.Setenv("GPTSCRIPT_EMBEDDED", "true") return false } diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 48b2d2b8..e0ea5e3a 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -12,6 +12,7 @@ import ( "runtime" "sort" "strings" + "sync" "github.com/google/shlex" "github.com/gptscript-ai/gptscript/pkg/counter" @@ -25,6 +26,24 @@ var requiredFileExtensions = map[string]string{ "powershell": "*.ps1", } +type outputWriter struct { + id string + progress chan<- types.CompletionStatus + buf bytes.Buffer +} + +func (o *outputWriter) Write(p []byte) (n int, err error) { + o.buf.Write(p) + o.progress <- types.CompletionStatus{ + CompletionID: o.id, + 
PartialResponse: &types.CompletionMessage{ + Role: types.CompletionMessageRoleTypeAssistant, + Content: types.Text(o.buf.String()), + }, + } + return len(p), nil +} + func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCategory ToolCategory) (cmdOut string, cmdErr error) { id := counter.Next() @@ -46,7 +65,30 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate "input": input, }, } - return tool.BuiltinFunc(ctx.WrappedContext(), e.Env, input) + + var ( + progress = make(chan string) + wg sync.WaitGroup + ) + wg.Add(1) + defer wg.Wait() + defer close(progress) + go func() { + defer wg.Done() + buf := strings.Builder{} + for line := range progress { + buf.WriteString(line) + e.Progress <- types.CompletionStatus{ + CompletionID: id, + PartialResponse: &types.CompletionMessage{ + Role: types.CompletionMessageRoleTypeAssistant, + Content: types.Text(buf.String()), + }, + } + } + }() + + return tool.BuiltinFunc(ctx.WrappedContext(), e.Env, input, progress) } var instructions []string @@ -74,7 +116,10 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate output := &bytes.Buffer{} all := &bytes.Buffer{} cmd.Stderr = io.MultiWriter(all, os.Stderr) - cmd.Stdout = io.MultiWriter(all, output) + cmd.Stdout = io.MultiWriter(all, output, &outputWriter{ + id: id, + progress: e.Progress, + }) if err := cmd.Run(); err != nil { if toolCategory == NoCategory { @@ -85,7 +130,7 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate return "", fmt.Errorf("ERROR: %s: %w", all, err) } - return output.String(), nil + return output.String(), IsChatFinishMessage(output.String()) } func (e *Engine) getRuntimeEnv(ctx context.Context, tool types.Tool, cmd, env []string) ([]string, error) { @@ -140,6 +185,8 @@ func appendInputAsEnv(env []string, input string) []string { dec := json.NewDecoder(bytes.NewReader([]byte(input))) dec.UseNumber() + env = appendEnv(env, "GPTSCRIPT_INPUT", 
input) + if err := json.Unmarshal([]byte(input), &data); err != nil { // ignore invalid JSON return env @@ -161,7 +208,6 @@ func appendInputAsEnv(env []string, input string) []string { } } - env = appendEnv(env, "GPTSCRIPT_INPUT", input) return env } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index c94b236a..2c7c729d 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -10,7 +10,6 @@ import ( "github.com/gptscript-ai/gptscript/pkg/config" gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/counter" - "github.com/gptscript-ai/gptscript/pkg/system" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/gptscript-ai/gptscript/pkg/version" ) @@ -21,6 +20,7 @@ type Model interface { type RuntimeManager interface { GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) + EnsureCredentialHelpers(ctx context.Context) error SetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig, env []string) error } @@ -97,6 +97,8 @@ const ( ProviderToolCategory ToolCategory = "provider" CredentialToolCategory ToolCategory = "credential" ContextToolCategory ToolCategory = "context" + InputToolCategory ToolCategory = "input" + OutputToolCategory ToolCategory = "output" NoCategory ToolCategory = "" ) @@ -105,6 +107,21 @@ type InputContext struct { Content string `json:"content,omitempty"` } +type ErrChatFinish struct { + Message string +} + +func (e *ErrChatFinish) Error() string { + return fmt.Sprintf("CHAT FINISH: %s", e.Message) +} + +func IsChatFinishMessage(msg string) error { + if msg, ok := strings.CutPrefix(msg, "CHAT FINISH: "); ok { + return &ErrChatFinish{Message: msg} + } + return nil +} + func (c *Context) ParentID() string { if c.Parent == nil { return "" @@ -148,7 +165,7 @@ func WithToolCategory(ctx context.Context, toolCategory ToolCategory) context.Co return context.WithValue(ctx, toolCategoryKey{}, toolCategory) } -func NewContext(ctx context.Context, 
prg *types.Program, input string) Context { +func NewContext(ctx context.Context, prg *types.Program, input string) (Context, error) { category, _ := ctx.Value(toolCategoryKey{}).(ToolCategory) callCtx := Context{ @@ -161,10 +178,17 @@ func NewContext(ctx context.Context, prg *types.Program, input string) Context { Program: prg, Input: input, } - return callCtx + + agentGroup, err := callCtx.Tool.GetAgents(*prg) + if err != nil { + return callCtx, err + } + + callCtx.AgentGroup = agentGroup + return callCtx, nil } -func (c *Context) SubCall(ctx context.Context, input, toolID, callID string, toolCategory ToolCategory) (Context, error) { +func (c *Context) SubCallContext(ctx context.Context, input, toolID, callID string, toolCategory ToolCategory) (Context, error) { tool, ok := c.Program.ToolSet[toolID] if !ok { return Context{}, fmt.Errorf("failed to file tool for id [%s]", toolID) @@ -174,7 +198,7 @@ func (c *Context) SubCall(ctx context.Context, input, toolID, callID string, too callID = counter.Next() } - agentGroup, err := c.Tool.GetAgentGroup(c.AgentGroup, toolID) + agentGroup, err := c.Tool.GetNextAgentGroup(*c.Program, c.AgentGroup, toolID) if err != nil { return Context{}, err } @@ -257,11 +281,6 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { completion.Messages = addUpdateSystem(ctx, tool, completion.Messages) - if _, def := system.IsDefaultPrompt(input); tool.Chat && def { - // Ignore "default prompts" from chat - input = "" - } - if tool.Chat && input == "{}" { input = "" } diff --git a/pkg/input/input.go b/pkg/input/input.go index 80bb49ee..0037fa5e 100644 --- a/pkg/input/input.go +++ b/pkg/input/input.go @@ -4,9 +4,11 @@ import ( "fmt" "io" "os" + "path/filepath" "strings" "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/types" ) func FromArgs(args []string) string { @@ -31,6 +33,14 @@ func FromFile(file string) (string, error) { } return string(data), nil } else if file != "" { + if 
s, err := os.Stat(file); err == nil && s.IsDir() { + for _, ext := range types.DefaultFiles { + if _, err := os.Stat(filepath.Join(file, ext)); err == nil { + file = filepath.Join(file, ext) + break + } + } + } log.Debugf("reading file %s", file) data, err := os.ReadFile(file) if err != nil { diff --git a/pkg/monitor/display.go b/pkg/monitor/display.go index 36c7a35d..167498c7 100644 --- a/pkg/monitor/display.go +++ b/pkg/monitor/display.go @@ -20,25 +20,22 @@ import ( ) type Options struct { - DisplayProgress bool `usage:"-"` - DumpState string `usage:"Dump the internal execution state to a file"` - DebugMessages bool `usage:"Enable logging of chat completion calls"` + DumpState string `usage:"Dump the internal execution state to a file"` + DebugMessages bool `usage:"Enable logging of chat completion calls"` } func Complete(opts ...Options) (result Options) { for _, opt := range opts { result.DumpState = types.FirstSet(opt.DumpState, result.DumpState) - result.DisplayProgress = types.FirstSet(opt.DisplayProgress, result.DisplayProgress) result.DebugMessages = types.FirstSet(opt.DebugMessages, result.DebugMessages) } return } type Console struct { - dumpState string - displayProgress bool - printMessages bool - callLock sync.Mutex + dumpState string + printMessages bool + callLock sync.Mutex } var ( @@ -47,7 +44,7 @@ var ( func (c *Console) Start(_ context.Context, prg *types.Program, _ []string, input string) (runner.Monitor, error) { id := counter.Next() - mon := newDisplay(c.dumpState, c.displayProgress, c.printMessages) + mon := newDisplay(c.dumpState, c.printMessages) mon.callLock = &c.callLock mon.dump.ID = fmt.Sprint(id) mon.dump.Program = prg @@ -315,23 +312,20 @@ func (d *display) Stop(output string, err error) { func NewConsole(opts ...Options) *Console { opt := Complete(opts...) 
return &Console{ - dumpState: opt.DumpState, - displayProgress: opt.DisplayProgress, - printMessages: opt.DebugMessages, + dumpState: opt.DumpState, + printMessages: opt.DebugMessages, } } -func newDisplay(dumpState string, progress, printMessages bool) *display { +func newDisplay(dumpState string, printMessages bool) *display { display := &display{ dumpState: dumpState, callIDMap: make(map[string]string), printMessages: printMessages, } - if progress { - display.livePrinter = &livePrinter{ - lastContent: map[string]string{}, - callIDMap: display.callIDMap, - } + display.livePrinter = &livePrinter{ + lastContent: map[string]string{}, + callIDMap: display.callIDMap, } return display } diff --git a/pkg/mvl/log.go b/pkg/mvl/log.go index 6aa7c491..015fa199 100644 --- a/pkg/mvl/log.go +++ b/pkg/mvl/log.go @@ -1,6 +1,7 @@ package mvl import ( + "context" "encoding/json" "fmt" "io" @@ -156,6 +157,25 @@ func (l *Logger) Fields(kv ...any) *Logger { } } +type InfoLogger interface { + Infof(msg string, args ...any) +} + +type infoKey struct{} + +func WithInfo(ctx context.Context, logger InfoLogger) context.Context { + return context.WithValue(ctx, infoKey{}, logger) +} + +func (l *Logger) InfofCtx(ctx context.Context, msg string, args ...any) { + il, ok := ctx.Value(infoKey{}).(InfoLogger) + if ok { + il.Infof(msg, args...) + return + } + l.log.WithFields(l.fields).Infof(msg, args...) +} + func (l *Logger) Infof(msg string, args ...any) { l.log.WithFields(l.fields).Infof(msg, args...) 
} diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 27a6317c..42ff83bc 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -2,7 +2,6 @@ package openai import ( "context" - "fmt" "io" "log/slog" "os" @@ -16,6 +15,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/counter" "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/hash" + "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/prompt" "github.com/gptscript-ai/gptscript/pkg/system" "github.com/gptscript-ai/gptscript/pkg/types" @@ -29,6 +29,7 @@ const ( var ( key = os.Getenv("OPENAI_API_KEY") url = os.Getenv("OPENAI_BASE_URL") + log = mvl.Package() ) type InvalidAuthError struct{} @@ -305,7 +306,11 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques } if len(msgs) == 0 { - return nil, fmt.Errorf("invalid request, no messages to send to LLM") + log.Errorf("invalid request, no messages to send to LLM") + return &types.CompletionMessage{ + Role: types.CompletionMessageRoleTypeAssistant, + Content: types.Text(""), + }, nil } request := openai.ChatCompletionRequest{ @@ -437,6 +442,8 @@ func appendMessage(msg types.CompletionMessage, response openai.ChatCompletionSt if tc.ToolCall.Function.Name != tool.Function.Name { tc.ToolCall.Function.Name += tool.Function.Name } + // OpenAI like to sometimes add this prefix for no good reason + tc.ToolCall.Function.Name = strings.TrimPrefix(tc.ToolCall.Function.Name, "namespace.") tc.ToolCall.Function.Arguments += tool.Function.Arguments msg.Content[idx] = tc diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index cf2f2cf9..b998320b 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -105,6 +105,14 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { tool.Parameters.Export = append(tool.Parameters.Export, csv(value)...) case "tool", "tools": tool.Parameters.Tools = append(tool.Parameters.Tools, csv(value)...) 
+ case "inputfilter", "inputfilters": + tool.Parameters.InputFilters = append(tool.Parameters.InputFilters, csv(value)...) + case "shareinputfilter", "shareinputfilters": + tool.Parameters.ExportInputFilters = append(tool.Parameters.ExportInputFilters, csv(value)...) + case "outputfilter", "outputfilters": + tool.Parameters.OutputFilters = append(tool.Parameters.OutputFilters, csv(value)...) + case "shareoutputfilter", "shareoutputfilters": + tool.Parameters.ExportOutputFilters = append(tool.Parameters.ExportOutputFilters, csv(value)...) case "agent", "agents": tool.Parameters.Agents = append(tool.Parameters.Agents, csv(value)...) case "globaltool", "globaltools": @@ -183,10 +191,14 @@ type context struct { func (c *context) finish(tools *[]Node) { c.tool.Instructions = strings.TrimSpace(strings.Join(c.instructions, "")) - if c.tool.Instructions != "" || c.tool.Parameters.Name != "" || - len(c.tool.Export) > 0 || len(c.tool.Tools) > 0 || + if c.tool.Instructions != "" || + c.tool.Parameters.Name != "" || + len(c.tool.Export) > 0 || + len(c.tool.Tools) > 0 || c.tool.GlobalModelName != "" || len(c.tool.GlobalTools) > 0 || + len(c.tool.ExportInputFilters) > 0 || + len(c.tool.ExportOutputFilters) > 0 || c.tool.Chat { *tools = append(*tools, Node{ ToolNode: &ToolNode{ diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index 9cdb593e..9f682efa 100644 --- a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -191,3 +191,51 @@ name: bad }, }}).Equal(t, out) } + +func TestParseInput(t *testing.T) { + input := ` +input filters: input +share input filters: shared +` + out, err := Parse(strings.NewReader(input)) + require.NoError(t, err) + autogold.Expect(Document{Nodes: []Node{ + {ToolNode: &ToolNode{ + Tool: types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + InputFilters: []string{ + "input", + }, + ExportInputFilters: []string{"shared"}, + }, + }, + Source: types.ToolSource{LineNo: 1}, + }, + }}, + }}).Equal(t, out) +} + +func 
TestParseOutput(t *testing.T) { + output := ` +output filters: output +share output filters: shared +` + out, err := Parse(strings.NewReader(output)) + require.NoError(t, err) + autogold.Expect(Document{Nodes: []Node{ + {ToolNode: &ToolNode{ + Tool: types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + OutputFilters: []string{ + "output", + }, + ExportOutputFilters: []string{"shared"}, + }, + }, + Source: types.ToolSource{LineNo: 1}, + }, + }}, + }}).Equal(t, out) +} diff --git a/pkg/prompt/credential.go b/pkg/prompt/credential.go index a8bf6f76..9202ed49 100644 --- a/pkg/prompt/credential.go +++ b/pkg/prompt/credential.go @@ -18,7 +18,8 @@ func GetModelProviderCredential(ctx context.Context, credStore credentials.Crede if exists { k = cred.Env[env] } else { - result, err := SysPrompt(ctx, envs, fmt.Sprintf(`{"message":"%s","fields":"key","sensitive":"true"}`, message)) + // we know progress isn't used so pass as nil + result, err := SysPrompt(ctx, envs, fmt.Sprintf(`{"message":"%s","fields":"key","sensitive":"true"}`, message), nil) if err != nil { return "", err } diff --git a/pkg/prompt/prompt.go b/pkg/prompt/prompt.go index 6cf8febd..4a9550a3 100644 --- a/pkg/prompt/prompt.go +++ b/pkg/prompt/prompt.go @@ -48,7 +48,7 @@ func sysPromptHTTP(ctx context.Context, envs []string, url string, prompt types. 
return string(data), err } -func SysPrompt(ctx context.Context, envs []string, input string) (_ string, err error) { +func SysPrompt(ctx context.Context, envs []string, input string, _ chan<- string) (_ string, err error) { var params struct { Message string `json:"message,omitempty"` Fields string `json:"fields,omitempty"` diff --git a/pkg/repos/get.go b/pkg/repos/get.go index 291ec038..68b2e427 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "strings" + "sync" "github.com/BurntSushi/locker" "github.com/gptscript-ai/gptscript/pkg/config" @@ -43,11 +44,19 @@ func (n noopRuntime) Setup(_ context.Context, _, _ string, _ []string) ([]string } type Manager struct { - storageDir string - gitDir string - runtimeDir string - credHelperDirs credentials.CredentialHelperDirs - runtimes []Runtime + storageDir string + gitDir string + runtimeDir string + credHelperDirs credentials.CredentialHelperDirs + runtimes []Runtime + credHelperConfig *credHelperConfig +} + +type credHelperConfig struct { + lock sync.Mutex + initialized bool + cliCfg *config.CLIConfig + env []string } func New(cacheDir string, runtimes ...Runtime) *Manager { @@ -61,7 +70,32 @@ func New(cacheDir string, runtimes ...Runtime) *Manager { } } -func (m *Manager) SetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig, env []string) error { +func (m *Manager) EnsureCredentialHelpers(ctx context.Context) error { + if m.credHelperConfig == nil { + return nil + } + m.credHelperConfig.lock.Lock() + defer m.credHelperConfig.lock.Unlock() + + if !m.credHelperConfig.initialized { + if err := m.deferredSetUpCredentialHelpers(ctx, m.credHelperConfig.cliCfg, m.credHelperConfig.env); err != nil { + return err + } + m.credHelperConfig.initialized = true + } + + return nil +} + +func (m *Manager) SetUpCredentialHelpers(_ context.Context, cliCfg *config.CLIConfig, env []string) error { + m.credHelperConfig = &credHelperConfig{ + cliCfg: cliCfg, + env: env, + } + 
return nil +} + +func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig, env []string) error { var ( helperName = cliCfg.CredentialsStore suffix string diff --git a/pkg/repos/git/git.go b/pkg/repos/git/git.go index bf0f1341..978f3a6d 100644 --- a/pkg/repos/git/git.go +++ b/pkg/repos/git/git.go @@ -33,7 +33,7 @@ func Checkout(ctx context.Context, base, repo, commit, toDir string) error { return err } - log.Infof("Checking out %s to %s", commit, toDir) + log.InfofCtx(ctx, "Checking out %s to %s", commit, toDir) return gitWorktreeAdd(ctx, gitDir(base, repo), toDir, commit) } @@ -46,11 +46,11 @@ func Fetch(ctx context.Context, base, repo, commit string) error { if found, err := exists(gitDir); err != nil { return err } else if !found { - log.Infof("Cloning %s", repo) + log.InfofCtx(ctx, "Cloning %s", repo) if err := cloneBare(ctx, repo, gitDir); err != nil { return err } } - log.Infof("Fetching %s at %s", commit, repo) + log.InfofCtx(ctx, "Fetching %s at %s", commit, repo) return fetchCommit(ctx, gitDir, commit) } diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index acf66ab1..28300439 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -68,7 +68,7 @@ func (r *Runtime) BuildCredentialHelper(ctx context.Context, helperName string, } newEnv := runtimeEnv.AppendPath(env, binPath) - log.Infof("Building credential helper %s", helperName) + log.InfofCtx(ctx, "Building credential helper %s", helperName) cmd := debugcmd.New(ctx, filepath.Join(binPath, "go"), "build", "-buildvcs=false", "-o", filepath.Join(credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix), @@ -103,7 +103,7 @@ func stripGo(env []string) (result []string) { } func (r *Runtime) runBuild(ctx context.Context, toolSource, binDir string, env []string) error { - log.Infof("Running go build in %s", toolSource) + log.InfofCtx(ctx, "Running go build in %s", toolSource) cmd := 
debugcmd.New(ctx, filepath.Join(binDir, "go"), "build", "-buildvcs=false", "-o", artifactName()) cmd.Env = stripGo(env) cmd.Dir = toolSource @@ -134,7 +134,7 @@ func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { return "", err } - log.Infof("Downloading Go %s", r.Version) + log.InfofCtx(ctx, "Downloading Go %s", r.Version) tmp := target + ".download" defer os.RemoveAll(tmp) diff --git a/pkg/repos/runtimes/node/node.go b/pkg/repos/runtimes/node/node.go index 9280d3b0..575e3b23 100644 --- a/pkg/repos/runtimes/node/node.go +++ b/pkg/repos/runtimes/node/node.go @@ -101,7 +101,7 @@ func (r *Runtime) getReleaseAndDigest() (string, string, error) { } func (r *Runtime) runNPM(ctx context.Context, toolSource, binDir string, env []string) error { - log.Infof("Running npm in %s", toolSource) + log.InfofCtx(ctx, "Running npm in %s", toolSource) cmd := debugcmd.New(ctx, filepath.Join(binDir, "npm"), "install") cmd.Env = env cmd.Dir = toolSource @@ -141,7 +141,7 @@ func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { return "", err } - log.Infof("Downloading Node %s.x", r.Version) + log.InfofCtx(ctx, "Downloading Node %s.x", r.Version) tmp := target + ".download" defer os.RemoveAll(tmp) diff --git a/pkg/repos/runtimes/python/python.go b/pkg/repos/runtimes/python/python.go index 72de5457..a5268f31 100644 --- a/pkg/repos/runtimes/python/python.go +++ b/pkg/repos/runtimes/python/python.go @@ -76,7 +76,7 @@ func uvBin(binDir string) string { } func (r *Runtime) installVenv(ctx context.Context, binDir, venvPath string) error { - log.Infof("Creating virtualenv in %s", venvPath) + log.InfofCtx(ctx, "Creating virtualenv in %s", venvPath) cmd := debugcmd.New(ctx, uvBin(binDir), "venv", "-p", pythonCmd(binDir), venvPath) return cmd.Run() } @@ -171,7 +171,7 @@ func (r *Runtime) getReleaseAndDigest() (string, string, error) { } func (r *Runtime) runPip(ctx context.Context, toolSource, binDir string, env []string) error { - 
log.Infof("Running pip in %s", toolSource) + log.InfofCtx(ctx, "Running pip in %s", toolSource) for _, req := range []string{"requirements-gptscript.txt", "requirements.txt"} { reqFile := filepath.Join(toolSource, req) if s, err := os.Stat(reqFile); err == nil && !s.IsDir() { @@ -203,7 +203,7 @@ func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { return "", err } - log.Infof("Downloading Python %s.x", r.Version) + log.InfofCtx(ctx, "Downloading Python %s.x", r.Version) tmp := target + ".download" defer os.RemoveAll(tmp) diff --git a/pkg/runner/input.go b/pkg/runner/input.go new file mode 100644 index 00000000..7d77330e --- /dev/null +++ b/pkg/runner/input.go @@ -0,0 +1,34 @@ +package runner + +import ( + "encoding/json" + "fmt" + + "github.com/gptscript-ai/gptscript/pkg/engine" +) + +func (r *Runner) handleInput(callCtx engine.Context, monitor Monitor, env []string, input string) (string, error) { + inputToolRefs, err := callCtx.Tool.GetInputFilterTools(*callCtx.Program) + if err != nil { + return "", err + } + + for _, inputToolRef := range inputToolRefs { + inputData, err := json.Marshal(map[string]any{ + "input": input, + }) + if err != nil { + return "", fmt.Errorf("failed to marshal input: %w", err) + } + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, inputToolRef.ToolID, string(inputData), "", engine.InputToolCategory) + if err != nil { + return "", err + } + if res.Result == nil { + return "", fmt.Errorf("invalid state: input tool [%s] can not result in a chat continuation", inputToolRef.Reference) + } + input = *res.Result + } + + return input, nil +} diff --git a/pkg/runner/output.go b/pkg/runner/output.go new file mode 100644 index 00000000..858d106c --- /dev/null +++ b/pkg/runner/output.go @@ -0,0 +1,72 @@ +package runner + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/gptscript-ai/gptscript/pkg/engine" +) + +func (r *Runner) handleOutput(callCtx engine.Context, monitor Monitor, env []string, state 
*State, retErr error) (*State, error) { + outputToolRefs, err := callCtx.Tool.GetOutputFilterTools(*callCtx.Program) + if err != nil { + return nil, err + } + + if len(outputToolRefs) == 0 { + return state, retErr + } + + var ( + continuation bool + chatFinish bool + output string + ) + + if errMessage := (*engine.ErrChatFinish)(nil); errors.As(retErr, &errMessage) && callCtx.Tool.Chat { + chatFinish = true + output = errMessage.Message + } else if retErr != nil { + return state, retErr + } else if state.Continuation != nil && state.Continuation.Result != nil { + continuation = true + output = *state.Continuation.Result + } else if state.Result != nil { + output = *state.Result + } else { + return state, nil + } + + for _, outputToolRef := range outputToolRefs { + inputData, err := json.Marshal(map[string]any{ + "output": output, + "chatFinish": chatFinish, + "continuation": continuation, + "chat": callCtx.Tool.Chat, + }) + if err != nil { + return nil, fmt.Errorf("marshaling input for output filter: %w", err) + } + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, outputToolRef.ToolID, string(inputData), "", engine.OutputToolCategory) + if err != nil { + return nil, err + } + if res.Result == nil { + return nil, fmt.Errorf("invalid state: output tool [%s] can not result in a chat continuation", outputToolRef.Reference) + } + output = *res.Result + } + + if chatFinish { + return state, &engine.ErrChatFinish{ + Message: output, + } + } else if continuation { + state.Continuation.Result = &output + } else { + state.Result = &output + } + + return state, nil +} diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 06ccf941..e4794535 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -131,6 +131,16 @@ type ChatState interface{} func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Program, env []string, input string) (resp ChatResponse, err error) { var state *State + defer func() { + if finish := 
(*engine.ErrChatFinish)(nil); errors.As(err, &finish) { + resp = ChatResponse{ + Done: true, + Content: err.Error(), + } + err = nil + } + }() + if prevState != nil { switch v := prevState.(type) { case *State: @@ -155,7 +165,11 @@ func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Progra monitor.Stop(resp.Content, err) }() - callCtx := engine.NewContext(ctx, &prg, input) + callCtx, err := engine.NewContext(ctx, &prg, input) + if err != nil { + return resp, err + } + if state == nil || state.StartContinuation { if state != nil { state = state.WithResumeInput(&input) @@ -236,7 +250,7 @@ var ( EventTypeRunFinish EventType = "runFinish" ) -func getContextInput(prg *types.Program, ref types.ToolReference, input string) (string, error) { +func getToolRefInput(prg *types.Program, ref types.ToolReference, input string) (string, error) { if ref.Arg == "" { return "", nil } @@ -341,7 +355,7 @@ func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monito continue } - contextInput, err := getContextInput(callCtx.Program, toolRef, input) + contextInput, err := getToolRefInput(callCtx.Program, toolRef, input) if err != nil { return nil, nil, err } @@ -398,6 +412,11 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en Content: input, }) + input, err := r.handleInput(callCtx, monitor, env, input) + if err != nil { + return nil, err + } + if len(callCtx.Tool.Credentials) > 0 { var err error env, err = r.handleCredentials(callCtx, monitor, env) @@ -407,7 +426,6 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en } var ( - err error newState *State ) callCtx.InputContext, newState, err = r.getContext(callCtx, state, monitor, env, input) @@ -421,7 +439,7 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en e := engine.Engine{ Model: r.c, - RuntimeManager: r.runtimeManager, + RuntimeManager: runtimeWithLogger(callCtx, monitor, r.runtimeManager), 
Progress: progress, Env: env, } @@ -436,7 +454,10 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en } if !authResp.Accept { - msg := fmt.Sprintf("[AUTHORIZATION ERROR]: %s", authResp.Message) + msg := authResp.Message + if msg == "" { + msg = "Tool call request has been denied" + } return &State{ Continuation: &engine.Return{ Result: &msg, @@ -515,7 +536,11 @@ type Needed struct { Input string `json:"input,omitempty"` } -func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, state *State) (*State, error) { +func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, state *State) (retState *State, retErr error) { + defer func() { + retState, retErr = r.handleOutput(callCtx, monitor, env, retState, retErr) + }() + if state.StartContinuation { return nil, fmt.Errorf("invalid state, resume should not have StartContinuation set to true") } @@ -568,7 +593,7 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s ) state, callResults, err = r.subCalls(callCtx, monitor, env, state, callCtx.ToolCategory) - if errMessage := (*builtin.ErrChatFinish)(nil); errors.As(err, &errMessage) && callCtx.Tool.Chat { + if errMessage := (*engine.ErrChatFinish)(nil); errors.As(err, &errMessage) && callCtx.Tool.Chat { return &State{ Result: &errMessage.Message, }, nil @@ -602,7 +627,7 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s e := engine.Engine{ Model: r.c, - RuntimeManager: r.runtimeManager, + RuntimeManager: runtimeWithLogger(callCtx, monitor, r.runtimeManager), Progress: progress, Env: env, } @@ -621,8 +646,12 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s } if state.ResumeInput != nil { + input, err := r.handleInput(callCtx, monitor, env, *state.ResumeInput) + if err != nil { + return state, err + } engineResults = append(engineResults, engine.CallResult{ - User: *state.ResumeInput, + User: input, }) } @@ 
-679,16 +708,22 @@ func streamProgress(callCtx *engine.Context, monitor Monitor) (chan<- types.Comp } func (r *Runner) subCall(ctx context.Context, parentContext engine.Context, monitor Monitor, env []string, toolID, input, callID string, toolCategory engine.ToolCategory) (*State, error) { - callCtx, err := parentContext.SubCall(ctx, input, toolID, callID, toolCategory) + callCtx, err := parentContext.SubCallContext(ctx, input, toolID, callID, toolCategory) if err != nil { return nil, err } + if toolCategory == engine.ContextToolCategory && callCtx.Tool.IsNoop() { + return &State{ + Result: new(string), + }, nil + } + return r.call(callCtx, monitor, env, input) } func (r *Runner) subCallResume(ctx context.Context, parentContext engine.Context, monitor Monitor, env []string, toolID, callID string, state *State, toolCategory engine.ToolCategory) (*State, error) { - callCtx, err := parentContext.SubCall(ctx, "", toolID, callID, toolCategory) + callCtx, err := parentContext.SubCallContext(ctx, "", toolID, callID, toolCategory) if err != nil { return nil, err } @@ -832,32 +867,51 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } var ( - cred *credentials.Credential + c *credentials.Credential exists bool ) + rm := runtimeWithLogger(callCtx, monitor, r.runtimeManager) + if err := rm.EnsureCredentialHelpers(callCtx.Ctx); err != nil { + return nil, fmt.Errorf("failed to setup credential helpers: %w", err) + } + // Only try to look up the cred if the tool is on GitHub or has an alias. // If it is a GitHub tool and has an alias, the alias overrides the tool name, so we use it as the credential name. 
if isGitHubTool(toolName) && credentialAlias == "" { - cred, exists, err = r.credStore.Get(toolName) + c, exists, err = r.credStore.Get(toolName) if err != nil { return nil, fmt.Errorf("failed to get credentials for tool %s: %w", toolName, err) } } else if credentialAlias != "" { - cred, exists, err = r.credStore.Get(credentialAlias) + c, exists, err = r.credStore.Get(credentialAlias) if err != nil { return nil, fmt.Errorf("failed to get credentials for tool %s: %w", credentialAlias, err) } } + if c == nil { + c = &credentials.Credential{} + } + // If the credential doesn't already exist in the store, run the credential tool in order to get the value, // and save it in the store. - if !exists { + if !exists || c.IsExpired() { credToolRefs, ok := callCtx.Tool.ToolMapping[credToolName] if !ok || len(credToolRefs) != 1 { return nil, fmt.Errorf("failed to find ID for tool %s", credToolName) } + // If the existing credential is expired, we need to provide it to the cred tool through the environment. + if exists && c.IsExpired() { + credJSON, err := json.Marshal(c) + if err != nil { + return nil, fmt.Errorf("failed to marshal credential: %w", err) + } + env = append(env, fmt.Sprintf("%s=%s", credentials.ExistingCredential, string(credJSON))) + } + + // Get the input for the credential tool, if there is any. 
var input string if args != nil { inputBytes, err := json.Marshal(args) @@ -867,12 +921,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env input = string(inputBytes) } - subCtx, err := callCtx.SubCall(callCtx.Ctx, input, credToolRefs[0].ToolID, "", engine.CredentialToolCategory) // leaving callID as "" will cause it to be set by the engine - if err != nil { - return nil, fmt.Errorf("failed to create subcall context for tool %s: %w", credToolName, err) - } - - res, err := r.call(subCtx, monitor, env, input) + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, credToolRefs[0].ToolID, input, "", engine.CredentialToolCategory) if err != nil { return nil, fmt.Errorf("failed to run credential tool %s: %w", credToolName, err) } @@ -881,21 +930,14 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env return nil, fmt.Errorf("invalid state: credential tool [%s] can not result in a continuation", credToolName) } - var envMap struct { - Env map[string]string `json:"env"` - } - if err := json.Unmarshal([]byte(*res.Result), &envMap); err != nil { + if err := json.Unmarshal([]byte(*res.Result), &c); err != nil { return nil, fmt.Errorf("failed to unmarshal credential tool %s response: %w", credToolName, err) } - - cred = &credentials.Credential{ - Type: credentials.CredentialTypeTool, - Env: envMap.Env, - ToolName: credName, - } + c.ToolName = credName + c.Type = credentials.CredentialTypeTool isEmpty := true - for _, v := range cred.Env { + for _, v := range c.Env { if v != "" { isEmpty = false break @@ -906,7 +948,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env if (isGitHubTool(toolName) && callCtx.Program.ToolSet[credToolRefs[0].ToolID].Source.Repo != nil) || credentialAlias != "" { if isEmpty { log.Warnf("Not saving empty credential for tool %s", toolName) - } else if err := r.credStore.Add(*cred); err != nil { + } else if err := r.credStore.Add(*c); err != nil { return 
nil, fmt.Errorf("failed to add credential for tool %s: %w", toolName, err) } } else { @@ -914,7 +956,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } } - for k, v := range cred.Env { + for k, v := range c.Env { env = append(env, fmt.Sprintf("%s=%s", k, v)) } } diff --git a/pkg/runner/runtimemanager.go b/pkg/runner/runtimemanager.go new file mode 100644 index 00000000..e1c5a4c6 --- /dev/null +++ b/pkg/runner/runtimemanager.go @@ -0,0 +1,50 @@ +package runner + +import ( + "context" + "fmt" + "time" + + "github.com/gptscript-ai/gptscript/pkg/config" + "github.com/gptscript-ai/gptscript/pkg/engine" + "github.com/gptscript-ai/gptscript/pkg/mvl" + "github.com/gptscript-ai/gptscript/pkg/types" +) + +func runtimeWithLogger(callCtx engine.Context, monitor Monitor, rm engine.RuntimeManager) engine.RuntimeManager { + if rm == nil { + return nil + } + return runtimeManagerLogger{ + callCtx: callCtx, + monitor: monitor, + rm: rm, + } +} + +type runtimeManagerLogger struct { + callCtx engine.Context + monitor Monitor + rm engine.RuntimeManager +} + +func (r runtimeManagerLogger) Infof(msg string, args ...any) { + r.monitor.Event(Event{ + Time: time.Now(), + Type: EventTypeCallProgress, + CallContext: r.callCtx.GetCallContext(), + Content: fmt.Sprintf(msg, args...), + }) +} + +func (r runtimeManagerLogger) GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) { + return r.rm.GetContext(mvl.WithInfo(ctx, r), tool, cmd, env) +} + +func (r runtimeManagerLogger) EnsureCredentialHelpers(ctx context.Context) error { + return r.rm.EnsureCredentialHelpers(mvl.WithInfo(ctx, r)) +} + +func (r runtimeManagerLogger) SetUpCredentialHelpers(_ context.Context, _ *config.CLIConfig, _ []string) error { + panic("not implemented") +} diff --git a/pkg/system/prompt.go b/pkg/system/prompt.go index 72858c93..98b2535a 100644 --- a/pkg/system/prompt.go +++ b/pkg/system/prompt.go @@ -24,19 +24,30 @@ You don't move to the next 
step until you have a result. // DefaultPromptParameter is used as the key in a json map to indication that we really wanted // to just send pure text but the interface required JSON (as that is the fundamental interface of tools in OpenAI) -var DefaultPromptParameter = "defaultPromptParameter" +var DefaultPromptParameter = "prompt" var DefaultToolSchema = openapi3.Schema{ Type: &openapi3.Types{"object"}, Properties: openapi3.Schemas{ DefaultPromptParameter: &openapi3.SchemaRef{ Value: &openapi3.Schema{ - Description: "Prompt to send to the tool or assistant. This may be instructions or question.", + Description: "Prompt to send to the tool. This may be an instruction or question.", + Type: &openapi3.Types{"string"}, + }, + }, + }, +} + +var DefaultChatSchema = openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{ + DefaultPromptParameter: &openapi3.SchemaRef{ + Value: &openapi3.Schema{ + Description: "Prompt to send to the assistant. This may be an instruction or question.", Type: &openapi3.Types{"string"}, }, }, }, - Required: []string{DefaultPromptParameter}, } func init() { diff --git a/pkg/tests/judge/judge.go b/pkg/tests/judge/judge.go new file mode 100644 index 00000000..eae12c2e --- /dev/null +++ b/pkg/tests/judge/judge.go @@ -0,0 +1,128 @@ +package judge + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/getkin/kin-openapi/openapi3gen" + openai "github.com/gptscript-ai/chat-completion-client" +) + +const instructions = `"actual" is considered equivalent to "expected" if and only if the following rules are satisfied: + +%s + +When given JSON objects that conform to the following JSONSchema: + +%s + +Determine if "actual" is considered equivalent to "expected". 
+ +After making a determination, respond with a JSON object that conforms to the following JSONSchema: + +{ + "name": "ruling", + "type": "object", + "properties": { + "equal": { + "type": "boolean", + "description": "Set to true if and only if actual is considered equal to expected." + }, + "reasoning": { + "type": "string", + "description": "The reasoning used to come to the determination" + } + }, + "required": [ + "equal", + "reasoning" + ] +} + +Your responses are concise and include only the json object described above. +` + +type Judge[T any] struct { + client *openai.Client + comparisonSchema string +} + +type comparison[T any] struct { + Expected T `json:"expected"` + Actual T `json:"actual"` +} + +type ruling struct { + Equal bool `json:"equal"` + Reasoning string `json:"reasoning"` +} + +func New[T any](client *openai.Client) (*Judge[T], error) { + schema, err := openapi3gen.NewSchemaRefForValue( + new(comparison[T]), + nil, + openapi3gen.CreateComponentSchemas( + openapi3gen.ExportComponentSchemasOptions{ + ExportComponentSchemas: true, + ExportGenerics: false, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed to generate JSONSchema for %T: %w", new(T), err) + } + + marshaled, err := json.MarshalIndent(schema, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal JSONSchema for %T: %w", new(T), err) + } + + return &Judge[T]{ + client: client, + comparisonSchema: string(marshaled), + }, nil +} + +func (j *Judge[T]) Equal(ctx context.Context, expected, actual T, criteria string) (equal bool, reasoning string, err error) { + comparisonJSON, err := json.MarshalIndent(&comparison[T]{ + Expected: expected, + Actual: actual, + }, "", " ") + if err != nil { + return false, "", fmt.Errorf("failed to marshal judge testcase JSON: %w", err) + } + + request := openai.ChatCompletionRequest{ + Model: openai.GPT4o, + Temperature: new(float32), + N: 1, + ResponseFormat: &openai.ChatCompletionResponseFormat{ + Type: 
openai.ChatCompletionResponseFormatTypeJSONObject, + }, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleSystem, + Content: fmt.Sprintf(instructions, criteria, j.comparisonSchema), + }, + { + Role: openai.ChatMessageRoleUser, + Content: string(comparisonJSON), + }, + }, + } + response, err := j.client.CreateChatCompletion(ctx, request) + if err != nil { + return false, "", fmt.Errorf("failed to create chat completion request: %w", err) + } + + if len(response.Choices) < 1 { + return false, "", fmt.Errorf("chat completion request returned no choices") + } + + var equality ruling + if err := json.Unmarshal([]byte(response.Choices[0].Message.Content), &equality); err != nil { + return false, "", fmt.Errorf("failed to unmarshal judge ruling: %w", err) + } + + return equality.Equal, equality.Reasoning, nil +} diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 9470662c..db185d75 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -7,6 +7,7 @@ import ( "runtime" "testing" + "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/tests/tester" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/hexops/autogold/v2" @@ -319,7 +320,15 @@ func TestSubChat(t *testing.T) { "function": { "toolID": "testdata/TestSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], @@ -435,7 +444,15 @@ func TestSubChat(t *testing.T) { "function": { "toolID": "testdata/TestSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], @@ -806,3 +823,104 @@ func TestAgents(t *testing.T) { autogold.Expect("TEST RESULT CALL: 4").Equal(t, resp.Content) autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) } + +func TestInput(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + } + + r := tester.NewRunner(t) + + prg, err := r.Load("") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "You're stupid") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 1").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) + + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "You're ugly") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 2").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step2")) +} + +func TestOutput(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + } + + r := tester.NewRunner(t) + r.RespondWith(tester.Result{ + Text: "Response 1", + }) + + prg, err := r.Load("") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "Input 1") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect(`CHAT: true CONTENT: Response 1 CONTINUATION: true FINISH: false suffix +`).Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) + + r.RespondWith(tester.Result{ + Text: "Response 2", + }) + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "Input 2") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect(`CHAT: true CONTENT: Response 2 CONTINUATION: true FINISH: false suffix +`).Equal(t, resp.Content) + 
autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step2")) + + r.RespondWith(tester.Result{ + Err: &engine.ErrChatFinish{ + Message: "Chat Done", + }, + }) + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "Input 3") + require.NoError(t, err) + r.AssertResponded(t) + assert.True(t, resp.Done) + autogold.Expect(`CHAT FINISH: CHAT: true CONTENT: Chat Done CONTINUATION: false FINISH: true suffix +`).Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step3")) +} + +func TestSysContext(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + } + + r := tester.NewRunner(t) + + prg, err := r.Load("") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 1").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) + + data, err := os.ReadFile("testdata/TestSysContext/context.json") + require.NoError(t, err) + + context := struct { + Call engine.CallContext `json:"call"` + }{} + err = json.Unmarshal(data, &context) + require.NoError(t, err) + + require.Len(t, context.Call.AgentGroup, 1) + assert.Equal(t, context.Call.AgentGroup[0].Named, "iAmSuperman") +} diff --git a/pkg/tests/smoke/smoke_test.go b/pkg/tests/smoke/smoke_test.go new file mode 100644 index 00000000..66374ef1 --- /dev/null +++ b/pkg/tests/smoke/smoke_test.go @@ -0,0 +1,184 @@ +//go:build smoke + +package smoke + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + openai "github.com/gptscript-ai/chat-completion-client" + "github.com/gptscript-ai/gptscript/pkg/runner" + "github.com/gptscript-ai/gptscript/pkg/tests/judge" + "github.com/gptscript-ai/gptscript/pkg/types" + "github.com/samber/lo" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + "gotest.tools/v3/icmd" +) + +const defaultModelEnvVar = "GPTSCRIPT_DEFAULT_MODEL" + +func TestSmoke(t *testing.T) { + client := openai.NewClient(os.Getenv("OPENAI_API_KEY")) + smokeJudge, err := judge.New[[]event](client) + require.NoError(t, err, "error initializing smoke test judge") + + for _, tc := range getTestcases(t) { + t.Run(tc.name, func(t *testing.T) { + cmd := icmd.Command( + "gptscript", + "--color=false", + "--disable-cache", + "--events-stream-to", + tc.actualEventsFile, + "--default-model", + tc.defaultModel, + tc.gptFile, + ) + + result := icmd.RunCmd(cmd) + defer func() { + t.Helper() + assert.NoError(t, os.Remove(tc.actualEventsFile)) + }() + + require.NoError(t, result.Error, "stderr: %q", result.Stderr()) + require.Zero(t, result.ExitCode) + + var ( + actualEvents = getActualEvents(t, tc.actualEventsFile) + expectedEvents = make([]event, 0) + ) + f, err := os.Open(tc.expectedEventsFile) + if os.IsNotExist(err) { + // No expected events found, store the results of the latest call as the golden file for future tests runs + f, err := os.Create(tc.expectedEventsFile) + require.NoError(t, err) + defer f.Close() + + encoder := json.NewEncoder(f) + encoder.SetIndent("", " ") + require.NoError(t, encoder.Encode(actualEvents)) + t.Skipf("Generated initial golden file %q, skipping test", tc.expectedEventsFile) + } else { + require.NoError(t, err) + defer f.Close() + + decoder := json.NewDecoder(f) + require.NoError(t, decoder.Decode(&expectedEvents)) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + equal, reasoning, err := smokeJudge.Equal( + ctx, + expectedEvents, + actualEvents, + ` +- disregard differences in timestamps, generated IDs, natural language verbiage, and event order +- omit callProgress events from the comparision +- the overall stream of events and set of tools called should roughly match +- arguments passed in tool calls should be roughly the same +- the final 
callFinish event should be semantically similar +`, + ) + require.NoError(t, err, "error getting judge ruling on output") + require.True(t, equal, reasoning) + t.Logf("reasoning: %q", reasoning) + }) + } +} + +type testcase struct { + name string + dir string + gptFile string + defaultModel string + modelName string + env []string + actualEventsFile string + expectedEventsFile string +} + +func getTestcases(t *testing.T) []testcase { + t.Helper() + + defaultModel := os.Getenv(defaultModelEnvVar) + modelName := strings.Split(defaultModel, " ")[0] + + var testcases []testcase + for _, d := range lo.Must(os.ReadDir("testdata")) { + if !d.IsDir() { + continue + } + var ( + dirName = d.Name() + dir = filepath.Join("testdata", dirName) + ) + + files, err := os.ReadDir(dir) + require.NoError(t, err, "failed to get testdata dir %q", dir) + + for _, f := range files { + if f.IsDir() || filepath.Ext(f.Name()) != ".gpt" { + continue + } + + testcases = append(testcases, testcase{ + name: dirName, + dir: dir, + gptFile: filepath.Join(dir, f.Name()), + defaultModel: defaultModel, + modelName: modelName, + expectedEventsFile: filepath.Join(dir, fmt.Sprintf("%s-expected.json", modelName)), + actualEventsFile: filepath.Join(dir, fmt.Sprintf("%s.json", modelName)), + }) + + // Only take the first .gpt file in each testcase directory + break + } + } + + return testcases +} + +type event struct { + runner.Event + ChatRequest *openai.ChatCompletionRequest `json:"chatRequest,omitempty"` + ChatResponse *types.CompletionMessage `json:"chatResponse,omitempty"` +} + +func getActualEvents(t *testing.T, eventsFile string) []event { + t.Helper() + + f, err := os.Open(eventsFile) + require.NoError(t, err) + defer f.Close() + + var ( + events []event + scanner = bufio.NewScanner(f) + ) + for scanner.Scan() { + line := scanner.Text() + // Skip blank lines + if strings.TrimSpace(line) == "" { + continue + } + + var e event + require.NoError(t, json.Unmarshal([]byte(line), &e)) + events = 
append(events, e) + } + + require.NoError(t, scanner.Err()) + + return events +} diff --git a/pkg/tests/smoke/testdata/.gitignore b/pkg/tests/smoke/testdata/.gitignore new file mode 100644 index 00000000..292de8e7 --- /dev/null +++ b/pkg/tests/smoke/testdata/.gitignore @@ -0,0 +1,3 @@ +# Ignore intermediate event stream JSON files +*-events.json + diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json new file mode 100644 index 00000000..92fc786a --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json @@ -0,0 +1,909 @@ +[ + { + "time": "2024-06-20T17:10:23.193337-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:23.19359-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:24.059514-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:24.059807-04:00", + "callContext": { + "id": "1718917825", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + 
"github.com/gptscript-ai/claude3-anthropic-provider/credential" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + { + "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + }, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:25.074481-04:00", + "callContext": { + "id": "1718917825", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + 
"github.com/gptscript-ai/claude3-anthropic-provider/credential" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + { + "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + }, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:11060" + }, + { + "time": "2024-06-20T17:10:25.074606-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-06-20T17:10:25.074685-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you 
doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917826", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:10:25.075088-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917826", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T17:10:33.389627-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917826", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" + }, + { + "time": "2024-06-20T17:10:33.389848-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917826", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + } + ], + "usage": {} + } + }, + { + "time": "2024-06-20T17:10:33.389967-04:00", + 
"callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolSubCalls": { + "toolu_01EEcv7qDLHDmGAzm15vobxM": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\": \"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-20T17:10:33.389997-04:00", + "callContext": { + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917824" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"how are you doing\"}" + }, + { + "time": "2024-06-20T17:10:33.584228-04:00", + "callContext": { + "id": 
"toolu_01EEcv7qDLHDmGAzm15vobxM", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917824" + }, + "type": "callChat", + "chatCompletionId": "1718917827", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + "content": "{\"question\": \"how are you doing\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-06-20T17:10:33.584507-04:00", + "callContext": { + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": 
"testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917824" + }, + "type": "callProgress", + "chatCompletionId": "1718917827", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-20T17:10:35.540664-04:00", + "callContext": { + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917824" + }, + "type": "callProgress", + "chatCompletionId": "1718917827", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:10:35.540967-04:00", + "callContext": { + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917824" + }, + "type": "callChat", + "chatCompletionId": "1718917827", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": {} + } + }, + { + "time": "2024-06-20T17:10:35.541005-04:00", + "callContext": { + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917824" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:10:35.541033-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-20T17:10:35.71784-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917828", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
+ }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "toolu_01EEcv7qDLHDmGAzm15vobxM" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:10:35.718216-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917828", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T17:10:39.50448-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917828", + "usage": {}, + "content": "Bob replied: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:39.504769-04:00", + "callContext": { + "id": "1718917824", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917828", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob replied: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": {} + } + }, + { + "time": "2024-06-20T17:10:39.504807-04:00", + "callContext": { + "id": "1718917824", + 
"tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob replied: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:39.504821-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json new file mode 100644 index 00000000..01745e39 --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json @@ -0,0 +1,2348 @@ +[ + { + "time": "2024-06-20T17:08:06.902669-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T17:08:06.902927-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": 
"2024-06-20T17:08:07.292073-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917688", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:08:07.292172-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T17:08:28.052253-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-20T17:08:28.05243-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-20T17:08:28.1369-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + 
"localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-20T17:08:28.137013-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-20T17:08:28.244585-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" + }, + { + "time": 
"2024-06-20T17:08:28.244731-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" + }, + { + "time": "2024-06-20T17:08:28.32623-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" + }, + { + "time": "2024-06-20T17:08:28.326358-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": 
"testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:28.326393-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:28.32645-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": 
"\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:28.326527-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917688", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:28.327843-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917688", + "usage": { + "promptTokens": 142, + "completionTokens": 17, + "totalTokens": 159 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + } + ], + "usage": { + "promptTokens": 142, + "completionTokens": 17, + 
"totalTokens": 159 + } + } + }, + { + "time": "2024-06-20T17:08:28.328046-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolSubCalls": { + "call_vsmL6EoDecm0oVmUnHIvNkaL": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\":\"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-20T17:08:28.328123-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:28.53993-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm 
Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callChat", + "chatCompletionId": "1718917689", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + "content": "{\"question\":\"how are you doing\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-06-20T17:08:28.540154-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for 
asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-20T17:08:29.188341-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-20T17:08:29.188493-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great 
fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-20T17:08:29.244545-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-20T17:08:29.244765-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": 
"testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-20T17:08:29.643951-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are" + }, + { + "time": "2024-06-20T17:08:29.644128-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + 
"localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are" + }, + { + "time": "2024-06-20T17:08:29.676951-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing," + }, + { + "time": "2024-06-20T17:08:29.677047-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + 
"": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing," + }, + { + "time": "2024-06-20T17:08:29.677123-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I" + }, + { + "time": "2024-06-20T17:08:29.677156-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": 
"testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm" + }, + { + "time": "2024-06-20T17:08:29.677184-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing" + }, + { + "time": "2024-06-20T17:08:29.677251-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { 
+ "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great" + }, + { + "time": "2024-06-20T17:08:29.677288-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great" + }, + { + "time": "2024-06-20T17:08:29.727848-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": 
"testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" + }, + { + "time": "2024-06-20T17:08:29.727983-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" + }, + { + "time": "2024-06-20T17:08:29.782554-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking 
\"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T17:08:29.782738-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:08:29.782806-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:08:29.782839-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:08:29.782868-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callProgress", + "chatCompletionId": "1718917689", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:08:29.782988-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callChat", + "chatCompletionId": "1718917689", + "usage": { + "promptTokens": 124, + "completionTokens": 17, + "totalTokens": 141 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": { + "promptTokens": 124, + "completionTokens": 17, + "totalTokens": 141 + } + } + }, + { + "time": "2024-06-20T17:08:29.783047-04:00", + "callContext": { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917687" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:08:29.783089-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-20T17:08:29.966093-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917690", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
+ }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "call_vsmL6EoDecm0oVmUnHIvNkaL" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:08:29.966449-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T17:08:30.668641-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm" + }, + { + "time": "2024-06-20T17:08:30.668802-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm" + }, + { + "time": "2024-06-20T17:08:30.668957-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": 
"testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm" + }, + { + "time": "2024-06-20T17:08:30.669089-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm doing" + }, + { + "time": "2024-06-20T17:08:30.669299-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm doing great fellow" + }, + { + "time": "2024-06-20T17:08:30.669392-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + 
"instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm doing great fellow" + }, + { + "time": "2024-06-20T17:08:30.716062-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm doing great fellow friendly AI" + }, + { + "time": "2024-06-20T17:08:30.7162-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + 
"type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm doing great fellow friendly AI" + }, + { + "time": "2024-06-20T17:08:30.743098-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T17:08:30.743401-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:08:30.74648-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T17:08:30.746568-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917690", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:08:30.746778-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917690", + "usage": { + "promptTokens": 183, + "completionTokens": 10, + "totalTokens": 193 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "I'm doing great fellow friendly AI tool!" + } + ], + "usage": { + "promptTokens": 183, + "completionTokens": 10, + "totalTokens": 193 + } + } + }, + { + "time": "2024-06-20T17:08:30.746856-04:00", + "callContext": { + "id": "1718917687", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:08:30.746896-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json new file mode 100644 index 00000000..4d22287d --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json @@ -0,0 +1,2643 @@ +[ + { + "time": "2024-06-20T16:58:11.3174-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T16:58:11.317644-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-20T16:58:11.638778-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917093", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task 
oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T16:58:11.639016-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T16:58:12.564724-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-20T16:58:12.564911-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question" + }, + { + "time": "2024-06-20T16:58:12.564948-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": 
{ + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question" + }, + { + "time": "2024-06-20T16:58:12.56497-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-20T16:58:12.564995-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how" + }, + { + "time": "2024-06-20T16:58:12.565045-04:00", + 
"callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you" + }, + { + "time": "2024-06-20T16:58:12.565071-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you" + }, + { + "time": "2024-06-20T16:58:12.565112-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": 
"testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T16:58:12.565137-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T16:58:12.56516-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + 
"time": "2024-06-20T16:58:12.565176-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917093", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T16:58:12.565397-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917093", + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + } + ], + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + } + } + }, + { + "time": "2024-06-20T16:58:12.565644-04:00", + "callContext": { + 
"id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolSubCalls": { + "call_rTx93wiIASDA8uk8XHwjVmCC": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\":\"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-20T16:58:12.565728-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T16:58:12.72779-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + 
"properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callChat", + "chatCompletionId": "1718917094", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + "content": "{\"question\":\"how are you doing\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-06-20T16:58:12.728069-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + 
"localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-20T16:58:13.077264-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {} + }, + { + "time": "2024-06-20T16:58:13.077534-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": 
"testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-20T16:58:13.134723-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-20T16:58:13.134893-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": 
"testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-20T16:58:13.230591-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how" + }, + { + "time": "2024-06-20T16:58:13.230667-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + 
"workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how" + }, + { + "time": "2024-06-20T16:58:13.246344-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you" + }, + { + "time": "2024-06-20T16:58:13.246468-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + 
"inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing" + }, + { + "time": "2024-06-20T16:58:13.246531-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing" + }, + { + "time": "2024-06-20T16:58:13.246592-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": 
null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm" + }, + { + "time": "2024-06-20T16:58:13.246645-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing" + }, + { + "time": "2024-06-20T16:58:13.246736-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + 
"inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great" + }, + { + "time": "2024-06-20T16:58:13.246796-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great" + }, + { + "time": "2024-06-20T16:58:13.30169-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": 
"testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly" + }, + { + "time": "2024-06-20T16:58:13.301837-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly" + }, + { + "time": "2024-06-20T16:58:13.31565-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": 
"testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-20T16:58:13.315798-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T16:58:13.315842-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T16:58:13.315879-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T16:58:13.315951-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callProgress", + "chatCompletionId": "1718917094", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T16:58:13.316055-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callChat", + "chatCompletionId": "1718917094", + "usage": { + "promptTokens": 122, + "completionTokens": 17, + "totalTokens": 139 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": { + "promptTokens": 122, + "completionTokens": 17, + "totalTokens": 139 + } + } + }, + { + "time": "2024-06-20T16:58:13.316115-04:00", + "callContext": { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917092" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T16:58:13.316171-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-20T16:58:13.533625-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917095", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
+ }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_rTx93wiIASDA8uk8XHwjVmCC", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "call_rTx93wiIASDA8uk8XHwjVmCC" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T16:58:13.53384-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T16:58:13.856349-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-20T16:58:13.856437-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-20T16:58:13.874317-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": 
"testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-20T16:58:13.874428-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-20T16:58:14.060243-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are" + }, + { + "time": "2024-06-20T16:58:14.060366-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + 
], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are" + }, + { + "time": "2024-06-20T16:58:14.060418-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are" + }, + { + "time": "2024-06-20T16:58:14.060435-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + 
"type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you" + }, + { + "time": "2024-06-20T16:58:14.060456-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing" + }, + { + "time": "2024-06-20T16:58:14.060521-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing" + }, + { + "time": "2024-06-20T16:58:14.060555-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": 
"testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing" + }, + { + "time": "2024-06-20T16:58:14.060577-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing" + }, + { + "time": "2024-06-20T16:58:14.06061-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": 
"1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" + }, + { + "time": "2024-06-20T16:58:14.060626-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" + }, + { + "time": "2024-06-20T16:58:14.060686-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" + }, + { + "time": "2024-06-20T16:58:14.06071-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply 
exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-20T16:58:14.060727-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-20T16:58:14.060743-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + 
"inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T16:58:14.060788-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T16:58:14.060806-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917095", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T16:58:14.061001-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917095", + "usage": { + "promptTokens": 179, + "completionTokens": 18, + "totalTokens": 197 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + } + ], + "usage": { + "promptTokens": 179, + "completionTokens": 18, + "totalTokens": 197 + } + } + }, + { + "time": "2024-06-20T16:58:14.061031-04:00", + "callContext": { + "id": "1718917092", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T16:58:14.061047-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json new file mode 100644 index 00000000..f7341a7b --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json @@ -0,0 +1,2534 @@ +[ + { + "time": "2024-06-20T17:10:39.294578-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:39.294835-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:39.501107-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917841", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + 
"messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:10:39.501246-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917841", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T17:10:40.154068-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917841", + "usage": {} + }, + { + "time": "2024-06-20T17:10:40.786117-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917841", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" + }, + { + "time": "2024-06-20T17:10:40.786643-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", 
+ "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917841", + "usage": { + "promptTokens": 85, + "completionTokens": 23, + "totalTokens": 108 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "WWpUyFBHH", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + } + ], + "usage": { + "promptTokens": 85, + "completionTokens": 23, + "totalTokens": 108 + } + } + }, + { + "time": "2024-06-20T17:10:40.78704-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolSubCalls": { + "WWpUyFBHH": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\": \"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-20T17:10:40.787133-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + 
}, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"how are you doing\"}" + }, + { + "time": "2024-06-20T17:10:40.94973-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callChat", + "chatCompletionId": "1718917842", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + "messages": [ + { + "role": "system", + "content": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + "content": "{\"question\": \"how are you doing\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-06-20T17:10:40.950046-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm 
Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-20T17:10:41.218814-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-20T17:10:41.218971-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + 
"modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-20T17:10:41.242129-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for" + }, + { + "time": "2024-06-20T17:10:41.278815-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from 
https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-20T17:10:41.298557-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"" + }, + { + "time": "2024-06-20T17:10:41.329684-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", 
+ "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how" + }, + { + "time": "2024-06-20T17:10:41.36213-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are" + }, + { + "time": "2024-06-20T17:10:41.404159-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + 
"internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you" + }, + { + "time": "2024-06-20T17:10:41.443596-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing" + }, + { + "time": "2024-06-20T17:10:41.46035-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", 
+ "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\"," + }, + { + "time": "2024-06-20T17:10:41.479186-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I" + }, + { + "time": "2024-06-20T17:10:41.508921-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from 
https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'" + }, + { + "time": "2024-06-20T17:10:41.538159-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm" + }, + { + "time": "2024-06-20T17:10:41.578073-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": 
"mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing" + }, + { + "time": "2024-06-20T17:10:41.59766-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great" + }, + { + "time": "2024-06-20T17:10:41.711868-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": 
"I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" + }, + { + "time": "2024-06-20T17:10:41.712039-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" + }, + { + "time": "2024-06-20T17:10:41.712067-04:00", + 
"callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" + }, + { + "time": "2024-06-20T17:10:41.718662-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm 
doing great fellow friendly AI tool" + }, + { + "time": "2024-06-20T17:10:41.748288-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:10:41.78608-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:10:41.786165-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callChat", + "chatCompletionId": "1718917842", + "usage": { + "promptTokens": 41, + "completionTokens": 19, + "totalTokens": 60 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": { + "promptTokens": 41, + "completionTokens": 19, + "totalTokens": 60 + } + } + }, + { + "time": "2024-06-20T17:10:41.7862-04:00", + "callContext": { + "id": "WWpUyFBHH", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:10:41.78624-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-20T17:10:41.957577-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917843", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
+ }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "WWpUyFBHH", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "WWpUyFBHH" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:10:41.957749-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T17:10:42.455931-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-20T17:10:42.45599-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied" + }, + { + "time": "2024-06-20T17:10:42.456033-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + 
} + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied" + }, + { + "time": "2024-06-20T17:10:42.475053-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied," + }, + { + "time": "2024-06-20T17:10:42.534667-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"" + }, + { + "time": "2024-06-20T17:10:42.594649-04:00", + "callContext": { + "id": 
"1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks" + }, + { + "time": "2024-06-20T17:10:42.653279-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for" + }, + { + "time": "2024-06-20T17:10:42.760299-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": 
"testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking" + }, + { + "time": "2024-06-20T17:10:42.774637-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking '" + }, + { + "time": "2024-06-20T17:10:42.835456-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how" + }, + { + "time": 
"2024-06-20T17:10:42.889942-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are" + }, + { + "time": "2024-06-20T17:10:42.951997-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you" + }, + { + "time": "2024-06-20T17:10:43.009024-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + 
"reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing" + }, + { + "time": "2024-06-20T17:10:43.072963-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing'," + }, + { + "time": "2024-06-20T17:10:43.129687-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": 
"1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I" + }, + { + "time": "2024-06-20T17:10:43.168387-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'" + }, + { + "time": "2024-06-20T17:10:43.286476-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm" + }, + { + "time": "2024-06-20T17:10:43.286589-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + 
"instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great" + }, + { + "time": "2024-06-20T17:10:43.286632-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great" + }, + { + "time": "2024-06-20T17:10:43.323232-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + 
"source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow" + }, + { + "time": "2024-06-20T17:10:43.3614-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly" + }, + { + "time": "2024-06-20T17:10:43.402971-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great 
fellow friendly AI" + }, + { + "time": "2024-06-20T17:10:43.442811-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-20T17:10:43.477932-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:43.524916-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + 
"instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:43.525171-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917843", + "usage": { + "promptTokens": 143, + "completionTokens": 23, + "totalTokens": 166 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": { + "promptTokens": 143, + "completionTokens": 23, + "totalTokens": 166 + } + } + }, + { + "time": "2024-06-20T17:10:43.525284-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how 
are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:43.525327-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/test.gpt b/pkg/tests/smoke/testdata/Bob/test.gpt new file mode 100644 index 00000000..fe8ffb62 --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/test.gpt @@ -0,0 +1,10 @@ +tools: bob + +Ask Bob "how are you doing" and repeat his reply exactly. + +--- +name: bob +description: I'm Bob, a friendly guy. +args: question: The question to ask Bob. + +When asked how I am doing, respond with exactly "Thanks for asking "${question}", I'm doing great fellow friendly AI tool!" 
diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json new file mode 100644 index 00000000..7b0477fc --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json @@ -0,0 +1,859 @@ +[ + { + "time": "2024-06-20T17:10:39.522006-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:39.522174-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:39.975716-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:39.975965-04:00", + "callContext": { + "id": "1718917841", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/claude3-anthropic-provider/credential" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic 
Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + { + "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + }, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:40.990696-04:00", + "callContext": { + "id": "1718917841", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/claude3-anthropic-provider/credential" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": 
{ + "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + { + "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + }, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:11124" + }, + { + "time": "2024-06-20T17:10:40.990787-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-06-20T17:10:40.990853-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": 
"testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917842", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:10:40.991247-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T17:10:49.116225-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917842", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" + }, + { + "time": "2024-06-20T17:10:49.116466-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917842", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + } + ], + 
"usage": {} + } + }, + { + "time": "2024-06-20T17:10:49.116557-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolSubCalls": { + "toolu_01BzEjSj1HhTi52c1N9mS1jK": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\": \"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-20T17:10:49.116583-04:00", + "callContext": { + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": 
"callStart", + "usage": {}, + "content": "{\"question\": \"how are you doing\"}" + }, + { + "time": "2024-06-20T17:10:49.116924-04:00", + "callContext": { + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1718917843", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-06-20T17:10:49.119266-04:00", + "callContext": { + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": 
"testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callProgress", + "chatCompletionId": "1718917843", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T17:10:49.119489-04:00", + "callContext": { + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1718917843", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-06-20T17:10:49.119539-04:00", + "callContext": { + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + 
"instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917840", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T17:10:49.119572-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-20T17:10:49.298305-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + 
"localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917844", + "usage": {}, + "chatRequest": { + "model": "claude-3-opus-20240229", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "toolu_01BzEjSj1HhTi52c1N9mS1jK" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:10:49.298759-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + 
"type": "callProgress", + "chatCompletionId": "1718917844", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-20T17:10:51.580939-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917844", + "usage": {}, + "content": "Bob replied: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:51.581258-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917844", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob replied: \"Thanks 
for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": {} + } + }, + { + "time": "2024-06-20T17:10:51.581281-04:00", + "callContext": { + "id": "1718917840", + "tool": { + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob replied: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:51.581291-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json new file mode 100644 index 00000000..5b2409f6 --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json @@ -0,0 +1,1549 @@ +[ + { + "time": "2024-06-20T17:08:30.778302-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T17:08:30.778582-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": 
"testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-20T17:08:30.981266-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917712", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:08:30.981391-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T17:08:32.232987-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-20T17:08:32.233265-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-20T17:08:32.344744-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + 
"bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-20T17:08:32.344882-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-20T17:08:32.361676-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, 
+ "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" + }, + { + "time": "2024-06-20T17:08:32.361793-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" + }, + { + "time": "2024-06-20T17:08:32.440498-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" + }, + { + "time": "2024-06-20T17:08:32.440743-04:00", + "callContext": { + "id": "1718917711", + "tool": { + 
"modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:32.440798-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:32.440836-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": 
"testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:32.440873-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917712", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:32.441115-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + 
"inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917712", + "usage": { + "promptTokens": 142, + "completionTokens": 17, + "totalTokens": 159 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + } + ], + "usage": { + "promptTokens": 142, + "completionTokens": 17, + "totalTokens": 159 + } + } + }, + { + "time": "2024-06-20T17:08:32.441462-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolSubCalls": { + "call_slFgd2P2lMxXQoyrPbm2YsrQ": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\":\"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-20T17:08:32.441542-04:00", + "callContext": { + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": 
"testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917711", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T17:08:32.442736-04:00", + "callContext": { + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917711", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1718917713", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-06-20T17:08:32.448288-04:00", + "callContext": { + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great 
fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917711", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callProgress", + "chatCompletionId": "1718917713", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T17:08:32.448728-04:00", + "callContext": { + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917711", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1718917713", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-06-20T17:08:32.448906-04:00", + "callContext": { + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The 
question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917711", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T17:08:32.448977-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-20T17:08:32.624086-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + 
"bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917714", + "usage": {}, + "chatRequest": { + "model": "gpt-4-turbo-2024-04-09", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "call_slFgd2P2lMxXQoyrPbm2YsrQ" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:08:32.624367-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + 
}, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-20T17:08:33.020025-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I" + }, + { + "time": "2024-06-20T17:08:33.020187-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I" + }, + { + "time": 
"2024-06-20T17:08:33.09047-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing" + }, + { + "time": "2024-06-20T17:08:33.090722-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing" + }, + { + "time": "2024-06-20T17:08:33.150983-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" 
+ } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing great fellow" + }, + { + "time": "2024-06-20T17:08:33.151128-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing great fellow" + }, + { + "time": "2024-06-20T17:08:33.26424-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing great 
fellow friendly AI" + }, + { + "time": "2024-06-20T17:08:33.264352-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-20T17:08:33.264393-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-20T17:08:33.264427-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + 
"toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T17:08:33.264492-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T17:08:33.264575-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917714", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T17:08:33.264897-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917714", + "usage": { + "promptTokens": 183, + "completionTokens": 10, + "totalTokens": 193 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": { + "promptTokens": 183, + "completionTokens": 10, + "totalTokens": 193 + } + } + }, + { + "time": "2024-06-20T17:08:33.264985-04:00", + "callContext": { + "id": "1718917711", + "tool": { + "modelName": "gpt-4-turbo-2024-04-09", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T17:08:33.265021-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json new file mode 100644 index 00000000..f61edd7d --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json @@ -0,0 +1,1808 @@ +[ + { + "time": "2024-06-20T16:58:14.093283-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T16:58:14.093568-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + 
}, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-20T16:58:14.31069-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917096", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T16:58:14.311071-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T16:58:14.807492-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-20T16:58:14.807779-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"" + }, + { + "time": "2024-06-20T16:58:14.832551-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + 
"reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-20T16:58:14.832684-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" + }, + { + "time": "2024-06-20T16:58:14.865368-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + 
}, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" + }, + { + "time": "2024-06-20T16:58:14.865484-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" + }, + { + "time": "2024-06-20T16:58:14.899511-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" + }, + { + "time": "2024-06-20T16:58:14.899668-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + 
"internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" + }, + { + "time": "2024-06-20T16:58:14.900883-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T16:58:14.900938-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + 
"localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T16:58:14.900969-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917096", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T16:58:14.901222-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + 
"chatCompletionId": "1718917096", + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + } + ], + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + } + } + }, + { + "time": "2024-06-20T16:58:14.901521-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolSubCalls": { + "call_PGLxooO6eBPt3eSEBCMkuWYN": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\":\"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-20T16:58:14.901599-04:00", + "callContext": { + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": 
"testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917095", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"how are you doing\"}" + }, + { + "time": "2024-06-20T16:58:14.90268-04:00", + "callContext": { + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917095", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1718917097", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-06-20T16:58:14.908538-04:00", + "callContext": { + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": 
"testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917095", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callProgress", + "chatCompletionId": "1718917097", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T16:58:14.908991-04:00", + "callContext": { + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917095", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1718917097", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-06-20T16:58:14.909126-04:00", + "callContext": { + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + 
"type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917095", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T16:58:14.909293-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-20T16:58:15.10962-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + 
"location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917098", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-05-13", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "call_PGLxooO6eBPt3eSEBCMkuWYN" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T16:58:15.110087-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": 
"testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-20T16:58:15.629792-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-20T16:58:15.629968-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks" + }, + { + "time": "2024-06-20T16:58:15.749206-04:00", + "callContext": { + "id": "1718917095", + "tool": { + 
"modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for" + }, + { + "time": "2024-06-20T16:58:15.749324-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking" + }, + { + "time": "2024-06-20T16:58:15.780062-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": 
"testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are" + }, + { + "time": "2024-06-20T16:58:15.780171-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you" + }, + { + "time": "2024-06-20T16:58:15.780293-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing" + }, + { + "time": "2024-06-20T16:58:15.780335-04:00", + 
"callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing" + }, + { + "time": "2024-06-20T16:58:15.780406-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm" + }, + { + "time": "2024-06-20T16:58:15.780445-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": 
"testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm" + }, + { + "time": "2024-06-20T16:58:15.780477-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing" + }, + { + "time": "2024-06-20T16:58:15.780511-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + 
"chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great" + }, + { + "time": "2024-06-20T16:58:15.816742-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" + }, + { + "time": "2024-06-20T16:58:15.816889-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" + }, + { + "time": "2024-06-20T16:58:15.859699-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": 
[ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-20T16:58:15.859764-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T16:58:15.859784-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T16:58:15.859841-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-06-20T16:58:15.85986-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917098", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T16:58:15.860819-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917098", + "usage": { + "promptTokens": 178, + "completionTokens": 17, + "totalTokens": 195 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": { + "promptTokens": 178, + "completionTokens": 17, + "totalTokens": 195 + } + } + }, + { + "time": "2024-06-20T16:58:15.860872-04:00", + "callContext": { + "id": "1718917095", + "tool": { + "modelName": "gpt-4o-2024-05-13", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-06-20T16:58:15.860919-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json new file mode 100644 index 00000000..b3e54dc4 --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json @@ -0,0 +1,1659 @@ +[ + { + "time": "2024-06-20T17:10:43.55694-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:43.557263-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": 
{ + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-06-20T17:10:43.805494-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917845", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:10:43.805682-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917845", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-06-20T17:10:44.078279-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917845", + "usage": {} + }, + { + "time": "2024-06-20T17:10:44.913816-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917845", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" + }, + { + "time": "2024-06-20T17:10:44.914845-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + 
"id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917845", + "usage": { + "promptTokens": 85, + "completionTokens": 23, + "totalTokens": 108 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "UPthtEfla", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + } + ], + "usage": { + "promptTokens": 85, + "completionTokens": 23, + "totalTokens": 108 + } + } + }, + { + "time": "2024-06-20T17:10:44.915086-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolSubCalls": { + "UPthtEfla": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\": \"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-06-20T17:10:44.915147-04:00", + "callContext": { + "id": "UPthtEfla", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from 
https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917844", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"how are you doing\"}" + }, + { + "time": "2024-06-20T17:10:44.916248-04:00", + "callContext": { + "id": "UPthtEfla", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917844", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1718917846", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-06-20T17:10:44.92245-04:00", + "callContext": { + "id": "UPthtEfla", + 
"tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917844", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callProgress", + "chatCompletionId": "1718917846", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T17:10:44.922905-04:00", + "callContext": { + "id": "UPthtEfla", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917844", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + 
"chatCompletionId": "1718917846", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-06-20T17:10:44.922998-04:00", + "callContext": { + "id": "UPthtEfla", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null, + "toolName": "bob", + "parentID": "1718917844", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-06-20T17:10:44.92306-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-06-20T17:10:45.091313-04:00", 
+ "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917847", + "usage": {}, + "chatRequest": { + "model": "mistral-large-2402", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "UPthtEfla", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "UPthtEfla" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-06-20T17:10:45.091762-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": 
"bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-06-20T17:10:45.427766-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob" + }, + { + "time": "2024-06-20T17:10:45.427886-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": 
"callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied" + }, + { + "time": "2024-06-20T17:10:45.427938-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied," + }, + { + "time": "2024-06-20T17:10:45.427998-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"" + }, + { + "time": "2024-06-20T17:10:45.428017-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": 
"Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"" + }, + { + "time": "2024-06-20T17:10:45.428046-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks" + }, + { + "time": "2024-06-20T17:10:45.434307-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + 
"source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for" + }, + { + "time": "2024-06-20T17:10:45.462196-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking" + }, + { + "time": "2024-06-20T17:10:45.496748-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how" + }, + { + "time": 
"2024-06-20T17:10:45.516868-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are" + }, + { + "time": "2024-06-20T17:10:45.545808-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you" + }, + { + "time": "2024-06-20T17:10:45.572091-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply 
exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing" + }, + { + "time": "2024-06-20T17:10:45.602668-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing," + }, + { + "time": "2024-06-20T17:10:45.627618-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" 
+ }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I" + }, + { + "time": "2024-06-20T17:10:45.659445-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'" + }, + { + "time": "2024-06-20T17:10:45.682109-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, 
\"Thanks for asking how are you doing, I'm" + }, + { + "time": "2024-06-20T17:10:45.710817-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing" + }, + { + "time": "2024-06-20T17:10:45.743363-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great" + }, + { + "time": "2024-06-20T17:10:45.766321-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": 
null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow" + }, + { + "time": "2024-06-20T17:10:45.801585-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly" + }, + { + "time": "2024-06-20T17:10:46.066262-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + 
"toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI" + }, + { + "time": "2024-06-20T17:10:46.093598-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool" + }, + { + "time": "2024-06-20T17:10:46.118737-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": 
"testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:46.146712-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1718917847", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:46.146789-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1718917847", + 
"usage": { + "promptTokens": 144, + "completionTokens": 22, + "totalTokens": 166 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + } + ], + "usage": { + "promptTokens": 144, + "completionTokens": 22, + "totalTokens": 166 + } + } + }, + { + "time": "2024-06-20T17:10:46.146827-04:00", + "callContext": { + "id": "1718917844", + "tool": { + "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + }, + { + "time": "2024-06-20T17:10:46.146844-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/test.gpt b/pkg/tests/smoke/testdata/BobAsShell/test.gpt new file mode 100644 index 00000000..f04920bf --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/test.gpt @@ -0,0 +1,13 @@ + +tools: bob + +Ask Bob "how are you doing" and repeat his reply exactly. + +--- +name: bob +description: I'm Bob, a friendly guy. +args: question: The question to ask Bob. + +#!/bin/bash + +echo "Thanks for asking ${question}, I'm doing great fellow friendly AI tool!" 
diff --git a/pkg/tests/testdata/TestAgents/call1.golden b/pkg/tests/testdata/TestAgents/call1.golden index a4c9d565..c0465ac7 100644 --- a/pkg/tests/testdata/TestAgents/call1.golden +++ b/pkg/tests/testdata/TestAgents/call1.golden @@ -6,14 +6,30 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestAgents/call2.golden b/pkg/tests/testdata/TestAgents/call2.golden index b7f53512..a4b53537 100644 --- a/pkg/tests/testdata/TestAgents/call2.golden +++ b/pkg/tests/testdata/TestAgents/call2.golden @@ -6,7 +6,15 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestAgents/call3.golden b/pkg/tests/testdata/TestAgents/call3.golden index 5b6b3b87..4a001215 100644 --- a/pkg/tests/testdata/TestAgents/call3.golden +++ b/pkg/tests/testdata/TestAgents/call3.golden @@ -6,14 +6,30 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestAgents/test.gpt:agent3", "name": "agent3", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestAgents/step1.golden b/pkg/tests/testdata/TestAgents/step1.golden index 59f11e22..423069b5 100644 --- a/pkg/tests/testdata/TestAgents/step1.golden +++ b/pkg/tests/testdata/TestAgents/step1.golden @@ -14,14 +14,30 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], @@ -92,7 +108,15 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], @@ -154,14 +178,30 @@ "function": { "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestAgents/test.gpt:agent3", "name": "agent3", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestAsterick/call1.golden b/pkg/tests/testdata/TestAsterick/call1.golden index c530c3ba..d6d56df9 100644 --- a/pkg/tests/testdata/TestAsterick/call1.golden +++ b/pkg/tests/testdata/TestAsterick/call1.golden @@ -7,14 +7,11 @@ "name": "afoo", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -25,14 +22,11 @@ "name": "a", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/tests/testdata/TestContextSubChat/call1.golden b/pkg/tests/testdata/TestContextSubChat/call1.golden index f976d1c8..09da7a95 100644 --- a/pkg/tests/testdata/TestContextSubChat/call1.golden +++ b/pkg/tests/testdata/TestContextSubChat/call1.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/call4.golden b/pkg/tests/testdata/TestContextSubChat/call4.golden index 4fb1d2bb..abe24599 100644 --- a/pkg/tests/testdata/TestContextSubChat/call4.golden +++ b/pkg/tests/testdata/TestContextSubChat/call4.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/call6.golden b/pkg/tests/testdata/TestContextSubChat/call6.golden index f976d1c8..09da7a95 100644 --- a/pkg/tests/testdata/TestContextSubChat/call6.golden +++ b/pkg/tests/testdata/TestContextSubChat/call6.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/call9.golden b/pkg/tests/testdata/TestContextSubChat/call9.golden index 0cb1f56a..37f67e56 100644 --- a/pkg/tests/testdata/TestContextSubChat/call9.golden +++ b/pkg/tests/testdata/TestContextSubChat/call9.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/step1.golden b/pkg/tests/testdata/TestContextSubChat/step1.golden index 1a675b28..464efb36 100644 --- a/pkg/tests/testdata/TestContextSubChat/step1.golden +++ b/pkg/tests/testdata/TestContextSubChat/step1.golden @@ -13,7 +13,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestContextSubChat/step3.golden b/pkg/tests/testdata/TestContextSubChat/step3.golden index d57035d5..3c365c54 100644 --- a/pkg/tests/testdata/TestContextSubChat/step3.golden +++ b/pkg/tests/testdata/TestContextSubChat/step3.golden @@ -54,7 +54,15 @@ "function": { "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/call1.golden b/pkg/tests/testdata/TestDualSubChat/call1.golden index a0c21ccc..267690fe 100644 --- a/pkg/tests/testdata/TestDualSubChat/call1.golden +++ b/pkg/tests/testdata/TestDualSubChat/call1.golden @@ -5,14 +5,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/call7.golden b/pkg/tests/testdata/TestDualSubChat/call7.golden index 60842c68..5f1d227a 100644 --- a/pkg/tests/testdata/TestDualSubChat/call7.golden +++ b/pkg/tests/testdata/TestDualSubChat/call7.golden @@ -5,14 +5,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/step1.golden b/pkg/tests/testdata/TestDualSubChat/step1.golden index 421bb069..46932939 100644 --- a/pkg/tests/testdata/TestDualSubChat/step1.golden +++ b/pkg/tests/testdata/TestDualSubChat/step1.golden @@ -13,14 +13,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/step2.golden b/pkg/tests/testdata/TestDualSubChat/step2.golden index 82f1a7cd..2e1511c7 100644 --- a/pkg/tests/testdata/TestDualSubChat/step2.golden +++ b/pkg/tests/testdata/TestDualSubChat/step2.golden @@ -13,14 +13,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestDualSubChat/step3.golden b/pkg/tests/testdata/TestDualSubChat/step3.golden index ab2dc7c3..d21249c3 100644 --- a/pkg/tests/testdata/TestDualSubChat/step3.golden +++ b/pkg/tests/testdata/TestDualSubChat/step3.golden @@ -13,14 +13,30 @@ "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } }, { "function": { "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestExport/call1.golden b/pkg/tests/testdata/TestExport/call1.golden index bc35465f..8b360d8a 100644 --- a/pkg/tests/testdata/TestExport/call1.golden +++ b/pkg/tests/testdata/TestExport/call1.golden @@ -7,14 +7,11 @@ "name": "frommain", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -25,14 +22,11 @@ "name": "parentLocal", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -43,14 +37,11 @@ "name": "transient", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. 
This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/tests/testdata/TestExport/call3.golden b/pkg/tests/testdata/TestExport/call3.golden index 3b76c8af..a653821a 100644 --- a/pkg/tests/testdata/TestExport/call3.golden +++ b/pkg/tests/testdata/TestExport/call3.golden @@ -7,14 +7,11 @@ "name": "frommain", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -25,14 +22,11 @@ "name": "parentLocal", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -43,14 +37,11 @@ "name": "transient", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/tests/testdata/TestExportContext/call1.golden b/pkg/tests/testdata/TestExportContext/call1.golden index 5476a745..2e1d2421 100644 --- a/pkg/tests/testdata/TestExportContext/call1.golden +++ b/pkg/tests/testdata/TestExportContext/call1.golden @@ -7,14 +7,11 @@ "name": "subtool", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. 
This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/tests/testdata/TestInput/call1-resp.golden b/pkg/tests/testdata/TestInput/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestInput/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestInput/call1.golden b/pkg/tests/testdata/TestInput/call1.golden new file mode 100644 index 00000000..8f9b629c --- /dev/null +++ b/pkg/tests/testdata/TestInput/call1.golden @@ -0,0 +1,24 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're stupid!\n ha ha ha\n" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestInput/call2-resp.golden b/pkg/tests/testdata/TestInput/call2-resp.golden new file mode 100644 index 00000000..997ca1b9 --- /dev/null +++ b/pkg/tests/testdata/TestInput/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestInput/call2.golden b/pkg/tests/testdata/TestInput/call2.golden new file mode 100644 index 00000000..8da96da8 --- /dev/null +++ b/pkg/tests/testdata/TestInput/call2.golden @@ -0,0 +1,42 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're stupid!\n ha ha ha\n" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST 
RESULT CALL: 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're ugly!\n ha ha ha\n" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestInput/step1.golden b/pkg/tests/testdata/TestInput/step1.golden new file mode 100644 index 00000000..a617d41a --- /dev/null +++ b/pkg/tests/testdata/TestInput/step1.golden @@ -0,0 +1,47 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 1", + "toolID": "testdata/TestInput/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "No, You're stupid!\n ha ha ha\n", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're stupid!\n ha ha ha\n" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + } + ] + } + }, + "result": "TEST RESULT CALL: 1" + }, + "continuationToolID": "testdata/TestInput/test.gpt:" + } +}` diff --git a/pkg/tests/testdata/TestInput/step2.golden b/pkg/tests/testdata/TestInput/step2.golden new file mode 100644 index 00000000..e085edb2 --- /dev/null +++ b/pkg/tests/testdata/TestInput/step2.golden @@ -0,0 +1,65 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 2", + "toolID": "testdata/TestInput/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "No, You're stupid!\n ha ha ha\n", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're stupid!\n ha ha ha\n" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "No, You're ugly!\n ha ha ha\n" + 
} + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} + } + ] + } + }, + "result": "TEST RESULT CALL: 2" + }, + "continuationToolID": "testdata/TestInput/test.gpt:" + } +}` diff --git a/pkg/tests/testdata/TestInput/test.gpt b/pkg/tests/testdata/TestInput/test.gpt new file mode 100644 index 00000000..79522d90 --- /dev/null +++ b/pkg/tests/testdata/TestInput/test.gpt @@ -0,0 +1,25 @@ +input filter: taunt +context: exporter +chat: true + +Tool body + +--- +name: taunt +args: foo: this is useless +args: input: this is used +#!/bin/bash + +echo "No, ${INPUT}!" + +--- +name: exporter +share input filters: taunt2 + +--- +name: taunt2 +args: foo: this is useless +args: input: this is used + +#!/bin/bash +echo "${INPUT} ha ha ha" \ No newline at end of file diff --git a/pkg/tests/testdata/TestOutput/call1-resp.golden b/pkg/tests/testdata/TestOutput/call1-resp.golden new file mode 100644 index 00000000..a6c5b94a --- /dev/null +++ b/pkg/tests/testdata/TestOutput/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestOutput/call1.golden b/pkg/tests/testdata/TestOutput/call1.golden new file mode 100644 index 00000000..9430afee --- /dev/null +++ b/pkg/tests/testdata/TestOutput/call1.golden @@ -0,0 +1,24 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestOutput/call2-resp.golden b/pkg/tests/testdata/TestOutput/call2-resp.golden new file mode 100644 index 00000000..e5170fb8 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "Response 2" + } + ], + "usage": {} +}` 
diff --git a/pkg/tests/testdata/TestOutput/call2.golden b/pkg/tests/testdata/TestOutput/call2.golden new file mode 100644 index 00000000..32bb7039 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/call2.golden @@ -0,0 +1,42 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 2" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestOutput/call3.golden b/pkg/tests/testdata/TestOutput/call3.golden new file mode 100644 index 00000000..01aed5eb --- /dev/null +++ b/pkg/tests/testdata/TestOutput/call3.golden @@ -0,0 +1,60 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 2" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 2" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 3" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestOutput/step1.golden b/pkg/tests/testdata/TestOutput/step1.golden new file mode 100644 index 00000000..46f1b8e8 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/step1.golden @@ -0,0 +1,47 @@ +`{ + "done": false, + "content": "CHAT: true CONTENT: Response 1 CONTINUATION: true FINISH: false suffix\n", + "toolID": "testdata/TestOutput/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "Input 1", + 
"completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} + } + ] + } + }, + "result": "CHAT: true CONTENT: Response 1 CONTINUATION: true FINISH: false suffix\n" + }, + "continuationToolID": "testdata/TestOutput/test.gpt:" + } +}` diff --git a/pkg/tests/testdata/TestOutput/step2.golden b/pkg/tests/testdata/TestOutput/step2.golden new file mode 100644 index 00000000..d5fd89a0 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/step2.golden @@ -0,0 +1,65 @@ +`{ + "done": false, + "content": "CHAT: true CONTENT: Response 2 CONTINUATION: true FINISH: false suffix\n", + "toolID": "testdata/TestOutput/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "Input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 2" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "Response 2" + } + ], + "usage": {} + } + ] + } + }, + "result": "CHAT: true CONTENT: Response 2 CONTINUATION: true FINISH: false suffix\n" + }, + "continuationToolID": "testdata/TestOutput/test.gpt:" + } +}` diff --git a/pkg/tests/testdata/TestOutput/step3.golden b/pkg/tests/testdata/TestOutput/step3.golden new file mode 100644 index 00000000..c4e63adc --- /dev/null +++ b/pkg/tests/testdata/TestOutput/step3.golden @@ -0,0 +1,6 @@ +`{ + "done": true, + "content": "CHAT FINISH: CHAT: true 
CONTENT: Chat Done CONTINUATION: false FINISH: true suffix\n", + "toolID": "", + "state": null +}` diff --git a/pkg/tests/testdata/TestOutput/test.gpt b/pkg/tests/testdata/TestOutput/test.gpt new file mode 100644 index 00000000..cc35faa0 --- /dev/null +++ b/pkg/tests/testdata/TestOutput/test.gpt @@ -0,0 +1,31 @@ +output filter: prefix +context: context +chat: true + +Tool body + +--- +name: context +share output filters: suffix + +--- +name: prefix +args: chat: is it chat +args: output: the output content +args: continuation: if this is a non-terminating response +args: chatFinish: chat finish message + +#!/bin/bash + +echo CHAT: ${CHAT} +echo CONTENT: ${OUTPUT} +echo CONTINUATION: ${CONTINUATION} +echo FINISH: ${CHATFINISH} + +--- +name: suffix +args: output: the output content + +#!/bin/bash + +echo ${OUTPUT} suffix \ No newline at end of file diff --git a/pkg/tests/testdata/TestSubChat/call1.golden b/pkg/tests/testdata/TestSubChat/call1.golden index dc161663..53104a4e 100644 --- a/pkg/tests/testdata/TestSubChat/call1.golden +++ b/pkg/tests/testdata/TestSubChat/call1.golden @@ -5,7 +5,15 @@ "function": { "toolID": "testdata/TestSubChat/test.gpt:chatbot", "name": "chatbot", - "parameters": null + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } } } ], diff --git a/pkg/tests/testdata/TestSysContext/call1-resp.golden b/pkg/tests/testdata/TestSysContext/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestSysContext/call1.golden b/pkg/tests/testdata/TestSysContext/call1.golden new file mode 100644 index 00000000..c315d381 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/call1.golden @@ -0,0 +1,41 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestSysContext/file.gpt:I am Superman Agent", + "name": "iAmSuperman", + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"agentGroup\":[{\"named\":\"iAmSuperman\",\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context 
information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/file.gpt:I am Superman Agent\":{\"name\":\"I am Superman Agent\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm super\",\"id\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\",\"localTools\":{\"i am superman agent\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"},\"source\":{\"location\":\"testdata/TestSysContext/file.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"./file.gpt\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"./file.gpt\":[{\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho \\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e 
${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestSysContext/call1.golden.bkp b/pkg/tests/testdata/TestSysContext/call1.golden.bkp new file mode 100644 index 00000000..7bfb5403 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/call1.golden.bkp @@ -0,0 +1,41 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestSysContext/test.gpt:foo", + "name": "foo", + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the tool. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\",\"parentID\":\"1718924874\",\"displayText\":\"Running sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"foo\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}],\"foo\":[{\"reference\":\"foo\",\"toolID\":\"testdata/TestSysContext/test.gpt:foo\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\",\"foo\":\"testdata/TestSysContext/test.gpt:foo\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho 
\\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e ${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\",\"foo\":\"testdata/TestSysContext/test.gpt:foo\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":13},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:foo\":{\"name\":\"foo\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm an agent\",\"id\":\"testdata/TestSysContext/test.gpt:foo\",\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\",\"foo\":\"testdata/TestSysContext/test.gpt:foo\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestSysContext/context.json b/pkg/tests/testdata/TestSysContext/context.json new file mode 100644 index 00000000..c5608ec4 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/context.json @@ -0,0 +1 @@ +{"call":{"id":"","tool":{"name":"sys.context","description":"Retrieves the current internal GPTScript tool call context information","modelName":"gpt-4o","internalPrompt":null,"arguments":{"type":"object"},"instructions":"#!sys.context","id":"sys.context","source":{}},"agentGroup":[{"named":"iAmSuperman","reference":"./file.gpt","toolID":"testdata/TestSysContext/file.gpt:I am Superman Agent"}],"inputContext":null,"toolCategory":"context","toolName":"sys.context"},"program":{"name":"testdata/TestSysContext/test.gpt","entryToolId":"testdata/TestSysContext/test.gpt:","toolSet":{"sys.context":{"name":"sys.context","description":"Retrieves 
the current internal GPTScript tool call context information","modelName":"gpt-4o","internalPrompt":null,"arguments":{"type":"object"},"instructions":"#!sys.context","id":"sys.context","source":{}},"testdata/TestSysContext/file.gpt:I am Superman Agent":{"name":"I am Superman Agent","modelName":"gpt-4o","internalPrompt":null,"instructions":"I'm super","id":"testdata/TestSysContext/file.gpt:I am Superman Agent","localTools":{"i am superman agent":"testdata/TestSysContext/file.gpt:I am Superman Agent"},"source":{"location":"testdata/TestSysContext/file.gpt","lineNo":1},"workingDir":"testdata/TestSysContext"},"testdata/TestSysContext/test.gpt:":{"modelName":"gpt-4o","chat":true,"internalPrompt":null,"context":["agents"],"agents":["./file.gpt"],"instructions":"Tool body","id":"testdata/TestSysContext/test.gpt:","toolMapping":{"./file.gpt":[{"reference":"./file.gpt","toolID":"testdata/TestSysContext/file.gpt:I am Superman Agent"}],"agents":[{"reference":"agents","toolID":"testdata/TestSysContext/test.gpt:agents"}]},"localTools":{"":"testdata/TestSysContext/test.gpt:","agents":"testdata/TestSysContext/test.gpt:agents"},"source":{"location":"testdata/TestSysContext/test.gpt","lineNo":1},"workingDir":"testdata/TestSysContext"},"testdata/TestSysContext/test.gpt:agents":{"name":"agents","modelName":"gpt-4o","internalPrompt":null,"context":["sys.context"],"instructions":"#!/bin/bash\n\necho \"${GPTSCRIPT_CONTEXT}\"\necho \"${GPTSCRIPT_CONTEXT}\" \u003e ${GPTSCRIPT_TOOL_DIR}/context.json","id":"testdata/TestSysContext/test.gpt:agents","toolMapping":{"sys.context":[{"reference":"sys.context","toolID":"sys.context"}]},"localTools":{"":"testdata/TestSysContext/test.gpt:","agents":"testdata/TestSysContext/test.gpt:agents"},"source":{"location":"testdata/TestSysContext/test.gpt","lineNo":8},"workingDir":"testdata/TestSysContext"}}}} diff --git a/pkg/tests/testdata/TestSysContext/file.gpt b/pkg/tests/testdata/TestSysContext/file.gpt new file mode 100644 index 00000000..75f63087 --- 
/dev/null +++ b/pkg/tests/testdata/TestSysContext/file.gpt @@ -0,0 +1,3 @@ +name: I am Superman Agent + +I'm super \ No newline at end of file diff --git a/pkg/tests/testdata/TestSysContext/step1.golden b/pkg/tests/testdata/TestSysContext/step1.golden new file mode 100644 index 00000000..26b75508 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/step1.golden @@ -0,0 +1,64 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 1", + "toolID": "testdata/TestSysContext/test.gpt:", + "state": { + "continuation": { + "state": { + "input": "input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestSysContext/file.gpt:I am Superman Agent", + "name": "iAmSuperman", + "parameters": { + "properties": { + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"agentGroup\":[{\"named\":\"iAmSuperman\",\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context 
information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/file.gpt:I am Superman Agent\":{\"name\":\"I am Superman Agent\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm super\",\"id\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\",\"localTools\":{\"i am superman agent\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"},\"source\":{\"location\":\"testdata/TestSysContext/file.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"./file.gpt\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"./file.gpt\":[{\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho \\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e 
${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + } + ] + } + }, + "result": "TEST RESULT CALL: 1" + }, + "continuationToolID": "testdata/TestSysContext/test.gpt:" + } +}` diff --git a/pkg/tests/testdata/TestSysContext/test.go b/pkg/tests/testdata/TestSysContext/test.go new file mode 100644 index 00000000..462ff284 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/test.go @@ -0,0 +1,22 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/gptscript-ai/gptscript/pkg/engine" +) + +func main() { + data := struct { + Call engine.CallContext `json:"call,omitempty"` + }{} + if err := json.Unmarshal([]byte(os.Getenv("GPTSCRIPT_CONTEXT")), &data); err != nil { + panic(err) + } + + for _, agent := range data.Call.AgentGroup { + fmt.Println(agent.Reference, agent.ToolID) + } +} diff --git a/pkg/tests/testdata/TestSysContext/test.gpt b/pkg/tests/testdata/TestSysContext/test.gpt new file mode 100644 index 00000000..63e93417 --- /dev/null +++ b/pkg/tests/testdata/TestSysContext/test.gpt @@ -0,0 +1,14 @@ +context: agents +agents: ./file.gpt +chat: true + +Tool body + +--- +name: agents +context: sys.context + +#!/bin/bash + +echo "${GPTSCRIPT_CONTEXT}" +echo "${GPTSCRIPT_CONTEXT}" > ${GPTSCRIPT_TOOL_DIR}/context.json \ No newline at end of file diff --git a/pkg/tests/testdata/TestToolAs/call1.golden b/pkg/tests/testdata/TestToolAs/call1.golden index 
e7ec18b5..519f07b8 100644 --- a/pkg/tests/testdata/TestToolAs/call1.golden +++ b/pkg/tests/testdata/TestToolAs/call1.golden @@ -7,14 +7,11 @@ "name": "local", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } @@ -25,14 +22,11 @@ "name": "remote", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool or assistant. This may be instructions or question.", + "prompt": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, - "required": [ - "defaultPromptParameter" - ], "type": "object" } } diff --git a/pkg/types/set.go b/pkg/types/set.go index 9ff6b22e..230e112b 100644 --- a/pkg/types/set.go +++ b/pkg/types/set.go @@ -29,7 +29,7 @@ func (t *toolRefSet) HasTool(toolID string) bool { } func (t *toolRefSet) AddAll(values []ToolReference, err error) { - if t.err != nil { + if err != nil { t.err = err } for _, v := range values { diff --git a/pkg/types/tool.go b/pkg/types/tool.go index fe137e6a..a5124796 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -117,29 +117,33 @@ func (p Program) SetBlocking() Program { return p } -type BuiltinFunc func(ctx context.Context, env []string, input string) (string, error) +type BuiltinFunc func(ctx context.Context, env []string, input string, progress chan<- string) (string, error) type Parameters struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - MaxTokens int `json:"maxTokens,omitempty"` - ModelName string `json:"modelName,omitempty"` - ModelProvider bool `json:"modelProvider,omitempty"` - JSONResponse bool `json:"jsonResponse,omitempty"` - Chat bool 
`json:"chat,omitempty"` - Temperature *float32 `json:"temperature,omitempty"` - Cache *bool `json:"cache,omitempty"` - InternalPrompt *bool `json:"internalPrompt"` - Arguments *openapi3.Schema `json:"arguments,omitempty"` - Tools []string `json:"tools,omitempty"` - GlobalTools []string `json:"globalTools,omitempty"` - GlobalModelName string `json:"globalModelName,omitempty"` - Context []string `json:"context,omitempty"` - ExportContext []string `json:"exportContext,omitempty"` - Export []string `json:"export,omitempty"` - Agents []string `json:"agents,omitempty"` - Credentials []string `json:"credentials,omitempty"` - Blocking bool `json:"-"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + MaxTokens int `json:"maxTokens,omitempty"` + ModelName string `json:"modelName,omitempty"` + ModelProvider bool `json:"modelProvider,omitempty"` + JSONResponse bool `json:"jsonResponse,omitempty"` + Chat bool `json:"chat,omitempty"` + Temperature *float32 `json:"temperature,omitempty"` + Cache *bool `json:"cache,omitempty"` + InternalPrompt *bool `json:"internalPrompt"` + Arguments *openapi3.Schema `json:"arguments,omitempty"` + Tools []string `json:"tools,omitempty"` + GlobalTools []string `json:"globalTools,omitempty"` + GlobalModelName string `json:"globalModelName,omitempty"` + Context []string `json:"context,omitempty"` + ExportContext []string `json:"exportContext,omitempty"` + Export []string `json:"export,omitempty"` + Agents []string `json:"agents,omitempty"` + Credentials []string `json:"credentials,omitempty"` + InputFilters []string `json:"inputFilters,omitempty"` + ExportInputFilters []string `json:"exportInputFilters,omitempty"` + OutputFilters []string `json:"outputFilters,omitempty"` + ExportOutputFilters []string `json:"exportOutputFilters,omitempty"` + Blocking bool `json:"-"` } func (p Parameters) ToolRefNames() []string { @@ -149,7 +153,11 @@ func (p Parameters) ToolRefNames() []string { p.Export, p.ExportContext, 
p.Context, - p.Credentials) + p.Credentials, + p.InputFilters, + p.ExportInputFilters, + p.OutputFilters, + p.ExportOutputFilters) } type ToolDef struct { @@ -331,6 +339,30 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str return originalName, alias, args, nil } +func (t Tool) GetAgents(prg Program) (result []ToolReference, _ error) { + toolRefs, err := t.GetToolRefsFromNames(t.Agents) + if err != nil { + return nil, err + } + + // Agent Tool refs must be named + for i, toolRef := range toolRefs { + if toolRef.Named != "" { + continue + } + tool := prg.ToolSet[toolRef.ToolID] + name := tool.Name + if name == "" { + name = toolRef.Reference + } + normed := ToolNormalizer(name) + normed = strings.TrimSuffix(strings.TrimSuffix(normed, "Agent"), "Assistant") + toolRefs[i].Named = normed + } + + return toolRefs, nil +} + func (t Tool) GetToolRefsFromNames(names []string) (result []ToolReference, _ error) { for _, toolName := range names { toolRefs, ok := t.ToolMapping[toolName] @@ -379,11 +411,23 @@ func (t ToolDef) String() string { if len(t.Parameters.Export) != 0 { _, _ = fmt.Fprintf(buf, "Share Tools: %s\n", strings.Join(t.Parameters.Export, ", ")) } + if len(t.Parameters.Context) != 0 { + _, _ = fmt.Fprintf(buf, "Context: %s\n", strings.Join(t.Parameters.Context, ", ")) + } if len(t.Parameters.ExportContext) != 0 { _, _ = fmt.Fprintf(buf, "Share Context: %s\n", strings.Join(t.Parameters.ExportContext, ", ")) } - if len(t.Parameters.Context) != 0 { - _, _ = fmt.Fprintf(buf, "Context: %s\n", strings.Join(t.Parameters.Context, ", ")) + if len(t.Parameters.InputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Input Filters: %s\n", strings.Join(t.Parameters.InputFilters, ", ")) + } + if len(t.Parameters.ExportInputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Share Input Filters: %s\n", strings.Join(t.Parameters.ExportInputFilters, ", ")) + } + if len(t.Parameters.OutputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Output Filters: %s\n", 
strings.Join(t.Parameters.OutputFilters, ", ")) + } + if len(t.Parameters.ExportOutputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Share Output Filters: %s\n", strings.Join(t.Parameters.ExportOutputFilters, ", ")) } if t.Parameters.MaxTokens != 0 { _, _ = fmt.Fprintf(buf, "Max Tokens: %d\n", t.Parameters.MaxTokens) @@ -469,6 +513,8 @@ func (t Tool) GetExportedTools(prg Program) ([]ToolReference, error) { return result.List() } +// GetContextTools returns all tools that are in the context of the tool including all the +// contexts that are exported by the context tools. This will recurse all exports. func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { result := &toolRefSet{} @@ -485,9 +531,59 @@ func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { return result.List() } -func (t Tool) GetAgentGroup(agentGroup []ToolReference, toolID string) (result []ToolReference, _ error) { +func (t Tool) GetOutputFilterTools(program Program) ([]ToolReference, error) { + result := &toolRefSet{} + + outputFilterRefs, err := t.GetToolRefsFromNames(t.OutputFilters) + if err != nil { + return nil, err + } + + for _, outputFilterRef := range outputFilterRefs { + result.Add(outputFilterRef) + } + + contextRefs, err := t.GetContextTools(program) + if err != nil { + return nil, err + } + + for _, contextRef := range contextRefs { + contextTool := program.ToolSet[contextRef.ToolID] + result.AddAll(contextTool.GetToolRefsFromNames(contextTool.ExportOutputFilters)) + } + + return result.List() +} + +func (t Tool) GetInputFilterTools(program Program) ([]ToolReference, error) { + result := &toolRefSet{} + + inputFilterRefs, err := t.GetToolRefsFromNames(t.InputFilters) + if err != nil { + return nil, err + } + + for _, inputFilterRef := range inputFilterRefs { + result.Add(inputFilterRef) + } + + contextRefs, err := t.GetContextTools(program) + if err != nil { + return nil, err + } + + for _, contextRef := range contextRefs { + contextTool := 
program.ToolSet[contextRef.ToolID] + result.AddAll(contextTool.GetToolRefsFromNames(contextTool.ExportInputFilters)) + } + + return result.List() +} + +func (t Tool) GetNextAgentGroup(prg Program, agentGroup []ToolReference, toolID string) (result []ToolReference, _ error) { newAgentGroup := toolRefSet{} - if err := t.addAgents(&newAgentGroup); err != nil { + if err := t.addAgents(prg, &newAgentGroup); err != nil { return nil, err } @@ -496,15 +592,7 @@ func (t Tool) GetAgentGroup(agentGroup []ToolReference, toolID string) (result [ return newAgentGroup.List() } - existingAgentGroup := toolRefSet{} - existingAgentGroup.AddAll(agentGroup, nil) - - if existingAgentGroup.HasTool(toolID) { - return existingAgentGroup.List() - } - - // No group - return nil, nil + return agentGroup, nil } func (t Tool) GetCompletionTools(prg Program, agentGroup ...ToolReference) (result []CompletionTool, err error) { @@ -515,8 +603,8 @@ func (t Tool) GetCompletionTools(prg Program, agentGroup ...ToolReference) (resu return toolRefsToCompletionTools(refs, prg), nil } -func (t Tool) addAgents(result *toolRefSet) error { - subToolRefs, err := t.GetToolRefsFromNames(t.Parameters.Agents) +func (t Tool) addAgents(prg Program, result *toolRefSet) error { + subToolRefs, err := t.GetAgents(prg) if err != nil { return err } @@ -580,7 +668,7 @@ func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference) ([] return nil, err } - if err := t.addAgents(&result); err != nil { + if err := t.addAgents(prg, &result); err != nil { return nil, err } @@ -601,6 +689,8 @@ func toolRefsToCompletionTools(completionTools []ToolReference, prg Program) (re args := subTool.Parameters.Arguments if args == nil && !subTool.IsCommand() && !subTool.Chat { args = &system.DefaultToolSchema + } else if args == nil && !subTool.IsCommand() { + args = &system.DefaultChatSchema } if subTool.Instructions == "" { @@ -657,6 +747,10 @@ func (t Tool) GetInterpreter() string { return fields[0] } +func (t Tool) 
IsNoop() bool { + return t.Instructions == "" +} + func (t Tool) IsCommand() bool { return strings.HasPrefix(t.Instructions, CommandPrefix) } diff --git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index f1edc340..43af6cee 100644 --- a/pkg/types/tool_test.go +++ b/pkg/types/tool_test.go @@ -9,26 +9,30 @@ import ( func TestToolDef_String(t *testing.T) { tool := ToolDef{ Parameters: Parameters{ - Name: "Tool Sample", - Description: "This is a sample tool", - MaxTokens: 1024, - ModelName: "ModelSample", - ModelProvider: true, - JSONResponse: true, - Chat: true, - Temperature: float32Ptr(0.8), - Cache: boolPtr(true), - InternalPrompt: boolPtr(true), - Arguments: ObjectSchema("arg1", "desc1", "arg2", "desc2"), - Tools: []string{"Tool1", "Tool2"}, - GlobalTools: []string{"GlobalTool1", "GlobalTool2"}, - GlobalModelName: "GlobalModelSample", - Context: []string{"Context1", "Context2"}, - ExportContext: []string{"ExportContext1", "ExportContext2"}, - Export: []string{"Export1", "Export2"}, - Agents: []string{"Agent1", "Agent2"}, - Credentials: []string{"Credential1", "Credential2"}, - Blocking: true, + Name: "Tool Sample", + Description: "This is a sample tool", + MaxTokens: 1024, + ModelName: "ModelSample", + ModelProvider: true, + JSONResponse: true, + Chat: true, + Temperature: float32Ptr(0.8), + Cache: boolPtr(true), + InternalPrompt: boolPtr(true), + Arguments: ObjectSchema("arg1", "desc1", "arg2", "desc2"), + Tools: []string{"Tool1", "Tool2"}, + GlobalTools: []string{"GlobalTool1", "GlobalTool2"}, + GlobalModelName: "GlobalModelSample", + Context: []string{"Context1", "Context2"}, + ExportContext: []string{"ExportContext1", "ExportContext2"}, + Export: []string{"Export1", "Export2"}, + Agents: []string{"Agent1", "Agent2"}, + Credentials: []string{"Credential1", "Credential2"}, + Blocking: true, + InputFilters: []string{"Filter1", "Filter2"}, + ExportInputFilters: []string{"SharedFilter1", "SharedFilter2"}, + OutputFilters: []string{"Filter1", "Filter2"}, + 
ExportOutputFilters: []string{"SharedFilter1", "SharedFilter2"}, }, Instructions: "This is a sample instruction", } @@ -40,8 +44,12 @@ Description: This is a sample tool Agents: Agent1, Agent2 Tools: Tool1, Tool2 Share Tools: Export1, Export2 -Share Context: ExportContext1, ExportContext2 Context: Context1, Context2 +Share Context: ExportContext1, ExportContext2 +Input Filters: Filter1, Filter2 +Share Input Filters: SharedFilter1, SharedFilter2 +Output Filters: Filter1, Filter2 +Share Output Filters: SharedFilter1, SharedFilter2 Max Tokens: 1024 Model: ModelSample Model Provider: true diff --git a/pkg/types/toolstring.go b/pkg/types/toolstring.go index 3bd6fd57..ede3401e 100644 --- a/pkg/types/toolstring.go +++ b/pkg/types/toolstring.go @@ -74,7 +74,7 @@ func ToSysDisplayString(id string, args map[string]string) (string, error) { return fmt.Sprintf("Removing `%s`", args["location"]), nil case "sys.write": return fmt.Sprintf("Writing `%s`", args["filename"]), nil - case "sys.stat", "sys.getenv", "sys.abort", "sys.chat.finish", "sys.chat.history", "sys.echo", "sys.prompt", "sys.time.now": + case "sys.context", "sys.stat", "sys.getenv", "sys.abort", "sys.chat.finish", "sys.chat.history", "sys.echo", "sys.prompt", "sys.time.now": return "", nil default: return "", fmt.Errorf("unknown tool for display string: %s", id)