From 4ab918d4253414bb81316a61900cb4dd74399c43 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Mon, 1 Jul 2024 19:20:48 -0400 Subject: [PATCH 001/270] fix: option hiding in cred subcommands The credential subcommands throw a nil ptr exception due to a nil credential override flag value. Prevent this by including the value when hiding flags from credential subcommands. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/cli/gptscript.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 19a6b2f2..0281cd04 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -109,6 +109,7 @@ func New() *cobra.Command { newFlag := pflag.Flag{ Name: f.Name, Usage: f.Usage, + Value: f.Value, } if f.Name != "credential-context" { From 966b49375e6da9968cf83aa628f00f487176ace3 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Tue, 2 Jul 2024 16:38:28 -0400 Subject: [PATCH 002/270] enhance: set sdk cred override env var for ui Pass credential overrides to the UI by setting the UI tool's `GPTSCRIPT_SDKSERVER_CREDENTIAL_OVERRIDE` environment variable before running it. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/cli/gptscript.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 0281cd04..865b3873 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -363,10 +363,13 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { gptOpt.Env = append(gptOpt.Env, system.BinEnvVar+"="+system.Bin()) } - // If the DefaultModel is set, then pass the correct environment variable. 
+ // Pass the corrected environment variables for SDK server options if r.DefaultModel != "" { gptOpt.Env = append(gptOpt.Env, "GPTSCRIPT_SDKSERVER_DEFAULT_MODEL="+r.DefaultModel) } + if len(r.CredentialOverride) > 0 { + gptOpt.Env = append(gptOpt.Env, "GPTSCRIPT_SDKSERVER_CREDENTIAL_OVERRIDE="+strings.Join(r.CredentialOverride, ",")) + } args = append([]string{args[0]}, "--file="+file) From 51733579624915fdc4babdaaf3ffcce66a90b58a Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Tue, 2 Jul 2024 17:13:38 -0400 Subject: [PATCH 003/270] fix: create cred helper dir properly on Windows (#603) Signed-off-by: Grant Linville --- pkg/credentials/util.go | 3 ++- pkg/repos/get.go | 3 +-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/credentials/util.go b/pkg/credentials/util.go index f6165308..367b4d1d 100644 --- a/pkg/credentials/util.go +++ b/pkg/credentials/util.go @@ -5,7 +5,7 @@ import ( ) type CredentialHelperDirs struct { - RevisionFile, LastCheckedFile, BinDir, RepoDir string + RevisionFile, LastCheckedFile, BinDir, RepoDir, HelperDir string } func GetCredentialHelperDirs(cacheDir string) CredentialHelperDirs { @@ -14,5 +14,6 @@ func GetCredentialHelperDirs(cacheDir string) CredentialHelperDirs { LastCheckedFile: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "last-checked"), BinDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "bin"), RepoDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "repo"), + HelperDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers"), } } diff --git a/pkg/repos/get.go b/pkg/repos/get.go index be8d8594..2f96d8b3 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -7,7 +7,6 @@ import ( "fmt" "io/fs" "os" - "path" "path/filepath" "strings" "sync" @@ -133,7 +132,7 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co return err } - if err := os.MkdirAll(path.Dir(m.credHelperDirs.LastCheckedFile), 0755); err != nil { + 
if err := os.MkdirAll(m.credHelperDirs.HelperDir, 0755); err != nil { return err } From d39962e645bd63dce05bc2d957017729dac9ebc5 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Tue, 2 Jul 2024 18:55:19 -0400 Subject: [PATCH 004/270] enhance: generate name for OpenAPI tools when operation ID is blank (#601) Signed-off-by: Grant Linville --- pkg/loader/loader_test.go | 42 ------ pkg/loader/openapi.go | 12 +- pkg/loader/openapi_test.go | 88 ++++++++++++ .../testdata/openapi/TestOpenAPIv2.golden | 110 ++++++++++++++ .../testdata/openapi/TestOpenAPIv3.golden | 134 ++++++++++++++++++ .../TestOpenAPIv3NoOperationIDs.golden | 134 ++++++++++++++++++ .../testdata/openapi_v3_no_operation_ids.yaml | 116 +++++++++++++++ 7 files changed, 593 insertions(+), 43 deletions(-) create mode 100644 pkg/loader/openapi_test.go create mode 100644 pkg/loader/testdata/openapi/TestOpenAPIv2.golden create mode 100644 pkg/loader/testdata/openapi/TestOpenAPIv3.golden create mode 100644 pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden create mode 100644 pkg/loader/testdata/openapi_v3_no_operation_ids.yaml diff --git a/pkg/loader/loader_test.go b/pkg/loader/loader_test.go index a5c328b5..d70c45f5 100644 --- a/pkg/loader/loader_test.go +++ b/pkg/loader/loader_test.go @@ -10,7 +10,6 @@ import ( "path/filepath" "testing" - "github.com/gptscript-ai/gptscript/pkg/types" "github.com/hexops/autogold/v2" "github.com/stretchr/testify/require" ) @@ -68,47 +67,6 @@ func TestIsOpenAPI(t *testing.T) { require.Equal(t, 3, v, "(json) expected openapi v3") } -func TestLoadOpenAPI(t *testing.T) { - numOpenAPITools := func(set types.ToolSet) int { - num := 0 - for _, v := range set { - if v.IsOpenAPI() { - num++ - } - } - return num - } - - prgv3 := types.Program{ - ToolSet: types.ToolSet{}, - } - datav3, err := os.ReadFile("testdata/openapi_v3.yaml") - require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") - require.NoError(t, err, "failed to 
read openapi v3") - require.Equal(t, 3, numOpenAPITools(prgv3.ToolSet), "expected 3 openapi tools") - - prgv2json := types.Program{ - ToolSet: types.ToolSet{}, - } - datav2, err := os.ReadFile("testdata/openapi_v2.json") - require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2json, &source{Content: datav2}, "") - require.NoError(t, err, "failed to read openapi v2") - require.Equal(t, 3, numOpenAPITools(prgv2json.ToolSet), "expected 3 openapi tools") - - prgv2yaml := types.Program{ - ToolSet: types.ToolSet{}, - } - datav2, err = os.ReadFile("testdata/openapi_v2.yaml") - require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2yaml, &source{Content: datav2}, "") - require.NoError(t, err, "failed to read openapi v2 (yaml)") - require.Equal(t, 3, numOpenAPITools(prgv2yaml.ToolSet), "expected 3 openapi tools") - - require.EqualValuesf(t, prgv2json.ToolSet, prgv2yaml.ToolSet, "expected same toolset for openapi v2 json and yaml") -} - func TestHelloWorld(t *testing.T) { prg, err := Program(context.Background(), "https://raw.githubusercontent.com/ibuildthecloud/test/bafe5a62174e8a0ea162277dcfe3a2ddb7eea928/example/sub/tool.gpt", diff --git a/pkg/loader/openapi.go b/pkg/loader/openapi.go index 8790fbb7..45254c9d 100644 --- a/pkg/loader/openapi.go +++ b/pkg/loader/openapi.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/url" + "regexp" "slices" "sort" "strings" @@ -14,6 +15,8 @@ import ( "github.com/gptscript-ai/gptscript/pkg/types" ) +var toolNameRegex = regexp.MustCompile(`[^a-zA-Z0-9_-]+`) + // getOpenAPITools parses an OpenAPI definition and generates a set of tools from it. // Each operation will become a tool definition. 
// The tool's Instructions will be in the format "#!sys.openapi '{JSON Instructions}'", @@ -115,6 +118,13 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { toolDesc = toolDesc[:1024] } + toolName := operation.OperationID + if toolName == "" { + // When there is no operation ID, we use the method + path as the tool name and remove all characters + // except letters, numbers, underscores, and hyphens. + toolName = toolNameRegex.ReplaceAllString(strings.ToLower(method)+strings.ReplaceAll(pathString, "/", "_"), "") + } + var ( // auths are represented as a list of maps, where each map contains the names of the required security schemes. // Items within the same map are a logical AND. The maps themselves are a logical OR. For example: @@ -133,7 +143,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { tool := types.Tool{ ToolDef: types.ToolDef{ Parameters: types.Parameters{ - Name: operation.OperationID, + Name: toolName, Description: toolDesc, Arguments: &openapi3.Schema{ Type: &openapi3.Types{"object"}, diff --git a/pkg/loader/openapi_test.go b/pkg/loader/openapi_test.go new file mode 100644 index 00000000..d00ffcca --- /dev/null +++ b/pkg/loader/openapi_test.go @@ -0,0 +1,88 @@ +package loader + +import ( + "context" + "os" + "testing" + + "github.com/gptscript-ai/gptscript/pkg/types" + "github.com/hexops/autogold/v2" + "github.com/stretchr/testify/require" +) + +func TestLoadOpenAPI(t *testing.T) { + numOpenAPITools := func(set types.ToolSet) int { + num := 0 + for _, v := range set { + if v.IsOpenAPI() { + num++ + } + } + return num + } + + prgv3 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav3, err := os.ReadFile("testdata/openapi_v3.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + require.NoError(t, err, "failed to read openapi v3") + require.Equal(t, 3, numOpenAPITools(prgv3.ToolSet), "expected 3 openapi tools") + + 
prgv2json := types.Program{ + ToolSet: types.ToolSet{}, + } + datav2, err := os.ReadFile("testdata/openapi_v2.json") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv2json, &source{Content: datav2}, "") + require.NoError(t, err, "failed to read openapi v2") + require.Equal(t, 3, numOpenAPITools(prgv2json.ToolSet), "expected 3 openapi tools") + + prgv2yaml := types.Program{ + ToolSet: types.ToolSet{}, + } + datav2, err = os.ReadFile("testdata/openapi_v2.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv2yaml, &source{Content: datav2}, "") + require.NoError(t, err, "failed to read openapi v2 (yaml)") + require.Equal(t, 3, numOpenAPITools(prgv2yaml.ToolSet), "expected 3 openapi tools") + + require.EqualValuesf(t, prgv2json.ToolSet, prgv2yaml.ToolSet, "expected same toolset for openapi v2 json and yaml") +} + +func TestOpenAPIv3(t *testing.T) { + prgv3 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav3, err := os.ReadFile("testdata/openapi_v3.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + require.NoError(t, err) + + autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) +} + +func TestOpenAPIv3NoOperationIDs(t *testing.T) { + prgv3 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav3, err := os.ReadFile("testdata/openapi_v3_no_operation_ids.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + require.NoError(t, err) + + autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) +} + +func TestOpenAPIv2(t *testing.T) { + prgv2 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav2, err := os.ReadFile("testdata/openapi_v2.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv2, &source{Content: datav2}, "") + require.NoError(t, err) + + autogold.ExpectFile(t, prgv2.ToolSet, 
autogold.Dir("testdata/openapi")) +} diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv2.golden b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden new file mode 100644 index 00000000..90dd1967 --- /dev/null +++ b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden @@ -0,0 +1,110 @@ +types.ToolSet{ + ":": types.Tool{ + ToolDef: types.ToolDef{Parameters: types.Parameters{ + Description: "This is a tool set for the Swagger Petstore OpenAPI spec", + ModelName: "gpt-4o", + Export: []string{ + "listPets", + "createPets", + "showPetById", + }, + }}, + ID: ":", + ToolMapping: map[string][]types.ToolReference{ + "createPets": {{ + Reference: "createPets", + ToolID: ":createPets", + }}, + "listPets": {{ + Reference: "listPets", + ToolID: ":listPets", + }}, + "showPetById": {{ + Reference: "showPetById", + ToolID: ":showPetById", + }}, + }, + LocalTools: map[string]string{ + "": ":", + "createpets": ":createPets", + "listpets": ":listPets", + "showpetbyid": ":showPetById", + }, + }, + ":createPets": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "createPets", + Description: "Create a pet", + ModelName: "gpt-4o", + }, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + }, + ID: ":createPets", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "createpets": ":createPets", + "listpets": ":listPets", + "showpetbyid": ":showPetById", + }, + Source: types.ToolSource{LineNo: 2}, + }, + ":listPets": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "listPets", + Description: "List all pets", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{ + "object", + }, + Required: []string{}, + Properties: openapi3.Schemas{"limit": &openapi3.SchemaRef{Value: &openapi3.Schema{ 
+ Type: &openapi3.Types{"integer"}, + Format: "int32", + Description: "How many items to return at one time (max 100)", + }}}, + }, + }, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + }, + ID: ":listPets", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "createpets": ":createPets", + "listpets": ":listPets", + "showpetbyid": ":showPetById", + }, + Source: types.ToolSource{LineNo: 1}, + }, + ":showPetById": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "showPetById", + Description: "Info for a specific pet", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Required: []string{"petId"}, + Properties: openapi3.Schemas{"petId": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Description: "The id of the pet to retrieve", + }}}, + }, + }, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, + }, + ID: ":showPetById", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "createpets": ":createPets", + "listpets": ":listPets", + "showpetbyid": ":showPetById", + }, + Source: types.ToolSource{LineNo: 3}, + }, +} diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden new file mode 100644 index 00000000..72ccafae --- /dev/null +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden @@ -0,0 +1,134 @@ +types.ToolSet{ + ":": types.Tool{ + ToolDef: 
types.ToolDef{Parameters: types.Parameters{ + Description: "This is a tool set for the Swagger Petstore OpenAPI spec", + ModelName: "gpt-4o", + Export: []string{ + "listPets", + "createPets", + "showPetById", + }, + }}, + ID: ":", + ToolMapping: map[string][]types.ToolReference{ + "createPets": {{ + Reference: "createPets", + ToolID: ":createPets", + }}, + "listPets": {{ + Reference: "listPets", + ToolID: ":listPets", + }}, + "showPetById": {{ + Reference: "showPetById", + ToolID: ":showPetById", + }}, + }, + LocalTools: map[string]string{ + "": ":", + "createpets": ":createPets", + "listpets": ":listPets", + "showpetbyid": ":showPetById", + }, + }, + ":createPets": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "createPets", + Description: "Create a pet", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{ + "object", + }, + Required: []string{}, + Properties: openapi3.Schemas{"requestBodyContent": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Required: []string{ + "id", + "name", + }, + Properties: openapi3.Schemas{ + "id": &openapi3.SchemaRef{ + Value: &openapi3.Schema{ + Type: &openapi3.Types{ + "integer", + }, + Format: "int64", + }, + }, + "name": &openapi3.SchemaRef{Value: &openapi3.Schema{Type: &openapi3.Types{"string"}}}, + "tag": &openapi3.SchemaRef{Value: &openapi3.Schema{Type: &openapi3.Types{"string"}}}, + }, + }}}, + }, + }, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","apiKeyInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + }, + ID: ":createPets", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "createpets": ":createPets", + "listpets": ":listPets", + "showpetbyid": ":showPetById", + }, + Source: types.ToolSource{LineNo: 2}, + }, + ":listPets": 
types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "listPets", + Description: "List all pets", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Required: []string{}, + Properties: openapi3.Schemas{"limit": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"integer"}, + Format: "int32", + Description: "How many items to return at one time (max 100)", + Max: valast.Ptr(float64(100)), + }}}, + }, + }, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + }, + ID: ":listPets", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "createpets": ":createPets", + "listpets": ":listPets", + "showpetbyid": ":showPetById", + }, + Source: types.ToolSource{LineNo: 1}, + }, + ":showPetById": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "showPetById", + Description: "Info for a specific pet", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Required: []string{"petId"}, + Properties: openapi3.Schemas{"petId": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Description: "The id of the pet to retrieve", + }}}, + }, + }, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, + }, + ID: ":showPetById", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "createpets": ":createPets", + "listpets": ":listPets", + "showpetbyid": ":showPetById", + }, + 
Source: types.ToolSource{LineNo: 3}, + }, +} diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden new file mode 100644 index 00000000..3bcfd9e5 --- /dev/null +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden @@ -0,0 +1,134 @@ +types.ToolSet{ + ":": types.Tool{ + ToolDef: types.ToolDef{Parameters: types.Parameters{ + Description: "This is a tool set for the Swagger Petstore OpenAPI spec", + ModelName: "gpt-4o", + Export: []string{ + "get_pets", + "post_pets", + "get_pets_petId", + }, + }}, + ID: ":", + ToolMapping: map[string][]types.ToolReference{ + "get_pets": {{ + Reference: "get_pets", + ToolID: ":get_pets", + }}, + "get_pets_petId": {{ + Reference: "get_pets_petId", + ToolID: ":get_pets_petId", + }}, + "post_pets": {{ + Reference: "post_pets", + ToolID: ":post_pets", + }}, + }, + LocalTools: map[string]string{ + "": ":", + "get_pets": ":get_pets", + "get_pets_petid": ":get_pets_petId", + "post_pets": ":post_pets", + }, + }, + ":get_pets": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "get_pets", + Description: "List all pets", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{ + "object", + }, + Required: []string{}, + Properties: openapi3.Schemas{"limit": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"integer"}, + Format: "int32", + Description: "How many items to return at one time (max 100)", + Max: valast.Ptr(float64(100)), + }}}, + }, + }, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + }, + ID: ":get_pets", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "get_pets": ":get_pets", + 
"get_pets_petid": ":get_pets_petId", + "post_pets": ":post_pets", + }, + Source: types.ToolSource{LineNo: 1}, + }, + ":get_pets_petId": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "get_pets_petId", + Description: "Info for a specific pet", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Required: []string{"petId"}, + Properties: openapi3.Schemas{"petId": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Description: "The id of the pet to retrieve", + }}}, + }, + }, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, + }, + ID: ":get_pets_petId", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "get_pets": ":get_pets", + "get_pets_petid": ":get_pets_petId", + "post_pets": ":post_pets", + }, + Source: types.ToolSource{LineNo: 3}, + }, + ":post_pets": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "post_pets", + Description: "Create a pet", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Required: []string{}, + Properties: openapi3.Schemas{"requestBodyContent": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Required: []string{ + "id", + "name", + }, + Properties: openapi3.Schemas{ + "id": &openapi3.SchemaRef{ + Value: &openapi3.Schema{ + Type: &openapi3.Types{ + "integer", + }, + Format: "int64", + }, + }, + "name": &openapi3.SchemaRef{Value: &openapi3.Schema{Type: &openapi3.Types{"string"}}}, + "tag": &openapi3.SchemaRef{Value: &openapi3.Schema{Type: &openapi3.Types{"string"}}}, + }, + }}}, + }, + }, + Instructions: `#!sys.openapi 
'{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","apiKeyInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + }, + ID: ":post_pets", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "get_pets": ":get_pets", + "get_pets_petid": ":get_pets_petId", + "post_pets": ":post_pets", + }, + Source: types.ToolSource{LineNo: 2}, + }, +} diff --git a/pkg/loader/testdata/openapi_v3_no_operation_ids.yaml b/pkg/loader/testdata/openapi_v3_no_operation_ids.yaml new file mode 100644 index 00000000..53e58b88 --- /dev/null +++ b/pkg/loader/testdata/openapi_v3_no_operation_ids.yaml @@ -0,0 +1,116 @@ +openapi: "3.0.0" +info: + version: 1.0.0 + title: Swagger Petstore + license: + name: MIT +servers: + - url: http://petstore.swagger.io/v1 +paths: + /pets: + get: + summary: List all pets + tags: + - pets + parameters: + - name: limit + in: query + description: How many items to return at one time (max 100) + required: false + schema: + type: integer + maximum: 100 + format: int32 + responses: + '200': + description: A paged array of pets + headers: + x-next: + description: A link to the next page of responses + schema: + type: string + content: + application/json: + schema: + $ref: "#/components/schemas/Pets" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + post: + summary: Create a pet + tags: + - pets + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Pet' + required: true + responses: + '201': + description: Null response + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + /pets/{petId}: + get: + summary: Info for a specific pet + tags: + - pets + parameters: + - name: petId + in: path + required: true + description: The id of the pet to retrieve 
+ schema: + type: string + responses: + '200': + description: Expected response to a valid request + content: + application/json: + schema: + $ref: "#/components/schemas/Pet" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" +components: + schemas: + Pet: + type: object + required: + - id + - name + properties: + id: + type: integer + format: int64 + name: + type: string + tag: + type: string + Pets: + type: array + maxItems: 100 + items: + $ref: "#/components/schemas/Pet" + Error: + type: object + required: + - code + - message + properties: + code: + type: integer + format: int32 + message: + type: string \ No newline at end of file From f90f56d812b452203de7878ed55b7203b4d5824b Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Tue, 2 Jul 2024 17:13:05 -0400 Subject: [PATCH 005/270] chore: bump tui for cred overrides Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 868dc060..36106b3f 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d - github.com/gptscript-ai/tui v0.0.0-20240627044440-d416df63c10d + github.com/gptscript-ai/tui v0.0.0-20240702222655-901e7ec1faf5 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 @@ -62,7 +62,7 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/gookit/color v1.5.4 // indirect github.com/gorilla/css v1.0.0 // indirect - github.com/gptscript-ai/go-gptscript v0.0.0-20240625134437-4b83849794cc // indirect + github.com/gptscript-ai/go-gptscript v0.9.1 // indirect 
github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hexops/autogold v1.3.1 // indirect diff --git a/go.sum b/go.sum index 8a6d9dc8..9f29c3e4 100644 --- a/go.sum +++ b/go.sum @@ -171,10 +171,10 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf037 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6AVbvD9pKDVewykkwSAPwEpmIEQIR/4= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.0.0-20240625134437-4b83849794cc h1:ABV7VAK65YBkqL7VlNp5ryVXnRqkKQ+U/NZfUO3ypqA= -github.com/gptscript-ai/go-gptscript v0.0.0-20240625134437-4b83849794cc/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240627044440-d416df63c10d h1:hbJ5rkwMDDntqbvHMbsEoP8Nsa5nqTOzF+ktkw3uDQQ= -github.com/gptscript-ai/tui v0.0.0-20240627044440-d416df63c10d/go.mod h1:NwFdBDmGQvjLFFDnSRBRakkhw0MIO1sSdRnWNk4cCQ0= +github.com/gptscript-ai/go-gptscript v0.9.1 h1:O9oSmYvzQ2GZkPfDhXpiMGdtO9BMCVGeWLdJH88AJzg= +github.com/gptscript-ai/go-gptscript v0.9.1/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= +github.com/gptscript-ai/tui v0.0.0-20240702222655-901e7ec1faf5 h1:qCNJVFDHT2p0cuLo920DmUQoUngAuXGgqldYNwmC/R0= +github.com/gptscript-ai/tui v0.0.0-20240702222655-901e7ec1faf5/go.mod h1:mYzM8AwIiAdImy2g0BsdTPPuSbsONTMw5GIHDc/2o7g= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= From be843033189d9005c1f493283102cc22dbd1051c Mon Sep 17 00:00:00 2001 From: Nick Hale 
<4175918+njhale@users.noreply.github.com> Date: Tue, 2 Jul 2024 17:13:37 -0400 Subject: [PATCH 006/270] enhance: pass cred overrides to tui Make cred overrides work when using the TUI. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/cli/gptscript.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 865b3873..4820efd8 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -472,6 +472,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { DefaultModel: r.DefaultModel, TrustedRepoPrefixes: []string{"github.com/gptscript-ai"}, DisableCache: r.DisableCache, + CredentialOverrides: r.CredentialOverride, Input: toolInput, CacheDir: r.CacheDir, SubTool: r.SubTool, From ce0d86765c5566c1c0818d31d2a6c98470fd795a Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Tue, 2 Jul 2024 19:47:03 -0400 Subject: [PATCH 007/270] chore: add smoke tests for claude-3-5-sonnet Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .github/workflows/smoke.yaml | 32 +- .../claude-3-5-sonnet-20240620-expected.json | 929 ++++++++++++++++++ .../claude-3-5-sonnet-20240620-expected.json | 878 +++++++++++++++++ 3 files changed, 1838 insertions(+), 1 deletion(-) create mode 100644 pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json create mode 100644 pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index 5c26ef8d..a687e684 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -148,6 +148,37 @@ jobs: export PATH="$(pwd)/bin:${PATH}" make smoke + claude-3-5-sonnet-20240620: + needs: check-label + if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout base repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: Checkout PR 
code if running for a PR + if: ${{ github.event_name == 'pull_request_target' }} + uses: actions/checkout@v4 + with: + fetch-depth: 1 + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.21" + - env: + OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider + ANTHROPIC_API_KEY: ${{ secrets.SMOKE_ANTHROPIC_API_KEY }} + GPTSCRIPT_CREDENTIAL_OVERRIDE: "github.com/gptscript-ai/claude3-anthropic-provider/credential:ANTHROPIC_API_KEY" + name: Run smoke test for claude-3-5-sonnet-20240620 + run: | + echo "Running smoke test for model claude-3-5-sonnet-20240620" + export PATH="$(pwd)/bin:${PATH}" + make smoke + mistral-large-2402: needs: check-label if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} @@ -177,4 +208,3 @@ jobs: echo "Running smoke test for model mistral-large-2402" export PATH="$(pwd)/bin:${PATH}" make smoke - diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json new file mode 100644 index 00000000..63673b21 --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json @@ -0,0 +1,929 @@ +[ + { + "time": "2024-07-02T19:39:41.734737-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-07-02T19:39:41.735252-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": 
"testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-07-02T19:39:42.643066-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-07-02T19:39:42.643356-04:00", + "callContext": { + "id": "1719963583", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-07-02T19:39:43.666188-04:00", + "callContext": { + "id": "1719963583", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as 
github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:10887" + }, + { + "time": "2024-07-02T19:39:43.666385-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-07-02T19:39:43.666475-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + 
"workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1719963584", + "usage": {}, + "chatRequest": { + "model": "claude-3-5-sonnet-20240620", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-07-02T19:39:43.66707-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1719963584", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-07-02T19:39:45.331397-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1719963584", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" + }, + { + "time": "2024-07-02T19:39:45.332721-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1719963584", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + } + ], + "usage": {} + } + }, + { + "time": 
"2024-07-02T19:39:45.332824-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolSubCalls": { + "toolu_01XLGhfvziYUf8rFoQEbvw4P": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\": \"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-07-02T19:39:45.332869-04:00", + "callContext": { + "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963582" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"how are you doing\"}" + }, + { + "time": 
"2024-07-02T19:39:45.487113-04:00", + "callContext": { + "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963582" + }, + "type": "callChat", + "chatCompletionId": "1719963585", + "usage": {}, + "chatRequest": { + "model": "claude-3-5-sonnet-20240620", + "messages": [ + { + "role": "system", + "content": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + "content": "{\"question\": \"how are you doing\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-07-02T19:39:45.487696-04:00", + "callContext": { + "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": 
"testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963582" + }, + "type": "callProgress", + "chatCompletionId": "1719963585", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-07-02T19:39:46.547634-04:00", + "callContext": { + "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963582" + }, + "type": "callProgress", + "chatCompletionId": "1719963585", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-07-02T19:39:46.549112-04:00", + "callContext": { + "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963582" + }, + "type": "callChat", + "chatCompletionId": "1719963585", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": {} + } + }, + { + "time": "2024-07-02T19:39:46.549192-04:00", + "callContext": { + "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963582" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-07-02T19:39:46.549253-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-07-02T19:39:46.710406-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1719963586", + "usage": {}, + "chatRequest": { + "model": "claude-3-5-sonnet-20240620", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
+ }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "toolu_01XLGhfvziYUf8rFoQEbvw4P" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-07-02T19:39:46.710675-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1719963586", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-07-02T19:39:48.861353-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1719963586", + "usage": {}, + "content": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." 
+ }, + { + "time": "2024-07-02T19:39:48.862116-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1719963586", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." 
+ } + ], + "usage": {} + } + }, + { + "time": "2024-07-02T19:39:48.862154-04:00", + "callContext": { + "id": "1719963582", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." 
+ }, + { + "time": "2024-07-02T19:39:48.862164-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json new file mode 100644 index 00000000..4f3dd9d7 --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json @@ -0,0 +1,878 @@ +[ + { + "time": "2024-07-02T19:39:48.881112-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-07-02T19:39:48.881306-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-07-02T19:39:49.304999-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-07-02T19:39:49.305331-04:00", + "callContext": { + "id": "1719963590", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field 
and \"ANTHROPIC_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-07-02T19:39:50.324048-04:00", + "callContext": { + "id": "1719963590", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted 
Claude3 models", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + }, + 
"currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:10585" + }, + { + "time": "2024-07-02T19:39:50.324287-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-07-02T19:39:50.324366-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1719963591", + "usage": {}, + "chatRequest": { + "model": "claude-3-5-sonnet-20240620", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-07-02T19:39:50.324921-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1719963591", + "usage": {}, + "content": "Waiting for model response..." 
+ }, + { + "time": "2024-07-02T19:39:51.941234-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1719963591", + "usage": {}, + "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" + }, + { + "time": "2024-07-02T19:39:51.941444-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1719963591", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are 
you doing\"}" + } + } + } + ], + "usage": {} + } + }, + { + "time": "2024-07-02T19:39:51.941523-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolSubCalls": { + "toolu_01SHRzoGuUEQtwBp2ktaqu7P": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\": \"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-07-02T19:39:51.941567-04:00", + "callContext": { + "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963589", + "displayText": 
"Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"how are you doing\"}" + }, + { + "time": "2024-07-02T19:39:51.942241-04:00", + "callContext": { + "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963589", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1719963592", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-07-02T19:39:51.945596-04:00", + "callContext": { + "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": 
"testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963589", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callProgress", + "chatCompletionId": "1719963592", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-07-02T19:39:51.945769-04:00", + "callContext": { + "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963589", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1719963592", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-07-02T19:39:51.945833-04:00", + "callContext": { + "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + 
"description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1719963589", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-07-02T19:39:51.945871-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-07-02T19:39:52.112712-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": 
"testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1719963593", + "usage": {}, + "chatRequest": { + "model": "claude-3-5-sonnet-20240620", + "messages": [ + { + "role": "system", + "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-07-02T19:39:52.11309-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": 
"testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1719963593", + "usage": {}, + "content": "Waiting for model response..." + }, + { + "time": "2024-07-02T19:39:53.567604-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callProgress", + "chatCompletionId": "1719963593", + "usage": {}, + "content": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." 
+ }, + { + "time": "2024-07-02T19:39:53.568556-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1719963593", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." 
+ } + ], + "usage": {} + } + }, + { + "time": "2024-07-02T19:39:53.568602-04:00", + "callContext": { + "id": "1719963589", + "tool": { + "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." 
+ }, + { + "time": "2024-07-02T19:39:53.568622-04:00", + "type": "runFinish", + "usage": {} + } +] From 7e4b9cca7aa10fa5b83f3f52c12353933565ad65 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Tue, 2 Jul 2024 19:48:48 -0400 Subject: [PATCH 008/270] chore: remove smoke tests for claude-3-opus Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .github/workflows/smoke.yaml | 31 - .../Bob/claude-3-opus-20240229-expected.json | 929 ------------------ .../claude-3-opus-20240229-expected.json | 878 ----------------- 3 files changed, 1838 deletions(-) delete mode 100644 pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json delete mode 100644 pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index a687e684..0106ed6d 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -117,37 +117,6 @@ jobs: export PATH="$(pwd)/bin:${PATH}" make smoke - claude-3-opus-20240229: - needs: check-label - if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} - runs-on: ubuntu-22.04 - steps: - - name: Checkout base repository - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - name: Checkout PR code if running for a PR - if: ${{ github.event_name == 'pull_request_target' }} - uses: actions/checkout@v4 - with: - fetch-depth: 1 - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.event.pull_request.head.ref }} - - uses: actions/setup-go@v5 - with: - cache: false - go-version: "1.21" - - env: - OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} - GPTSCRIPT_DEFAULT_MODEL: claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider - ANTHROPIC_API_KEY: ${{ secrets.SMOKE_ANTHROPIC_API_KEY }} - GPTSCRIPT_CREDENTIAL_OVERRIDE: "github.com/gptscript-ai/claude3-anthropic-provider/credential:ANTHROPIC_API_KEY" - name: Run smoke test for 
claude-3-opus-20240229 - run: | - echo "Running smoke test for model claude-3-opus-20240229" - export PATH="$(pwd)/bin:${PATH}" - make smoke - claude-3-5-sonnet-20240620: needs: check-label if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json deleted file mode 100644 index 1e924928..00000000 --- a/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json +++ /dev/null @@ -1,929 +0,0 @@ -[ - { - "time": "2024-06-28T10:48:16.036954-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-06-28T10:48:16.037294-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-06-28T10:48:17.325263-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-06-28T10:48:17.325584-04:00", - "callContext": { - "id": "1719586098", - "tool": { - "name": "Anthropic Claude3 Model Provider", - "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential 
with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" - } - ] - }, - "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "dfd21adc6be9fbda34b79a71e661ac0cfb725548" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-06-28T10:48:18.342475-04:00", - "callContext": { - "id": "1719586098", - "tool": { - "name": "Anthropic Claude3 
Model Provider", - "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" - } - ] - }, - "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "dfd21adc6be9fbda34b79a71e661ac0cfb725548" - } - }, - "workingDir": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callFinish", - "usage": {}, - "content": "http://127.0.0.1:10961" - }, - { - "time": "2024-06-28T10:48:18.342649-04:00", - "type": "runFinish", - "usage": {} - }, - { - "time": "2024-06-28T10:48:18.342718-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1719586099", - "usage": {}, - "chatRequest": { - "model": "claude-3-opus-20240229", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
- } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-28T10:48:18.343115-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719586099", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-28T10:48:21.291497-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719586099", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" - }, - { - "time": "2024-06-28T10:48:21.291864-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1719586099", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "bob", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - } - ], - "usage": {} - } - }, - { - "time": "2024-06-28T10:48:21.291966-04:00", - 
"callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolSubCalls": { - "bob": { - "toolID": "testdata/Bob/test.gpt:bob", - "input": "{\"question\": \"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": "2024-06-28T10:48:21.291997-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586097" - }, - "type": "callStart", - "usage": {}, - "content": "{\"question\": \"how are you doing\"}" - }, - { - "time": "2024-06-28T10:48:21.50264-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, 
a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586097" - }, - "type": "callChat", - "chatCompletionId": "1719586100", - "usage": {}, - "chatRequest": { - "model": "claude-3-opus-20240229", - "messages": [ - { - "role": "system", - "content": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" - }, - { - "role": "user", - "content": "{\"question\": \"how are you doing\"}" - } - ], - "temperature": 0 - } - }, - { - "time": "2024-06-28T10:48:21.503008-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": 
"testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586097" - }, - "type": "callProgress", - "chatCompletionId": "1719586100", - "usage": {}, - "content": "Waiting for model response..." - }, - { - "time": "2024-06-28T10:48:22.875617-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586097" - }, - "type": "callProgress", - "chatCompletionId": "1719586100", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-28T10:48:22.875964-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586097" - }, - "type": "callChat", - "chatCompletionId": "1719586100", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": {} - } - }, - { - "time": "2024-06-28T10:48:22.876034-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586097" - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-28T10:48:22.876064-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-06-28T10:48:23.086894-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1719586101", - "usage": {}, - "chatRequest": { - "model": "claude-3-opus-20240229", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
- }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "bob", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", - "name": "bob", - "tool_call_id": "bob" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-28T10:48:23.087303-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719586101", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-28T10:48:25.243408-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719586101", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-28T10:48:25.243765-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1719586101", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": {} - } - }, - { - "time": "2024-06-28T10:48:25.243793-04:00", - "callContext": { - "id": "1719586097", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-28T10:48:25.243836-04:00", - "type": "runFinish", - "usage": {} - } -] diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json deleted file mode 100644 index 88077e74..00000000 --- a/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json +++ /dev/null @@ -1,878 +0,0 @@ -[ - { - "time": "2024-06-28T10:48:25.264658-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-06-28T10:48:25.264853-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" 
- }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-06-28T10:48:26.063945-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-06-28T10:48:26.064323-04:00", - "callContext": { - "id": "1719586107", - "tool": { - "name": "Anthropic Claude3 Model Provider", - "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" - } - ] - }, - "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider" - }, - 
"source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "dfd21adc6be9fbda34b79a71e661ac0cfb725548" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-06-28T10:48:27.081163-04:00", - "callContext": { - "id": "1719586107", - "tool": { - "name": "Anthropic Claude3 Model Provider", - "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - 
"toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" - } - ] - }, - "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "dfd21adc6be9fbda34b79a71e661ac0cfb725548" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callFinish", - "usage": {}, - "content": "http://127.0.0.1:10315" - }, - { - "time": "2024-06-28T10:48:27.081351-04:00", - "type": "runFinish", - "usage": {} - }, - { - "time": "2024-06-28T10:48:27.081414-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1719586108", - 
"usage": {}, - "chatRequest": { - "model": "claude-3-opus-20240229", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-28T10:48:27.081844-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719586108", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-28T10:48:30.452648-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719586108", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" - }, - { - "time": "2024-06-28T10:48:30.452871-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1719586108", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "bob", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - } - ], - 
"usage": {} - } - }, - { - "time": "2024-06-28T10:48:30.452992-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolSubCalls": { - "bob": { - "toolID": "testdata/BobAsShell/test.gpt:bob", - "input": "{\"question\": \"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": "2024-06-28T10:48:30.453034-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586106", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callStart", - "usage": {}, - 
"content": "{\"question\": \"how are you doing\"}" - }, - { - "time": "2024-06-28T10:48:30.453498-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586106", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1719586109", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-06-28T10:48:30.455907-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - 
"currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586106", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callProgress", - "chatCompletionId": "1719586109", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - }, - { - "time": "2024-06-28T10:48:30.45611-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586106", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1719586109", - "usage": {}, - "chatResponse": { - "usage": {} - } - }, - { - "time": "2024-06-28T10:48:30.456161-04:00", - "callContext": { - "id": "bob", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": 
"testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719586106", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - }, - { - "time": "2024-06-28T10:48:30.456194-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-06-28T10:48:30.652688-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - 
"source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1719586110", - "usage": {}, - "chatRequest": { - "model": "claude-3-opus-20240229", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." - }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "bob", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", - "name": "bob", - "tool_call_id": "bob" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-28T10:48:30.652933-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719586110", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-28T10:48:33.127339-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719586110", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-28T10:48:33.127696-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1719586110", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": {} - } - }, - { - "time": "2024-06-28T10:48:33.127717-04:00", - "callContext": { - "id": "1719586106", - "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-28T10:48:33.127758-04:00", - "type": "runFinish", - "usage": {} - } -] From ea0184f3b3c6aaff72e868b919fc8c9b657a0395 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Wed, 3 Jul 2024 10:56:35 -0400 Subject: [PATCH 009/270] chore: use laplateforme provider for mistral smoke tests Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .github/workflows/smoke.yaml | 5 +- .../Bob/mistral-large-2402-expected.json | 808 +++++++++++------- .../mistral-large-2402-expected.json | 553 +++++++----- 3 files changed, 839 insertions(+), 527 deletions(-) diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index 0106ed6d..59ff43a2 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -170,8 +170,9 @@ jobs: go-version: "1.21" - env: OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} - GPTSCRIPT_DEFAULT_MODEL: mistral-large-2402 from https://api.mistral.ai/v1 - GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY: ${{ 
secrets.SMOKE_GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY }} + GPTSCRIPT_DEFAULT_MODEL: mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider + MISTRAL_API_KEY: ${{ secrets.SMOKE_MISTRAL_API_KEY }} + GPTSCRIPT_CREDENTIAL_OVERRIDE: "github.com/gptscript-ai/mistral-laplateforme-provider/credential:MISTRAL_API_KEY" name: Run smoke test for mistral-large-2402 run: | echo "Running smoke test for model mistral-large-2402" diff --git a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json index f7341a7b..8730226a 100644 --- a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json +++ b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json @@ -1,15 +1,15 @@ [ { - "time": "2024-06-20T17:10:39.294578-04:00", + "time": "2024-07-03T10:53:01.406601-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-20T17:10:39.294835-04:00", + "time": "2024-07-03T10:53:01.406888-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -34,17 +34,123 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callStart", "usage": {} }, { - "time": "2024-06-20T17:10:39.501107-04:00", + "time": "2024-07-03T10:53:02.106674-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-07-03T10:53:02.107085-04:00", + "callContext": { + "id": "1720018383", + "tool": { + "name": "Mistral La Plateforme Provider", + "description": "Model provider for Mistral models running on La Plateforme", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as 
github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + } + ] + }, + "localTools": { + "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "cbf1aeb6db495b9b6223984651d29ac511d2748d" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": 
"2024-07-03T10:53:03.125117-04:00", + "callContext": { + "id": "1720018383", + "tool": { + "name": "Mistral La Plateforme Provider", + "description": "Model provider for Mistral models running on La Plateforme", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + } + ] + }, + "localTools": { + "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", + 
"Path": "/", + "Name": "tool.gpt", + "Revision": "cbf1aeb6db495b9b6223984651d29ac511d2748d" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:10244" + }, + { + "time": "2024-07-03T10:53:03.125375-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-07-03T10:53:03.12547-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -69,10 +175,11 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917841", + "chatCompletionId": "1720018384", "usage": {}, "chatRequest": { "model": "mistral-large-2402", @@ -104,11 +211,11 @@ } }, { - "time": "2024-06-20T17:10:39.501246-04:00", + "time": "2024-07-03T10:53:03.126002-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -133,19 +240,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917841", + "chatCompletionId": "1720018384", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-20T17:10:40.154068-04:00", + "time": "2024-07-03T10:53:03.438634-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -170,18 +278,19 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917841", + "chatCompletionId": "1720018384", "usage": {} }, { - "time": "2024-06-20T17:10:40.786117-04:00", + "time": "2024-07-03T10:53:03.917633-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -206,19 +315,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917841", + "chatCompletionId": "1720018384", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" }, { - "time": "2024-06-20T17:10:40.786643-04:00", + "time": "2024-07-03T10:53:03.9181-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -243,14 +353,15 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917841", + "chatCompletionId": "1720018384", "usage": { - "promptTokens": 85, + "promptTokens": 188, "completionTokens": 23, - "totalTokens": 108 + "totalTokens": 211 }, "chatResponse": { "role": "assistant", @@ -258,7 
+369,7 @@ { "toolCall": { "index": 0, - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -267,18 +378,18 @@ } ], "usage": { - "promptTokens": 85, + "promptTokens": 188, "completionTokens": 23, - "totalTokens": 108 + "totalTokens": 211 } } }, { - "time": "2024-06-20T17:10:40.78704-04:00", + "time": "2024-07-03T10:53:03.918447-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -303,10 +414,11 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "toolSubCalls": { - "WWpUyFBHH": { + "IePX3uH5y": { "toolID": "testdata/Bob/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -315,13 +427,13 @@ "usage": {} }, { - "time": "2024-06-20T17:10:40.787133-04:00", + "time": "2024-07-03T10:53:03.918585-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -344,22 +456,23 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callStart", "usage": {}, "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-06-20T17:10:40.94973-04:00", + "time": "2024-07-03T10:53:04.089188-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 
from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -382,12 +495,13 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callChat", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "chatRequest": { "model": "mistral-large-2402", @@ -405,13 +519,13 @@ } }, { - "time": "2024-06-20T17:10:40.950046-04:00", + "time": "2024-07-03T10:53:04.089548-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -434,23 +548,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-20T17:10:41.218814-04:00", + "time": "2024-07-03T10:53:04.287287-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -473,23 +588,23 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", - "usage": {}, - "content": "Thanks" + "chatCompletionId": "1720018385", + "usage": {} }, { - "time": "2024-06-20T17:10:41.218971-04:00", + "time": "2024-07-03T10:53:04.287688-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -512,23 +627,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks" }, { - "time": "2024-06-20T17:10:41.242129-04:00", + "time": "2024-07-03T10:53:04.302879-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -551,23 +667,24 @@ }, "workingDir": 
"testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for" }, { - "time": "2024-06-20T17:10:41.278815-04:00", + "time": "2024-07-03T10:53:04.323886-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -590,23 +707,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking" }, { - "time": "2024-06-20T17:10:41.298557-04:00", + "time": "2024-07-03T10:53:04.345227-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -629,23 +747,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"" }, { - "time": "2024-06-20T17:10:41.329684-04:00", + "time": "2024-07-03T10:53:04.364182-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, 
a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -668,23 +787,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how" }, { - "time": "2024-06-20T17:10:41.36213-04:00", + "time": "2024-07-03T10:53:04.387098-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -707,23 +827,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are" }, { - "time": "2024-06-20T17:10:41.404159-04:00", + "time": "2024-07-03T10:53:04.405872-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -746,23 +867,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": 
"1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you" }, { - "time": "2024-06-20T17:10:41.443596-04:00", + "time": "2024-07-03T10:53:04.427749-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -785,23 +907,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing" }, { - "time": "2024-06-20T17:10:41.46035-04:00", + "time": "2024-07-03T10:53:04.448563-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -824,23 +947,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\"," }, { - "time": "2024-06-20T17:10:41.479186-04:00", + "time": "2024-07-03T10:53:04.468026-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 
from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -863,23 +987,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\", I" }, { - "time": "2024-06-20T17:10:41.508921-04:00", + "time": "2024-07-03T10:53:04.490606-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -902,23 +1027,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'" }, { - "time": "2024-06-20T17:10:41.538159-04:00", + "time": "2024-07-03T10:53:04.511171-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -941,23 +1067,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for 
asking \"how are you doing\", I'm" }, { - "time": "2024-06-20T17:10:41.578073-04:00", + "time": "2024-07-03T10:53:04.531235-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -980,23 +1107,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing" }, { - "time": "2024-06-20T17:10:41.59766-04:00", + "time": "2024-07-03T10:53:04.553855-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -1019,23 +1147,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great" }, { - "time": "2024-06-20T17:10:41.711868-04:00", + "time": "2024-07-03T10:53:04.574503-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from 
github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -1058,23 +1187,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" }, { - "time": "2024-06-20T17:10:41.712039-04:00", + "time": "2024-07-03T10:53:04.598055-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -1097,23 +1227,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly" }, { - "time": "2024-06-20T17:10:41.712067-04:00", + "time": "2024-07-03T10:53:04.613412-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -1136,23 +1267,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": 
"1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" }, { - "time": "2024-06-20T17:10:41.718662-04:00", + "time": "2024-07-03T10:53:04.635897-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -1175,23 +1307,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-20T17:10:41.748288-04:00", + "time": "2024-07-03T10:53:04.656116-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -1214,23 +1347,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-20T17:10:41.78608-04:00", + "time": "2024-07-03T10:53:04.680962-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -1253,23 +1387,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-20T17:10:41.786165-04:00", + "time": "2024-07-03T10:53:04.681447-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -1292,16 +1427,17 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callChat", - "chatCompletionId": "1718917842", + "chatCompletionId": "1720018385", "usage": { - "promptTokens": 41, + "promptTokens": 143, "completionTokens": 19, - "totalTokens": 60 + "totalTokens": 162 }, "chatResponse": { "role": "assistant", @@ -1311,20 +1447,20 @@ } ], "usage": { - "promptTokens": 41, + "promptTokens": 143, "completionTokens": 19, - "totalTokens": 60 + "totalTokens": 162 } } }, { - "time": "2024-06-20T17:10:41.7862-04:00", + "time": "2024-07-03T10:53:04.681598-04:00", "callContext": { - "id": "WWpUyFBHH", + "id": 
"IePX3uH5y", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -1347,20 +1483,21 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840" + "parentID": "1720018382" }, "type": "callFinish", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-20T17:10:41.78624-04:00", + "time": "2024-07-03T10:53:04.681725-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1385,6 +1522,7 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "toolResults": 1, @@ -1392,11 +1530,11 @@ "usage": {} }, { - "time": "2024-06-20T17:10:41.957577-04:00", + "time": "2024-07-03T10:53:04.842182-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1421,10 +1559,11 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, "chatRequest": { "model": "mistral-large-2402", @@ -1438,7 +1577,7 @@ "content": "", "tool_calls": [ { - "id": "WWpUyFBHH", + "id": "IePX3uH5y", "type": "function", "function": { "name": "bob", @@ -1451,7 +1590,7 @@ "role": "tool", "content": "Thanks for asking \"how are you 
doing\", I'm doing great fellow friendly AI tool!", "name": "bob", - "tool_call_id": "WWpUyFBHH" + "tool_call_id": "IePX3uH5y" } ], "temperature": 0, @@ -1476,11 +1615,11 @@ } }, { - "time": "2024-06-20T17:10:41.957749-04:00", + "time": "2024-07-03T10:53:04.842685-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1505,19 +1644,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-20T17:10:42.455931-04:00", + "time": "2024-07-03T10:53:05.384131-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1542,19 +1682,19 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", - "usage": {}, - "content": "Bob" + "chatCompletionId": "1720018386", + "usage": {} }, { - "time": "2024-06-20T17:10:42.45599-04:00", + "time": "2024-07-03T10:53:05.384657-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1579,19 +1719,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": 
"1720018386", "usage": {}, - "content": "Bob replied" + "content": "Bob" }, { - "time": "2024-06-20T17:10:42.456033-04:00", + "time": "2024-07-03T10:53:05.404612-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1616,19 +1757,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied" + "content": "Bob said" }, { - "time": "2024-06-20T17:10:42.475053-04:00", + "time": "2024-07-03T10:53:05.430068-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1653,19 +1795,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied," + "content": "Bob said," }, { - "time": "2024-06-20T17:10:42.534667-04:00", + "time": "2024-07-03T10:53:05.452982-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1690,19 +1833,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"" + "content": "Bob said, \"" }, 
{ - "time": "2024-06-20T17:10:42.594649-04:00", + "time": "2024-07-03T10:53:05.501525-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1727,19 +1871,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks" + "content": "Bob said, \"Thanks for" }, { - "time": "2024-06-20T17:10:42.653279-04:00", + "time": "2024-07-03T10:53:05.50179-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1764,19 +1909,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for" + "content": "Bob said, \"Thanks for" }, { - "time": "2024-06-20T17:10:42.760299-04:00", + "time": "2024-07-03T10:53:05.532152-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1801,19 +1947,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking" + "content": "Bob said, \"Thanks for 
asking" }, { - "time": "2024-06-20T17:10:42.774637-04:00", + "time": "2024-07-03T10:53:05.553295-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1838,19 +1985,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking '" + "content": "Bob said, \"Thanks for asking '" }, { - "time": "2024-06-20T17:10:42.835456-04:00", + "time": "2024-07-03T10:53:05.576012-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1875,19 +2023,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how" + "content": "Bob said, \"Thanks for asking 'how" }, { - "time": "2024-06-20T17:10:42.889942-04:00", + "time": "2024-07-03T10:53:05.602885-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1912,19 +2061,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, 
\"Thanks for asking 'how are" + "content": "Bob said, \"Thanks for asking 'how are" }, { - "time": "2024-06-20T17:10:42.951997-04:00", + "time": "2024-07-03T10:53:05.624751-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1949,19 +2099,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you" + "content": "Bob said, \"Thanks for asking 'how are you" }, { - "time": "2024-06-20T17:10:43.009024-04:00", + "time": "2024-07-03T10:53:05.649543-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1986,19 +2137,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing" + "content": "Bob said, \"Thanks for asking 'how are you doing" }, { - "time": "2024-06-20T17:10:43.072963-04:00", + "time": "2024-07-03T10:53:05.674199-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2023,19 +2175,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": 
"callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing'," + "content": "Bob said, \"Thanks for asking 'how are you doing'," }, { - "time": "2024-06-20T17:10:43.129687-04:00", + "time": "2024-07-03T10:53:05.697398-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2060,19 +2213,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I" + "content": "Bob said, \"Thanks for asking 'how are you doing', I" }, { - "time": "2024-06-20T17:10:43.168387-04:00", + "time": "2024-07-03T10:53:05.725234-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2097,19 +2251,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'" }, { - "time": "2024-06-20T17:10:43.286476-04:00", + "time": "2024-07-03T10:53:05.747687-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from 
github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2134,19 +2289,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm" }, { - "time": "2024-06-20T17:10:43.286589-04:00", + "time": "2024-07-03T10:53:05.773761-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2171,19 +2327,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing" }, { - "time": "2024-06-20T17:10:43.286632-04:00", + "time": "2024-07-03T10:53:05.798352-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2208,19 +2365,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great" }, { - "time": 
"2024-06-20T17:10:43.323232-04:00", + "time": "2024-07-03T10:53:05.823105-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2245,19 +2403,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow" }, { - "time": "2024-06-20T17:10:43.3614-04:00", + "time": "2024-07-03T10:53:05.846373-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2282,19 +2441,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly" }, { - "time": "2024-06-20T17:10:43.402971-04:00", + "time": "2024-07-03T10:53:05.871647-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2319,19 +2479,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, 
"inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI" }, { - "time": "2024-06-20T17:10:43.442811-04:00", + "time": "2024-07-03T10:53:05.895603-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2356,19 +2517,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-20T17:10:43.477932-04:00", + "time": "2024-07-03T10:53:05.919754-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2393,19 +2555,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-20T17:10:43.524916-04:00", 
+ "time": "2024-07-03T10:53:05.956244-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2430,19 +2593,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-20T17:10:43.525171-04:00", + "time": "2024-07-03T10:53:05.956647-04:00", "callContext": { - "id": "1718917840", + "id": "1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2467,35 +2631,36 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917843", + "chatCompletionId": "1720018386", "usage": { - "promptTokens": 143, + "promptTokens": 246, "completionTokens": 23, - "totalTokens": 166 + "totalTokens": 269 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + "text": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" } ], "usage": { - "promptTokens": 143, + "promptTokens": 246, "completionTokens": 23, - "totalTokens": 166 + "totalTokens": 269 } } }, { - "time": "2024-06-20T17:10:43.525284-04:00", + "time": "2024-07-03T10:53:05.956695-04:00", "callContext": { - "id": "1718917840", + "id": 
"1720018382", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -2520,14 +2685,15 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callFinish", "usage": {}, - "content": "Bob replied, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-20T17:10:43.525327-04:00", + "time": "2024-07-03T10:53:05.956743-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json index b3e54dc4..43df5fa7 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json @@ -1,15 +1,15 @@ [ { - "time": "2024-06-20T17:10:43.55694-04:00", + "time": "2024-07-03T10:53:05.993864-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-20T17:10:43.557263-04:00", + "time": "2024-07-03T10:53:05.99419-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -34,17 +34,123 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callStart", "usage": {} }, { - "time": "2024-06-20T17:10:43.805494-04:00", + "time": "2024-07-03T10:53:06.366435-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-07-03T10:53:06.366952-04:00", + "callContext": { + "id": "1720018387", + "tool": { + "name": "Mistral La Plateforme 
Provider", + "description": "Model provider for Mistral models running on La Plateforme", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + } + ] + }, + "localTools": { + "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "cbf1aeb6db495b9b6223984651d29ac511d2748d" + } + }, + "workingDir": 
"https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-07-03T10:53:07.383203-04:00", + "callContext": { + "id": "1720018387", + "tool": { + "name": "Mistral La Plateforme Provider", + "description": "Model provider for Mistral models running on La Plateforme", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + } + ] + }, + "localTools": { + "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme 
Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "cbf1aeb6db495b9b6223984651d29ac511d2748d" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:10798" + }, + { + "time": "2024-07-03T10:53:07.383553-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-07-03T10:53:07.383621-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -69,10 +175,11 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917845", + "chatCompletionId": "1720018388", "usage": {}, "chatRequest": { "model": "mistral-large-2402", @@ -104,11 +211,11 @@ } }, { - "time": "2024-06-20T17:10:43.805682-04:00", + "time": "2024-07-03T10:53:07.384109-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -133,19 +240,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917845", + 
"chatCompletionId": "1720018388", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-20T17:10:44.078279-04:00", + "time": "2024-07-03T10:53:07.670442-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -170,18 +278,19 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917845", + "chatCompletionId": "1720018388", "usage": {} }, { - "time": "2024-06-20T17:10:44.913816-04:00", + "time": "2024-07-03T10:53:08.283965-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -206,19 +315,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917845", + "chatCompletionId": "1720018388", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" }, { - "time": "2024-06-20T17:10:44.914845-04:00", + "time": "2024-07-03T10:53:08.284275-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -243,14 +353,15 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917845", + "chatCompletionId": "1720018388", "usage": { - "promptTokens": 85, + "promptTokens": 188, 
"completionTokens": 23, - "totalTokens": 108 + "totalTokens": 211 }, "chatResponse": { "role": "assistant", @@ -258,7 +369,7 @@ { "toolCall": { "index": 0, - "id": "UPthtEfla", + "id": "K9FVJPqm6", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -267,18 +378,18 @@ } ], "usage": { - "promptTokens": 85, + "promptTokens": 188, "completionTokens": 23, - "totalTokens": 108 + "totalTokens": 211 } } }, { - "time": "2024-06-20T17:10:44.915086-04:00", + "time": "2024-07-03T10:53:08.285881-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -303,10 +414,11 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "toolSubCalls": { - "UPthtEfla": { + "K9FVJPqm6": { "toolID": "testdata/BobAsShell/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -315,13 +427,13 @@ "usage": {} }, { - "time": "2024-06-20T17:10:44.915147-04:00", + "time": "2024-07-03T10:53:08.286454-04:00", "callContext": { - "id": "UPthtEfla", + "id": "K9FVJPqm6", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -344,9 +456,10 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917844", + "parentID": "1720018386", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", @@ -354,13 +467,13 @@ "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-06-20T17:10:44.916248-04:00", + "time": "2024-07-03T10:53:08.287602-04:00", "callContext": { - 
"id": "UPthtEfla", + "id": "K9FVJPqm6", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -383,13 +496,14 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917844", + "parentID": "1720018386", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1718917846", + "chatCompletionId": "1720018389", "usage": {}, "chatRequest": { "model": "", @@ -397,13 +511,13 @@ } }, { - "time": "2024-06-20T17:10:44.92245-04:00", + "time": "2024-07-03T10:53:08.295318-04:00", "callContext": { - "id": "UPthtEfla", + "id": "K9FVJPqm6", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -426,24 +540,25 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917844", + "parentID": "1720018386", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callProgress", - "chatCompletionId": "1718917846", + "chatCompletionId": "1720018389", "usage": {}, "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-06-20T17:10:44.922905-04:00", + "time": "2024-07-03T10:53:08.295753-04:00", "callContext": { - "id": "UPthtEfla", + "id": "K9FVJPqm6", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from 
github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -466,26 +581,27 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917844", + "parentID": "1720018386", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1718917846", + "chatCompletionId": "1720018389", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-06-20T17:10:44.922998-04:00", + "time": "2024-07-03T10:53:08.29586-04:00", "callContext": { - "id": "UPthtEfla", + "id": "K9FVJPqm6", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "arguments": { "properties": { @@ -508,9 +624,10 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917844", + "parentID": "1720018386", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", @@ -518,11 +635,11 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-06-20T17:10:44.92306-04:00", + "time": "2024-07-03T10:53:08.295955-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -547,6 +664,7 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "toolResults": 1, @@ -554,11 +672,11 @@ "usage": {} }, { - "time": "2024-06-20T17:10:45.091313-04:00", + "time": "2024-07-03T10:53:08.467884-04:00", "callContext": { - "id": "1718917844", + "id": 
"1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -583,10 +701,11 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, "chatRequest": { "model": "mistral-large-2402", @@ -600,7 +719,7 @@ "content": "", "tool_calls": [ { - "id": "UPthtEfla", + "id": "K9FVJPqm6", "type": "function", "function": { "name": "bob", @@ -613,7 +732,7 @@ "role": "tool", "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", "name": "bob", - "tool_call_id": "UPthtEfla" + "tool_call_id": "K9FVJPqm6" } ], "temperature": 0, @@ -638,11 +757,11 @@ } }, { - "time": "2024-06-20T17:10:45.091762-04:00", + "time": "2024-07-03T10:53:08.468218-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -667,19 +786,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-20T17:10:45.427766-04:00", + "time": "2024-07-03T10:53:08.755588-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -704,19 +824,19 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", - "usage": {}, - "content": "Bob" + "chatCompletionId": "1720018390", + "usage": {} }, { - "time": "2024-06-20T17:10:45.427886-04:00", + "time": "2024-07-03T10:53:08.756052-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -741,19 +861,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied" + "content": "Bob" }, { - "time": "2024-06-20T17:10:45.427938-04:00", + "time": "2024-07-03T10:53:08.792872-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -778,19 +899,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied," + "content": "Bob said" }, { - "time": "2024-06-20T17:10:45.427998-04:00", + "time": 
"2024-07-03T10:53:08.843651-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -815,19 +937,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"" + "content": "Bob said," }, { - "time": "2024-06-20T17:10:45.428017-04:00", + "time": "2024-07-03T10:53:08.890513-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -852,19 +975,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"" + "content": "Bob said, \"" }, { - "time": "2024-06-20T17:10:45.428046-04:00", + "time": "2024-07-03T10:53:08.940592-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -889,19 +1013,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks" + "content": "Bob said, \"Thanks" }, { - "time": "2024-06-20T17:10:45.434307-04:00", + "time": "2024-07-03T10:53:08.979075-04:00", 
"callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -926,19 +1051,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for" + "content": "Bob said, \"Thanks for" }, { - "time": "2024-06-20T17:10:45.462196-04:00", + "time": "2024-07-03T10:53:09.026193-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -963,19 +1089,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking" + "content": "Bob said, \"Thanks for asking" }, { - "time": "2024-06-20T17:10:45.496748-04:00", + "time": "2024-07-03T10:53:09.082478-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1000,19 +1127,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how" + "content": "Bob said, \"Thanks for asking how" }, { - "time": 
"2024-06-20T17:10:45.516868-04:00", + "time": "2024-07-03T10:53:09.120629-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1037,19 +1165,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are" + "content": "Bob said, \"Thanks for asking how are" }, { - "time": "2024-06-20T17:10:45.545808-04:00", + "time": "2024-07-03T10:53:09.169561-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1074,19 +1203,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you" + "content": "Bob said, \"Thanks for asking how are you" }, { - "time": "2024-06-20T17:10:45.572091-04:00", + "time": "2024-07-03T10:53:09.216601-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1111,19 +1241,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - 
"content": "Bob replied, \"Thanks for asking how are you doing" + "content": "Bob said, \"Thanks for asking how are you doing" }, { - "time": "2024-06-20T17:10:45.602668-04:00", + "time": "2024-07-03T10:53:09.261113-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1148,19 +1279,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing," + "content": "Bob said, \"Thanks for asking how are you doing," }, { - "time": "2024-06-20T17:10:45.627618-04:00", + "time": "2024-07-03T10:53:09.306429-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1185,19 +1317,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I" + "content": "Bob said, \"Thanks for asking how are you doing, I" }, { - "time": "2024-06-20T17:10:45.659445-04:00", + "time": "2024-07-03T10:53:09.354951-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1222,19 +1355,20 @@ }, "workingDir": 
"testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'" + "content": "Bob said, \"Thanks for asking how are you doing, I'" }, { - "time": "2024-06-20T17:10:45.682109-04:00", + "time": "2024-07-03T10:53:09.401888-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1259,19 +1393,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm" + "content": "Bob said, \"Thanks for asking how are you doing, I'm" }, { - "time": "2024-06-20T17:10:45.710817-04:00", + "time": "2024-07-03T10:53:09.448467-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1296,19 +1431,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing" + "content": "Bob said, \"Thanks for asking how are you doing, I'm doing" }, { - "time": "2024-06-20T17:10:45.743363-04:00", + "time": "2024-07-03T10:53:09.493586-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 
from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1333,19 +1469,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great" + "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great" }, { - "time": "2024-06-20T17:10:45.766321-04:00", + "time": "2024-07-03T10:53:09.537745-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1370,19 +1507,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow" + "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow" }, { - "time": "2024-06-20T17:10:45.801585-04:00", + "time": "2024-07-03T10:53:09.585266-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1407,19 +1545,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing 
great fellow friendly" + "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly" }, { - "time": "2024-06-20T17:10:46.066262-04:00", + "time": "2024-07-03T10:53:09.63097-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1444,19 +1583,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI" + "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI" }, { - "time": "2024-06-20T17:10:46.093598-04:00", + "time": "2024-07-03T10:53:09.678117-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1481,19 +1621,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool" + "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool" }, { - "time": "2024-06-20T17:10:46.118737-04:00", + "time": "2024-07-03T10:53:09.726398-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from 
github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1518,19 +1659,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-20T17:10:46.146712-04:00", + "time": "2024-07-03T10:53:09.773449-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1555,19 +1697,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-20T17:10:46.146789-04:00", + "time": "2024-07-03T10:53:09.774342-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1592,35 +1735,36 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917847", + "chatCompletionId": "1720018390", "usage": { - "promptTokens": 144, + "promptTokens": 247, "completionTokens": 
22, - "totalTokens": 166 + "totalTokens": 269 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "text": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" } ], "usage": { - "promptTokens": 144, + "promptTokens": 247, "completionTokens": 22, - "totalTokens": 166 + "totalTokens": 269 } } }, { - "time": "2024-06-20T17:10:46.146827-04:00", + "time": "2024-07-03T10:53:09.774431-04:00", "callContext": { - "id": "1718917844", + "id": "1720018386", "tool": { - "modelName": "mistral-large-2402 from https://api.mistral.ai/v1", + "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" @@ -1645,14 +1789,15 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callFinish", "usage": {}, - "content": "Bob replied, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-06-20T17:10:46.146844-04:00", + "time": "2024-07-03T10:53:09.774496-04:00", "type": "runFinish", "usage": {} } From 753f5aa57bfc4e9623861ff45313a66a362ef2d2 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 3 Jul 2024 11:35:27 -0400 Subject: [PATCH 010/270] enhance: add simple integration test (#606) Signed-off-by: Grant Linville --- .github/workflows/integration.yaml | 40 ++++++++++++++++++++++++++++++ Makefile | 6 ++++- integration/cred_test.go | 13 ++++++++++ integration/helpers.go | 16 ++++++++++++ 4 files changed, 74 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/integration.yaml create mode 100644 integration/cred_test.go create mode 100644 integration/helpers.go diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml new 
file mode 100644 index 00000000..e0b78be2 --- /dev/null +++ b/.github/workflows/integration.yaml @@ -0,0 +1,40 @@ +name: integration + +on: + push: + branches: + - main + paths-ignore: + - docs/** + - tools/** + - README.md + pull_request: + branches: + - main + paths-ignore: + - docs/** + - tools/** + - README.md + +jobs: + integration: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-22.04, windows-latest] + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 1 + - uses: actions/setup-go@v5 + with: + cache: false + go-version: "1.22" + - name: Build + if: matrix.os == 'ubuntu-22.04' + run: make build + - name: build-windows + if: matrix.os == 'windows-latest' + run: make build-exe + - name: Run Integration Tests + run: make integration \ No newline at end of file diff --git a/Makefile b/Makefile index 0fed6344..e0d93a1d 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,11 @@ tidy: go mod tidy test: - go test -v ./... + go test -v ./pkg/... + +.PHONY: integration +integration: + go test -v ./integration/... smoke: build smoke: diff --git a/integration/cred_test.go b/integration/cred_test.go new file mode 100644 index 00000000..b92ccd55 --- /dev/null +++ b/integration/cred_test.go @@ -0,0 +1,13 @@ +package integration + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGPTScriptCredential(t *testing.T) { + out, err := GPTScriptExec("cred") + require.NoError(t, err) + require.Contains(t, out, "CREDENTIAL") +} diff --git a/integration/helpers.go b/integration/helpers.go new file mode 100644 index 00000000..8af581c3 --- /dev/null +++ b/integration/helpers.go @@ -0,0 +1,16 @@ +package integration + +import ( + "os/exec" + "runtime" +) + +func GPTScriptExec(args ...string) (string, error) { + cmd := exec.Command("../bin/gptscript", args...) + if runtime.GOOS == "windows" { + cmd = exec.Command("..\\bin\\gptscript.exe", args...) 
+ } + + out, err := cmd.CombinedOutput() + return string(out), err +} From 1104843ccc7c571568756e8468d7607d546fb467 Mon Sep 17 00:00:00 2001 From: Daishan Peng Date: Wed, 3 Jul 2024 11:29:02 -0700 Subject: [PATCH 011/270] Fix: Try to send interupt to sys.daemon to allow cleanup Signed-off-by: Daishan Peng --- pkg/daemon/daemon.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go index 58afbf98..bce6ec43 100644 --- a/pkg/daemon/daemon.go +++ b/pkg/daemon/daemon.go @@ -5,6 +5,7 @@ import ( "io" "os" "os/exec" + "runtime" ) func SysDaemon() error { @@ -19,5 +20,11 @@ func SysDaemon() error { cmd := exec.CommandContext(ctx, os.Args[2], os.Args[3:]...) cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout + cmd.Cancel = func() error { + if runtime.GOOS == "windows" { + return cmd.Process.Kill() + } + return cmd.Process.Signal(os.Interrupt) + } return cmd.Run() } From faecb7c65cda2c5d093724932e4d5d4c725567fd Mon Sep 17 00:00:00 2001 From: Daishan Peng Date: Wed, 3 Jul 2024 12:12:22 -0700 Subject: [PATCH 012/270] Revert "Fix: Try to send interupt to sys.daemon to allow cleanup" This reverts commit 1104843ccc7c571568756e8468d7607d546fb467. --- pkg/daemon/daemon.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go index bce6ec43..58afbf98 100644 --- a/pkg/daemon/daemon.go +++ b/pkg/daemon/daemon.go @@ -5,7 +5,6 @@ import ( "io" "os" "os/exec" - "runtime" ) func SysDaemon() error { @@ -20,11 +19,5 @@ func SysDaemon() error { cmd := exec.CommandContext(ctx, os.Args[2], os.Args[3:]...) 
cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout - cmd.Cancel = func() error { - if runtime.GOOS == "windows" { - return cmd.Process.Kill() - } - return cmd.Process.Signal(os.Interrupt) - } return cmd.Run() } From e4d174b9afb232bac243a7eea7be95b28b226a28 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 3 Jul 2024 16:33:49 -0400 Subject: [PATCH 013/270] chore: fix Go version to 1.22.4 There is a change in 1.22.5 that breaks sys.daemon on Windows. Fixing this version for now until we address this issue. Signed-off-by: Donnie Adams --- .github/workflows/main.yaml | 4 +++- .github/workflows/release.yaml | 4 +++- go.mod | 3 ++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 6b469c39..6ea47e8f 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -26,7 +26,9 @@ jobs: uses: actions/setup-go@v5 with: cache: false - go-version: "1.22" + # This can't be upgraded until the issue with sys.daemon on Windows is resolved + # After the issue is resolved, this can be set to 1.22 + go-version: "1.22.4" - name: Run GoReleaser uses: goreleaser/goreleaser-action@v4 with: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 18871150..883a479d 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -27,7 +27,9 @@ jobs: uses: actions/setup-go@v5 with: cache: false - go-version: "1.22" + # This can't be upgraded until the issue with sys.daemon on Windows is resolved + # After the issue is resolved, this can be set to 1.22 + go-version: "1.22.4" - name: Run GoReleaser uses: goreleaser/goreleaser-action@v4 with: diff --git a/go.mod b/go.mod index 36106b3f..fe1b3fa6 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,7 @@ module github.com/gptscript-ai/gptscript -go 1.22.3 +// This can't be upgraded until the issue with sys.daemon on Windows is resolved +go 1.22.4 require ( github.com/AlecAivazis/survey/v2 v2.3.7 From 
b1a31ac53223ccca819cea9e060f4075df8bb115 Mon Sep 17 00:00:00 2001 From: Atulpriya Sharma Date: Thu, 4 Jul 2024 16:03:05 +0530 Subject: [PATCH 014/270] Add GPTReview Example --- examples/gptreview.gpt | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 examples/gptreview.gpt diff --git a/examples/gptreview.gpt b/examples/gptreview.gpt new file mode 100644 index 00000000..2176c89a --- /dev/null +++ b/examples/gptreview.gpt @@ -0,0 +1,28 @@ + Name: Code Reviewer + Description: A tool to help you perform code review of open PRs + Context: learn-gh + Tools: sys.exec, sys.http.html2text?, sys.find, sys.read, sys.write + Args: PR_URL: The GitHub PR_URL + chat:true + + You have the gh cli available to you. Use it to perform code review for a pr. + + Perform the following steps in order: + 1. Ask the user for the ($PR_URL) and save it. + 2. Identify the files changed in the pull request ($PR_URL) using the pr number and perform a diff. + 1. Analyze the complete code of each identified file and perform a detailed line by line code review. + 2. Repeat the process for each changed file in the pr. + 3. Share your review comments separately for each file. + 4. In a new line write "Code: Approved" or "Code: Require Changes" based on the review. + --- + Name: learn-gh + Description: A tool to help you learn gh cli + + #!/usr/bin/env bash + + echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicated --sort flag." 
+ gh --help + gh repo --help + gh pr --help + gh pr checkout --help + gh pr diff --help \ No newline at end of file From e7193ad8872d8b1dc49d7b972f94dcfb84a7d63f Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Mon, 8 Jul 2024 10:57:52 -0400 Subject: [PATCH 015/270] Revert "Revert "Fix: Try to send interupt to sys.daemon to allow cleanup"" --- pkg/daemon/daemon.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go index 58afbf98..bce6ec43 100644 --- a/pkg/daemon/daemon.go +++ b/pkg/daemon/daemon.go @@ -5,6 +5,7 @@ import ( "io" "os" "os/exec" + "runtime" ) func SysDaemon() error { @@ -19,5 +20,11 @@ func SysDaemon() error { cmd := exec.CommandContext(ctx, os.Args[2], os.Args[3:]...) cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout + cmd.Cancel = func() error { + if runtime.GOOS == "windows" { + return cmd.Process.Kill() + } + return cmd.Process.Signal(os.Interrupt) + } return cmd.Run() } From b6442cdd23770049e7031ec62d14c7dd20321419 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Tue, 9 Jul 2024 16:05:31 -0700 Subject: [PATCH 016/270] chore: remove references to tap in favor of homebrew-core Signed-off-by: Taylor Price --- .goreleaser.yml | 13 ------------- README.md | 2 +- docs/docs/01-overview.md | 2 +- 3 files changed, 2 insertions(+), 15 deletions(-) diff --git a/.goreleaser.yml b/.goreleaser.yml index b04bb8ea..d76c2e2e 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -50,16 +50,3 @@ release: owner: gptscript-ai name: gptscript prerelease: auto - -brews: - - description: "GPTScript CLI" - install: | - bin.install "gptscript" - generate_completions_from_executable(bin/"gptscript", "completion", shells: [:bash, :zsh, :fish]) - homepage: "https://github.com/gptscript-ai/gptscript" - skip_upload: false - folder: "Formula" - repository: - owner: gptscript-ai - name: homebrew-tap - token: "{{ .Env.GH_PROJECT_TOKEN }}" diff --git a/README.md b/README.md index 
65cc37e4..1da91c90 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ Here are some sample use cases of GPTScript: ### Getting started MacOS and Linux (Homebrew): ``` -brew install gptscript-ai/tap/gptscript +brew install gptscript gptscript github.com/gptscript-ai/llm-basics-demo ``` diff --git a/docs/docs/01-overview.md b/docs/docs/01-overview.md index a3e71857..5ba2e7d1 100644 --- a/docs/docs/01-overview.md +++ b/docs/docs/01-overview.md @@ -22,7 +22,7 @@ Here are some sample use cases of GPTScript: ```shell - brew install gptscript-ai/tap/gptscript + brew install gptscript gptscript github.com/gptscript-ai/llm-basics-demo ``` From 476f4ae22ecaa6cdd101b12f74fce7b47cc46cc4 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 10 Jul 2024 16:52:09 -0700 Subject: [PATCH 017/270] chore: add support for tools with only a single agent line and nothing else --- pkg/engine/engine.go | 5 + pkg/input/input.go | 6 +- pkg/parser/parser.go | 1 + pkg/tests/runner_test.go | 20 +++ .../testdata/TestAgentOnly/call1-resp.golden | 16 ++ pkg/tests/testdata/TestAgentOnly/call1.golden | 42 +++++ .../testdata/TestAgentOnly/call2-resp.golden | 9 + pkg/tests/testdata/TestAgentOnly/call2.golden | 57 ++++++ pkg/tests/testdata/TestAgentOnly/step1.golden | 168 ++++++++++++++++++ pkg/tests/testdata/TestAgentOnly/test.gpt | 20 +++ pkg/types/tool.go | 4 + 11 files changed, 346 insertions(+), 2 deletions(-) create mode 100644 pkg/tests/testdata/TestAgentOnly/call1-resp.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/call1.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/call2-resp.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/call2.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/step1.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/test.gpt diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index b0a6e4eb..43c1da99 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -207,6 +207,11 @@ func NewContext(ctx 
context.Context, prg *types.Program, input string) (Context, } callCtx.AgentGroup = agentGroup + + if callCtx.Tool.IsAgentsOnly() && len(callCtx.AgentGroup) > 0 { + callCtx.Tool = callCtx.Program.ToolSet[callCtx.AgentGroup[0].ToolID] + } + return callCtx, nil } diff --git a/pkg/input/input.go b/pkg/input/input.go index 0037fa5e..3d480431 100644 --- a/pkg/input/input.go +++ b/pkg/input/input.go @@ -3,10 +3,12 @@ package input import ( "fmt" "io" + "io/fs" "os" "path/filepath" "strings" + "github.com/gptscript-ai/gptscript/internal" "github.com/gptscript-ai/gptscript/pkg/loader" "github.com/gptscript-ai/gptscript/pkg/types" ) @@ -33,7 +35,7 @@ func FromFile(file string) (string, error) { } return string(data), nil } else if file != "" { - if s, err := os.Stat(file); err == nil && s.IsDir() { + if s, err := fs.Stat(internal.FS, file); err == nil && s.IsDir() { for _, ext := range types.DefaultFiles { if _, err := os.Stat(filepath.Join(file, ext)); err == nil { file = filepath.Join(file, ext) @@ -42,7 +44,7 @@ func FromFile(file string) (string, error) { } } log.Debugf("reading file %s", file) - data, err := os.ReadFile(file) + data, err := fs.ReadFile(internal.FS, file) if err != nil { return "", fmt.Errorf("reading %s: %w", file, err) } diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index b998320b..22cd5e9e 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -199,6 +199,7 @@ func (c *context) finish(tools *[]Node) { len(c.tool.GlobalTools) > 0 || len(c.tool.ExportInputFilters) > 0 || len(c.tool.ExportOutputFilters) > 0 || + len(c.tool.Agents) > 0 || c.tool.Chat { *tools = append(*tools, Node{ ToolNode: &ToolNode{ diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 6efe4a11..0b75c8a2 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -800,6 +800,26 @@ func TestExport(t *testing.T) { assert.Equal(t, "TEST RESULT CALL: 3", x) } +func TestAgentOnly(t *testing.T) { + r := tester.NewRunner(t) + + prg, err := 
r.Load("") + require.NoError(t, err) + + r.RespondWith(tester.Result{ + Func: types.CompletionFunctionCall{ + Name: "agent2", + Arguments: "Agent 2 input", + }, + }) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "Input 1") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 2").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) +} func TestAgents(t *testing.T) { r := tester.NewRunner(t) diff --git a/pkg/tests/testdata/TestAgentOnly/call1-resp.golden b/pkg/tests/testdata/TestAgentOnly/call1-resp.golden new file mode 100644 index 00000000..ea865122 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/call1-resp.golden @@ -0,0 +1,16 @@ +`{ + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "agent2", + "arguments": "Agent 2 input" + } + } + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestAgentOnly/call1.golden b/pkg/tests/testdata/TestAgentOnly/call1.golden new file mode 100644 index 00000000..b63c6fd3 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/call1.golden @@ -0,0 +1,42 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent2", + "name": "agent2", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "I am agent1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git a/pkg/tests/testdata/TestAgentOnly/call2-resp.golden b/pkg/tests/testdata/TestAgentOnly/call2-resp.golden new file mode 100644 index 00000000..997ca1b9 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestAgentOnly/call2.golden b/pkg/tests/testdata/TestAgentOnly/call2.golden new file mode 100644 index 00000000..82f95523 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/call2.golden @@ -0,0 +1,57 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent1", + "name": "agent1", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent3", + "name": "agent3", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "I am agent2" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Agent 2 input" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git a/pkg/tests/testdata/TestAgentOnly/step1.golden b/pkg/tests/testdata/TestAgentOnly/step1.golden new file mode 100644 index 00000000..662dbf04 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/step1.golden @@ -0,0 +1,168 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 2", + "toolID": "testdata/TestAgentOnly/test.gpt:agent2", + "state": { + "continuation": { + "state": { + "input": "Input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent2", + "name": "agent2", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "I am agent1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "agent2", + "arguments": "Agent 2 input" + } + } + } + ], + "usage": {} + } + ], + "chat": true + }, + "pending": { + "call_1": { + "index": 0, + "id": "call_1", + "function": { + "name": "agent2", + "arguments": "Agent 2 input" + } + } + } + }, + "calls": { + "call_1": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent2", + "input": "Agent 2 input" + } + } + }, + "subCalls": [ + { + "toolId": "testdata/TestAgentOnly/test.gpt:agent2", + "callId": "call_1", + "state": { + "continuation": { + "state": { + "input": "Agent 2 input", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent1", + "name": "agent1", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent3", + "name": "agent3", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "I am agent2" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Agent 2 input" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} + } + ], + "chat": true + } + }, + "result": "TEST RESULT CALL: 2" + }, + "continuationToolID": "testdata/TestAgentOnly/test.gpt:agent2" + } + } + ], + "subCallID": "call_1" + } +}` diff --git a/pkg/tests/testdata/TestAgentOnly/test.gpt b/pkg/tests/testdata/TestAgentOnly/test.gpt new file mode 100644 index 00000000..f2ae9f9e --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/test.gpt @@ -0,0 +1,20 @@ +agents: agent1, agent2 + +---- +name: agent1 +chat: true + +I am agent1 + +---- +name: agent2 +chat: true +agents: agent3 + +I am agent2 + +--- +name: agent3 +chat: true + +I am agent3 \ No newline at end of file diff --git a/pkg/types/tool.go b/pkg/types/tool.go index b0af5183..e4b3424a 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -763,6 +763,10 @@ func (t Tool) IsOpenAPI() bool { return strings.HasPrefix(t.Instructions, OpenAPIPrefix) } +func (t Tool) IsAgentsOnly() bool { + return t.IsNoop() && len(t.Context) == 0 +} + func (t Tool) IsEcho() bool { return strings.HasPrefix(t.Instructions, EchoPrefix) } From f773577c977feb3da163dbac7f1921df25211aa6 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Thu, 11 Jul 2024 10:37:26 -0700 Subject: [PATCH 018/270] chore: maintain homebrew tap as well as homebrew core formulas Signed-off-by: Taylor Price --- .github/workflows/release.yaml | 10 ++ .goreleaser.yml | 7 +- docs/docs/01-overview.md | 11 ++ examples/gptreview.gpt | 28 +++ pkg/engine/engine.go | 5 + pkg/input/input.go | 6 +- pkg/parser/parser.go | 1 + pkg/tests/runner_test.go | 20 +++ .../testdata/TestAgentOnly/call1-resp.golden | 16 ++ 
pkg/tests/testdata/TestAgentOnly/call1.golden | 42 +++++ .../testdata/TestAgentOnly/call2-resp.golden | 9 + pkg/tests/testdata/TestAgentOnly/call2.golden | 57 ++++++ pkg/tests/testdata/TestAgentOnly/step1.golden | 168 ++++++++++++++++++ pkg/tests/testdata/TestAgentOnly/test.gpt | 20 +++ pkg/types/tool.go | 4 + 15 files changed, 399 insertions(+), 5 deletions(-) create mode 100644 examples/gptreview.gpt create mode 100644 pkg/tests/testdata/TestAgentOnly/call1-resp.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/call1.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/call2-resp.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/call2.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/step1.golden create mode 100644 pkg/tests/testdata/TestAgentOnly/test.gpt diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 883a479d..d09608e7 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -40,6 +40,16 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_PROJECT_TOKEN: ${{ secrets.GH_PROJECT_TOKEN }} GORELEASER_CURRENT_TAG: ${{ github.ref_name }} + homebrew-release: + needs: release-tag + if: "! contains(github.ref_name, '-rc')" + runs-on: ubuntu-latest + steps: + - name: Update Homebrew formula + uses: dawidd6/action-homebrew-bump-formula@v3 + with: + token: ${{secrets.BREW_GH_TOKEN}} + formula: gptscript winget-release: needs: release-tag if: "! 
contains(github.ref_name, '-rc')" diff --git a/.goreleaser.yml b/.goreleaser.yml index b04bb8ea..3f767be0 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -52,13 +52,14 @@ release: prerelease: auto brews: - - description: "GPTScript CLI" + - name: gptscript + description: "GPTScript CLI" install: | bin.install "gptscript" - generate_completions_from_executable(bin/"gptscript", "completion", shells: [:bash, :zsh, :fish]) + generate_completions_from_executable(bin/"gptscript", "completion") homepage: "https://github.com/gptscript-ai/gptscript" skip_upload: false - folder: "Formula" + directory: "Formula" repository: owner: gptscript-ai name: homebrew-tap diff --git a/docs/docs/01-overview.md b/docs/docs/01-overview.md index a3e71857..b3d5b9fc 100644 --- a/docs/docs/01-overview.md +++ b/docs/docs/01-overview.md @@ -21,10 +21,21 @@ Here are some sample use cases of GPTScript: + ## Homebrew Tap +___ ```shell brew install gptscript-ai/tap/gptscript gptscript github.com/gptscript-ai/llm-basics-demo ``` + ## Homebrew +___ +:::warning +The [formula in homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/g/gptscript.rb) might be slightly outdated. Use our homebrew tap to always get the latest updates. +::: + ``` + brew install gptscript + gptscript github.com/gptscript-ai/llm-basics-demo + ``` ```shell diff --git a/examples/gptreview.gpt b/examples/gptreview.gpt new file mode 100644 index 00000000..2176c89a --- /dev/null +++ b/examples/gptreview.gpt @@ -0,0 +1,28 @@ + Name: Code Reviewer + Description: A tool to help you perform code review of open PRs + Context: learn-gh + Tools: sys.exec, sys.http.html2text?, sys.find, sys.read, sys.write + Args: PR_URL: The GitHub PR_URL + chat:true + + You have the gh cli available to you. Use it to perform code review for a pr. + + Perform the following steps in order: + 1. Ask the user for the ($PR_URL) and save it. + 2. 
Identify the files changed in the pull request ($PR_URL) using the pr number and perform a diff. + 1. Analyze the complete code of each identified file and perform a detailed line by line code review. + 2. Repeat the process for each changed file in the pr. + 3. Share your review comments separately for each file. + 4. In a new line write "Code: Approved" or "Code: Require Changes" based on the review. + --- + Name: learn-gh + Description: A tool to help you learn gh cli + + #!/usr/bin/env bash + + echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicated --sort flag." + gh --help + gh repo --help + gh pr --help + gh pr checkout --help + gh pr diff --help \ No newline at end of file diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index b0a6e4eb..43c1da99 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -207,6 +207,11 @@ func NewContext(ctx context.Context, prg *types.Program, input string) (Context, } callCtx.AgentGroup = agentGroup + + if callCtx.Tool.IsAgentsOnly() && len(callCtx.AgentGroup) > 0 { + callCtx.Tool = callCtx.Program.ToolSet[callCtx.AgentGroup[0].ToolID] + } + return callCtx, nil } diff --git a/pkg/input/input.go b/pkg/input/input.go index 0037fa5e..3d480431 100644 --- a/pkg/input/input.go +++ b/pkg/input/input.go @@ -3,10 +3,12 @@ package input import ( "fmt" "io" + "io/fs" "os" "path/filepath" "strings" + "github.com/gptscript-ai/gptscript/internal" "github.com/gptscript-ai/gptscript/pkg/loader" "github.com/gptscript-ai/gptscript/pkg/types" ) @@ -33,7 +35,7 @@ func FromFile(file string) (string, error) { } return string(data), nil } else if file != "" { - if s, err := os.Stat(file); err == nil && s.IsDir() { + if s, err := fs.Stat(internal.FS, file); err == nil && s.IsDir() { for _, ext := range types.DefaultFiles { if _, err := 
os.Stat(filepath.Join(file, ext)); err == nil { file = filepath.Join(file, ext) @@ -42,7 +44,7 @@ func FromFile(file string) (string, error) { } } log.Debugf("reading file %s", file) - data, err := os.ReadFile(file) + data, err := fs.ReadFile(internal.FS, file) if err != nil { return "", fmt.Errorf("reading %s: %w", file, err) } diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index b998320b..22cd5e9e 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -199,6 +199,7 @@ func (c *context) finish(tools *[]Node) { len(c.tool.GlobalTools) > 0 || len(c.tool.ExportInputFilters) > 0 || len(c.tool.ExportOutputFilters) > 0 || + len(c.tool.Agents) > 0 || c.tool.Chat { *tools = append(*tools, Node{ ToolNode: &ToolNode{ diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 6efe4a11..0b75c8a2 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -800,6 +800,26 @@ func TestExport(t *testing.T) { assert.Equal(t, "TEST RESULT CALL: 3", x) } +func TestAgentOnly(t *testing.T) { + r := tester.NewRunner(t) + + prg, err := r.Load("") + require.NoError(t, err) + + r.RespondWith(tester.Result{ + Func: types.CompletionFunctionCall{ + Name: "agent2", + Arguments: "Agent 2 input", + }, + }) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "Input 1") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 2").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) +} func TestAgents(t *testing.T) { r := tester.NewRunner(t) diff --git a/pkg/tests/testdata/TestAgentOnly/call1-resp.golden b/pkg/tests/testdata/TestAgentOnly/call1-resp.golden new file mode 100644 index 00000000..ea865122 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/call1-resp.golden @@ -0,0 +1,16 @@ +`{ + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "agent2", + "arguments": "Agent 2 
input" + } + } + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestAgentOnly/call1.golden b/pkg/tests/testdata/TestAgentOnly/call1.golden new file mode 100644 index 00000000..b63c6fd3 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/call1.golden @@ -0,0 +1,42 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent2", + "name": "agent2", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "I am agent1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git a/pkg/tests/testdata/TestAgentOnly/call2-resp.golden b/pkg/tests/testdata/TestAgentOnly/call2-resp.golden new file mode 100644 index 00000000..997ca1b9 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestAgentOnly/call2.golden b/pkg/tests/testdata/TestAgentOnly/call2.golden new file mode 100644 index 00000000..82f95523 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/call2.golden @@ -0,0 +1,57 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent1", + "name": "agent1", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent3", + "name": "agent3", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "I am agent2" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Agent 2 input" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git a/pkg/tests/testdata/TestAgentOnly/step1.golden b/pkg/tests/testdata/TestAgentOnly/step1.golden new file mode 100644 index 00000000..662dbf04 --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/step1.golden @@ -0,0 +1,168 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 2", + "toolID": "testdata/TestAgentOnly/test.gpt:agent2", + "state": { + "continuation": { + "state": { + "input": "Input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent2", + "name": "agent2", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "I am agent1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "agent2", + "arguments": "Agent 2 input" + } + } + } + ], + "usage": {} + } + ], + "chat": true + }, + "pending": { + "call_1": { + "index": 0, + "id": "call_1", + "function": { + "name": "agent2", + "arguments": "Agent 2 input" + } + } + } + }, + "calls": { + "call_1": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent2", + "input": "Agent 2 input" + } + } + }, + "subCalls": [ + { + "toolId": "testdata/TestAgentOnly/test.gpt:agent2", + "callId": "call_1", + "state": { + "continuation": { + "state": { + "input": "Agent 2 input", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent1", + "name": "agent1", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "testdata/TestAgentOnly/test.gpt:agent3", + "name": "agent3", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the assistant. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "I am agent2" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Agent 2 input" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} + } + ], + "chat": true + } + }, + "result": "TEST RESULT CALL: 2" + }, + "continuationToolID": "testdata/TestAgentOnly/test.gpt:agent2" + } + } + ], + "subCallID": "call_1" + } +}` diff --git a/pkg/tests/testdata/TestAgentOnly/test.gpt b/pkg/tests/testdata/TestAgentOnly/test.gpt new file mode 100644 index 00000000..f2ae9f9e --- /dev/null +++ b/pkg/tests/testdata/TestAgentOnly/test.gpt @@ -0,0 +1,20 @@ +agents: agent1, agent2 + +---- +name: agent1 +chat: true + +I am agent1 + +---- +name: agent2 +chat: true +agents: agent3 + +I am agent2 + +--- +name: agent3 +chat: true + +I am agent3 \ No newline at end of file diff --git a/pkg/types/tool.go b/pkg/types/tool.go index b0af5183..e4b3424a 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -763,6 +763,10 @@ func (t Tool) IsOpenAPI() bool { return strings.HasPrefix(t.Instructions, OpenAPIPrefix) } +func (t Tool) IsAgentsOnly() bool { + return t.IsNoop() && len(t.Context) == 0 +} + func (t Tool) IsEcho() bool { return strings.HasPrefix(t.Instructions, EchoPrefix) } From 1f2d7c7c56cdd88c20b5275932120b106747e28a Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Thu, 11 Jul 2024 11:01:46 -0700 Subject: [PATCH 019/270] chore: update goreleaser Signed-off-by: Taylor Price --- .github/workflows/release.yaml | 2 +- .goreleaser.yml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index d09608e7..10b852cc 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -34,7 +34,7 @@ jobs: uses: 
goreleaser/goreleaser-action@v4 with: distribution: goreleaser - version: v1.23.0 + version: v2.0.1 args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.goreleaser.yml b/.goreleaser.yml index 3f767be0..6695c68a 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,3 +1,4 @@ +version: 2 dist: releases snapshot: name_template: '{{ trimprefix .Summary "v" }}' From 5d7cde648dde0da1ca9d0b6a20c77fa9bcf4dcb0 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Thu, 11 Jul 2024 11:03:21 -0700 Subject: [PATCH 020/270] chore: update goreleaser action Signed-off-by: Taylor Price --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 10b852cc..edc8b2ac 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -31,7 +31,7 @@ jobs: # After the issue is resolved, this can be set to 1.22 go-version: "1.22.4" - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser version: v2.0.1 From d3cf668b59b536ccd8eb19449416ecddf1c9aa2b Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Thu, 11 Jul 2024 11:31:56 -0700 Subject: [PATCH 021/270] fix: replace user PAT with bot PAT Signed-off-by: Taylor Price --- .github/workflows/main.yaml | 2 +- .github/workflows/release.yaml | 2 +- .goreleaser.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 6ea47e8f..f916fdab 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -37,7 +37,7 @@ jobs: args: release --clean --snapshot env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GH_PROJECT_TOKEN: ${{ secrets.GH_PROJECT_TOKEN }} + TAP_GITHUB_TOKEN: ${{ secrets.TAP_GITHUB_TOKEN }} - name: Upload to S3 uses: jakejarvis/s3-sync-action@v0.5.1 env: diff --git a/.github/workflows/release.yaml 
b/.github/workflows/release.yaml index edc8b2ac..bf38499d 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -38,7 +38,7 @@ jobs: args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GH_PROJECT_TOKEN: ${{ secrets.GH_PROJECT_TOKEN }} + TAP_GITHUB_TOKEN: ${{ secrets.TAP_GITHUB_TOKEN }} GORELEASER_CURRENT_TAG: ${{ github.ref_name }} homebrew-release: needs: release-tag diff --git a/.goreleaser.yml b/.goreleaser.yml index 6695c68a..522e3b22 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -64,4 +64,4 @@ brews: repository: owner: gptscript-ai name: homebrew-tap - token: "{{ .Env.GH_PROJECT_TOKEN }}" + token: "{{ .Env.TAP_GITHUB_TOKEN }}" From 0c73f4b85ba75350e5a6fcd88952315bab81dc60 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Thu, 11 Jul 2024 12:19:07 -0700 Subject: [PATCH 022/270] fix: update goreleaser action and version for main workflow Signed-off-by: Taylor Price --- .github/workflows/main.yaml | 4 ++-- .github/workflows/release.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index f916fdab..ae26c52c 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -30,10 +30,10 @@ jobs: # After the issue is resolved, this can be set to 1.22 go-version: "1.22.4" - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser - version: v1.23.0 + version: "~> v2" args: release --clean --snapshot env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index bf38499d..b34c2fad 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -34,7 +34,7 @@ jobs: uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser - version: v2.0.1 + version: "~> v2" args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 
75bd379f453cd3a5ba8ef5b23022edda6f37139f Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 12 Jul 2024 22:05:27 -0700 Subject: [PATCH 023/270] chore: add location option to loading scripts --- pkg/loader/loader.go | 18 ++++++++++++++++-- pkg/sdkserver/routes.go | 2 +- pkg/sdkserver/run.go | 8 ++++++++ pkg/sdkserver/types.go | 1 + 4 files changed, 26 insertions(+), 3 deletions(-) diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index c12f976f..f60f09a1 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -373,12 +373,20 @@ func ProgramFromSource(ctx context.Context, content, subToolName string, opts .. } opt := complete(opts...) + var locationPath, locationName string + if opt.Location != "" { + locationPath = path.Dir(opt.Location) + locationName = path.Base(opt.Location) + } + prg := types.Program{ ToolSet: types.ToolSet{}, } tools, err := readTool(ctx, opt.Cache, &prg, &source{ Content: []byte(content), - Location: "inline", + Path: locationPath, + Name: locationName, + Location: opt.Location, }, subToolName) if err != nil { return types.Program{}, err @@ -388,12 +396,18 @@ func ProgramFromSource(ctx context.Context, content, subToolName string, opts .. 
} type Options struct { - Cache *cache.Client + Cache *cache.Client + Location string } func complete(opts ...Options) (result Options) { for _, opt := range opts { result.Cache = types.FirstSet(opt.Cache, result.Cache) + result.Location = types.FirstSet(opt.Location, result.Location) + } + + if result.Location == "" { + result.Location = "inline" } return diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index c16b4429..e17a2d1a 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -183,7 +183,7 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { logger.Debugf("executing tool: %+v", reqObject) var ( def fmt.Stringer = &reqObject.ToolDefs - programLoader loaderFunc = loader.ProgramFromSource + programLoader = loaderWithLocation(loader.ProgramFromSource, reqObject.Location) ) if reqObject.Content != "" { def = &reqObject.content diff --git a/pkg/sdkserver/run.go b/pkg/sdkserver/run.go index dc155557..0d055614 100644 --- a/pkg/sdkserver/run.go +++ b/pkg/sdkserver/run.go @@ -16,6 +16,14 @@ import ( type loaderFunc func(context.Context, string, string, ...loader.Options) (types.Program, error) +func loaderWithLocation(f loaderFunc, loc string) loaderFunc { + return func(ctx context.Context, s string, s2 string, options ...loader.Options) (types.Program, error) { + return f(ctx, s, s2, append(options, loader.Options{ + Location: loc, + })...) 
+ } +} + func (s *server) execAndStream(ctx context.Context, programLoader loaderFunc, logger mvl.Logger, w http.ResponseWriter, opts gptscript.Options, chatState, input, subTool string, toolDef fmt.Stringer) { g, err := gptscript.New(ctx, s.gptscriptOpts, opts) if err != nil { diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 6f940c8b..478c6565 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -61,6 +61,7 @@ type toolOrFileRequest struct { CredentialContext string `json:"credentialContext"` CredentialOverrides []string `json:"credentialOverrides"` Confirm bool `json:"confirm"` + Location string `json:"location,omitempty"` } type content struct { From 2676b35eff90ea05599c828e10bd9b4f7720668a Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 12 Jul 2024 22:21:47 -0700 Subject: [PATCH 024/270] bug: fix relative references when defaulting files from dirs --- pkg/loader/url.go | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/pkg/loader/url.go b/pkg/loader/url.go index bc4d5c9f..2035469e 100644 --- a/pkg/loader/url.go +++ b/pkg/loader/url.go @@ -111,11 +111,20 @@ func loadURL(ctx context.Context, cache *cache.Client, base *source, name string req.Header.Set("Authorization", "Bearer "+bearerToken) } - data, err := getWithDefaults(req) + data, defaulted, err := getWithDefaults(req) if err != nil { return nil, false, fmt.Errorf("error loading %s: %v", url, err) } + if defaulted != "" { + pathString = url + name = defaulted + if repo != nil { + repo.Path = path.Join(repo.Path, repo.Name) + repo.Name = defaulted + } + } + log.Debugf("opened %s", url) result := &source{ @@ -137,31 +146,32 @@ func loadURL(ctx context.Context, cache *cache.Client, base *source, name string return result, true, nil } -func getWithDefaults(req *http.Request) ([]byte, error) { +func getWithDefaults(req *http.Request) ([]byte, string, error) { originalPath := req.URL.Path // First, try to get the 
original path as is. It might be an OpenAPI definition. resp, err := http.DefaultClient.Do(req) if err != nil { - return nil, err + return nil, "", err } defer resp.Body.Close() if resp.StatusCode == http.StatusOK { - if toolBytes, err := io.ReadAll(resp.Body); err == nil && isOpenAPI(toolBytes) != 0 { - return toolBytes, nil - } + toolBytes, err := io.ReadAll(resp.Body) + return toolBytes, "", err + } + + base := path.Base(originalPath) + if strings.Contains(base, ".") { + return nil, "", fmt.Errorf("error loading %s: %s", req.URL.String(), resp.Status) } for i, def := range types.DefaultFiles { - base := path.Base(originalPath) - if !strings.Contains(base, ".") { - req.URL.Path = path.Join(originalPath, def) - } + req.URL.Path = path.Join(originalPath, def) resp, err := http.DefaultClient.Do(req) if err != nil { - return nil, err + return nil, "", err } defer resp.Body.Close() @@ -170,11 +180,13 @@ func getWithDefaults(req *http.Request) ([]byte, error) { } if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("error loading %s: %s", req.URL.String(), resp.Status) + return nil, "", fmt.Errorf("error loading %s: %s", req.URL.String(), resp.Status) } - return io.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) + return data, def, err } + panic("unreachable") } From 7e3b17632eadfc892e9665e3f859ab37dcbecb7f Mon Sep 17 00:00:00 2001 From: Atulpriya Sharma Date: Mon, 15 Jul 2024 17:52:18 +0530 Subject: [PATCH 025/270] Add GPTReview With GitHub Example --- examples/gptreview-ghaction/README.md | 21 ++++++++ examples/gptreview-ghaction/codereview.gpt | 26 ++++++++++ examples/gptreview-ghaction/workflow.yaml | 57 ++++++++++++++++++++++ 3 files changed, 104 insertions(+) create mode 100644 examples/gptreview-ghaction/README.md create mode 100644 examples/gptreview-ghaction/codereview.gpt create mode 100644 examples/gptreview-ghaction/workflow.yaml diff --git a/examples/gptreview-ghaction/README.md b/examples/gptreview-ghaction/README.md new file mode 100644 
index 00000000..2c71acec --- /dev/null +++ b/examples/gptreview-ghaction/README.md @@ -0,0 +1,21 @@ +# GPTReview + +This folder contains an example of building and implementing your own code reviewer as part of GitHub Actions. + +Below are the files present here: + +- `codereview.gpt`: Contains the GPTScript code and prompts. +- `workflow.yaml`: The workflow file for the GitHub action. + +## Pre-requisites + +- GitHub Account +- OpenAI API Key + +## How To Run This Example + +- Create a new repository in your GitHub account and create a `codereview.gpt` file in that repo based on the contents provided in this file. +- Congfigure a GitHub Action for that repository and copy the contents from `workflow.yaml` to your `main.yaml`. +- Configure your `OPENAI_API_KEY` and `GH_TOKEN` as environment variables. +- Add some code file to the repository and open a new pull request. +- The GitHub Action will trigger and our GPTReview will review your code and provide review comments. diff --git a/examples/gptreview-ghaction/codereview.gpt b/examples/gptreview-ghaction/codereview.gpt new file mode 100644 index 00000000..f2502b50 --- /dev/null +++ b/examples/gptreview-ghaction/codereview.gpt @@ -0,0 +1,26 @@ +Name: Code Reviewer +Description: A tool to help you perform code review of open PRs +Context: learn-gh +Tools: sys.exec, sys.http.html2text?, sys.find, sys.read, sys.write +Args: PR_URL: The GitHub PR_URL + +You have the gh cli available to you. Use it to perform code review for a pr from the $(repo) provided. + +Perform the following steps in order: +1. Identify the files changed in the pull request ($PR_URL) using the pr number and perform a diff. + 1. Analyze the complete code of each identified file and perform a detailed line by line code review. + 2. Repeat the process for each changed file in the pr. +2. Share your review comments separately for each file. +3. In a new line write "Code: Approved" or "Code: Require Changes" based on the review comments. 
+--- +Name: learn-gh +Description: A tool to help you learn gh cli + +#!/usr/bin/env bash + +echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicate --sort flag." +gh --help +gh repo --help +gh pr --help +gh pr checkout --help +gh pr diff --help diff --git a/examples/gptreview-ghaction/workflow.yaml b/examples/gptreview-ghaction/workflow.yaml new file mode 100644 index 00000000..21ade39b --- /dev/null +++ b/examples/gptreview-ghaction/workflow.yaml @@ -0,0 +1,57 @@ +name: PR Review with GPTScript + +on: + pull_request: + types: [opened, synchronize, reopened] + +jobs: + pr_review: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Get PR Details + id: pr_details + run: | + PR_URL=$(jq -r '.pull_request.html_url' $GITHUB_EVENT_PATH) + PR_NUMBER=$(jq -r '.pull_request.number' $GITHUB_EVENT_PATH) + PR_FILES=$(jq -r '.pull_request.changed_files' $GITHUB_EVENT_PATH) + echo "PR_URL=${PR_URL}" >> $GITHUB_ENV + echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_ENV + echo "PR_FILES=${PR_FILES}" >> $GITHUB_ENV + + - name: Install GPTScript + run: curl https://get.gptscript.ai/install.sh | sh + + - name: Run GPTScript for Code Review + id: run_gptscript + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + GH_TOKEN: ${{ secrets.GH_TOKEN }} + run: | + { + echo 'REVIEW<> "$GITHUB_ENV" + + + - name: Post Review Comment + env: + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + run: | + gh pr comment ${{ github.event.pull_request.number }} --body "$REVIEW" + + - name: Set PR Status Fail + if: contains(env.REVIEW, 'Require Changes') + run: | + echo "Code Requires Changes" + exit 1 + + - name: Set PR Status Pass + if: contains(env.REVIEW, 'Approved') + run: | + echo "Code Approved" + From b2b58bb17a2269e94c8d6541dd24dd9ffcbbc8c7 Mon Sep 17 00:00:00 2001 From: Nick Hale 
<4175918+njhale@users.noreply.github.com> Date: Mon, 15 Jul 2024 14:12:46 -0400 Subject: [PATCH 026/270] chore: set the run file env var when starting the ui Set the `UI_RUN_FILE` env var before starting the UI tool when a run file is provided; e.g. `gptscript --ui `. This tells the UI to use the file's run page as the landing page on start. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/cli/gptscript.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 4820efd8..d7d57b36 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -371,7 +371,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { gptOpt.Env = append(gptOpt.Env, "GPTSCRIPT_SDKSERVER_CREDENTIAL_OVERRIDE="+strings.Join(r.CredentialOverride, ",")) } - args = append([]string{args[0]}, "--file="+file) + gptOpt.Env = append(gptOpt.Env, "UI_RUN_FILE="+file) if len(args) > 2 { args = append(args, args[2:]...) From 904e97fdcccc78e76c925912b99750aac38c69d4 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 15 Jul 2024 16:14:33 -0400 Subject: [PATCH 027/270] fix: set common UI env vars regardless of whether a file is passed Signed-off-by: Donnie Adams --- pkg/cli/gptscript.go | 39 ++++++++++++++++++--------------------- 1 file changed, 18 insertions(+), 21 deletions(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index d7d57b36..f539f516 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -332,16 +332,25 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // If the user is trying to launch the chat-builder UI, then set up the tool and options here. if r.UI { - args = append([]string{uiTool()}, args...) 
+ if os.Getenv(system.BinEnvVar) == "" { + gptOpt.Env = append(gptOpt.Env, system.BinEnvVar+"="+system.Bin()) + } + + // Pass the corrected environment variables for SDK server options + if r.DefaultModel != "" { + gptOpt.Env = append(gptOpt.Env, "GPTSCRIPT_SDKSERVER_DEFAULT_MODEL="+r.DefaultModel) + } + if len(r.CredentialOverride) > 0 { + gptOpt.Env = append(gptOpt.Env, "GPTSCRIPT_SDKSERVER_CREDENTIAL_OVERRIDE="+strings.Join(r.CredentialOverride, ",")) + } // If args has more than one element, then the user has provided a file. - if len(args) > 1 { - if args[1] == "-" { + if len(args) > 0 { + file := args[0] + if file == "-" { return fmt.Errorf("chat UI only supports files, cannot read from stdin") } - file := args[1] - // If the file is external, then set the SCRIPTS_PATH to the current working directory. Otherwise, // set it to the directory of the script and set the file to the base. if !(strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "github.com")) { @@ -359,23 +368,9 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { gptOpt.Env = append(gptOpt.Env, "SCRIPTS_PATH="+cwd) } - if os.Getenv(system.BinEnvVar) == "" { - gptOpt.Env = append(gptOpt.Env, system.BinEnvVar+"="+system.Bin()) - } - - // Pass the corrected environment variables for SDK server options - if r.DefaultModel != "" { - gptOpt.Env = append(gptOpt.Env, "GPTSCRIPT_SDKSERVER_DEFAULT_MODEL="+r.DefaultModel) - } - if len(r.CredentialOverride) > 0 { - gptOpt.Env = append(gptOpt.Env, "GPTSCRIPT_SDKSERVER_CREDENTIAL_OVERRIDE="+strings.Join(r.CredentialOverride, ",")) - } - gptOpt.Env = append(gptOpt.Env, "UI_RUN_FILE="+file) - - if len(args) > 2 { - args = append(args, args[2:]...) - } + // Remove the file from args because the above line will pass it to the UI tool. 
+ args = args[1:] } else { cwd, err := os.Getwd() if err != nil { @@ -386,6 +381,8 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // The UI must run in daemon mode. r.Daemon = true + // Use the UI tool as the first argument. + args = append([]string{uiTool()}, args...) } ctx := cmd.Context() From 5fb849fa33d27fac1771c5fee8219a26f6c5a32e Mon Sep 17 00:00:00 2001 From: Atulpriya Sharma Date: Tue, 16 Jul 2024 05:35:20 +0530 Subject: [PATCH 028/270] Fixed review comments --- examples/gptreview-ghaction/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/gptreview-ghaction/README.md b/examples/gptreview-ghaction/README.md index 2c71acec..fe793a92 100644 --- a/examples/gptreview-ghaction/README.md +++ b/examples/gptreview-ghaction/README.md @@ -14,8 +14,8 @@ Below are the files present here: ## How To Run This Example -- Create a new repository in your GitHub account and create a `codereview.gpt` file in that repo based on the contents provided in this file. -- Congfigure a GitHub Action for that repository and copy the contents from `workflow.yaml` to your `main.yaml`. -- Configure your `OPENAI_API_KEY` and `GH_TOKEN` as environment variables. -- Add some code file to the repository and open a new pull request. +- Create a new repository in your GitHub account and create a `codereview.gpt` file in the root of that repo based on the contents provided in this file. +- Congfigure a GitHub Action for that repository. To do so, navigate to the "Actions" tab and then click on "setup a workflow yourself" link. This will create a new `main.yaml` inside `.github/workflows` path. Copy the contents from `workflow.yaml` to your `main.yaml`. +- Configure your `OPENAI_API_KEY` and `GH_TOKEN` as environment variables in your GitHub repo. 
Refer to [these steps](https://docs.github.com/en/actions/learn-github-actions/variables#creating-configuration-variables-for-a-repository) to create environment variables for your repository. +- Create a new branch, and add some code file to the repository and open a new pull request. - The GitHub Action will trigger and our GPTReview will review your code and provide review comments. From 5d9dfb48db6bf08ea1670c8146f29893a63ddaee Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 15 Jul 2024 22:37:29 -0700 Subject: [PATCH 029/270] chore: improve reliability with bad tool names --- go.mod | 4 ++-- go.sum | 8 ++++---- pkg/engine/engine.go | 2 +- pkg/openai/client.go | 3 ++- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index fe1b3fa6..f0213f7e 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d - github.com/gptscript-ai/tui v0.0.0-20240702222655-901e7ec1faf5 + github.com/gptscript-ai/tui v0.0.0-20240716053605-ecddbcf60eac github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 @@ -63,7 +63,7 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/gookit/color v1.5.4 // indirect github.com/gorilla/css v1.0.0 // indirect - github.com/gptscript-ai/go-gptscript v0.9.1 // indirect + github.com/gptscript-ai/go-gptscript v0.9.3-0.20240715172623-8176fb20c5cb // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hexops/autogold v1.3.1 // indirect diff --git a/go.sum b/go.sum index 9f29c3e4..598a161e 100644 --- a/go.sum +++ b/go.sum @@ -171,10 +171,10 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf037 
github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6AVbvD9pKDVewykkwSAPwEpmIEQIR/4= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.1 h1:O9oSmYvzQ2GZkPfDhXpiMGdtO9BMCVGeWLdJH88AJzg= -github.com/gptscript-ai/go-gptscript v0.9.1/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240702222655-901e7ec1faf5 h1:qCNJVFDHT2p0cuLo920DmUQoUngAuXGgqldYNwmC/R0= -github.com/gptscript-ai/tui v0.0.0-20240702222655-901e7ec1faf5/go.mod h1:mYzM8AwIiAdImy2g0BsdTPPuSbsONTMw5GIHDc/2o7g= +github.com/gptscript-ai/go-gptscript v0.9.3-0.20240715172623-8176fb20c5cb h1:xeSbO4mLYnoTg7diNW0tpxY/0yDSSdgjohMzwE4Za6k= +github.com/gptscript-ai/go-gptscript v0.9.3-0.20240715172623-8176fb20c5cb/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= +github.com/gptscript-ai/tui v0.0.0-20240716053605-ecddbcf60eac h1:zZ993dp2mx/63JD4THwMeBcn3C8SogcLeQRJUZsMSM4= +github.com/gptscript-ai/tui v0.0.0-20240716053605-ecddbcf60eac/go.mod h1:Ex2xQMzTMfb5UgLz9rctATPps8DnfPeJQh8o/AiQCoE= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 43c1da99..250e9578 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -389,7 +389,7 @@ func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { if content.ToolCall != nil { var toolID string for _, tool := range state.Completion.Tools { - if tool.Function.Name == content.ToolCall.Function.Name { + if strings.EqualFold(tool.Function.Name, 
content.ToolCall.Function.Name) { toolID = tool.Function.ToolID } } diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 4ade654a..53252895 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -450,8 +450,9 @@ func appendMessage(msg types.CompletionMessage, response openai.ChatCompletionSt if tc.ToolCall.Function.Name != tool.Function.Name { tc.ToolCall.Function.Name += tool.Function.Name } - // OpenAI like to sometimes add this prefix for no good reason + // OpenAI like to sometimes add these prefix because it's confused tc.ToolCall.Function.Name = strings.TrimPrefix(tc.ToolCall.Function.Name, "namespace.") + tc.ToolCall.Function.Name = strings.TrimPrefix(tc.ToolCall.Function.Name, "@") tc.ToolCall.Function.Arguments += tool.Function.Arguments msg.Content[idx] = tc From 87e4c801e15fbef8722d60a9fb4ba8f07e60c1f5 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 16 Jul 2024 16:07:10 -0400 Subject: [PATCH 030/270] fix: respect absolute paths for loading local tools Signed-off-by: Donnie Adams --- pkg/loader/loader.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index f60f09a1..d7634058 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -72,7 +72,11 @@ func openFile(path string) (io.ReadCloser, bool, error) { func loadLocal(base *source, name string) (*source, bool, error) { // We want to keep all strings in / format, and only convert to platform specific when reading - filePath := path.Join(base.Path, name) + // This is why we use path instead of filepath. 
+ filePath := name + if !path.IsAbs(name) { + filePath = path.Join(base.Path, name) + } if s, err := fs.Stat(internal.FS, filepath.Clean(filePath)); err == nil && s.IsDir() { for _, def := range types.DefaultFiles { From c2d65e2c0e051d19687514c87b1572ab4143b694 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Tue, 16 Jul 2024 15:38:12 -0700 Subject: [PATCH 031/270] chore: strip leading and trailing whitespace characters from credential values Signed-off-by: Taylor Price --- pkg/credentials/credential.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/credentials/credential.go b/pkg/credentials/credential.go index c81adf2b..605208a0 100644 --- a/pkg/credentials/credential.go +++ b/pkg/credentials/credential.go @@ -35,6 +35,9 @@ func (c Credential) IsExpired() bool { } func (c Credential) toDockerAuthConfig() (types.AuthConfig, error) { + for k, v := range c.Env { + c.Env[k] = strings.TrimSpace(v) + } cred, err := json.Marshal(c) if err != nil { return types.AuthConfig{}, err From ecca52cd7e2a08f87d3465ba878b1839d1f41b7f Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Tue, 16 Jul 2024 16:10:06 -0700 Subject: [PATCH 032/270] fix: expand tilde and relative paths to be absolute for cache-dir Signed-off-by: Taylor Price --- pkg/cache/cache.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index bc499aef..031bd166 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -9,7 +9,9 @@ import ( "errors" "io/fs" "os" + "os/user" "path/filepath" + "strings" "github.com/adrg/xdg" "github.com/getkin/kin-openapi/openapi3" @@ -40,10 +42,28 @@ func Complete(opts ...Options) (result Options) { } if result.CacheDir == "" { result.CacheDir = filepath.Join(xdg.CacheHome, version.ProgramName) + } else if !filepath.IsAbs(result.CacheDir) { + var err error + result.CacheDir, err = makeAbsolute(result.CacheDir) + if err != nil { + result.CacheDir = filepath.Join(xdg.CacheHome, version.ProgramName) + } } return 
} +func makeAbsolute(path string) (string, error) { + if strings.HasPrefix(path, "~"+string(filepath.Separator)) { + usr, err := user.Current() + if err != nil { + return "", err + } + + return filepath.Join(usr.HomeDir, path[2:]), nil + } + return filepath.Abs(path) +} + type noCacheKey struct{} func IsNoCache(ctx context.Context) bool { From 3055632582ead7338a322b209da0921c717cc7d1 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 17 Jul 2024 14:52:21 -0400 Subject: [PATCH 033/270] enhance: share credential (#634) Signed-off-by: Grant Linville --- integration/cred_test.go | 16 +++ integration/helpers.go | 4 + integration/scripts/credscopes.gpt | 160 +++++++++++++++++++++++++++++ pkg/parser/parser.go | 2 + pkg/runner/runner.go | 41 ++++---- pkg/types/tool.go | 24 +++++ 6 files changed, 228 insertions(+), 19 deletions(-) create mode 100644 integration/scripts/credscopes.gpt diff --git a/integration/cred_test.go b/integration/cred_test.go index b92ccd55..67298ef8 100644 --- a/integration/cred_test.go +++ b/integration/cred_test.go @@ -11,3 +11,19 @@ func TestGPTScriptCredential(t *testing.T) { require.NoError(t, err) require.Contains(t, out, "CREDENTIAL") } + +// TestCredentialScopes makes sure that environment variables set by credential tools and shared credential tools +// are only available to the correct tools. See scripts/credscopes.gpt for more details. 
+func TestCredentialScopes(t *testing.T) { + out, err := RunScript("scripts/credscopes.gpt", "--sub-tool", "oneOne") + require.NoError(t, err) + require.Contains(t, out, "good") + + out, err = RunScript("scripts/credscopes.gpt", "--sub-tool", "twoOne") + require.NoError(t, err) + require.Contains(t, out, "good") + + out, err = RunScript("scripts/credscopes.gpt", "--sub-tool", "twoTwo") + require.NoError(t, err) + require.Contains(t, out, "good") +} diff --git a/integration/helpers.go b/integration/helpers.go index 8af581c3..33304676 100644 --- a/integration/helpers.go +++ b/integration/helpers.go @@ -14,3 +14,7 @@ func GPTScriptExec(args ...string) (string, error) { out, err := cmd.CombinedOutput() return string(out), err } + +func RunScript(script string, options ...string) (string, error) { + return GPTScriptExec(append(options, "--quiet", script)...) +} diff --git a/integration/scripts/credscopes.gpt b/integration/scripts/credscopes.gpt new file mode 100644 index 00000000..7319f163 --- /dev/null +++ b/integration/scripts/credscopes.gpt @@ -0,0 +1,160 @@ +# This script sets up a chain of tools in a tree structure. +# The root is oneOne, with children twoOne and twoTwo, with children threeOne, threeTwo, and threeThree, with only +# threeTwo shared between them. +# Each tool should only have access to any credentials it defines and any credentials exported/shared by its +# immediate children (but not grandchildren). +# This script checks to make sure that this is working properly. 
+name: oneOne +tools: twoOne, twoTwo +cred: getcred with oneOne as var and 11 as val + +#!python3 + +import os + +oneOne = os.getenv('oneOne') +twoOne = os.getenv('twoOne') +twoTwo = os.getenv('twoTwo') +threeOne = os.getenv('threeOne') +threeTwo = os.getenv('threeTwo') +threeThree = os.getenv('threeThree') + +if oneOne != '11': + print('error: oneOne is not 11') + exit(1) + +if twoOne != '21': + print('error: twoOne is not 21') + exit(1) + +if twoTwo != '22': + print('error: twoTwo is not 22') + exit(1) + +if threeOne is not None: + print('error: threeOne is not None') + exit(1) + +if threeTwo is not None: + print('error: threeTwo is not None') + exit(1) + +if threeThree is not None: + print('error: threeThree is not None') + exit(1) + +print('good') + +--- +name: twoOne +tools: threeOne, threeTwo +sharecred: getcred with twoOne as var and 21 as val + +#!python3 + +import os + +oneOne = os.getenv('oneOne') +twoOne = os.getenv('twoOne') +twoTwo = os.getenv('twoTwo') +threeOne = os.getenv('threeOne') +threeTwo = os.getenv('threeTwo') +threeThree = os.getenv('threeThree') + +if oneOne is not None: + print('error: oneOne is not None') + exit(1) + +if twoOne is not None: + print('error: twoOne is not None') + exit(1) + +if twoTwo is not None: + print('error: twoTwo is not None') + exit(1) + +if threeOne != '31': + print('error: threeOne is not 31') + exit(1) + +if threeTwo != '32': + print('error: threeTwo is not 32') + exit(1) + +if threeThree is not None: + print('error: threeThree is not None') + exit(1) + +print('good') + +--- +name: twoTwo +tools: threeTwo, threeThree +sharecred: getcred with twoTwo as var and 22 as val + +#!python3 + +import os + +oneOne = os.getenv('oneOne') +twoOne = os.getenv('twoOne') +twoTwo = os.getenv('twoTwo') +threeOne = os.getenv('threeOne') +threeTwo = os.getenv('threeTwo') +threeThree = os.getenv('threeThree') + +if oneOne is not None: + print('error: oneOne is not None') + exit(1) + +if twoOne is not None: + print('error: twoOne is 
not None') + exit(1) + +if twoTwo is not None: + print('error: twoTwo is not None') + exit(1) + +if threeOne is not None: + print('error: threeOne is not None') + exit(1) + +if threeTwo != '32': + print('error: threeTwo is not 32') + exit(1) + +if threeThree != '33': + print('error: threeThree is not 33') + exit(1) + +print('good') + +--- +name: threeOne +sharecred: getcred with threeOne as var and 31 as val + +--- +name: threeTwo +sharecred: getcred with threeTwo as var and 32 as val + +--- +name: threeThree +sharecred: getcred with threeThree as var and 33 as val + +--- +name: getcred + +#!python3 + +import os +import json + +var = os.getenv('var') +val = os.getenv('val') + +output = { + "env": { + var: val + } +} +print(json.dumps(output)) diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index 22cd5e9e..f7c750c1 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -148,6 +148,8 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { } case "credentials", "creds", "credential", "cred": tool.Parameters.Credentials = append(tool.Parameters.Credentials, value) + case "sharecredentials", "sharecreds", "sharecredential", "sharecred": + tool.Parameters.ExportCredentials = append(tool.Parameters.ExportCredentials, value) default: return false, nil } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index cc5a3927..36bac826 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -419,9 +419,13 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en return nil, err } - if len(callCtx.Tool.Credentials) > 0 { + credTools, err := callCtx.Tool.GetCredentialTools(*callCtx.Program, callCtx.AgentGroup) + if err != nil { + return nil, err + } + if len(credTools) > 0 { var err error - env, err = r.handleCredentials(callCtx, monitor, env) + env, err = r.handleCredentials(callCtx, monitor, env, credTools) if err != nil { return nil, err } @@ -552,9 +556,13 @@ func (r *Runner) resume(callCtx engine.Context, 
monitor Monitor, env []string, s progress, progressClose := streamProgress(&callCtx, monitor) defer progressClose() - if len(callCtx.Tool.Credentials) > 0 { + credTools, err := callCtx.Tool.GetCredentialTools(*callCtx.Program, callCtx.AgentGroup) + if err != nil { + return nil, err + } + if len(credTools) > 0 { var err error - env, err = r.handleCredentials(callCtx, monitor, env) + env, err = r.handleCredentials(callCtx, monitor, env, credTools) if err != nil { return nil, err } @@ -828,7 +836,7 @@ func getEventContent(content string, callCtx engine.Context) string { return content } -func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env []string) ([]string, error) { +func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env []string, credToolRefs []types.ToolReference) ([]string, error) { // Since credential tools (usually) prompt the user, we want to only run one at a time. r.credMutex.Lock() defer r.credMutex.Unlock() @@ -845,10 +853,10 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } } - for _, credToolName := range callCtx.Tool.Credentials { - toolName, credentialAlias, args, err := types.ParseCredentialArgs(credToolName, callCtx.Input) + for _, ref := range credToolRefs { + toolName, credentialAlias, args, err := types.ParseCredentialArgs(ref.Reference, callCtx.Input) if err != nil { - return nil, fmt.Errorf("failed to parse credential tool %q: %w", credToolName, err) + return nil, fmt.Errorf("failed to parse credential tool %q: %w", ref.Reference, err) } credName := toolName @@ -895,11 +903,6 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env // If the credential doesn't already exist in the store, run the credential tool in order to get the value, // and save it in the store. 
if !exists || c.IsExpired() { - credToolRefs, ok := callCtx.Tool.ToolMapping[credToolName] - if !ok || len(credToolRefs) != 1 { - return nil, fmt.Errorf("failed to find ID for tool %s", credToolName) - } - // If the existing credential is expired, we need to provide it to the cred tool through the environment. if exists && c.IsExpired() { credJSON, err := json.Marshal(c) @@ -914,22 +917,22 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env if args != nil { inputBytes, err := json.Marshal(args) if err != nil { - return nil, fmt.Errorf("failed to marshal args for tool %s: %w", credToolName, err) + return nil, fmt.Errorf("failed to marshal args for tool %s: %w", ref.Reference, err) } input = string(inputBytes) } - res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, credToolRefs[0].ToolID, input, "", engine.CredentialToolCategory) + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, ref.ToolID, input, "", engine.CredentialToolCategory) if err != nil { - return nil, fmt.Errorf("failed to run credential tool %s: %w", credToolName, err) + return nil, fmt.Errorf("failed to run credential tool %s: %w", ref.Reference, err) } if res.Result == nil { - return nil, fmt.Errorf("invalid state: credential tool [%s] can not result in a continuation", credToolName) + return nil, fmt.Errorf("invalid state: credential tool [%s] can not result in a continuation", ref.Reference) } if err := json.Unmarshal([]byte(*res.Result), &c); err != nil { - return nil, fmt.Errorf("failed to unmarshal credential tool %s response: %w", credToolName, err) + return nil, fmt.Errorf("failed to unmarshal credential tool %s response: %w", ref.Reference, err) } c.ToolName = credName c.Type = credentials.CredentialTypeTool @@ -943,7 +946,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } // Only store the credential if the tool is on GitHub or has an alias, and the credential is non-empty. 
- if (isGitHubTool(toolName) && callCtx.Program.ToolSet[credToolRefs[0].ToolID].Source.Repo != nil) || credentialAlias != "" { + if (isGitHubTool(toolName) && callCtx.Program.ToolSet[ref.ToolID].Source.Repo != nil) || credentialAlias != "" { if isEmpty { log.Warnf("Not saving empty credential for tool %s", toolName) } else if err := r.credStore.Add(callCtx.Ctx, *c); err != nil { diff --git a/pkg/types/tool.go b/pkg/types/tool.go index e4b3424a..ad483984 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -139,6 +139,7 @@ type Parameters struct { Export []string `json:"export,omitempty"` Agents []string `json:"agents,omitempty"` Credentials []string `json:"credentials,omitempty"` + ExportCredentials []string `json:"exportCredentials,omitempty"` InputFilters []string `json:"inputFilters,omitempty"` ExportInputFilters []string `json:"exportInputFilters,omitempty"` OutputFilters []string `json:"outputFilters,omitempty"` @@ -154,6 +155,7 @@ func (p Parameters) ToolRefNames() []string { p.ExportContext, p.Context, p.Credentials, + p.ExportCredentials, p.InputFilters, p.ExportInputFilters, p.OutputFilters, @@ -466,6 +468,11 @@ func (t ToolDef) String() string { _, _ = fmt.Fprintf(buf, "Credential: %s\n", cred) } } + if len(t.Parameters.ExportCredentials) > 0 { + for _, exportCred := range t.Parameters.ExportCredentials { + _, _ = fmt.Fprintf(buf, "Share Credential: %s\n", exportCred) + } + } if t.Parameters.Chat { _, _ = fmt.Fprintf(buf, "Chat: true\n") } @@ -675,6 +682,23 @@ func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference) ([] return result.List() } +func (t Tool) GetCredentialTools(prg Program, agentGroup []ToolReference) ([]ToolReference, error) { + result := toolRefSet{} + + result.AddAll(t.GetToolRefsFromNames(t.Credentials)) + + toolRefs, err := t.getCompletionToolRefs(prg, agentGroup) + if err != nil { + return nil, err + } + for _, toolRef := range toolRefs { + referencedTool := prg.ToolSet[toolRef.ToolID] + 
result.AddAll(referencedTool.GetToolRefsFromNames(referencedTool.ExportCredentials)) + } + + return result.List() +} + func toolRefsToCompletionTools(completionTools []ToolReference, prg Program) (result []CompletionTool) { toolNames := map[string]struct{}{} From 19a51890efe613918d573d91ee268797c2c7ad2f Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 19 Jul 2024 10:46:26 -0700 Subject: [PATCH 034/270] chore: return missing tool call to LLM, don't fail --- pkg/engine/engine.go | 24 +++++--- pkg/runner/runner.go | 12 ++++ pkg/tests/runner_test.go | 15 +++++ .../TestMissingTool/call1-resp.golden | 14 +++++ .../testdata/TestMissingTool/call1.golden | 32 ++++++++++ .../TestMissingTool/call2-resp.golden | 9 +++ .../testdata/TestMissingTool/call2.golden | 61 +++++++++++++++++++ pkg/tests/testdata/TestMissingTool/test.gpt | 10 +++ pkg/tests/tester/runner.go | 15 ++++- 9 files changed, 181 insertions(+), 11 deletions(-) create mode 100644 pkg/tests/testdata/TestMissingTool/call1-resp.golden create mode 100644 pkg/tests/testdata/TestMissingTool/call1.golden create mode 100644 pkg/tests/testdata/TestMissingTool/call2-resp.golden create mode 100644 pkg/tests/testdata/TestMissingTool/call2.golden create mode 100644 pkg/tests/testdata/TestMissingTool/test.gpt diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 250e9578..0ea72ff6 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -45,8 +45,9 @@ type Return struct { } type Call struct { - ToolID string `json:"toolID,omitempty"` - Input string `json:"input,omitempty"` + Missing bool `json:"missing,omitempty"` + ToolID string `json:"toolID,omitempty"` + Input string `json:"input,omitempty"` } type CallResult struct { @@ -216,10 +217,7 @@ func NewContext(ctx context.Context, prg *types.Program, input string) (Context, } func (c *Context) SubCallContext(ctx context.Context, input, toolID, callID string, toolCategory ToolCategory) (Context, error) { - tool, ok := c.Program.ToolSet[toolID] - if !ok { - 
return Context{}, fmt.Errorf("failed to file tool for id [%s]", toolID) - } + tool := c.Program.ToolSet[toolID] if callID == "" { callID = counter.Next() @@ -387,19 +385,25 @@ func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { state.Pending = map[string]types.CompletionToolCall{} for _, content := range resp.Content { if content.ToolCall != nil { - var toolID string + var ( + toolID string + missing bool + ) for _, tool := range state.Completion.Tools { if strings.EqualFold(tool.Function.Name, content.ToolCall.Function.Name) { toolID = tool.Function.ToolID } } if toolID == "" { - return nil, fmt.Errorf("failed to find tool id for tool %s in tool_call result", content.ToolCall.Function.Name) + log.Debugf("failed to find tool id for tool %s in tool_call result", content.ToolCall.Function.Name) + toolID = content.ToolCall.Function.Name + missing = true } state.Pending[content.ToolCall.ID] = *content.ToolCall ret.Calls[content.ToolCall.ID] = Call{ - ToolID: toolID, - Input: content.ToolCall.Function.Arguments, + ToolID: toolID, + Missing: missing, + Input: content.ToolCall.Function.Arguments, } } else { cp := content.Text diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 36bac826..9e8695a7 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -802,6 +802,18 @@ func (r *Runner) subCalls(callCtx engine.Context, monitor Monitor, env []string, for _, id := range ids { call := state.Continuation.Calls[id] + if call.Missing { + resultLock.Lock() + callResults = append(callResults, SubCallResult{ + ToolID: call.ToolID, + CallID: id, + State: &State{ + Result: &[]string{fmt.Sprintf("ERROR: can not call unknown tool named [%s]", call.ToolID)}[0], + }, + }) + resultLock.Unlock() + continue + } d.Run(func(ctx context.Context) error { result, err := r.subCall(ctx, callCtx, monitor, env, call.ToolID, call.Input, id, toolCategory) if err != nil { diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 
0b75c8a2..a38de6a2 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -948,3 +948,18 @@ func TestSysContext(t *testing.T) { require.Len(t, context.Call.AgentGroup, 1) assert.Equal(t, context.Call.AgentGroup[0].Named, "iAmSuperman") } + +func TestMissingTool(t *testing.T) { + r := tester.NewRunner(t) + + r.RespondWith(tester.Result{ + Func: types.CompletionFunctionCall{ + Name: "not bob", + }, + }) + + resp, err := r.Run("", "Input 1") + require.NoError(t, err) + r.AssertResponded(t) + autogold.Expect("TEST RESULT CALL: 2").Equal(t, resp) +} diff --git a/pkg/tests/testdata/TestMissingTool/call1-resp.golden b/pkg/tests/testdata/TestMissingTool/call1-resp.golden new file mode 100644 index 00000000..c9799ee8 --- /dev/null +++ b/pkg/tests/testdata/TestMissingTool/call1-resp.golden @@ -0,0 +1,14 @@ +`{ + "role": "assistant", + "content": [ + { + "toolCall": { + "id": "call_1", + "function": { + "name": "not bob" + } + } + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestMissingTool/call1.golden b/pkg/tests/testdata/TestMissingTool/call1.golden new file mode 100644 index 00000000..f1bcc4f0 --- /dev/null +++ b/pkg/tests/testdata/TestMissingTool/call1.golden @@ -0,0 +1,32 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestMissingTool/test.gpt:Bob", + "name": "Bob", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Call tool Bob" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestMissingTool/call2-resp.golden b/pkg/tests/testdata/TestMissingTool/call2-resp.golden new file mode 100644 index 00000000..997ca1b9 --- /dev/null +++ b/pkg/tests/testdata/TestMissingTool/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} +}` diff --git 
a/pkg/tests/testdata/TestMissingTool/call2.golden b/pkg/tests/testdata/TestMissingTool/call2.golden new file mode 100644 index 00000000..2fe99e81 --- /dev/null +++ b/pkg/tests/testdata/TestMissingTool/call2.golden @@ -0,0 +1,61 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestMissingTool/test.gpt:Bob", + "name": "Bob", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Call tool Bob" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "Input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "id": "call_1", + "function": { + "name": "not bob" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "ERROR: can not call unknown tool named [not bob]" + } + ], + "toolCall": { + "id": "call_1", + "function": { + "name": "not bob" + } + }, + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestMissingTool/test.gpt b/pkg/tests/testdata/TestMissingTool/test.gpt new file mode 100644 index 00000000..2613ffd2 --- /dev/null +++ b/pkg/tests/testdata/TestMissingTool/test.gpt @@ -0,0 +1,10 @@ +tools: Bob + +Call tool Bob + +--- +name: Bob + +#!sys.echo + +You called? 
\ No newline at end of file diff --git a/pkg/tests/tester/runner.go b/pkg/tests/tester/runner.go index fe21ba92..775f0248 100644 --- a/pkg/tests/tester/runner.go +++ b/pkg/tests/tester/runner.go @@ -104,7 +104,20 @@ func (c *Client) Call(_ context.Context, messageRequest types.CompletionRequest, } if result.Func.Name != "" { - c.t.Fatalf("failed to find tool %s", result.Func.Name) + return &types.CompletionMessage{ + Role: types.CompletionMessageRoleTypeAssistant, + Content: []types.ContentPart{ + { + ToolCall: &types.CompletionToolCall{ + ID: fmt.Sprintf("call_%d", c.id), + Function: types.CompletionFunctionCall{ + Name: result.Func.Name, + Arguments: result.Func.Arguments, + }, + }, + }, + }, + }, nil } return &types.CompletionMessage{ From 3c53e2aa3f4cae850557ef231c4949fd43750f21 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 19 Jul 2024 21:54:01 -0400 Subject: [PATCH 035/270] feat: improve SDK server start up Additionally, this change includes a way to run the server embeddedly in another process that may use stdin. Signed-off-by: Donnie Adams --- pkg/cli/sdk_server.go | 4 +-- pkg/sdkserver/server.go | 74 +++++++++++++++++++++++++++++------------ 2 files changed, 54 insertions(+), 24 deletions(-) diff --git a/pkg/cli/sdk_server.go b/pkg/cli/sdk_server.go index a2ac8488..c9cf480f 100644 --- a/pkg/cli/sdk_server.go +++ b/pkg/cli/sdk_server.go @@ -29,11 +29,11 @@ func (c *SDKServer) Run(cmd *cobra.Command, _ []string) error { // Don't use cmd.Context() as we don't want to die on ctrl+c ctx := context.Background() if term.IsTerminal(int(os.Stdin.Fd())) { - // Only support CTRL+C if stdin is the terminal. When ran as a SDK it will be a pipe + // Only support CTRL+C if stdin is the terminal. 
When ran as an SDK it will be a pipe ctx = cmd.Context() } - return sdkserver.Start(ctx, sdkserver.Options{ + return sdkserver.Run(ctx, sdkserver.Options{ Options: opts, ListenAddress: c.ListenAddress, Debug: c.Debug, diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 4556f69e..d0ca5a5f 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "log/slog" "net" "net/http" "os" @@ -29,7 +28,18 @@ type Options struct { Debug bool } -func Start(ctx context.Context, opts Options) error { +// Run will start the server and block until the server is shut down. +func Run(ctx context.Context, opts Options) error { + listener, err := newListener(opts) + if err != nil { + return err + } + + _, err = io.WriteString(os.Stderr, listener.Addr().String()+"\n") + if err != nil { + return fmt.Errorf("failed to write to address to stderr: %w", err) + } + sigCtx, cancel := signal.NotifyContext(ctx, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGKILL) defer cancel() go func() { @@ -40,6 +50,34 @@ func Start(ctx context.Context, opts Options) error { cancel() }() + return run(sigCtx, listener, opts) +} + +// EmbeddedStart allows running the server as an embedded process that may use Stdin for input. +// It returns the address the server is listening on. 
+func EmbeddedStart(ctx context.Context, opts Options) (string, error) { + listener, err := newListener(opts) + if err != nil { + return "", err + } + + go func() { + _ = run(ctx, listener, opts) + }() + + return listener.Addr().String(), nil +} + +func (s *server) close() { + s.client.Close(true) + s.events.Close() +} + +func newListener(opts Options) (net.Listener, error) { + return net.Listen("tcp", opts.ListenAddress) +} + +func run(ctx context.Context, listener net.Listener, opts Options) error { if opts.Debug { mvl.SetDebug() } @@ -58,11 +96,6 @@ func Start(ctx context.Context, opts Options) error { return err } - listener, err := net.Listen("tcp", opts.ListenAddress) - if err != nil { - return fmt.Errorf("failed to listen on %s: %w", opts.ListenAddress, err) - } - s := &server{ gptscriptOpts: opts.Options, address: listener.Addr().String(), @@ -72,11 +105,11 @@ func Start(ctx context.Context, opts Options) error { waitingToConfirm: make(map[string]chan runner.AuthorizerResponse), waitingToPrompt: make(map[string]chan map[string]string), } - defer s.Close() + defer s.close() s.addRoutes(http.DefaultServeMux) - server := http.Server{ + httpServer := &http.Server{ Handler: apply(http.DefaultServeMux, contentType("application/json"), addRequestID, @@ -86,25 +119,22 @@ func Start(ctx context.Context, opts Options) error { ), } - slog.Info("Starting server", "addr", s.address) - - context.AfterFunc(sigCtx, func() { - ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + logger := mvl.Package() + done := make(chan struct{}) + context.AfterFunc(ctx, func() { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) defer cancel() - slog.Info("Shutting down server") - _ = server.Shutdown(ctx) - slog.Info("Server stopped") + logger.Infof("Shutting down server") + _ = httpServer.Shutdown(ctx) + logger.Infof("Server stopped") + close(done) }) - if err := server.Serve(listener); err != nil && !errors.Is(err, http.ErrServerClosed) { + if err = 
httpServer.Serve(listener); !errors.Is(err, http.ErrServerClosed) { return fmt.Errorf("server error: %w", err) } + <-done return nil } - -func (s *server) Close() { - s.client.Close(true) - s.events.Close() -} From f602040671a1f967c72b8598b838a6c7d2b9f211 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 19 Jul 2024 10:45:50 -0700 Subject: [PATCH 036/270] chore: already cached performance improvements --- go.mod | 4 ++-- go.sum | 8 ++++---- pkg/engine/cmd.go | 2 +- pkg/env/env.go | 4 ++++ pkg/llm/registry.go | 29 +++++++++++++++++++++++++++++ pkg/loader/url.go | 19 ++++++++++++++++++- 6 files changed, 58 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index f0213f7e..1e379045 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d - github.com/gptscript-ai/tui v0.0.0-20240716053605-ecddbcf60eac + github.com/gptscript-ai/tui v0.0.0-20240722014329-d50b5ac5db74 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 @@ -63,7 +63,7 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/gookit/color v1.5.4 // indirect github.com/gorilla/css v1.0.0 // indirect - github.com/gptscript-ai/go-gptscript v0.9.3-0.20240715172623-8176fb20c5cb // indirect + github.com/gptscript-ai/go-gptscript v0.9.3-0.20240722014125-d757d09f606b // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hexops/autogold v1.3.1 // indirect diff --git a/go.sum b/go.sum index 598a161e..881c5073 100644 --- a/go.sum +++ b/go.sum @@ -171,10 +171,10 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf037 github.com/gptscript-ai/chat-completion-client 
v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6AVbvD9pKDVewykkwSAPwEpmIEQIR/4= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240715172623-8176fb20c5cb h1:xeSbO4mLYnoTg7diNW0tpxY/0yDSSdgjohMzwE4Za6k= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240715172623-8176fb20c5cb/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240716053605-ecddbcf60eac h1:zZ993dp2mx/63JD4THwMeBcn3C8SogcLeQRJUZsMSM4= -github.com/gptscript-ai/tui v0.0.0-20240716053605-ecddbcf60eac/go.mod h1:Ex2xQMzTMfb5UgLz9rctATPps8DnfPeJQh8o/AiQCoE= +github.com/gptscript-ai/go-gptscript v0.9.3-0.20240722014125-d757d09f606b h1:Hxu8oPE43uQ2sZ7P+9yGSX9bXh0RoJfOgvY/SlCwFlM= +github.com/gptscript-ai/go-gptscript v0.9.3-0.20240722014125-d757d09f606b/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= +github.com/gptscript-ai/tui v0.0.0-20240722014329-d50b5ac5db74 h1:69BENZCN2y4BCxmPjMRp+ZQ47ay4i5gRgREKZatu5oE= +github.com/gptscript-ai/tui v0.0.0-20240722014329-d50b5ac5db74/go.mod h1:sP/9g7+nLq65aGef5F30AEG+Cuu4BwlglUYv1Pzps4Y= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index e0ea5e3a..9e4b94fc 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -96,7 +96,7 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate instructions = append(instructions, inputContext.Content) } var extraEnv = []string{ - strings.TrimSpace(fmt.Sprintf("GPTSCRIPT_CONTEXT=%s", strings.Join(instructions, "\n"))), + strings.TrimSpace("GPTSCRIPT_CONTEXT=" 
+ strings.Join(instructions, "\n")), } cmd, stop, err := e.newCommand(ctx.Ctx, extraEnv, tool, input) diff --git a/pkg/env/env.go b/pkg/env/env.go index 7c8bd7c7..bedd5f9d 100644 --- a/pkg/env/env.go +++ b/pkg/env/env.go @@ -60,6 +60,10 @@ func AppendPath(env []string, binPath string) []string { // Lookup will try to find bin in the PATH in env. It will refer to PATHEXT for Windows support. // If bin can not be resolved to anything the original bin string is returned. func Lookup(env []string, bin string) string { + if strings.Contains(bin, string(filepath.Separator)) { + return bin + } + for _, env := range env { for _, prefix := range []string{"PATH=", "Path="} { suffix, ok := strings.CutPrefix(env, prefix) diff --git a/pkg/llm/registry.go b/pkg/llm/registry.go index ba648f58..c568b43c 100644 --- a/pkg/llm/registry.go +++ b/pkg/llm/registry.go @@ -7,6 +7,7 @@ import ( "sort" "github.com/gptscript-ai/gptscript/pkg/openai" + "github.com/gptscript-ai/gptscript/pkg/remote" "github.com/gptscript-ai/gptscript/pkg/types" ) @@ -41,11 +42,39 @@ func (r *Registry) ListModels(ctx context.Context, providers ...string) (result return result, nil } +func (r *Registry) fastPath(modelName string) Client { + // This is optimization hack to avoid doing List Models + if len(r.clients) != 2 { + return nil + } + + _, modelFromProvider := types.SplitToolRef(modelName) + if modelFromProvider != "" { + return nil + } + + _, ok := r.clients[0].(*openai.Client) + if !ok { + return nil + } + + _, ok = r.clients[1].(*remote.Client) + if !ok { + return nil + } + + return r.clients[0] +} + func (r *Registry) Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { if messageRequest.Model == "" { return nil, fmt.Errorf("model is required") } + if c := r.fastPath(messageRequest.Model); c != nil { + return c.Call(ctx, messageRequest, status) + } + var errs []error var oaiClient *openai.Client for _, client := 
range r.clients { diff --git a/pkg/loader/url.go b/pkg/loader/url.go index 2035469e..41400790 100644 --- a/pkg/loader/url.go +++ b/pkg/loader/url.go @@ -7,6 +7,7 @@ import ( "net/http" url2 "net/url" "path" + "regexp" "strings" "time" @@ -33,6 +34,14 @@ type cacheValue struct { Time time.Time } +func (c *cacheKey) isStatic() bool { + return c.Repo != nil && + c.Repo.Revision != "" && + stableRef.MatchString(c.Repo.Revision) +} + +var stableRef = regexp.MustCompile("^([a-f0-9]{7,40}$|v[0-9]|[0-9])") + func loadURL(ctx context.Context, cache *cache.Client, base *source, name string) (*source, bool, error) { var ( repo *types.Repo @@ -47,9 +56,17 @@ func loadURL(ctx context.Context, cache *cache.Client, base *source, name string cachedValue cacheValue ) + if cachedKey.Repo == nil { + if _, rev, ok := strings.Cut(name, "@"); ok && stableRef.MatchString(rev) { + cachedKey.Repo = &types.Repo{ + Revision: rev, + } + } + } + if ok, err := cache.Get(ctx, cachedKey, &cachedValue); err != nil { return nil, false, err - } else if ok && time.Since(cachedValue.Time) < CacheTimeout { + } else if ok && (cachedKey.isStatic() || time.Since(cachedValue.Time) < CacheTimeout) { return cachedValue.Source, true, nil } From 3c5d2903a4d8fb26d64fe2b5ab6f79a04c6d17bb Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Sun, 21 Jul 2024 18:44:38 -0700 Subject: [PATCH 037/270] chore: only add agent tools for chat tools --- pkg/types/tool.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/types/tool.go b/pkg/types/tool.go index ad483984..82effad4 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -660,10 +660,12 @@ func (t Tool) addContextExportedTools(prg Program, result *toolRefSet) error { func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference) ([]ToolReference, error) { result := toolRefSet{} - for _, agent := range agentGroup { - // don't add yourself - if agent.ToolID != t.ID { - result.Add(agent) + if t.Chat { + for _, agent := 
range agentGroup { + // don't add yourself + if agent.ToolID != t.ID { + result.Add(agent) + } } } From f3194f7d599f5b3c575ce9ec83e37510af9e344c Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Sun, 21 Jul 2024 22:03:48 -0700 Subject: [PATCH 038/270] chore: add log message for installing uv --- pkg/repos/runtimes/python/python.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/repos/runtimes/python/python.go b/pkg/repos/runtimes/python/python.go index a5268f31..c031cb16 100644 --- a/pkg/repos/runtimes/python/python.go +++ b/pkg/repos/runtimes/python/python.go @@ -22,7 +22,7 @@ import ( //go:embed python.json var releasesData []byte -const uvVersion = "uv==0.2.3" +const uvVersion = "uv==0.2.27" type Release struct { OS string `json:"os,omitempty"` @@ -185,6 +185,7 @@ func (r *Runtime) runPip(ctx context.Context, toolSource, binDir string, env []s } func (r *Runtime) setupUV(ctx context.Context, tmp string) error { + log.InfofCtx(ctx, "Install uv %s", uvVersion) cmd := debugcmd.New(ctx, pythonCmd(tmp), "-m", "pip", "install", uvVersion) return cmd.Run() } From 13c31fd93ec0a559c1df0a8493808c2bbf597869 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 22 Jul 2024 12:28:27 -0400 Subject: [PATCH 039/270] fix: use default config in embedded SDK server Signed-off-by: Donnie Adams --- pkg/gptscript/gptscript.go | 4 ++-- pkg/sdkserver/server.go | 20 +++++++++++++++++++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index d25915b1..462ee8b5 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -51,7 +51,7 @@ type Options struct { Env []string } -func complete(opts ...Options) Options { +func Complete(opts ...Options) Options { var result Options for _, opt := range opts { result.Cache = cache.Complete(result.Cache, opt.Cache) @@ -80,7 +80,7 @@ func complete(opts ...Options) Options { } func New(ctx context.Context, o ...Options) (*GPTScript, error) { 
- opts := complete(o...) + opts := Complete(o...) registry := llm.NewRegistry() cacheClient, err := cache.New(opts.Cache) diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index d0ca5a5f..26e449fe 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -55,7 +55,9 @@ func Run(ctx context.Context, opts Options) error { // EmbeddedStart allows running the server as an embedded process that may use Stdin for input. // It returns the address the server is listening on. -func EmbeddedStart(ctx context.Context, opts Options) (string, error) { +func EmbeddedStart(ctx context.Context, options ...Options) (string, error) { + opts := complete(options...) + listener, err := newListener(opts) if err != nil { return "", err @@ -138,3 +140,19 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { <-done return nil } + +func complete(opts ...Options) Options { + var result Options + + for _, opt := range opts { + result.Options = gptscript.Complete(result.Options, opt.Options) + result.ListenAddress = types.FirstSet(opt.ListenAddress, result.ListenAddress) + result.Debug = types.FirstSet(opt.Debug, result.Debug) + } + + if result.ListenAddress == "" { + result.ListenAddress = "127.0.0.1:0" + } + + return result +} From 78b02e33c3bea5fad77e24eeefd3289f14b68c3a Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 22 Jul 2024 15:19:09 -0400 Subject: [PATCH 040/270] feat: add ability to disable server error logging Signed-off-by: Donnie Adams --- pkg/sdkserver/server.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 26e449fe..b1dffdef 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "log" "net" "net/http" "os" @@ -24,8 +25,9 @@ import ( type Options struct { gptscript.Options - ListenAddress string - Debug bool + ListenAddress string + Debug bool + DisableServerErrorLogging bool } // 
Run will start the server and block until the server is shut down. @@ -121,6 +123,10 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { ), } + if opts.DisableServerErrorLogging { + httpServer.ErrorLog = log.New(io.Discard, "", 0) + } + logger := mvl.Package() done := make(chan struct{}) context.AfterFunc(ctx, func() { From b5f2aebf9a75799fd38e10f86a07bf4f1d07b6cc Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 22 Jul 2024 18:19:03 -0400 Subject: [PATCH 041/270] feat: add a "contact us" link to the docs nav bar Signed-off-by: Donnie Adams --- docs/docusaurus.config.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index c48b8ac9..6a0eb3bc 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -65,6 +65,11 @@ const config = { label: "Discord", position: "right", }, + { + href: "mailto:info@acorn.io?subject=Reaching out from GPTScript Docs", + label: "Contact Us", + position: "right", + }, { href: "https://tools.gptscript.ai/", label: "Tool Search", From d54d11b7d57762f706e8d864acaf8a68cf3260bd Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Tue, 23 Jul 2024 15:08:01 -0400 Subject: [PATCH 042/270] fix: normalize LLM-hallucinated tool names (#656) Signed-off-by: Grant Linville --- pkg/engine/engine.go | 2 +- pkg/tests/runner_test.go | 2 +- pkg/tests/testdata/TestMissingTool/call1-resp.golden | 2 +- pkg/tests/testdata/TestMissingTool/call2.golden | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 0ea72ff6..d3daa674 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -396,7 +396,7 @@ func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { } if toolID == "" { log.Debugf("failed to find tool id for tool %s in tool_call result", content.ToolCall.Function.Name) - toolID = content.ToolCall.Function.Name + toolID = 
types.ToolNormalizer(content.ToolCall.Function.Name) missing = true } state.Pending[content.ToolCall.ID] = *content.ToolCall diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index a38de6a2..70d5346c 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -954,7 +954,7 @@ func TestMissingTool(t *testing.T) { r.RespondWith(tester.Result{ Func: types.CompletionFunctionCall{ - Name: "not bob", + Name: "not.bob", }, }) diff --git a/pkg/tests/testdata/TestMissingTool/call1-resp.golden b/pkg/tests/testdata/TestMissingTool/call1-resp.golden index c9799ee8..4f4e82a0 100644 --- a/pkg/tests/testdata/TestMissingTool/call1-resp.golden +++ b/pkg/tests/testdata/TestMissingTool/call1-resp.golden @@ -5,7 +5,7 @@ "toolCall": { "id": "call_1", "function": { - "name": "not bob" + "name": "not.bob" } } } diff --git a/pkg/tests/testdata/TestMissingTool/call2.golden b/pkg/tests/testdata/TestMissingTool/call2.golden index 2fe99e81..c24d83f8 100644 --- a/pkg/tests/testdata/TestMissingTool/call2.golden +++ b/pkg/tests/testdata/TestMissingTool/call2.golden @@ -35,7 +35,7 @@ "toolCall": { "id": "call_1", "function": { - "name": "not bob" + "name": "not.bob" } } } @@ -46,13 +46,13 @@ "role": "tool", "content": [ { - "text": "ERROR: can not call unknown tool named [not bob]" + "text": "ERROR: can not call unknown tool named [notBob]" } ], "toolCall": { "id": "call_1", "function": { - "name": "not bob" + "name": "not.bob" } }, "usage": {} From 777f80815434ce62653db241ac2c35401919ba6c Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 23 Jul 2024 15:42:29 -0400 Subject: [PATCH 043/270] fix: set disable error logging when completing sdk server config Signed-off-by: Donnie Adams --- pkg/sdkserver/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index b1dffdef..4ef28267 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -154,6 +154,7 @@ func complete(opts ...Options) Options { 
result.Options = gptscript.Complete(result.Options, opt.Options) result.ListenAddress = types.FirstSet(opt.ListenAddress, result.ListenAddress) result.Debug = types.FirstSet(opt.Debug, result.Debug) + result.DisableServerErrorLogging = types.FirstSet(opt.DisableServerErrorLogging, result.DisableServerErrorLogging) } if result.ListenAddress == "" { From 1b6811916413be5e0516b419e14edbd8736df8d1 Mon Sep 17 00:00:00 2001 From: Sangeetha Hariharan Date: Wed, 17 Jul 2024 16:51:38 -0700 Subject: [PATCH 044/270] Add faq for github rate limit --- docs/docs/09-faqs.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/docs/09-faqs.md b/docs/docs/09-faqs.md index b849e1d0..00f26700 100644 --- a/docs/docs/09-faqs.md +++ b/docs/docs/09-faqs.md @@ -53,3 +53,11 @@ This tells the LLM (by way of a [system message](https://platform.openai.com/doc This context also automatically shares the `sys.ls`, `sys.read`, and `sys.write` tools with the tool that is using it as a context. This is because if a tool intends to interact with the workspace, it minimally needs these tools. +### I'm hitting GitHub's rate limit for unauthenticated requests when using GPTScript. + +By default, GPTScript makes unauthenticated requests to GitHub when pulling tools. Since GitHub's rate limits for unauthenticated requests are fairly low, running into them when developing with GPTScript is a common issue. To avoid this, you can get GPTScript to make authenticated requests -- which have higher rate limits -- by setting the `GITHUB_AUTH_TOKEN` environment variable to your github account's PAT (Personal Access Token). 
+If you're already authenticated with the `gh` CLI, you can use its token by running: + +```bash +export GITHUB_AUTH_TOKEN="$(gh auth token)" +``` From 9d739fc7086d3f7abfffbf1378422e3ef6ca5616 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Sat, 27 Jul 2024 11:42:18 -0400 Subject: [PATCH 045/270] feat: add force sequential option to SDK server Signed-off-by: Donnie Adams --- pkg/sdkserver/routes.go | 1 + pkg/sdkserver/types.go | 1 + 2 files changed, 2 insertions(+) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index e17a2d1a..e0977c9e 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -202,6 +202,7 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { // Set the monitor factory so that we can get events from the server. MonitorFactory: NewSessionFactory(s.events), CredentialOverrides: reqObject.CredentialOverrides, + Sequential: reqObject.ForceSequential, }, } diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 478c6565..06119c35 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -62,6 +62,7 @@ type toolOrFileRequest struct { CredentialOverrides []string `json:"credentialOverrides"` Confirm bool `json:"confirm"` Location string `json:"location,omitempty"` + ForceSequential bool `json:"forceSequential"` } type content struct { From 36c961cf2d06fec872b62547d617233fd2451c80 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Sat, 27 Jul 2024 21:57:06 -0700 Subject: [PATCH 046/270] chore: bump tui --- go.mod | 4 ++-- go.sum | 8 ++++---- pkg/cli/gptscript.go | 9 ++++++--- pkg/parser/parser.go | 10 +++++----- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 1e379045..9feaef4f 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,8 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 github.com/gptscript-ai/cmd 
v0.0.0-20240625175447-4250b42feb7d - github.com/gptscript-ai/tui v0.0.0-20240722014329-d50b5ac5db74 + github.com/gptscript-ai/go-gptscript v0.9.3-0.20240728044543-20d868b5baa6 + github.com/gptscript-ai/tui v0.0.0-20240728045051-19ba83cd96c1 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 @@ -63,7 +64,6 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/gookit/color v1.5.4 // indirect github.com/gorilla/css v1.0.0 // indirect - github.com/gptscript-ai/go-gptscript v0.9.3-0.20240722014125-d757d09f606b // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hexops/autogold v1.3.1 // indirect diff --git a/go.sum b/go.sum index 881c5073..9c288064 100644 --- a/go.sum +++ b/go.sum @@ -171,10 +171,10 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf037 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6AVbvD9pKDVewykkwSAPwEpmIEQIR/4= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240722014125-d757d09f606b h1:Hxu8oPE43uQ2sZ7P+9yGSX9bXh0RoJfOgvY/SlCwFlM= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240722014125-d757d09f606b/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240722014329-d50b5ac5db74 h1:69BENZCN2y4BCxmPjMRp+ZQ47ay4i5gRgREKZatu5oE= -github.com/gptscript-ai/tui v0.0.0-20240722014329-d50b5ac5db74/go.mod h1:sP/9g7+nLq65aGef5F30AEG+Cuu4BwlglUYv1Pzps4Y= +github.com/gptscript-ai/go-gptscript v0.9.3-0.20240728044543-20d868b5baa6 h1:hF9Q8KdQhuoXSGKVh4ywRvwn5RJt9rbPraigpXqbGYU= +github.com/gptscript-ai/go-gptscript 
v0.9.3-0.20240728044543-20d868b5baa6/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= +github.com/gptscript-ai/tui v0.0.0-20240728045051-19ba83cd96c1 h1:gJXswjjwoiWdOS+s73mliWbN9dyJpiUkb3T+EiV7EFc= +github.com/gptscript-ai/tui v0.0.0-20240728045051-19ba83cd96c1/go.mod h1:Llh3vi87gyry6j/sgJxhkHHvgv9uQRzEiMWuQtmpW1w= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index f539f516..c22a25d2 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -13,6 +13,7 @@ import ( "github.com/fatih/color" "github.com/gptscript-ai/cmd" + gptscript2 "github.com/gptscript-ai/go-gptscript" "github.com/gptscript-ai/gptscript/pkg/assemble" "github.com/gptscript-ai/gptscript/pkg/auth" "github.com/gptscript-ai/gptscript/pkg/builtin" @@ -464,9 +465,11 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { if !r.DisableTUI && !r.Debug && !r.DebugMessages && !r.NoTrunc { // Don't use cmd.Context() because then sigint will cancel everything return tui.Run(context.Background(), args[0], tui.RunOptions{ - OpenAIAPIKey: r.OpenAIOptions.APIKey, - OpenAIBaseURL: r.OpenAIOptions.BaseURL, - DefaultModel: r.DefaultModel, + ClientOpts: &gptscript2.GlobalOptions{ + OpenAIAPIKey: r.OpenAIOptions.APIKey, + OpenAIBaseURL: r.OpenAIOptions.BaseURL, + DefaultModel: r.DefaultModel, + }, TrustedRepoPrefixes: []string{"github.com/gptscript-ai"}, DisableCache: r.DisableCache, CredentialOverrides: r.CredentialOverride, diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index f7c750c1..d12f838e 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -101,23 +101,23 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { return false, err } tool.Parameters.Chat = v - case "export", 
"exporttool", "exports", "exporttools", "sharetool", "sharetools": + case "export", "exporttool", "exports", "exporttools", "sharetool", "sharetools", "sharedtool", "sharedtools": tool.Parameters.Export = append(tool.Parameters.Export, csv(value)...) case "tool", "tools": tool.Parameters.Tools = append(tool.Parameters.Tools, csv(value)...) case "inputfilter", "inputfilters": tool.Parameters.InputFilters = append(tool.Parameters.InputFilters, csv(value)...) - case "shareinputfilter", "shareinputfilters": + case "shareinputfilter", "shareinputfilters", "sharedinputfilter", "sharedinputfilters": tool.Parameters.ExportInputFilters = append(tool.Parameters.ExportInputFilters, csv(value)...) case "outputfilter", "outputfilters": tool.Parameters.OutputFilters = append(tool.Parameters.OutputFilters, csv(value)...) - case "shareoutputfilter", "shareoutputfilters": + case "shareoutputfilter", "shareoutputfilters", "sharedoutputfilter", "sharedoutputfilters": tool.Parameters.ExportOutputFilters = append(tool.Parameters.ExportOutputFilters, csv(value)...) case "agent", "agents": tool.Parameters.Agents = append(tool.Parameters.Agents, csv(value)...) case "globaltool", "globaltools": tool.Parameters.GlobalTools = append(tool.Parameters.GlobalTools, csv(value)...) - case "exportcontext", "exportcontexts", "sharecontext", "sharecontexts": + case "exportcontext", "exportcontexts", "sharecontext", "sharecontexts", "sharedcontext", "sharedcontexts": tool.Parameters.ExportContext = append(tool.Parameters.ExportContext, csv(value)...) case "context": tool.Parameters.Context = append(tool.Parameters.Context, csv(value)...) 
@@ -148,7 +148,7 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { } case "credentials", "creds", "credential", "cred": tool.Parameters.Credentials = append(tool.Parameters.Credentials, value) - case "sharecredentials", "sharecreds", "sharecredential", "sharecred": + case "sharecredentials", "sharecreds", "sharecredential", "sharecred", "sharedcredentials", "sharedcreds", "sharedcredential", "sharedcred": tool.Parameters.ExportCredentials = append(tool.Parameters.ExportCredentials, value) default: return false, nil From 313ee1cf39e1771ff3f2e3806bd7bf7037c111b7 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 29 Jul 2024 15:29:57 -0700 Subject: [PATCH 047/270] chore: add --default-model-provider This change allow you to set a default model provider where before the default could only be a url/api pair for OpenAI. Setting this also implicitly disables the configured openai provider configured with the --openai* flags --- .../04-command-line-reference/gptscript.md | 63 ++++++++++--------- .../gptscript_eval.md | 43 ++++++------- .../gptscript_fmt.md | 43 ++++++------- .../gptscript_parse.md | 43 ++++++------- pkg/cli/gptscript.go | 48 +++++++------- pkg/gptscript/gptscript.go | 42 +++++++------ pkg/remote/remote.go | 46 +++++++++----- pkg/sdkserver/routes.go | 1 + pkg/sdkserver/types.go | 23 +++---- 9 files changed, 188 insertions(+), 164 deletions(-) diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md index 0c485603..6cd3feb6 100644 --- a/docs/docs/04-command-line-reference/gptscript.md +++ b/docs/docs/04-command-line-reference/gptscript.md @@ -12,37 +12,38 @@ gptscript [flags] PROGRAM_FILE [INPUT...] 
### Options ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - --chat-state string The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --disable-tui Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - --force-chat Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT) - --force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL) - -h, --help help for gptscript - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS) - --list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --save-chat-state-file string A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE) - --sub-tool string Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL) - --ui Launch the UI ($GPTSCRIPT_UI) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + --chat-state string The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) 
(default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --disable-tui Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + --force-chat Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT) + --force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL) + -h, --help help for gptscript + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS) + --list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --save-chat-state-file string A file to save the chat state to so that a 
conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE) + --sub-tool string Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL) + --ui Launch the UI ($GPTSCRIPT_UI) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_eval.md b/docs/docs/04-command-line-reference/gptscript_eval.md index 0fdd0249..ff9e6446 100644 --- a/docs/docs/04-command-line-reference/gptscript_eval.md +++ b/docs/docs/04-command-line-reference/gptscript_eval.md @@ -25,27 +25,28 @@ gptscript eval [flags] ### Options inherited from parent commands ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a 
file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_fmt.md b/docs/docs/04-command-line-reference/gptscript_fmt.md index 5780c838..7aceb957 100644 --- a/docs/docs/04-command-line-reference/gptscript_fmt.md +++ b/docs/docs/04-command-line-reference/gptscript_fmt.md @@ -19,27 +19,28 @@ gptscript fmt [flags] ### Options inherited from parent commands ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default 
LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + 
--default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_parse.md b/docs/docs/04-command-line-reference/gptscript_parse.md index 680aebf6..3d84622b 100644 --- a/docs/docs/04-command-line-reference/gptscript_parse.md +++ b/docs/docs/04-command-line-reference/gptscript_parse.md @@ -19,27 +19,28 @@ gptscript parse [flags] ### Options inherited from parent commands ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store 
credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + 
--credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index c22a25d2..84d7de71 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -54,24 +54,25 @@ type GPTScript struct { Output string `usage:"Save output to a file, or - for stdout" short:"o"` EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\\\.\\pipe\\my-pipe)" name:"events-stream-to"` // Input should not be using GPTSCRIPT_INPUT env var because that is the same value that is set in tool executions - Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"` - SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` - Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` - ListModels bool `usage:"List the models available and exit" local:"true"` - ListTools bool `usage:"List built-in tools and exit" local:"true"` - ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"` - Chdir string `usage:"Change current working directory" short:"C"` - Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"` - Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"` - CredentialContext string `usage:"Context name in which to store credentials" default:"default"` - CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"` - ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"` - ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"` - ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"` - Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"` - UI bool `usage:"Launch the UI" local:"true" name:"ui"` - DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"` - SaveChatStateFile string `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"` + Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" 
env:"GPTSCRIPT_INPUT_FILE"` + SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` + Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` + ListModels bool `usage:"List the models available and exit" local:"true"` + ListTools bool `usage:"List built-in tools and exit" local:"true"` + ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"` + Chdir string `usage:"Change current working directory" short:"C"` + Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"` + Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"` + CredentialContext string `usage:"Context name in which to store credentials" default:"default"` + CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"` + ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"` + ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"` + ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"` + Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"` + UI bool `usage:"Launch the UI" local:"true" name:"ui"` + DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"` + SaveChatStateFile string `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"` + DefaultModelProvider string `usage:"Default LLM model provider to use, this will override OpenAI settings"` readData []byte } @@ -136,11 +137,12 @@ func (r *GPTScript) NewGPTScriptOpts() (gptscript.Options, error) { CredentialOverrides: r.CredentialOverride, Sequential: r.ForceSequential, }, - Quiet: r.Quiet, - Env: 
os.Environ(), - CredentialContext: r.CredentialContext, - Workspace: r.Workspace, - DisablePromptServer: r.UI, + Quiet: r.Quiet, + Env: os.Environ(), + CredentialContext: r.CredentialContext, + Workspace: r.Workspace, + DisablePromptServer: r.UI, + DefaultModelProvider: r.DefaultModelProvider, } if r.Confirm { diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 462ee8b5..43f429fc 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -40,15 +40,16 @@ type GPTScript struct { } type Options struct { - Cache cache.Options - OpenAI openai.Options - Monitor monitor.Options - Runner runner.Options - CredentialContext string - Quiet *bool - Workspace string - DisablePromptServer bool - Env []string + Cache cache.Options + OpenAI openai.Options + Monitor monitor.Options + Runner runner.Options + DefaultModelProvider string + CredentialContext string + Quiet *bool + Workspace string + DisablePromptServer bool + Env []string } func Complete(opts ...Options) Options { @@ -64,6 +65,7 @@ func Complete(opts ...Options) Options { result.Workspace = types.FirstSet(opt.Workspace, result.Workspace) result.Env = append(result.Env, opt.Env...) 
result.DisablePromptServer = types.FirstSet(opt.DisablePromptServer, result.DisablePromptServer) + result.DefaultModelProvider = types.FirstSet(opt.DefaultModelProvider, result.DefaultModelProvider) } if result.Quiet == nil { @@ -106,16 +108,18 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { return nil, err } - oaiClient, err := openai.NewClient(ctx, credStore, opts.OpenAI, openai.Options{ - Cache: cacheClient, - SetSeed: true, - }) - if err != nil { - return nil, err - } + if opts.DefaultModelProvider == "" { + oaiClient, err := openai.NewClient(ctx, credStore, opts.OpenAI, openai.Options{ + Cache: cacheClient, + SetSeed: true, + }) + if err != nil { + return nil, err + } - if err := registry.AddClient(oaiClient); err != nil { - return nil, err + if err := registry.AddClient(oaiClient); err != nil { + return nil, err + } } if opts.Runner.MonitorFactory == nil { @@ -143,7 +147,7 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { fullEnv := append(opts.Env, extraEnv...) 
- remoteClient := remote.New(runner, fullEnv, cacheClient, credStore) + remoteClient := remote.New(runner, fullEnv, cacheClient, credStore, opts.DefaultModelProvider) if err := registry.AddClient(remoteClient); err != nil { closeServer() return nil, err diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 6cb3644e..8b9d2162 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -22,21 +22,23 @@ import ( ) type Client struct { - clientsLock sync.Mutex - cache *cache.Client - clients map[string]*openai.Client - models map[string]*openai.Client - runner *runner.Runner - envs []string - credStore credentials.CredentialStore + clientsLock sync.Mutex + cache *cache.Client + clients map[string]*openai.Client + models map[string]*openai.Client + runner *runner.Runner + envs []string + credStore credentials.CredentialStore + defaultProvider string } -func New(r *runner.Runner, envs []string, cache *cache.Client, credStore credentials.CredentialStore) *Client { +func New(r *runner.Runner, envs []string, cache *cache.Client, credStore credentials.CredentialStore, defaultProvider string) *Client { return &Client{ - cache: cache, - runner: r, - envs: envs, - credStore: credStore, + cache: cache, + runner: r, + envs: envs, + credStore: credStore, + defaultProvider: defaultProvider, } } @@ -73,13 +75,23 @@ func (c *Client) ListModels(ctx context.Context, providers ...string) (result [] return } -func (c *Client) Supports(ctx context.Context, modelName string) (bool, error) { - toolName, modelNameSuffix := types.SplitToolRef(modelName) - if modelNameSuffix == "" { +func (c *Client) parseModel(modelString string) (modelName, providerName string) { + toolName, subTool := types.SplitToolRef(modelString) + if subTool == "" { + // This is just a plain model string "gpt4o" + return toolName, c.defaultProvider + } + // This is a provider string "modelName from provider" + return subTool, toolName +} + +func (c *Client) Supports(ctx context.Context, modelString string) 
(bool, error) { + _, providerName := c.parseModel(modelString) + if providerName == "" { return false, nil } - client, err := c.load(ctx, toolName) + client, err := c.load(ctx, providerName) if err != nil { return false, err } @@ -91,7 +103,7 @@ func (c *Client) Supports(ctx context.Context, modelName string) (bool, error) { c.models = map[string]*openai.Client{} } - c.models[modelName] = client + c.models[modelString] = client return true, nil } diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index e0977c9e..4bed2b37 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -204,6 +204,7 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { CredentialOverrides: reqObject.CredentialOverrides, Sequential: reqObject.ForceSequential, }, + DefaultModelProvider: reqObject.DefaultModelProvider, } if reqObject.Confirm { diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 06119c35..a07cfb9e 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -52,17 +52,18 @@ type toolOrFileRequest struct { cacheOptions `json:",inline"` openAIOptions `json:",inline"` - ToolDefs toolDefs `json:"toolDefs,inline"` - SubTool string `json:"subTool"` - Input string `json:"input"` - ChatState string `json:"chatState"` - Workspace string `json:"workspace"` - Env []string `json:"env"` - CredentialContext string `json:"credentialContext"` - CredentialOverrides []string `json:"credentialOverrides"` - Confirm bool `json:"confirm"` - Location string `json:"location,omitempty"` - ForceSequential bool `json:"forceSequential"` + ToolDefs toolDefs `json:"toolDefs,inline"` + SubTool string `json:"subTool"` + Input string `json:"input"` + ChatState string `json:"chatState"` + Workspace string `json:"workspace"` + Env []string `json:"env"` + CredentialContext string `json:"credentialContext"` + CredentialOverrides []string `json:"credentialOverrides"` + Confirm bool `json:"confirm"` + Location string `json:"location,omitempty"` + 
ForceSequential bool `json:"forceSequential"` + DefaultModelProvider string `json:"defaultModelProvider,omitempty"` } type content struct { From d0e11307efcbaed0f74a4d305db59d296ff98cd3 Mon Sep 17 00:00:00 2001 From: Atulpriya Sharma Date: Tue, 30 Jul 2024 07:35:12 +0530 Subject: [PATCH 048/270] Add GPTReview Jenkins example --- examples/gptreview-jenkins/Jenkinsfile | 56 +++++++++++++++++++++++ examples/gptreview-jenkins/README.md | 31 +++++++++++++ examples/gptreview-jenkins/codereview.gpt | 26 +++++++++++ 3 files changed, 113 insertions(+) create mode 100644 examples/gptreview-jenkins/Jenkinsfile create mode 100644 examples/gptreview-jenkins/README.md create mode 100644 examples/gptreview-jenkins/codereview.gpt diff --git a/examples/gptreview-jenkins/Jenkinsfile b/examples/gptreview-jenkins/Jenkinsfile new file mode 100644 index 00000000..b65780d7 --- /dev/null +++ b/examples/gptreview-jenkins/Jenkinsfile @@ -0,0 +1,56 @@ +pipeline { + agent any + + stages { + stage('Clean Workspace') { + steps { + deleteDir() + } + } + + stage('GPT Review') { + steps { + script { + checkout([ + $class: 'GitSCM', + branches: [[name: '*/main']], // Specify branch + userRemoteConfigs: [[ + url: '' // Provide the URL for your repo that has the codereview.gpt file. 
+ ]] + ]) + + withCredentials([string(credentialsId: 'OPENAI_API_KEY', variable: 'OPENAI_API_KEY')]){ + withCredentials([string(credentialsId: 'GH_TOKEN', variable: 'GH_TOKEN')]) { + // GPTSCript reviews the code + REVIEW = sh(script: "gptscript codereview.gpt --PR_URL=${PR_URL}", returnStdout: true).trim() + + // Construct the JSON payload using Groovy's JSON library + def jsonPayload = groovy.json.JsonOutput.toJson([body: REVIEW]) + + // Post the review comment to the GitHub PR + sh "curl -H \"Authorization: token ${GH_TOKEN}\" -H \"Content-Type: application/json\" -X POST -d '${jsonPayload}' '${PR_COMMENTS_URL}'" + } + } + } + } + } + + stage('Check PR Status') { + steps { + script { + // Check if REVIEW contains 'Require Changes' + if (REVIEW.contains('Require Changes')) { + echo 'Code Requires Changes' + currentBuild.result = 'FAILURE' // Mark the build as failed + error 'Code Requires Changes' // Terminate the build with an error + } + + // Check if REVIEW contains 'Approved' + if (REVIEW.contains('Approved')) { + echo 'Code Approved' + } + } + } + } + } +} \ No newline at end of file diff --git a/examples/gptreview-jenkins/README.md b/examples/gptreview-jenkins/README.md new file mode 100644 index 00000000..60fdb663 --- /dev/null +++ b/examples/gptreview-jenkins/README.md @@ -0,0 +1,31 @@ +# GPTReview With Jenkins + +This folder contains an example of building and implementing your own code reviewer as part of Jenkins Pipeline. + +Below are the files present here: + +- `codereview.gpt`: Contains the GPTScript code and prompts. +- `Jenkinsfile`: Jenkins pipeline file. + +## Pre-requisites + +- An OpenAI API Key. +- GitHub repository. +- Jenkins. +- [GPTScript](https://github.com/gptscript-ai/gptscript) and [GH](https://github.com/cli/cli) CLI installed on the system running Jenkins. 
+
+## How To Run This Example
+
+- Create a new repository in your GitHub account and create a `codereview.gpt` file in the root of that repo based on the contents provided in this file.
+- Configure Jenkins:
+    - Install required plugins - [GitHub](https://plugins.jenkins.io/github/), [Generic Webhook Trigger Plugin](https://plugins.jenkins.io/generic-webhook-trigger/) & [HTTP Request Plugin](https://plugins.jenkins.io/http_request/).
+    - Create a Pipeline
+    - Configure the “OPENAI_API_KEY” and “GH_TOKEN” credentials (these must match the credential IDs used in the Jenkinsfile)
+
+- Configure GitHub:
+    - Set up a webhook by providing your Jenkins pipeline URL: `http:///generic-webhook-trigger/invoke?token=`
+    - Add `Jenkinsfile` in the root of the repo. *Note: Replace the repository URL with your repo URL in the Jenkinsfile provided.*
+
+- Executing the Script:
+    - Create a new branch, add a code file to the repository, and open a new pull request.
+    - The Jenkins pipeline will trigger and our GPTReview will review your code and provide review comments.
diff --git a/examples/gptreview-jenkins/codereview.gpt b/examples/gptreview-jenkins/codereview.gpt
new file mode 100644
index 00000000..f2502b50
--- /dev/null
+++ b/examples/gptreview-jenkins/codereview.gpt
@@ -0,0 +1,26 @@
+Name: Code Reviewer
+Description: A tool to help you perform code review of open PRs
+Context: learn-gh
+Tools: sys.exec, sys.http.html2text?, sys.find, sys.read, sys.write
+Args: PR_URL: The GitHub PR_URL
+
+You have the gh cli available to you. Use it to perform code review for a pr from the $(repo) provided.
+
+Perform the following steps in order:
+1. Identify the files changed in the pull request ($PR_URL) using the pr number and perform a diff.
+    1. Analyze the complete code of each identified file and perform a detailed line by line code review.
+    2. Repeat the process for each changed file in the pr.
+2. Share your review comments separately for each file.
+3. 
In a new line write "Code: Approved" or "Code: Require Changes" based on the review comments. +--- +Name: learn-gh +Description: A tool to help you learn gh cli + +#!/usr/bin/env bash + +echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicate --sort flag." +gh --help +gh repo --help +gh pr --help +gh pr checkout --help +gh pr diff --help From 3bf1c22adb9229cbd704b56c7ca6ce9038ebad7b Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 30 Jul 2024 08:56:38 -0400 Subject: [PATCH 049/270] chore: remove homebrew-release step Since gptscript is on the homebrew autobump list, the formula can't be updated manually. Signed-off-by: Donnie Adams --- .github/workflows/release.yaml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index b34c2fad..4c1c22bf 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -40,16 +40,6 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} TAP_GITHUB_TOKEN: ${{ secrets.TAP_GITHUB_TOKEN }} GORELEASER_CURRENT_TAG: ${{ github.ref_name }} - homebrew-release: - needs: release-tag - if: "! contains(github.ref_name, '-rc')" - runs-on: ubuntu-latest - steps: - - name: Update Homebrew formula - uses: dawidd6/action-homebrew-bump-formula@v3 - with: - token: ${{secrets.BREW_GH_TOKEN}} - formula: gptscript winget-release: needs: release-tag if: "! 
contains(github.ref_name, '-rc')" From 76042bd382685a8f536a7e48d666ae7f5a8229d1 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Tue, 30 Jul 2024 14:41:31 -0400 Subject: [PATCH 050/270] feat: better handling of OpenAPI tools (#667) Signed-off-by: Grant Linville --- go.mod | 3 + go.sum | 7 + pkg/engine/openapi.go | 498 +++++------------- pkg/engine/openapi_test.go | 31 +- pkg/loader/loader.go | 124 ++--- pkg/loader/loader_test.go | 7 +- pkg/loader/openapi.go | 147 +++++- pkg/loader/openapi_test.go | 39 ++ .../testdata/openapi/TestOpenAPIv2.golden | 6 +- .../openapi/TestOpenAPIv2Revamp.golden | 116 ++++ .../testdata/openapi/TestOpenAPIv3.golden | 6 +- .../TestOpenAPIv3NoOperationIDs.golden | 6 +- .../TestOpenAPIv3NoOperationIDsRevamp.golden | 116 ++++ .../openapi/TestOpenAPIv3Revamp.golden | 116 ++++ pkg/openapi/getschema.go | 285 ++++++++++ pkg/openapi/list.go | 68 +++ pkg/openapi/load.go | 121 +++++ pkg/openapi/run.go | 451 ++++++++++++++++ pkg/openapi/security.go | 56 ++ 19 files changed, 1704 insertions(+), 499 deletions(-) create mode 100644 pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden create mode 100644 pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden create mode 100644 pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden create mode 100644 pkg/openapi/getschema.go create mode 100644 pkg/openapi/list.go create mode 100644 pkg/openapi/load.go create mode 100644 pkg/openapi/run.go create mode 100644 pkg/openapi/security.go diff --git a/go.mod b/go.mod index 9feaef4f..de545abd 100644 --- a/go.mod +++ b/go.mod @@ -30,6 +30,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 github.com/tidwall/gjson v1.17.1 + github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc golang.org/x/sync v0.7.0 golang.org/x/term v0.20.0 @@ -101,6 +102,8 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/ulikunitz/xz v0.5.10 // 
indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect diff --git a/go.sum b/go.sum index 9c288064..5a6ce6cf 100644 --- a/go.sum +++ b/go.sum @@ -317,6 +317,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf h1:pvbZ0lM0XWPBqUKqFU8cmavspvIl9nulOYwdy6IFRRo= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -336,6 +337,12 @@ github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95 github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod 
h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= diff --git a/pkg/engine/openapi.go b/pkg/engine/openapi.go index 2e338ca4..0bd5f599 100644 --- a/pkg/engine/openapi.go +++ b/pkg/engine/openapi.go @@ -8,83 +8,148 @@ import ( "mime/multipart" "net/http" "net/url" + "os" "strings" "github.com/gptscript-ai/gptscript/pkg/env" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/tidwall/gjson" - "golang.org/x/exp/maps" ) -var ( - SupportedMIMETypes = []string{"application/json", "text/plain", "multipart/form-data"} - SupportedSecurityTypes = []string{"apiKey", "http"} -) +func (e *Engine) runOpenAPIRevamp(tool types.Tool, input string) (*Return, error) { + envMap := make(map[string]string, len(e.Env)) + for _, env := range e.Env { + k, v, _ := strings.Cut(env, "=") + envMap[k] = v + } -type Parameter struct { - Name string `json:"name"` - Style string `json:"style"` - Explode *bool `json:"explode"` -} + _, inst, _ := strings.Cut(tool.Instructions, types.OpenAPIPrefix+" ") + args := strings.Fields(inst) -// A SecurityInfo represents a security scheme in OpenAPI. 
-type SecurityInfo struct { - Name string `json:"name"` // name as defined in the security schemes - Type string `json:"type"` // http or apiKey - Scheme string `json:"scheme"` // bearer or basic, for type==http - APIKeyName string `json:"apiKeyName"` // name of the API key, for type==apiKey - In string `json:"in"` // header, query, or cookie, for type==apiKey -} + if len(args) != 3 { + return nil, fmt.Errorf("expected 3 arguments to %s", types.OpenAPIPrefix) + } -func (i SecurityInfo) GetCredentialToolStrings(hostname string) []string { - vars := i.getCredentialNamesAndEnvVars(hostname) - var tools []string - - for cred, v := range vars { - field := "value" - switch i.Type { - case "apiKey": - field = i.APIKeyName - case "http": - if i.Scheme == "bearer" { - field = "bearer token" - } else { - if strings.Contains(v, "PASSWORD") { - field = "password" - } else { - field = "username" - } + command := args[0] + source := args[1] + filter := args[2] + + var res *Return + switch command { + case openapi.ListTool: + t, err := openapi.Load(source) + if err != nil { + return nil, fmt.Errorf("failed to load OpenAPI file %s: %w", source, err) + } + + opList, err := openapi.List(t, filter) + if err != nil { + return nil, fmt.Errorf("failed to list operations: %w", err) + } + + opListJSON, err := json.MarshalIndent(opList, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal operation list: %w", err) + } + + res = &Return{ + Result: ptr(string(opListJSON)), + } + case openapi.GetSchemaTool: + operation := gjson.Get(input, "operation").String() + + if filter != "" && filter != openapi.NoFilter { + match, err := openapi.MatchFilters(strings.Split(filter, "|"), operation) + if err != nil { + return nil, err + } else if !match { + // Report to the LLM that the operation was not found + return &Return{ + Result: ptr(fmt.Sprintf("operation %s not found", operation)), + }, nil } } - tools = append(tools, fmt.Sprintf("github.com/gptscript-ai/credential as %s with %s 
as env and %q as message and %q as field", - cred, v, "Please provide a value for the "+v+" environment variable", field)) - } - return tools -} + t, err := openapi.Load(source) + if err != nil { + return nil, fmt.Errorf("failed to load OpenAPI file %s: %w", source, err) + } -func (i SecurityInfo) getCredentialNamesAndEnvVars(hostname string) map[string]string { - if i.Type == "http" && i.Scheme == "basic" { - return map[string]string{ - hostname + i.Name + "Username": "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name) + "_USERNAME", - hostname + i.Name + "Password": "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name) + "_PASSWORD", + var defaultHost string + if strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") { + u, err := url.Parse(source) + if err != nil { + return nil, fmt.Errorf("failed to parse server URL %s: %w", source, err) + } + defaultHost = u.Scheme + "://" + u.Hostname() + } + + schema, _, found, err := openapi.GetSchema(operation, defaultHost, t) + if err != nil { + return nil, fmt.Errorf("failed to get schema: %w", err) + } + if !found { + // Report to the LLM that the operation was not found + return &Return{ + Result: ptr(fmt.Sprintf("operation %s not found", operation)), + }, nil + } + + schemaJSON, err := json.MarshalIndent(schema, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal schema: %w", err) + } + + res = &Return{ + Result: ptr(string(schemaJSON)), + } + case openapi.RunTool: + operation := gjson.Get(input, "operation").String() + args := gjson.Get(input, "args").String() + + if filter != "" && filter != openapi.NoFilter { + match, err := openapi.MatchFilters(strings.Split(filter, "|"), operation) + if err != nil { + return nil, err + } else if !match { + // Report to the LLM that the operation was not found + return &Return{ + Result: ptr(fmt.Sprintf("operation %s not found", operation)), + }, nil + } + } + + t, err := openapi.Load(source) + if err != nil 
{ + return nil, fmt.Errorf("failed to load OpenAPI file %s: %w", source, err) + } + + var defaultHost string + if strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") { + u, err := url.Parse(source) + if err != nil { + return nil, fmt.Errorf("failed to parse server URL %s: %w", source, err) + } + defaultHost = u.Scheme + "://" + u.Hostname() + } + + result, found, err := openapi.Run(operation, defaultHost, args, t, e.Env) + if err != nil { + return nil, fmt.Errorf("failed to run operation %s: %w", operation, err) + } else if !found { + // Report to the LLM that the operation was not found + return &Return{ + Result: ptr(fmt.Sprintf("operation %s not found", operation)), + }, nil + } + + res = &Return{ + Result: ptr(result), } } - return map[string]string{ - hostname + i.Name: "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name), - } -} -type OpenAPIInstructions struct { - Server string `json:"server"` - Path string `json:"path"` - Method string `json:"method"` - BodyContentMIME string `json:"bodyContentMIME"` - SecurityInfos [][]SecurityInfo `json:"apiKeyInfos"` - QueryParameters []Parameter `json:"queryParameters"` - PathParameters []Parameter `json:"pathParameters"` - HeaderParameters []Parameter `json:"headerParameters"` - CookieParameters []Parameter `json:"cookieParameters"` + return res, nil } // runOpenAPI runs a tool that was generated from an OpenAPI definition. @@ -92,6 +157,10 @@ type OpenAPIInstructions struct { // The tools Instructions field will be in the format "#!sys.openapi '{Instructions JSON}'", // where {Instructions JSON} is a JSON string of type OpenAPIInstructions. 
func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { + if os.Getenv("GPTSCRIPT_OPENAPI_REVAMP") == "true" { + return e.runOpenAPIRevamp(tool, input) + } + envMap := map[string]string{} for _, env := range e.Env { @@ -100,7 +169,7 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { } // Extract the instructions from the tool to determine server, path, method, etc. - var instructions OpenAPIInstructions + var instructions openapi.OperationInfo _, inst, _ := strings.Cut(tool.Instructions, types.OpenAPIPrefix+" ") inst = strings.TrimPrefix(inst, "'") inst = strings.TrimSuffix(inst, "'") @@ -109,7 +178,7 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { } // Handle path parameters - instructions.Path = handlePathParameters(instructions.Path, instructions.PathParameters, input) + instructions.Path = openapi.HandlePathParameters(instructions.Path, instructions.PathParams, input) // Parse the URL path, err := url.JoinPath(instructions.Server, instructions.Path) @@ -131,7 +200,7 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { // Check for authentication (only if using HTTPS or localhost) if u.Scheme == "https" || u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" { if len(instructions.SecurityInfos) > 0 { - if err := handleAuths(req, envMap, instructions.SecurityInfos); err != nil { + if err := openapi.HandleAuths(req, envMap, instructions.SecurityInfos); err != nil { return nil, fmt.Errorf("error setting up authentication: %w", err) } } @@ -145,11 +214,11 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { } // Handle query parameters - req.URL.RawQuery = handleQueryParameters(req.URL.Query(), instructions.QueryParameters, input).Encode() + req.URL.RawQuery = openapi.HandleQueryParameters(req.URL.Query(), instructions.QueryParams, input).Encode() // Handle header and cookie parameters - handleHeaderParameters(req, 
instructions.HeaderParameters, input) - handleCookieParameters(req, instructions.CookieParameters, input) + openapi.HandleHeaderParameters(req, instructions.HeaderParams, input) + openapi.HandleCookieParameters(req, instructions.CookieParams, input) // Handle request body if instructions.BodyContentMIME != "" { @@ -217,299 +286,6 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { }, nil } -// handleAuths will set up the request with the necessary authentication information. -// A set of sets of SecurityInfo is passed in, where each represents a possible set of security options. -func handleAuths(req *http.Request, envMap map[string]string, infoSets [][]SecurityInfo) error { - var missingVariables [][]string - - // We need to find a set of infos where we have all the needed environment variables. - for _, infoSet := range infoSets { - var missing []string // Keep track of any missing environment variables - for _, info := range infoSet { - vars := info.getCredentialNamesAndEnvVars(req.URL.Hostname()) - - for _, envName := range vars { - if _, ok := envMap[envName]; !ok { - missing = append(missing, envName) - } - } - } - if len(missing) > 0 { - missingVariables = append(missingVariables, missing) - continue - } - - // We're using this info set, because no environment variables were missing. - // Set up the request as needed. 
- for _, info := range infoSet { - envNames := maps.Values(info.getCredentialNamesAndEnvVars(req.URL.Hostname())) - switch info.Type { - case "apiKey": - switch info.In { - case "header": - req.Header.Set(info.APIKeyName, envMap[envNames[0]]) - case "query": - v := url.Values{} - v.Add(info.APIKeyName, envMap[envNames[0]]) - req.URL.RawQuery = v.Encode() - case "cookie": - req.AddCookie(&http.Cookie{ - Name: info.APIKeyName, - Value: envMap[envNames[0]], - }) - } - case "http": - switch info.Scheme { - case "bearer": - req.Header.Set("Authorization", "Bearer "+envMap[envNames[0]]) - case "basic": - req.SetBasicAuth(envMap[envNames[0]], envMap[envNames[1]]) - } - } - } - return nil - } - - return fmt.Errorf("did not find the needed environment variables for any of the security options. "+ - "At least one of these sets of environment variables must be provided: %v", missingVariables) -} - -// handleQueryParameters extracts each query parameter from the input JSON and adds it to the URL query. 
-func handleQueryParameters(q url.Values, params []Parameter, input string) url.Values { - for _, param := range params { - res := gjson.Get(input, param.Name) - if res.Exists() { - // If it's an array or object, handle the serialization style - if res.IsArray() { - switch param.Style { - case "form", "": // form is the default style for query parameters - if param.Explode == nil || *param.Explode { // default is to explode - for _, item := range res.Array() { - q.Add(param.Name, item.String()) - } - } else { - var strs []string - for _, item := range res.Array() { - strs = append(strs, item.String()) - } - q.Add(param.Name, strings.Join(strs, ",")) - } - case "spaceDelimited": - if param.Explode == nil || *param.Explode { - for _, item := range res.Array() { - q.Add(param.Name, item.String()) - } - } else { - var strs []string - for _, item := range res.Array() { - strs = append(strs, item.String()) - } - q.Add(param.Name, strings.Join(strs, " ")) - } - case "pipeDelimited": - if param.Explode == nil || *param.Explode { - for _, item := range res.Array() { - q.Add(param.Name, item.String()) - } - } else { - var strs []string - for _, item := range res.Array() { - strs = append(strs, item.String()) - } - q.Add(param.Name, strings.Join(strs, "|")) - } - } - } else if res.IsObject() { - switch param.Style { - case "form", "": // form is the default style for query parameters - if param.Explode == nil || *param.Explode { // default is to explode - for k, v := range res.Map() { - q.Add(k, v.String()) - } - } else { - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - q.Add(param.Name, strings.Join(strs, ",")) - } - case "deepObject": - for k, v := range res.Map() { - q.Add(param.Name+"["+k+"]", v.String()) - } - } - } else { - q.Add(param.Name, res.String()) - } - } - } - return q -} - -// handlePathParameters extracts each path parameter from the input JSON and replaces its placeholder in the URL path. 
-func handlePathParameters(path string, params []Parameter, input string) string { - for _, param := range params { - res := gjson.Get(input, param.Name) - if res.Exists() { - // If it's an array or object, handle the serialization style - if res.IsArray() { - switch param.Style { - case "simple", "": // simple is the default style for path parameters - // simple looks the same regardless of whether explode is true - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) - case "label": - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - - if param.Explode == nil || !*param.Explode { // default is to not explode - path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, ","), 1) - } else { - path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, "."), 1) - } - case "matrix": - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - - if param.Explode == nil || !*param.Explode { // default is to not explode - path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+strings.Join(strs, ","), 1) - } else { - s := "" - for _, str := range strs { - s += ";" + param.Name + "=" + str - } - path = strings.Replace(path, "{"+param.Name+"}", s, 1) - } - } - } else if res.IsObject() { - switch param.Style { - case "simple", "": - if param.Explode == nil || !*param.Explode { // default is to not explode - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) - } else { - var strs []string - for k, v := range res.Map() { - strs = append(strs, k+"="+v.String()) - } - path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) - } - case "label": - if param.Explode == 
nil || !*param.Explode { // default is to not explode - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, ","), 1) - } else { - s := "" - for k, v := range res.Map() { - s += "." + k + "=" + v.String() - } - path = strings.Replace(path, "{"+param.Name+"}", s, 1) - } - case "matrix": - if param.Explode == nil || !*param.Explode { // default is to not explode - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+strings.Join(strs, ","), 1) - } else { - s := "" - for k, v := range res.Map() { - s += ";" + k + "=" + v.String() - } - path = strings.Replace(path, "{"+param.Name+"}", s, 1) - } - } - } else { - // Serialization is handled slightly differently even for basic types. - // Explode doesn't do anything though. - switch param.Style { - case "simple", "": - path = strings.Replace(path, "{"+param.Name+"}", res.String(), 1) - case "label": - path = strings.Replace(path, "{"+param.Name+"}", "."+res.String(), 1) - case "matrix": - path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+res.String(), 1) - } - } - } - } - return path -} - -// handleHeaderParameters extracts each header parameter from the input JSON and adds it to the request headers. 
-func handleHeaderParameters(req *http.Request, params []Parameter, input string) { - for _, param := range params { - res := gjson.Get(input, param.Name) - if res.Exists() { - if res.IsArray() { - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - req.Header.Add(param.Name, strings.Join(strs, ",")) - } else if res.IsObject() { - // Handle explosion - var strs []string - if param.Explode == nil || !*param.Explode { // default is to not explode - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - } else { - for k, v := range res.Map() { - strs = append(strs, k+"="+v.String()) - } - } - req.Header.Add(param.Name, strings.Join(strs, ",")) - } else { // basic type - req.Header.Add(param.Name, res.String()) - } - } - } -} - -// handleCookieParameters extracts each cookie parameter from the input JSON and adds it to the request cookies. -func handleCookieParameters(req *http.Request, params []Parameter, input string) { - for _, param := range params { - res := gjson.Get(input, param.Name) - if res.Exists() { - if res.IsArray() { - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - req.AddCookie(&http.Cookie{ - Name: param.Name, - Value: strings.Join(strs, ","), - }) - } else if res.IsObject() { - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - req.AddCookie(&http.Cookie{ - Name: param.Name, - Value: strings.Join(strs, ","), - }) - } else { // basic type - req.AddCookie(&http.Cookie{ - Name: param.Name, - Value: res.String(), - }) - } - } - } +func ptr[T any](t T) *T { + return &t } diff --git a/pkg/engine/openapi_test.go b/pkg/engine/openapi_test.go index df1e00fc..9fd5d34e 100644 --- a/pkg/engine/openapi_test.go +++ b/pkg/engine/openapi_test.go @@ -5,6 +5,7 @@ import ( "net/url" "testing" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/stretchr/testify/require" ) @@ -89,7 
+90,7 @@ func TestPathParameterSerialization(t *testing.T) { t.Run(test.name, func(t *testing.T) { path := path params := getParameters(test.style, test.explode) - path = handlePathParameters(path, params, string(inputStr)) + path = openapi.HandlePathParameters(path, params, string(inputStr)) require.Contains(t, test.expectedPaths, path) }) } @@ -111,13 +112,13 @@ func TestQueryParameterSerialization(t *testing.T) { tests := []struct { name string input string - param Parameter + param openapi.Parameter expectedQueries []string // We use multiple expected queries due to randomness in map iteration }{ { name: "value", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "v", }, expectedQueries: []string{"v=42"}, @@ -125,7 +126,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array form + explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "form", Explode: boolPointer(true), @@ -135,7 +136,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array form + no explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "form", Explode: boolPointer(false), @@ -145,7 +146,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array spaceDelimited + explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "spaceDelimited", Explode: boolPointer(true), @@ -155,7 +156,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array spaceDelimited + no explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "spaceDelimited", Explode: boolPointer(false), @@ -165,7 +166,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array pipeDelimited + explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "pipeDelimited", Explode: boolPointer(true), @@ -175,7 +176,7 @@ func 
TestQueryParameterSerialization(t *testing.T) { { name: "array pipeDelimited + no explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "pipeDelimited", Explode: boolPointer(false), @@ -185,7 +186,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "object form + explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "o", Style: "form", Explode: boolPointer(true), @@ -198,7 +199,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "object form + no explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "o", Style: "form", Explode: boolPointer(false), @@ -211,7 +212,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "object deepObject", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "o", Style: "deepObject", }, @@ -224,14 +225,14 @@ func TestQueryParameterSerialization(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - q := handleQueryParameters(url.Values{}, []Parameter{test.param}, test.input) + q := openapi.HandleQueryParameters(url.Values{}, []openapi.Parameter{test.param}, test.input) require.Contains(t, test.expectedQueries, q.Encode()) }) } } -func getParameters(style string, explode bool) []Parameter { - return []Parameter{ +func getParameters(style string, explode bool) []openapi.Parameter { + return []openapi.Parameter{ { Name: "v", Style: style, diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index d7634058..3d2ae8ed 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -8,26 +8,23 @@ import ( "fmt" "io" "io/fs" + "os" "path" "path/filepath" - "strconv" "strings" "time" "unicode/utf8" - "github.com/getkin/kin-openapi/openapi2" - "github.com/getkin/kin-openapi/openapi2conv" "github.com/getkin/kin-openapi/openapi3" "github.com/gptscript-ai/gptscript/internal" "github.com/gptscript-ai/gptscript/pkg/assemble" 
"github.com/gptscript-ai/gptscript/pkg/builtin" "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/hash" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/gptscript-ai/gptscript/pkg/parser" "github.com/gptscript-ai/gptscript/pkg/system" "github.com/gptscript-ai/gptscript/pkg/types" - "gopkg.in/yaml.v3" - kyaml "sigs.k8s.io/yaml" ) const CacheTimeout = time.Hour @@ -157,33 +154,8 @@ func loadOpenAPI(prg *types.Program, data []byte) *openapi3.T { prg.OpenAPICache = map[string]any{} } - switch isOpenAPI(data) { - case 2: - // Convert OpenAPI v2 to v3 - jsondata := data - if !json.Valid(data) { - jsondata, err = kyaml.YAMLToJSON(data) - if err != nil { - return nil - } - } - - doc := &openapi2.T{} - if err := doc.UnmarshalJSON(jsondata); err != nil { - return nil - } - - openAPIDocument, err = openapi2conv.ToV3(doc) - if err != nil { - return nil - } - case 3: - // Use OpenAPI v3 as is - openAPIDocument, err = openapi3.NewLoader().LoadFromData(data) - if err != nil { - return nil - } - default: + openAPIDocument, err = openapi.LoadFromBytes(data) + if err != nil { return nil } @@ -202,14 +174,18 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base return []types.Tool{tool}, nil } - var tools []types.Tool + var ( + tools []types.Tool + isOpenAPI bool + ) if openAPIDocument := loadOpenAPI(prg, data); openAPIDocument != nil { + isOpenAPI = true var err error if base.Remote { - tools, err = getOpenAPITools(openAPIDocument, base.Location) + tools, err = getOpenAPITools(openAPIDocument, base.Location, base.Location, targetToolName) } else { - tools, err = getOpenAPITools(openAPIDocument, "") + tools, err = getOpenAPITools(openAPIDocument, "", base.Name, targetToolName) } if err != nil { return nil, fmt.Errorf("error parsing OpenAPI definition: %w", err) @@ -257,10 +233,6 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base // Probably a better way to come up with an ID 
tool.ID = tool.Source.Location + ":" + tool.Name - if i == 0 && targetToolName == "" { - targetTools = append(targetTools, tool) - } - if i != 0 && tool.Parameters.Name == "" { return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, fmt.Errorf("only the first tool in a file can have no name")) } @@ -273,16 +245,35 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, fmt.Errorf("only the first tool in a file can have global tools")) } - if targetToolName != "" && tool.Parameters.Name != "" { - if strings.EqualFold(tool.Parameters.Name, targetToolName) { + // Determine targetTools + if isOpenAPI && os.Getenv("GPTSCRIPT_OPENAPI_REVAMP") == "true" { + targetTools = append(targetTools, tool) + } else { + if i == 0 && targetToolName == "" { targetTools = append(targetTools, tool) - } else if strings.Contains(targetToolName, "*") { - match, err := filepath.Match(strings.ToLower(targetToolName), strings.ToLower(tool.Parameters.Name)) - if err != nil { - return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, err) - } - if match { + } + + if targetToolName != "" && tool.Parameters.Name != "" { + if strings.EqualFold(tool.Parameters.Name, targetToolName) { targetTools = append(targetTools, tool) + } else if strings.Contains(targetToolName, "*") { + var patterns []string + if strings.Contains(targetToolName, "|") { + patterns = strings.Split(targetToolName, "|") + } else { + patterns = []string{targetToolName} + } + + for _, pattern := range patterns { + match, err := filepath.Match(strings.ToLower(pattern), strings.ToLower(tool.Parameters.Name)) + if err != nil { + return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, err) + } + if match { + targetTools = append(targetTools, tool) + break + } + } } } } @@ -491,42 +482,3 @@ func input(ctx context.Context, cache *cache.Client, base *source, name string) return nil, 
fmt.Errorf("can not load tools path=%s name=%s", base.Path, name) } - -// isOpenAPI checks if the data is an OpenAPI definition and returns the version if it is. -func isOpenAPI(data []byte) int { - var fragment struct { - Paths map[string]any `json:"paths,omitempty"` - Swagger string `json:"swagger,omitempty"` - OpenAPI string `json:"openapi,omitempty"` - } - - if err := json.Unmarshal(data, &fragment); err != nil { - if err := yaml.Unmarshal(data, &fragment); err != nil { - return 0 - } - } - if len(fragment.Paths) == 0 { - return 0 - } - - if v, _, _ := strings.Cut(fragment.OpenAPI, "."); v != "" { - ver, err := strconv.Atoi(v) - if err != nil { - log.Debugf("invalid OpenAPI version: openapi=%q", fragment.OpenAPI) - return 0 - } - return ver - } - - if v, _, _ := strings.Cut(fragment.Swagger, "."); v != "" { - ver, err := strconv.Atoi(v) - if err != nil { - log.Debugf("invalid Swagger version: swagger=%q", fragment.Swagger) - return 0 - } - return ver - } - - log.Debugf("no OpenAPI version found in input data: openapi=%q, swagger=%q", fragment.OpenAPI, fragment.Swagger) - return 0 -} diff --git a/pkg/loader/loader_test.go b/pkg/loader/loader_test.go index d70c45f5..7c480034 100644 --- a/pkg/loader/loader_test.go +++ b/pkg/loader/loader_test.go @@ -10,6 +10,7 @@ import ( "path/filepath" "testing" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/hexops/autogold/v2" "github.com/stretchr/testify/require" ) @@ -53,17 +54,17 @@ Stuff func TestIsOpenAPI(t *testing.T) { datav2, err := os.ReadFile("testdata/openapi_v2.yaml") require.NoError(t, err) - v := isOpenAPI(datav2) + v := openapi.IsOpenAPI(datav2) require.Equal(t, 2, v, "(yaml) expected openapi v2") datav2, err = os.ReadFile("testdata/openapi_v2.json") require.NoError(t, err) - v = isOpenAPI(datav2) + v = openapi.IsOpenAPI(datav2) require.Equal(t, 2, v, "(json) expected openapi v2") datav3, err := os.ReadFile("testdata/openapi_v3.yaml") require.NoError(t, err) - v = isOpenAPI(datav3) + v = 
openapi.IsOpenAPI(datav3) require.Equal(t, 3, v, "(json) expected openapi v3") } diff --git a/pkg/loader/openapi.go b/pkg/loader/openapi.go index 45254c9d..bc469a4e 100644 --- a/pkg/loader/openapi.go +++ b/pkg/loader/openapi.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/url" + "os" "regexp" "slices" "sort" @@ -11,7 +12,7 @@ import ( "time" "github.com/getkin/kin-openapi/openapi3" - "github.com/gptscript-ai/gptscript/pkg/engine" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/gptscript-ai/gptscript/pkg/types" ) @@ -20,8 +21,12 @@ var toolNameRegex = regexp.MustCompile(`[^a-zA-Z0-9_-]+`) // getOpenAPITools parses an OpenAPI definition and generates a set of tools from it. // Each operation will become a tool definition. // The tool's Instructions will be in the format "#!sys.openapi '{JSON Instructions}'", -// where the JSON Instructions are a JSON-serialized engine.OpenAPIInstructions struct. -func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { +// where the JSON Instructions are a JSON-serialized openapi.OperationInfo struct. 
+func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) ([]types.Tool, error) { + if os.Getenv("GPTSCRIPT_OPENAPI_REVAMP") == "true" { + return getOpenAPIToolsRevamp(t, source, targetToolName) + } + if log.IsDebug() { start := time.Now() defer func() { @@ -51,7 +56,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { for _, item := range t.Security { current := map[string]struct{}{} for name := range item { - if scheme, ok := t.Components.SecuritySchemes[name]; ok && slices.Contains(engine.SupportedSecurityTypes, scheme.Value.Type) { + if scheme, ok := t.Components.SecuritySchemes[name]; ok && slices.Contains(openapi.GetSupportedSecurityTypes(), scheme.Value.Type) { current[name] = struct{}{} } } @@ -134,10 +139,10 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { // - C // D auths []map[string]struct{} - queryParameters []engine.Parameter - pathParameters []engine.Parameter - headerParameters []engine.Parameter - cookieParameters []engine.Parameter + queryParameters []openapi.Parameter + pathParameters []openapi.Parameter + headerParameters []openapi.Parameter + cookieParameters []openapi.Parameter bodyMIME string ) tool := types.Tool{ @@ -177,7 +182,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { } // Add the parameter to the appropriate list for the tool's instructions - p := engine.Parameter{ + p := openapi.Parameter{ Name: param.Value.Name, Style: param.Value.Style, Explode: param.Value.Explode, @@ -199,7 +204,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { for mime, content := range operation.RequestBody.Value.Content { // Each MIME type needs to be handled individually, so we // keep a list of the ones we support. 
- if !slices.Contains(engine.SupportedMIMETypes, mime) { + if !slices.Contains(openapi.GetSupportedMIMETypes(), mime) { continue } bodyMIME = mime @@ -250,18 +255,18 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { } // For each set of auths, turn them into SecurityInfos, and drop ones that contain unsupported types. - var infos [][]engine.SecurityInfo + var infos [][]openapi.SecurityInfo outer: for _, auth := range auths { - var current []engine.SecurityInfo + var current []openapi.SecurityInfo for name := range auth { if scheme, ok := t.Components.SecuritySchemes[name]; ok { - if !slices.Contains(engine.SupportedSecurityTypes, scheme.Value.Type) { + if !slices.Contains(openapi.GetSupportedSecurityTypes(), scheme.Value.Type) { // There is an unsupported type in this auth, so move on to the next one. continue outer } - current = append(current, engine.SecurityInfo{ + current = append(current, openapi.SecurityInfo{ Type: scheme.Value.Type, Name: name, In: scheme.Value.In, @@ -324,17 +329,17 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { return tools, nil } -func instructionString(server, method, path, bodyMIME string, queryParameters, pathParameters, headerParameters, cookieParameters []engine.Parameter, infos [][]engine.SecurityInfo) (string, error) { - inst := engine.OpenAPIInstructions{ - Server: server, - Path: path, - Method: method, - BodyContentMIME: bodyMIME, - SecurityInfos: infos, - QueryParameters: queryParameters, - PathParameters: pathParameters, - HeaderParameters: headerParameters, - CookieParameters: cookieParameters, +func instructionString(server, method, path, bodyMIME string, queryParameters, pathParameters, headerParameters, cookieParameters []openapi.Parameter, infos [][]openapi.SecurityInfo) (string, error) { + inst := openapi.OperationInfo{ + Server: server, + Path: path, + Method: method, + BodyContentMIME: bodyMIME, + SecurityInfos: infos, + QueryParams: queryParameters, 
+ PathParams: pathParameters, + HeaderParams: headerParameters, + CookieParams: cookieParameters, } instBytes, err := json.Marshal(inst) if err != nil { @@ -362,3 +367,95 @@ func parseServer(server *openapi3.Server) (string, error) { } return s, nil } + +func getOpenAPIToolsRevamp(t *openapi3.T, source, targetToolName string) ([]types.Tool, error) { + if t == nil { + return nil, fmt.Errorf("OpenAPI spec is nil") + } else if t.Info == nil { + return nil, fmt.Errorf("OpenAPI spec is missing info field") + } + + if targetToolName == "" { + targetToolName = openapi.NoFilter + } + + list := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: types.ToolNormalizer("list-operations-" + t.Info.Title), + Description: fmt.Sprintf("List available operations for %s. Each of these operations is an OpenAPI operation. Run this tool before you do anything else.", t.Info.Title), + }, + Instructions: fmt.Sprintf("%s %s %s %s", types.OpenAPIPrefix, openapi.ListTool, source, targetToolName), + }, + Source: types.ToolSource{ + LineNo: 0, + }, + } + + getSchema := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: types.ToolNormalizer("get-schema-" + t.Info.Title), + Description: fmt.Sprintf("Get the JSONSchema for the arguments for an operation for %s. 
You must do this before you run the operation.", t.Info.Title), + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeObject}, + Properties: openapi3.Schemas{ + "operation": { + Value: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeString}, + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{"operation"}, + }, + }, + }, + }, + }, + Instructions: fmt.Sprintf("%s %s %s %s", types.OpenAPIPrefix, openapi.GetSchemaTool, source, targetToolName), + }, + Source: types.ToolSource{ + LineNo: 1, + }, + } + + run := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: types.ToolNormalizer("run-operation-" + t.Info.Title), + Description: fmt.Sprintf("Run an operation for %s. You MUST call %s for the operation before you use this tool.", t.Info.Title, openapi.GetSchemaTool), + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeObject}, + Properties: openapi3.Schemas{ + "operation": { + Value: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeString}, + Title: "operation", + Description: "the name of the operation to run", + Required: []string{"operation"}, + }, + }, + "args": { + Value: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeString}, + Title: "args", + Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Required: []string{"args"}, + }, + }, + }, + }, + }, + Instructions: fmt.Sprintf("%s %s %s %s", types.OpenAPIPrefix, openapi.RunTool, source, targetToolName), + }, + } + + exportTool := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Export: []string{list.Parameters.Name, getSchema.Parameters.Name, run.Parameters.Name}, + }, + }, + } + + return []types.Tool{exportTool, list, getSchema, run}, nil +} diff --git a/pkg/loader/openapi_test.go b/pkg/loader/openapi_test.go index d00ffcca..1a7eaa76 100644 --- a/pkg/loader/openapi_test.go +++ b/pkg/loader/openapi_test.go @@ -86,3 
+86,42 @@ func TestOpenAPIv2(t *testing.T) { autogold.ExpectFile(t, prgv2.ToolSet, autogold.Dir("testdata/openapi")) } + +func TestOpenAPIv3Revamp(t *testing.T) { + os.Setenv("GPTSCRIPT_OPENAPI_REVAMP", "true") + prgv3 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav3, err := os.ReadFile("testdata/openapi_v3.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + require.NoError(t, err) + + autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) +} + +func TestOpenAPIv3NoOperationIDsRevamp(t *testing.T) { + os.Setenv("GPTSCRIPT_OPENAPI_REVAMP", "true") + prgv3 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav3, err := os.ReadFile("testdata/openapi_v3_no_operation_ids.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + require.NoError(t, err) + + autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) +} + +func TestOpenAPIv2Revamp(t *testing.T) { + os.Setenv("GPTSCRIPT_OPENAPI_REVAMP", "true") + prgv2 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav2, err := os.ReadFile("testdata/openapi_v2.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv2, &source{Content: datav2}, "") + require.NoError(t, err) + + autogold.ExpectFile(t, prgv2.ToolSet, autogold.Dir("testdata/openapi")) +} diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv2.golden b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden index 90dd1967..39b0b2c1 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv2.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden @@ -38,7 +38,7 @@ types.ToolSet{ Description: "Create a pet", ModelName: "gpt-4o", }, - Instructions: `#!sys.openapi 
'{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":createPets", ToolMapping: map[string][]types.ToolReference{}, @@ -68,7 +68,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":listPets", ToolMapping: map[string][]types.ToolReference{}, @@ -95,7 +95,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, }, ID: ":showPetById", ToolMapping: map[string][]types.ToolReference{}, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden 
b/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden new file mode 100644 index 00000000..ebe68cc2 --- /dev/null +++ b/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden @@ -0,0 +1,116 @@ +types.ToolSet{ + ":": types.Tool{ + ToolDef: types.ToolDef{Parameters: types.Parameters{ + ModelName: "gpt-4o", + Export: []string{ + "listOperationsSwaggerPetstore", + "getSchemaSwaggerPetstore", + "runOperationSwaggerPetstore", + }, + }}, + ID: ":", + ToolMapping: map[string][]types.ToolReference{ + "getSchemaSwaggerPetstore": {{ + Reference: "getSchemaSwaggerPetstore", + ToolID: ":getSchemaSwaggerPetstore", + }}, + "listOperationsSwaggerPetstore": {{ + Reference: "listOperationsSwaggerPetstore", + ToolID: ":listOperationsSwaggerPetstore", + }}, + "runOperationSwaggerPetstore": {{ + Reference: "runOperationSwaggerPetstore", + ToolID: ":runOperationSwaggerPetstore", + }}, + }, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":getSchemaSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "getSchemaSwaggerPetstore", + Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. 
You must do this before you run the operation.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{ + "object", + }, + Properties: openapi3.Schemas{"operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{"operation"}, + }}}, + }, + }, + Instructions: "#!sys.openapi get-schema ", + }, + ID: ":getSchemaSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + Source: types.ToolSource{LineNo: 1}, + }, + ":listOperationsSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "listOperationsSwaggerPetstore", + Description: "List available operations for Swagger Petstore. Each of these operations is an OpenAPI operation. Run this tool before you do anything else.", + ModelName: "gpt-4o", + }, + Instructions: "#!sys.openapi list ", + }, + ID: ":listOperationsSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":runOperationSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "runOperationSwaggerPetstore", + Description: "Run an operation for Swagger Petstore. 
You MUST call get-schema for the operation before you use this tool.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{ + "args": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "args", + Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Required: []string{"args"}, + }}, + "operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to run", + Required: []string{"operation"}, + }}, + }, + }, + }, + Instructions: "#!sys.openapi run ", + }, + ID: ":runOperationSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, +} diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden index 72ccafae..37ac2fe2 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden @@ -63,7 +63,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","apiKeyInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","securityInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":createPets", ToolMapping: map[string][]types.ToolReference{}, @@ -92,7 +92,7 @@ types.ToolSet{ }}}, }, }, - Instructions: 
`#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":listPets", ToolMapping: map[string][]types.ToolReference{}, @@ -119,7 +119,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, }, ID: ":showPetById", ToolMapping: map[string][]types.ToolReference{}, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden index 3bcfd9e5..e950e19c 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden @@ -50,7 +50,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi 
'{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":get_pets", ToolMapping: map[string][]types.ToolReference{}, @@ -77,7 +77,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, }, ID: ":get_pets_petId", ToolMapping: map[string][]types.ToolReference{}, @@ -119,7 +119,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","apiKeyInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","securityInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":post_pets", ToolMapping: map[string][]types.ToolReference{}, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden new file mode 100644 index 00000000..ebe68cc2 --- /dev/null +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden @@ -0,0 +1,116 @@ +types.ToolSet{ + ":": types.Tool{ + 
ToolDef: types.ToolDef{Parameters: types.Parameters{ + ModelName: "gpt-4o", + Export: []string{ + "listOperationsSwaggerPetstore", + "getSchemaSwaggerPetstore", + "runOperationSwaggerPetstore", + }, + }}, + ID: ":", + ToolMapping: map[string][]types.ToolReference{ + "getSchemaSwaggerPetstore": {{ + Reference: "getSchemaSwaggerPetstore", + ToolID: ":getSchemaSwaggerPetstore", + }}, + "listOperationsSwaggerPetstore": {{ + Reference: "listOperationsSwaggerPetstore", + ToolID: ":listOperationsSwaggerPetstore", + }}, + "runOperationSwaggerPetstore": {{ + Reference: "runOperationSwaggerPetstore", + ToolID: ":runOperationSwaggerPetstore", + }}, + }, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":getSchemaSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "getSchemaSwaggerPetstore", + Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. 
You must do this before you run the operation.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{ + "object", + }, + Properties: openapi3.Schemas{"operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{"operation"}, + }}}, + }, + }, + Instructions: "#!sys.openapi get-schema ", + }, + ID: ":getSchemaSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + Source: types.ToolSource{LineNo: 1}, + }, + ":listOperationsSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "listOperationsSwaggerPetstore", + Description: "List available operations for Swagger Petstore. Each of these operations is an OpenAPI operation. Run this tool before you do anything else.", + ModelName: "gpt-4o", + }, + Instructions: "#!sys.openapi list ", + }, + ID: ":listOperationsSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":runOperationSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "runOperationSwaggerPetstore", + Description: "Run an operation for Swagger Petstore. 
You MUST call get-schema for the operation before you use this tool.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{ + "args": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "args", + Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Required: []string{"args"}, + }}, + "operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to run", + Required: []string{"operation"}, + }}, + }, + }, + }, + Instructions: "#!sys.openapi run ", + }, + ID: ":runOperationSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, +} diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden new file mode 100644 index 00000000..ebe68cc2 --- /dev/null +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden @@ -0,0 +1,116 @@ +types.ToolSet{ + ":": types.Tool{ + ToolDef: types.ToolDef{Parameters: types.Parameters{ + ModelName: "gpt-4o", + Export: []string{ + "listOperationsSwaggerPetstore", + "getSchemaSwaggerPetstore", + "runOperationSwaggerPetstore", + }, + }}, + ID: ":", + ToolMapping: map[string][]types.ToolReference{ + "getSchemaSwaggerPetstore": {{ + Reference: "getSchemaSwaggerPetstore", + ToolID: ":getSchemaSwaggerPetstore", + }}, + "listOperationsSwaggerPetstore": {{ + Reference: "listOperationsSwaggerPetstore", + ToolID: ":listOperationsSwaggerPetstore", + }}, + "runOperationSwaggerPetstore": {{ + Reference: "runOperationSwaggerPetstore", + ToolID: ":runOperationSwaggerPetstore", + }}, + }, + 
LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":getSchemaSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "getSchemaSwaggerPetstore", + Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. You must do this before you run the operation.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{ + "object", + }, + Properties: openapi3.Schemas{"operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{"operation"}, + }}}, + }, + }, + Instructions: "#!sys.openapi get-schema ", + }, + ID: ":getSchemaSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + Source: types.ToolSource{LineNo: 1}, + }, + ":listOperationsSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "listOperationsSwaggerPetstore", + Description: "List available operations for Swagger Petstore. Each of these operations is an OpenAPI operation. 
Run this tool before you do anything else.", + ModelName: "gpt-4o", + }, + Instructions: "#!sys.openapi list ", + }, + ID: ":listOperationsSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":runOperationSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "runOperationSwaggerPetstore", + Description: "Run an operation for Swagger Petstore. You MUST call get-schema for the operation before you use this tool.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{ + "args": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "args", + Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Required: []string{"args"}, + }}, + "operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to run", + Required: []string{"operation"}, + }}, + }, + }, + }, + Instructions: "#!sys.openapi run ", + }, + ID: ":runOperationSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, +} diff --git a/pkg/openapi/getschema.go b/pkg/openapi/getschema.go new file mode 100644 index 00000000..3550afcf --- /dev/null +++ b/pkg/openapi/getschema.go @@ -0,0 +1,285 @@ +package openapi + +import ( + "encoding/json" + "fmt" + "slices" + "strings" + + "github.com/getkin/kin-openapi/openapi3" +) + 
+type Parameter struct { + Name string `json:"name"` + Style string `json:"style"` + Explode *bool `json:"explode"` +} + +type OperationInfo struct { + Server string `json:"server"` + Path string `json:"path"` + Method string `json:"method"` + BodyContentMIME string `json:"bodyContentMIME"` + SecurityInfos [][]SecurityInfo `json:"securityInfos"` + QueryParams []Parameter `json:"queryParameters"` + PathParams []Parameter `json:"pathParameters"` + HeaderParams []Parameter `json:"headerParameters"` + CookieParams []Parameter `json:"cookieParameters"` +} + +var ( + supportedMIMETypes = []string{"application/json", "application/x-www-form-urlencoded", "multipart/form-data"} + supportedSecurityTypes = []string{"apiKey", "http"} +) + +const GetSchemaTool = "get-schema" + +func GetSupportedMIMETypes() []string { + return supportedMIMETypes +} + +func GetSupportedSecurityTypes() []string { + return supportedSecurityTypes +} + +// GetSchema returns the JSONSchema and OperationInfo for a particular OpenAPI operation. +// Return values in order: JSONSchema (string), OperationInfo, found (bool), error. +func GetSchema(operationID, defaultHost string, t *openapi3.T) (string, OperationInfo, bool, error) { + arguments := &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{}, + Required: []string{}, + } + + info := OperationInfo{} + + // Determine the default server. 
+ var ( + defaultServer = defaultHost + err error + ) + if len(t.Servers) > 0 { + defaultServer, err = parseServer(t.Servers[0]) + if err != nil { + return "", OperationInfo{}, false, err + } + } + + var globalSecurity []map[string]struct{} + if t.Security != nil { + for _, item := range t.Security { + current := map[string]struct{}{} + for name := range item { + if scheme, ok := t.Components.SecuritySchemes[name]; ok && slices.Contains(supportedSecurityTypes, scheme.Value.Type) { + current[name] = struct{}{} + } + } + if len(current) > 0 { + globalSecurity = append(globalSecurity, current) + } + } + } + + for path, pathItem := range t.Paths.Map() { + // Handle path-level server override, if one exists. + pathServer := defaultServer + if pathItem.Servers != nil && len(pathItem.Servers) > 0 { + pathServer, err = parseServer(pathItem.Servers[0]) + if err != nil { + return "", OperationInfo{}, false, err + } + } + + for method, operation := range pathItem.Operations() { + if operation.OperationID == operationID { + // Handle operation-level server override, if one exists. + operationServer := pathServer + if operation.Servers != nil && len(*operation.Servers) > 0 { + operationServer, err = parseServer((*operation.Servers)[0]) + if err != nil { + return "", OperationInfo{}, false, err + } + } + + info.Server = operationServer + info.Path = path + info.Method = method + + // We found our operation. Now we need to process it and build the arguments. + // Handle query, path, header, and cookie parameters first. + for _, param := range append(operation.Parameters, pathItem.Parameters...) 
{ + removeRefs(param.Value.Schema) + arg := param.Value.Schema.Value + + if arg.Description == "" { + arg.Description = param.Value.Description + } + + // Store the arg + arguments.Properties[param.Value.Name] = &openapi3.SchemaRef{Value: arg} + + // Check whether it is required + if param.Value.Required { + arguments.Required = append(arguments.Required, param.Value.Name) + } + + // Save the parameter to the correct set of params. + p := Parameter{ + Name: param.Value.Name, + Style: param.Value.Style, + Explode: param.Value.Explode, + } + switch param.Value.In { + case "query": + info.QueryParams = append(info.QueryParams, p) + case "path": + info.PathParams = append(info.PathParams, p) + case "header": + info.HeaderParams = append(info.HeaderParams, p) + case "cookie": + info.CookieParams = append(info.CookieParams, p) + } + } + + // Next, handle the request body, if one exists. + if operation.RequestBody != nil { + for mime, content := range operation.RequestBody.Value.Content { + // Each MIME type needs to be handled individually, so we keep a list of the ones we support. + if !slices.Contains(supportedMIMETypes, mime) { + continue + } + info.BodyContentMIME = mime + + removeRefs(content.Schema) + + arg := content.Schema.Value + if arg.Description == "" { + arg.Description = content.Schema.Value.Description + } + + // Read Only cannot be sent in the request body, so we remove it + for key, property := range arg.Properties { + if property.Value.ReadOnly { + delete(arg.Properties, key) + } + } + + // Unfortunately, the request body doesn't contain any good descriptor for it, + // so we just use "requestBodyContent" as the name of the arg. 
+ arguments.Properties["requestBodyContent"] = &openapi3.SchemaRef{Value: arg} + arguments.Required = append(arguments.Required, "requestBodyContent") + break + } + + if info.BodyContentMIME == "" { + return "", OperationInfo{}, false, fmt.Errorf("no supported MIME type found for request body in operation %s", operationID) + } + } + + // See if there is any auth defined for this operation + var ( + noAuth bool + auths []map[string]struct{} + ) + if operation.Security != nil { + if len(*operation.Security) == 0 { + noAuth = true + } + for _, req := range *operation.Security { + current := map[string]struct{}{} + for name := range req { + current[name] = struct{}{} + } + if len(current) > 0 { + auths = append(auths, current) + } + } + } + + // Use the global security if it was not overridden for this operation + if !noAuth && len(auths) == 0 { + auths = append(auths, globalSecurity...) + } + + // For each set of auths, turn them into SecurityInfos, and drop ones that contain unsupported types. + outer: + for _, auth := range auths { + var current []SecurityInfo + for name := range auth { + if scheme, ok := t.Components.SecuritySchemes[name]; ok { + if !slices.Contains(supportedSecurityTypes, scheme.Value.Type) { + // There is an unsupported type in this auth, so move on to the next one. 
+ continue outer + } + + current = append(current, SecurityInfo{ + Type: scheme.Value.Type, + Name: name, + In: scheme.Value.In, + Scheme: scheme.Value.Scheme, + APIKeyName: scheme.Value.Name, + }) + } + } + + if len(current) > 0 { + info.SecurityInfos = append(info.SecurityInfos, current) + } + } + + argumentsJSON, err := json.MarshalIndent(arguments, "", " ") + if err != nil { + return "", OperationInfo{}, false, err + } + return string(argumentsJSON), info, true, nil + } + } + } + + return "", OperationInfo{}, false, nil +} + +func parseServer(server *openapi3.Server) (string, error) { + s := server.URL + for name, variable := range server.Variables { + if variable == nil { + continue + } + + if variable.Default != "" { + s = strings.Replace(s, "{"+name+"}", variable.Default, 1) + } else if len(variable.Enum) > 0 { + s = strings.Replace(s, "{"+name+"}", variable.Enum[0], 1) + } + } + + if !strings.HasPrefix(s, "http") { + return "", fmt.Errorf("invalid server URL: %s (must use HTTP or HTTPS; relative URLs not supported)", s) + } + return s, nil +} + +func removeRefs(r *openapi3.SchemaRef) { + if r == nil { + return + } + + r.Ref = "" + r.Value.Discriminator = nil // Discriminators are not very useful and can junk up the schema. 
+ + for i := range r.Value.OneOf { + removeRefs(r.Value.OneOf[i]) + } + for i := range r.Value.AnyOf { + removeRefs(r.Value.AnyOf[i]) + } + for i := range r.Value.AllOf { + removeRefs(r.Value.AllOf[i]) + } + removeRefs(r.Value.Not) + removeRefs(r.Value.Items) + + for i := range r.Value.Properties { + removeRefs(r.Value.Properties[i]) + } +} diff --git a/pkg/openapi/list.go b/pkg/openapi/list.go new file mode 100644 index 00000000..857c7014 --- /dev/null +++ b/pkg/openapi/list.go @@ -0,0 +1,68 @@ +package openapi + +import ( + "path/filepath" + "strings" + + "github.com/getkin/kin-openapi/openapi3" +) + +type OperationList struct { + Operations map[string]Operation `json:"operations"` +} + +type Operation struct { + Description string `json:"description,omitempty"` + Summary string `json:"summary,omitempty"` +} + +const ( + ListTool = "list" + NoFilter = "" +) + +func List(t *openapi3.T, filter string) (OperationList, error) { + operations := make(map[string]Operation) + for _, pathItem := range t.Paths.Map() { + for _, operation := range pathItem.Operations() { + var ( + match bool + err error + ) + if filter != "" && filter != NoFilter { + if strings.Contains(filter, "*") { + match, err = MatchFilters(strings.Split(filter, "|"), operation.OperationID) + if err != nil { + return OperationList{}, err + } + } else { + match = operation.OperationID == filter + } + } else { + match = true + } + + if match { + operations[operation.OperationID] = Operation{ + Description: operation.Description, + Summary: operation.Summary, + } + } + } + } + + return OperationList{Operations: operations}, nil +} + +func MatchFilters(filters []string, operationID string) (bool, error) { + for _, filter := range filters { + match, err := filepath.Match(filter, operationID) + if err != nil { + return false, err + } + if match { + return true, nil + } + } + return false, nil +} diff --git a/pkg/openapi/load.go b/pkg/openapi/load.go new file mode 100644 index 00000000..0ff82fdb --- /dev/null 
+++ b/pkg/openapi/load.go @@ -0,0 +1,121 @@ +package openapi + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strconv" + "strings" + + "github.com/getkin/kin-openapi/openapi2" + "github.com/getkin/kin-openapi/openapi2conv" + "github.com/getkin/kin-openapi/openapi3" + "gopkg.in/yaml.v3" + kyaml "sigs.k8s.io/yaml" +) + +func Load(source string) (*openapi3.T, error) { + if strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") { + return loadFromURL(source) + } + return loadFromFile(source) +} + +func loadFromURL(source string) (*openapi3.T, error) { + resp, err := http.DefaultClient.Get(source) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + contents, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return LoadFromBytes(contents) +} + +func loadFromFile(source string) (*openapi3.T, error) { + contents, err := os.ReadFile(source) + if err != nil { + return nil, err + } + + return LoadFromBytes(contents) +} + +func LoadFromBytes(content []byte) (*openapi3.T, error) { + var ( + openAPIDocument *openapi3.T + err error + ) + + switch IsOpenAPI(content) { + case 2: + // Convert OpenAPI v2 to v3 + if !json.Valid(content) { + content, err = kyaml.YAMLToJSON(content) + if err != nil { + return nil, err + } + } + + doc := &openapi2.T{} + if err := doc.UnmarshalJSON(content); err != nil { + return nil, fmt.Errorf("failed to unmarshal OpenAPI v2 document: %w", err) + } + + openAPIDocument, err = openapi2conv.ToV3(doc) + if err != nil { + return nil, fmt.Errorf("failed to convert OpenAPI v2 to v3: %w", err) + } + case 3: + openAPIDocument, err = openapi3.NewLoader().LoadFromData(content) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported OpenAPI version") + } + + return openAPIDocument, nil +} + +// IsOpenAPI checks if the data is an OpenAPI definition and returns the version if it is. 
+func IsOpenAPI(data []byte) int { + var fragment struct { + Paths map[string]any `json:"paths,omitempty"` + Swagger string `json:"swagger,omitempty"` + OpenAPI string `json:"openapi,omitempty"` + } + + if err := json.Unmarshal(data, &fragment); err != nil { + if err := yaml.Unmarshal(data, &fragment); err != nil { + return 0 + } + } + if len(fragment.Paths) == 0 { + return 0 + } + + if v, _, _ := strings.Cut(fragment.OpenAPI, "."); v != "" { + ver, err := strconv.Atoi(v) + if err != nil { + return 0 + } + return ver + } + + if v, _, _ := strings.Cut(fragment.Swagger, "."); v != "" { + ver, err := strconv.Atoi(v) + if err != nil { + return 0 + } + return ver + } + + return 0 +} diff --git a/pkg/openapi/run.go b/pkg/openapi/run.go new file mode 100644 index 00000000..17199851 --- /dev/null +++ b/pkg/openapi/run.go @@ -0,0 +1,451 @@ +package openapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/url" + "os" + "strings" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/gptscript-ai/gptscript/pkg/env" + "github.com/tidwall/gjson" + "github.com/xeipuuv/gojsonschema" + "golang.org/x/exp/maps" +) + +const RunTool = "run" + +func Run(operationID, defaultHost, args string, t *openapi3.T, envs []string) (string, bool, error) { + envMap := make(map[string]string, len(envs)) + for _, e := range envs { + k, v, _ := strings.Cut(e, "=") + envMap[k] = v + } + + if args == "" { + args = "{}" + } + schemaJSON, opInfo, found, err := GetSchema(operationID, defaultHost, t) + if err != nil || !found { + return "", false, err + } + + // Validate args against the schema. 
+ validationResult, err := gojsonschema.Validate(gojsonschema.NewStringLoader(schemaJSON), gojsonschema.NewStringLoader(args)) + if err != nil { + return "", false, err + } + + if !validationResult.Valid() { + return "", false, fmt.Errorf("invalid arguments for operation %s: %s", operationID, validationResult.Errors()) + } + + // Construct and execute the HTTP request. + + // Handle path parameters. + opInfo.Path = HandlePathParameters(opInfo.Path, opInfo.PathParams, args) + + // Parse the URL + path, err := url.JoinPath(opInfo.Server, opInfo.Path) + if err != nil { + return "", false, fmt.Errorf("failed to join server and path: %w", err) + } + + u, err := url.Parse(path) + if err != nil { + return "", false, fmt.Errorf("failed to parse server URL %s: %w", opInfo.Server+opInfo.Path, err) + } + + // Set up the request + req, err := http.NewRequest(opInfo.Method, u.String(), nil) + if err != nil { + return "", false, fmt.Errorf("failed to create request: %w", err) + } + + // Check for authentication (only if using HTTPS or localhost) + if u.Scheme == "https" || u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" { + if len(opInfo.SecurityInfos) > 0 { + if err := HandleAuths(req, envMap, opInfo.SecurityInfos); err != nil { + return "", false, fmt.Errorf("error setting up authentication: %w", err) + } + } + + // If there is a bearer token set for the whole server, and no Authorization header has been defined, use it. 
+ if token, ok := envMap["GPTSCRIPT_"+env.ToEnvLike(u.Hostname())+"_BEARER_TOKEN"]; ok { + if req.Header.Get("Authorization") == "" { + req.Header.Set("Authorization", "Bearer "+token) + } + } + } else { + fmt.Fprintf(os.Stderr, "no auth") + } + + // Handle query parameters + req.URL.RawQuery = HandleQueryParameters(req.URL.Query(), opInfo.QueryParams, args).Encode() + + // Handle header and cookie parameters + HandleHeaderParameters(req, opInfo.HeaderParams, args) + HandleCookieParameters(req, opInfo.CookieParams, args) + + // Handle request body + if opInfo.BodyContentMIME != "" { + res := gjson.Get(args, "requestBodyContent") + var body bytes.Buffer + switch opInfo.BodyContentMIME { + case "application/json": + var reqBody any = struct{}{} + if res.Exists() { + reqBody = res.Value() + } + if err := json.NewEncoder(&body).Encode(reqBody); err != nil { + return "", false, fmt.Errorf("failed to encode JSON: %w", err) + } + req.Header.Set("Content-Type", "application/json") + + case "text/plain": + reqBody := "" + if res.Exists() { + reqBody = res.String() + } + body.WriteString(reqBody) + + req.Header.Set("Content-Type", "text/plain") + + case "multipart/form-data": + multiPartWriter := multipart.NewWriter(&body) + req.Header.Set("Content-Type", multiPartWriter.FormDataContentType()) + if res.Exists() && res.IsObject() { + for k, v := range res.Map() { + if err := multiPartWriter.WriteField(k, v.String()); err != nil { + return "", false, fmt.Errorf("failed to write multipart field: %w", err) + } + } + } else { + return "", false, fmt.Errorf("multipart/form-data requires an object as the requestBodyContent") + } + if err := multiPartWriter.Close(); err != nil { + return "", false, fmt.Errorf("failed to close multipart writer: %w", err) + } + + default: + return "", false, fmt.Errorf("unsupported MIME type: %s", opInfo.BodyContentMIME) + } + req.Body = io.NopCloser(&body) + } + + // Make the request + resp, err := http.DefaultClient.Do(req) + if err != nil { + 
return "", false, fmt.Errorf("failed to make request: %w", err) + } + defer resp.Body.Close() + + result, err := io.ReadAll(resp.Body) + if err != nil { + return "", false, fmt.Errorf("failed to read response: %w", err) + } + + return string(result), true, nil +} + +// HandleAuths will set up the request with the necessary authentication information. +// A set of sets of SecurityInfo is passed in, where each represents a possible set of security options. +func HandleAuths(req *http.Request, envMap map[string]string, infoSets [][]SecurityInfo) error { + var missingVariables [][]string + + // We need to find a set of infos where we have all the needed environment variables. + for _, infoSet := range infoSets { + var missing []string // Keep track of any missing environment variables + for _, info := range infoSet { + vars := info.getCredentialNamesAndEnvVars(req.URL.Hostname()) + + for _, envName := range vars { + if _, ok := envMap[envName]; !ok { + missing = append(missing, envName) + } + } + } + if len(missing) > 0 { + missingVariables = append(missingVariables, missing) + continue + } + + // We're using this info set, because no environment variables were missing. + // Set up the request as needed. 
+ for _, info := range infoSet { + envNames := maps.Values(info.getCredentialNamesAndEnvVars(req.URL.Hostname())) + switch info.Type { + case "apiKey": + switch info.In { + case "header": + req.Header.Set(info.APIKeyName, envMap[envNames[0]]) + case "query": + v := url.Values{} + v.Add(info.APIKeyName, envMap[envNames[0]]) + req.URL.RawQuery = v.Encode() + case "cookie": + req.AddCookie(&http.Cookie{ + Name: info.APIKeyName, + Value: envMap[envNames[0]], + }) + } + case "http": + switch info.Scheme { + case "bearer": + req.Header.Set("Authorization", "Bearer "+envMap[envNames[0]]) + case "basic": + req.SetBasicAuth(envMap[envNames[0]], envMap[envNames[1]]) + } + } + } + return nil + } + + return fmt.Errorf("did not find the needed environment variables for any of the security options. "+ + "At least one of these sets of environment variables must be provided: %v", missingVariables) +} + +// HandlePathParameters extracts each path parameter from the input JSON and replaces its placeholder in the URL path. 
+func HandlePathParameters(path string, params []Parameter, input string) string { + for _, param := range params { + res := gjson.Get(input, param.Name) + if res.Exists() { + // If it's an array or object, handle the serialization style + if res.IsArray() { + switch param.Style { + case "simple", "": // simple is the default style for path parameters + // simple looks the same regardless of whether explode is true + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) + case "label": + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + + if param.Explode == nil || !*param.Explode { // default is to not explode + path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, ","), 1) + } else { + path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, "."), 1) + } + case "matrix": + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + + if param.Explode == nil || !*param.Explode { // default is to not explode + path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+strings.Join(strs, ","), 1) + } else { + s := "" + for _, str := range strs { + s += ";" + param.Name + "=" + str + } + path = strings.Replace(path, "{"+param.Name+"}", s, 1) + } + } + } else if res.IsObject() { + switch param.Style { + case "simple", "": + if param.Explode == nil || !*param.Explode { // default is to not explode + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) + } else { + var strs []string + for k, v := range res.Map() { + strs = append(strs, k+"="+v.String()) + } + path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) + } + case "label": + if param.Explode == 
nil || !*param.Explode { // default is to not explode + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, ","), 1) + } else { + s := "" + for k, v := range res.Map() { + s += "." + k + "=" + v.String() + } + path = strings.Replace(path, "{"+param.Name+"}", s, 1) + } + case "matrix": + if param.Explode == nil || !*param.Explode { // default is to not explode + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+strings.Join(strs, ","), 1) + } else { + s := "" + for k, v := range res.Map() { + s += ";" + k + "=" + v.String() + } + path = strings.Replace(path, "{"+param.Name+"}", s, 1) + } + } + } else { + // Serialization is handled slightly differently even for basic types. + // Explode doesn't do anything though. + switch param.Style { + case "simple", "": + path = strings.Replace(path, "{"+param.Name+"}", res.String(), 1) + case "label": + path = strings.Replace(path, "{"+param.Name+"}", "."+res.String(), 1) + case "matrix": + path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+res.String(), 1) + } + } + } + } + return path +} + +// HandleQueryParameters extracts each query parameter from the input JSON and adds it to the URL query. 
+func HandleQueryParameters(q url.Values, params []Parameter, input string) url.Values { + for _, param := range params { + res := gjson.Get(input, param.Name) + if res.Exists() { + // If it's an array or object, handle the serialization style + if res.IsArray() { + switch param.Style { + case "form", "": // form is the default style for query parameters + if param.Explode == nil || *param.Explode { // default is to explode + for _, item := range res.Array() { + q.Add(param.Name, item.String()) + } + } else { + var strs []string + for _, item := range res.Array() { + strs = append(strs, item.String()) + } + q.Add(param.Name, strings.Join(strs, ",")) + } + case "spaceDelimited": + if param.Explode == nil || *param.Explode { + for _, item := range res.Array() { + q.Add(param.Name, item.String()) + } + } else { + var strs []string + for _, item := range res.Array() { + strs = append(strs, item.String()) + } + q.Add(param.Name, strings.Join(strs, " ")) + } + case "pipeDelimited": + if param.Explode == nil || *param.Explode { + for _, item := range res.Array() { + q.Add(param.Name, item.String()) + } + } else { + var strs []string + for _, item := range res.Array() { + strs = append(strs, item.String()) + } + q.Add(param.Name, strings.Join(strs, "|")) + } + } + } else if res.IsObject() { + switch param.Style { + case "form", "": // form is the default style for query parameters + if param.Explode == nil || *param.Explode { // default is to explode + for k, v := range res.Map() { + q.Add(k, v.String()) + } + } else { + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + q.Add(param.Name, strings.Join(strs, ",")) + } + case "deepObject": + for k, v := range res.Map() { + q.Add(param.Name+"["+k+"]", v.String()) + } + } + } else { + q.Add(param.Name, res.String()) + } + } + } + return q +} + +// HandleHeaderParameters extracts each header parameter from the input JSON and adds it to the request headers. 
+func HandleHeaderParameters(req *http.Request, params []Parameter, input string) { + for _, param := range params { + res := gjson.Get(input, param.Name) + if res.Exists() { + if res.IsArray() { + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + req.Header.Add(param.Name, strings.Join(strs, ",")) + } else if res.IsObject() { + // Handle explosion + var strs []string + if param.Explode == nil || !*param.Explode { // default is to not explode + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + } else { + for k, v := range res.Map() { + strs = append(strs, k+"="+v.String()) + } + } + req.Header.Add(param.Name, strings.Join(strs, ",")) + } else { // basic type + req.Header.Add(param.Name, res.String()) + } + } + } +} + +// HandleCookieParameters extracts each cookie parameter from the input JSON and adds it to the request cookies. +func HandleCookieParameters(req *http.Request, params []Parameter, input string) { + for _, param := range params { + res := gjson.Get(input, param.Name) + if res.Exists() { + if res.IsArray() { + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + req.AddCookie(&http.Cookie{ + Name: param.Name, + Value: strings.Join(strs, ","), + }) + } else if res.IsObject() { + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + req.AddCookie(&http.Cookie{ + Name: param.Name, + Value: strings.Join(strs, ","), + }) + } else { // basic type + req.AddCookie(&http.Cookie{ + Name: param.Name, + Value: res.String(), + }) + } + } + } +} diff --git a/pkg/openapi/security.go b/pkg/openapi/security.go new file mode 100644 index 00000000..dd4521fc --- /dev/null +++ b/pkg/openapi/security.go @@ -0,0 +1,56 @@ +package openapi + +import ( + "fmt" + "strings" + + "github.com/gptscript-ai/gptscript/pkg/env" +) + +// A SecurityInfo represents a security scheme in OpenAPI. 
+type SecurityInfo struct { + Name string `json:"name"` // name as defined in the security schemes + Type string `json:"type"` // http or apiKey + Scheme string `json:"scheme"` // bearer or basic, for type==http + APIKeyName string `json:"apiKeyName"` // name of the API key, for type==apiKey + In string `json:"in"` // header, query, or cookie, for type==apiKey +} + +func (i SecurityInfo) GetCredentialToolStrings(hostname string) []string { + vars := i.getCredentialNamesAndEnvVars(hostname) + var tools []string + + for cred, v := range vars { + field := "value" + switch i.Type { + case "apiKey": + field = i.APIKeyName + case "http": + if i.Scheme == "bearer" { + field = "bearer token" + } else { + if strings.Contains(v, "PASSWORD") { + field = "password" + } else { + field = "username" + } + } + } + + tools = append(tools, fmt.Sprintf("github.com/gptscript-ai/credential as %s with %s as env and %q as message and %q as field", + cred, v, "Please provide a value for the "+v+" environment variable", field)) + } + return tools +} + +func (i SecurityInfo) getCredentialNamesAndEnvVars(hostname string) map[string]string { + if i.Type == "http" && i.Scheme == "basic" { + return map[string]string{ + hostname + i.Name + "Username": "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name) + "_USERNAME", + hostname + i.Name + "Password": "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name) + "_PASSWORD", + } + } + return map[string]string{ + hostname + i.Name: "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name), + } +} From 1bc416dc15ee28ede6e2d692be1e0c47bcd8ac30 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 31 Jul 2024 12:19:13 -0400 Subject: [PATCH 051/270] fix: stop timing out SDK runs after 15 minutes Signed-off-by: Donnie Adams --- pkg/sdkserver/routes.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 4bed2b37..2e709e3f 100644 --- a/pkg/sdkserver/routes.go 
+++ b/pkg/sdkserver/routes.go @@ -1,7 +1,6 @@ package sdkserver import ( - "context" "encoding/json" "fmt" "io" @@ -10,7 +9,6 @@ import ( "sort" "strings" "sync" - "time" "github.com/gptscript-ai/broadcaster" "github.com/gptscript-ai/gptscript/pkg/cache" @@ -26,8 +24,6 @@ import ( "github.com/gptscript-ai/gptscript/pkg/version" ) -const toolRunTimeout = 15 * time.Minute - type server struct { gptscriptOpts gptscript.Options address, token string @@ -158,8 +154,6 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { ctx := gserver.ContextWithNewRunID(r.Context()) runID := gserver.RunIDFromContext(ctx) - ctx, cancel := context.WithTimeout(ctx, toolRunTimeout) - defer cancel() // Ensure chat state is not empty. if reqObject.ChatState == "" { From 7646c4b848d5b70be8f01d75412b9f906bda150f Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 31 Jul 2024 11:23:25 -0400 Subject: [PATCH 052/270] fix: ensure default model is parsed when using default model provider When using the default model provider flag, the default model will be gpt-4o. However, when it is parsed, the expected format is 'gpt-4p from my-model-provider'. When using a default model provider, the 'gpt-4o' was being parsed and the model was set to the empty string. After this change, if the model name is empty after parsing, then we know that the model from the request doesn't have a 'from' in it and the first part of the split reference should be used. 
Signed-off-by: Donnie Adams --- pkg/remote/remote.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 8b9d2162..6d83e6cc 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -51,7 +51,12 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques return nil, fmt.Errorf("failed to find remote model %s", messageRequest.Model) } - _, modelName := types.SplitToolRef(messageRequest.Model) + toolName, modelName := types.SplitToolRef(messageRequest.Model) + if modelName == "" { + // modelName is empty, then the messageRequest.Model is not of the form 'modelName from provider' + // Therefore, the modelName is the toolName + modelName = toolName + } messageRequest.Model = modelName return client.Call(ctx, messageRequest, status) } From 7f207a7c8b24dee86cdded99f32265fb781cd508 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 31 Jul 2024 18:40:04 -0400 Subject: [PATCH 053/270] feat: add support for default model provider in the TUI Signed-off-by: Donnie Adams --- go.mod | 4 ++-- go.sum | 8 ++++---- pkg/cli/gptscript.go | 7 ++++--- pkg/sdkserver/types.go | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index de545abd..858bfa65 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d - github.com/gptscript-ai/go-gptscript v0.9.3-0.20240728044543-20d868b5baa6 - github.com/gptscript-ai/tui v0.0.0-20240728045051-19ba83cd96c1 + github.com/gptscript-ai/go-gptscript v0.9.3-0.20240731222146-b67275f3fa69 + github.com/gptscript-ai/tui v0.0.0-20240731002102-544a80108f89 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 diff --git a/go.sum 
b/go.sum index 5a6ce6cf..53f72056 100644 --- a/go.sum +++ b/go.sum @@ -171,10 +171,10 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf037 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6AVbvD9pKDVewykkwSAPwEpmIEQIR/4= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240728044543-20d868b5baa6 h1:hF9Q8KdQhuoXSGKVh4ywRvwn5RJt9rbPraigpXqbGYU= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240728044543-20d868b5baa6/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240728045051-19ba83cd96c1 h1:gJXswjjwoiWdOS+s73mliWbN9dyJpiUkb3T+EiV7EFc= -github.com/gptscript-ai/tui v0.0.0-20240728045051-19ba83cd96c1/go.mod h1:Llh3vi87gyry6j/sgJxhkHHvgv9uQRzEiMWuQtmpW1w= +github.com/gptscript-ai/go-gptscript v0.9.3-0.20240731222146-b67275f3fa69 h1:c+Tf6I8jUg8hDgfP8jKs93UcC9dDIGxClWGZUL36Hd0= +github.com/gptscript-ai/go-gptscript v0.9.3-0.20240731222146-b67275f3fa69/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= +github.com/gptscript-ai/tui v0.0.0-20240731002102-544a80108f89 h1:1G8OhXzCqCe/LARec8Qb7XkpQiEWoRYE/2UfohD+Do4= +github.com/gptscript-ai/tui v0.0.0-20240731002102-544a80108f89/go.mod h1:Llh3vi87gyry6j/sgJxhkHHvgv9uQRzEiMWuQtmpW1w= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 84d7de71..4b2fab92 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -468,9 +468,10 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr 
error) { // Don't use cmd.Context() because then sigint will cancel everything return tui.Run(context.Background(), args[0], tui.RunOptions{ ClientOpts: &gptscript2.GlobalOptions{ - OpenAIAPIKey: r.OpenAIOptions.APIKey, - OpenAIBaseURL: r.OpenAIOptions.BaseURL, - DefaultModel: r.DefaultModel, + OpenAIAPIKey: r.OpenAIOptions.APIKey, + OpenAIBaseURL: r.OpenAIOptions.BaseURL, + DefaultModel: r.DefaultModel, + DefaultModelProvider: r.DefaultModelProvider, }, TrustedRepoPrefixes: []string{"github.com/gptscript-ai"}, DisableCache: r.DisableCache, diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index a07cfb9e..9736f045 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -63,7 +63,7 @@ type toolOrFileRequest struct { Confirm bool `json:"confirm"` Location string `json:"location,omitempty"` ForceSequential bool `json:"forceSequential"` - DefaultModelProvider string `json:"defaultModelProvider,omitempty"` + DefaultModelProvider string `json:"DefaultModelProvider,omitempty"` } type content struct { From 6e0a50776269446900b8297b0c7cefdb2f54cf45 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Thu, 1 Aug 2024 06:46:48 +0200 Subject: [PATCH 054/270] chore (docs): add some newer fields to the gpt file reference (#683) Signed-off-by: Thorsten Klein --- docs/docs/02-examples/01-cli.md | 12 ++++---- docs/docs/02-examples/04-local-files.md | 4 +-- docs/docs/03-tools/03-openapi.md | 3 +- docs/docs/03-tools/05-context.md | 6 ++-- docs/docs/03-tools/06-how-it-works.md | 3 +- docs/docs/03-tools/07-gpt-file-reference.md | 34 ++++++++++++--------- docs/docs/05-alternative-model-providers.md | 6 ++-- 7 files changed, 37 insertions(+), 31 deletions(-) diff --git a/docs/docs/02-examples/01-cli.md b/docs/docs/02-examples/01-cli.md index 7a59f592..17c67244 100644 --- a/docs/docs/02-examples/01-cli.md +++ b/docs/docs/02-examples/01-cli.md @@ -157,7 +157,7 @@ Agents: k8s-agent, github-agent Context: shared-context Chat: true -Help the user acomplish their tasks 
using the tools you have. When the user starts this chat, just say hello and ask what you can help with. You donlt need to start off by guiding them. +Help the user acomplish their tasks using the tools you have. When the user starts this chat, just say hello and ask what you can help with. You don't need to start off by guiding them. ``` By being at the top of the file, this tool will serve as the script's entrypoint. Here are the parts of this tool that are worth additional explanation: @@ -201,14 +201,14 @@ Context: shared-context Agents: k8s-agent, github-agent Chat: true -Help the user acomplish their tasks using the tools you have. When the user starts this chat, just say hello and ask what you can help with. You donlt need to start off by guiding them. +Help the user acomplish their tasks using the tools you have. When the user starts this chat, just say hello and ask what you can help with. You don't need to start off by guiding them. --- Name: k8s-agent Description: An agent that can help you with your Kubernetes cluster by executing kubectl commands Context: shared-context Tools: sys.exec -Parameter: task: The kubectl releated task to accomplish +Parameter: task: The kubectl related task to accomplish Chat: true You have the kubectl cli available to you. Use it to accomplish the tasks that the user asks of you. @@ -268,15 +268,15 @@ By now you should notice a simple pattern emerging that you can follow to add yo ``` Name: {your cli}-agent Description: An agent to help you with {your taks} related tasks using the gh cli -Context: {here's your biggest decsion to make}, shared-context +Context: {here's your biggest decision to make}, shared-context Tools: sys.exec -Parameter: task: The {your task}The GitHub task to accomplish +Parameter: task: The {your task} to accomplish Chat: true You have the {your cli} cli available to you. Use it to accomplish the tasks that the user asks of you. 
``` -You can drop in your task and CLI and have a fairly functional CLI-based chat agent. The biggest decision you'll need to make is what and how much context to give your agent. For well-known for CLIs/technologies like kubectl and Kubernetes, you probably won't need a custom context. For custom CLIs, you'll definitely need to help the LLM out. The best approach is to experiment and see what works best. +You can drop in your task and CLI and have a fairly functional CLI-based chat agent. The biggest decision you'll need to make is what and how much context to give your agent. For well-known CLIs/technologies like kubectl and Kubernetes, you probably won't need a custom context. For custom CLIs, you'll definitely need to help the LLM out. The best approach is to experiment and see what works best. ## Next steps diff --git a/docs/docs/02-examples/04-local-files.md b/docs/docs/02-examples/04-local-files.md index 522deb01..33471afa 100644 --- a/docs/docs/02-examples/04-local-files.md +++ b/docs/docs/02-examples/04-local-files.md @@ -45,11 +45,11 @@ This is actually the entirety of the script. We're packing a lot of power into j The **Tools: ...** stanza pulls two useful tools into this assistant. -The [structured-data-querier](https://github.com/gptscript-ai/structured-data-querier) makes it possible to query csv, xlsx, and json files as though they SQL databases (using an application called [DuckDB](https://duckdb.org/)). This is extremely powerful when combined with the power of LLMs because it let's you ask natural language questions that the LLM can then translate to SQL. +The [structured-data-querier](https://github.com/gptscript-ai/structured-data-querier) makes it possible to query csv, xlsx, and json files as though they were SQL databases (using an application called [DuckDB](https://duckdb.org/)). This is extremely powerful when combined with the power of LLMs because it let's you ask natural language questions that the LLM can then translate to SQL. 
The [pdf-reader](https://github.com/gptscript-ai/pdf-reader) isn't quite as exciting, but still useful. It parses and reads PDFs and returns the contents to the LLM. This will put the entire contents in your chat context, so it's not appropriate for extremely large PDFs, but it's handy for smaller ones. -**Context: github.com/gptscript-ai/context/workspace** introduces a context tool makes this assistant "workspace" aware. It's description reads: +**Context: github.com/gptscript-ai/context/workspace** introduces a context tool that makes this assistant "workspace" aware. Its description reads: > Adds the workspace and tools needed to access the workspace to the current context That translates to telling the LLM what the workspace directory is and instructing it to use that directory for reading and writing files. As we saw above, you can specify a workspace like this: diff --git a/docs/docs/03-tools/03-openapi.md b/docs/docs/03-tools/03-openapi.md index 2069b331..b40284b2 100644 --- a/docs/docs/03-tools/03-openapi.md +++ b/docs/docs/03-tools/03-openapi.md @@ -1,6 +1,6 @@ # OpenAPI Tools -GPTScript can treat OpenAPI v3 definition files as though they were tool files. +GPTScript can treat OpenAPI v2 and v3 definition files as though they were tool files. Each operation (a path and HTTP method) in the file will become a simple tool that makes an HTTP request. GPTScript will automatically and internally generate the necessary code to make the request and parse the response. @@ -44,6 +44,7 @@ Will be resolved as `https://api.example.com/v1`. :::warning All authentication options will be completely ignored if the server uses HTTP and not HTTPS. This is to protect users from accidentally sending credentials in plain text. +HTTP is only OK, if it's on localhost/127.0.0.1. ::: ### 1. 
Security Schemes diff --git a/docs/docs/03-tools/05-context.md b/docs/docs/03-tools/05-context.md index 3a4e8c15..6dd22ed1 100644 --- a/docs/docs/03-tools/05-context.md +++ b/docs/docs/03-tools/05-context.md @@ -45,7 +45,7 @@ Here is a simple example of a context provider tool that provides additional con ```yaml # my-search-context-tool.gpt -export: sys.http.html2text? +share tools: sys.http.html2text? #!/bin/bash echo You are an expert web researcher with access to the Search tool.If the search tool fails to return any information stop execution of the script with message "Sorry! Search did not return any results". Feel free to get the contents of the returned URLs in order to get more information. Provide as much detail as you can. Also return the source of the search results. @@ -71,7 +71,7 @@ Here is an example of a context provider tool that uses args to decide which sea ```yaml # context_with_arg.gpt -export: github.com/gptscript-ai/search/duckduckgo, github.com/gptscript-ai/search/brave, sys.http.html2text? +share tools: github.com/gptscript-ai/search/duckduckgo, github.com/gptscript-ai/search/brave, sys.http.html2text? args: search_tool: tool to search with #!/bin/bash @@ -84,7 +84,7 @@ Continuing with the above example, this is how you can use it in a script: ```yaml # my_context_with_arg.gpt context: ./context_with_arg.gpt with ${search} as search_tool -Args: search: Search tool to use +args: search: Search tool to use What are some of the most popular tourist destinations in Scotland, and how many people visit them each year? diff --git a/docs/docs/03-tools/06-how-it-works.md b/docs/docs/03-tools/06-how-it-works.md index c6538395..29bf764d 100644 --- a/docs/docs/03-tools/06-how-it-works.md +++ b/docs/docs/03-tools/06-how-it-works.md @@ -1,7 +1,6 @@ # How it works -**_GPTScript is composed of tools._** Each tool performs a series of actions similar to a function. Tools have available -to them other tools that can be invoked similar to a function call. 
While similar to a function, the tools are +**_GPTScript is composed of tools._** Each tool performs a series of actions similar to a function. Tools have other tools available to them that can be invoked similar to a function call. While similar to a function, the tools are primarily implemented with a natural language prompt. **_The interaction of the tools is determined by the AI model_**, the model determines if the tool needs to be invoked and what arguments to pass. Tools are intended to be implemented with a natural language prompt but can also be implemented with a command or HTTP call. diff --git a/docs/docs/03-tools/07-gpt-file-reference.md b/docs/docs/03-tools/07-gpt-file-reference.md index c6207ad2..6734bc59 100644 --- a/docs/docs/03-tools/07-gpt-file-reference.md +++ b/docs/docs/03-tools/07-gpt-file-reference.md @@ -43,21 +43,25 @@ Tool instructions go here. Tool parameters are key-value pairs defined at the beginning of a tool block, before any instructional text. They are specified in the format `key: value`. The parser recognizes the following keys (case-insensitive and spaces are ignored): -| Key | Description | -|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| -| `Name` | The name of the tool. | -| `Model Name` | The LLM model to use, by default it uses "gpt-4-turbo". | -| `Global Model Name`| The LLM model to use for all the tools. | -| `Description` | The description of the tool. It is important that this properly describes the tool's purpose as the description is used by the LLM. | -| `Internal Prompt` | Setting this to `false` will disable the built-in system prompt for this tool. | -| `Tools` | A comma-separated list of tools that are available to be called by this tool. | -| `Global Tools` | A comma-separated list of tools that are available to be called by all tools. 
| -| `Credentials` | A comma-separated list of credential tools to run before the main tool. | -| `Args` | Arguments for the tool. Each argument is defined in the format `arg-name: description`. | -| `Max Tokens` | Set to a number if you wish to limit the maximum number of tokens that can be generated by the LLM. | -| `JSON Response` | Setting to `true` will cause the LLM to respond in a JSON format. If you set true you must also include instructions in the tool. | -| `Temperature` | A floating-point number representing the temperature parameter. By default, the temperature is 0. Set to a higher number for more creativity. | -| `Chat` | Setting it to `true` will enable an interactive chat session for the tool. | +| Key | Description | +|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| +| `Name` | The name of the tool. | +| `Model Name` | The LLM model to use, by default it uses "gpt-4-turbo". | +| `Global Model Name` | The LLM model to use for all the tools. | +| `Description` | The description of the tool. It is important that this properly describes the tool's purpose as the description is used by the LLM. | +| `Internal Prompt` | Setting this to `false` will disable the built-in system prompt for this tool. | +| `Tools` | A comma-separated list of tools that are available to be called by this tool. | +| `Global Tools` | A comma-separated list of tools that are available to be called by all tools. | +| `Parameter` / `Args` | Arguments for the tool. Each argument is defined in the format `arg-name: description`. | +| `Max Tokens` | Set to a number if you wish to limit the maximum number of tokens that can be generated by the LLM. | +| `JSON Response` | Setting to `true` will cause the LLM to respond in a JSON format. If you set true you must also include instructions in the tool. 
| +| `Temperature` | A floating-point number representing the temperature parameter. By default, the temperature is 0. Set to a higher number for more creativity. | +| `Chat` | Setting it to `true` will enable an interactive chat session for the tool. | +| `Credential` | Credential tool to call to set credentials as environment variables before doing anything else. One per line. | +| `Agents` | A comma-separated list of agents that are available to the tool. | +| `Share Tools` | A comma-separated list of tools that are shared by the tool. | +| `Context` | A comma-separated list of context tools available to the tool. | +| `Share Context` | A comma-separated list of context tools shared by this tool with any tool including this tool in its context. | diff --git a/docs/docs/05-alternative-model-providers.md b/docs/docs/05-alternative-model-providers.md index aa637136..51818546 100644 --- a/docs/docs/05-alternative-model-providers.md +++ b/docs/docs/05-alternative-model-providers.md @@ -12,9 +12,11 @@ model: mistral-large-latest from https://api.mistral.ai/v1 Say hello world ``` -#### Note -Mistral's La Plateforme has an OpenAI compatible API, but the model does not behave identically to gpt-4. For that reason, we also have a provider for it that might get better results in some cases. +:::note + Mistral's La Plateforme has an OpenAI compatible API, but the model does not behave identically to gpt-4. For that reason, we also have a provider for it that might get better results in some cases. 
+ +::: ### Using a model that requires a provider ```gptscript From 3a9cfa340ad01fd9bcff1527fd8383c836860cad Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 1 Aug 2024 10:30:56 -0400 Subject: [PATCH 055/270] docs: improve examples (#686) Signed-off-by: Grant Linville --- docs/docs/02-examples/01-cli.md | 270 +++++------------------- docs/docs/02-examples/02-api.md | 236 +++++---------------- docs/docs/02-examples/04-local-files.md | 66 ++++-- docs/docs/02-examples/05-workflow.md | 11 +- docs/docs/03-tools/02-authoring.md | 2 +- 5 files changed, 153 insertions(+), 432 deletions(-) diff --git a/docs/docs/02-examples/01-cli.md b/docs/docs/02-examples/01-cli.md index 17c67244..4c8f3bab 100644 --- a/docs/docs/02-examples/01-cli.md +++ b/docs/docs/02-examples/01-cli.md @@ -1,110 +1,59 @@ # Chat with a Local CLI -GPTScript makes it easy to write AI integrations with CLIs and other executable available on your local workstation. This is powerful because it allows you to work AI to solve complex problems using your available CLIs. You can describe complex requests in plain English and GPTScript will figure out the best CLI commands to make that happen. This guide will show you how to build a GPTScript that integrates with two CLIs: - -- [gh](https://cli.github.com/) - the GitHub CLI -- [kubectl](https://kubernetes.io/docs/reference/kubectl/) - the Kubernetes CLI +GPTScript makes it easy to write AI integrations with CLIs and other executables available on your local workstation. +You can describe complex requests in plain English and GPTScript will figure out the best CLI commands to make that happen. +This guide will show you how to build a GPTScript that integrates with the `gh` CLI for GitHub. :::warning -This script **does not install** or configure gh or kubectl. We assume you've done that already. - -- For gh, you must be logged in via `gh auth login`. 
[See here for more details](https://docs.github.com/en/github-cli/github-cli/quickstart) -- For kubectl, you must have a proper `kubeconfig`. [See here for more details](https://kubernetes.io/docs/tasks/tools/) - +This script **does not install** or configure `gh`. We assume you've done that already. +You must be logged in via `gh auth login`. [See here for more details](https://docs.github.com/en/github-cli/github-cli/quickstart) ::: -## Too Long; Didn't Read - -Want to start using this script now? Just run: - -``` -gptscript github.com/gptscript-ai/cli-demo -``` - -Or if you want to skip ahead and just grab the full script so that you can start hacking on it, jump to the [Putting it all together section](cli#putting-it-all-together). +You should have basic familiarity with [tools](../03-tools/01-using.md) before starting this guide. ## Getting Started -The rest of this guide will walk you through building a script that can serve as an assistant for GitHub and Kubernetes tasks. We'll be explaining the how, what, and why along the way. - -First, open up a new gptscript file in your favorite editor. We'll call the file cli-demo.gpt +First, open up a new file in your favorite editor. We'll call the file `cli-demo.gpt`. ``` vim cli-demo.gpt ``` -All edits below are assumed to be in this file. At the end, we'll share the entire script as one cohesive file, but along the way we'll just be adding tools one-by-one. - -## Create the Kubernetes Agent - -Let's start by adding the Kubernetes agent. In our script, add the following: - -``` ---- -Name: k8s-agent -Description: An agent that can help you with your Kubernetes cluster by executing kubectl commands -Context: shared-context -Tools: sys.exec -Parameter: task: The kubectl related task to accomplish -Chat: true - -You have the kubectl cli available to you. Use it to accomplish the tasks that the user asks of you. - -``` - -Now, let's walk through this tool line-by-line. - -**---** is a block separator. 
It's how we delineate tools in a script. - -**Name and Description** help the LLM understand the purpose of this tool. You should always have meaningful names and descriptions. - -**Tools: sys.exec** makes the built-in `sys.exec` tool available to this agent. This gives the agent the ability to execute arbitrary commands. Based on our prompt, it will be used for kubectl commands. GPTScript's authorization system will prompt for approval whenever it's going to run a `sys.exec` command. - -**Parameter: task:** defines a parameter named "task" for this tool. This will be important later on when other tools need to hand-off to this tool - they'll pass the task to it as this parameter. As with the name and description fields, it's important to provide a good description so that the LLM knows how to use this parameter. - -**Chat: true** turns this tool into a "chat-able" tool, which we also call an "agent". This is important for open-ended tasks that might take some iteration. - -Finally, we have the **tool body**, which in this case is a prompt: - -``` -You have the kubectl cli available to you. Use it to accomplish the tasks that the user asks of you. -``` - -This is what the tool will actually do. Tool bodies can be prompts or raw code like python, javascript, or the [world's best programming language](https://x.com/ibuildthecloud/status/1796227491943637125) - bash. For chat-able tools, your tool body should always be a prompt. - -That's all there is to the Kubernetes agent. You can try it out now. One nice thing about GPTScript is that tools are composable. So, you can get this tool working well and then move onto the next tool without affecting this one. To launch this tool, run: - -``` -gptscript --sub-tool k8s-agent cli-demo.gpt -``` - -Once you're chatting, try asking it do something like list all the pods in your cluster or even to launch an new deployment in the cluster. +All edits below are assumed to be in this file. 
-## Create the GitHub Agent +## Create the entrypoint tool -Now let's add the GitHub Agent. Drop the following into the file below the tool we just added. +Let's start by adding the main tool to the file: ``` ---- -Name: github-agent -Description: An agent to help you with GitHub related tasks using the gh cli Context: learn-gh -Tools: sys.exec -Parameter: task: The GitHub task to accomplish +Context: github.com/gptscript-ai/context/cli Chat: true You have the gh cli available to you. Use it to accomplish the tasks that the user asks of you. ``` -This tool is very similar to the Kubernetes agent. There are just a few key differences: +Let's walk through this tool line by line. + +Each `Context` line references a context tool that will be run before the tool itself runs. +Context tools provide helpful output for the LLM to understand its capabilities and what it is supposed to do. +The first, `learn-gh`, we will define later in this file. +The second, `github.com/gptscript-ai/context/cli`, provides information to the LLM about the operating system that GPTScript is running on, +and gives it access to the `sys.exec` built-in tool, which is used to run commands. -1. Names and descriptions have been changed to reference GitHub and gh as appropriate. -2. We've introduced the `learn-gh` context. We'll explore this next. +`Chat: true` turns this tool into a "chat-able" tool, which we also call an "agent". +This causes the tool to run as an interactive chatbot, asking for user input and providing output. +If `Chat` is set to `false` (or not specified at all), the tool will run once without user interaction and exit. +This is useful for automated tasks, but right now we are working on an agent, so we set it to `true`. + +Lastly, there is the **tool body**, which in this case is a simple prompt, letting the LLM know that it should use the `gh` command and follow the user's instructions. +The tool body specifies what the tool should actually do. 
It can be a prompt or raw code like Python, JavaScript, or bash. +For chat-able tools, the tool body must be a prompt. ### The learn-gh context tool -Add this for the learn-gh context tool: +Next, add this to the file for the `learn-gh` context tool: ``` --- @@ -112,7 +61,7 @@ Name: learn-gh #!/usr/bin/env bash -echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicate --sort flag." +echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicated --sort flag." gh --help gh repo --help gh issue --help @@ -128,159 +77,34 @@ gh release --help gh release create --help ``` -As we saw, this tool is used as the context for the github-agent. Why did we add this and what does it do? +The `---` at the top of this tool is a block separator. It's how we delineate tools within a script file. -To answer that, let's first understand what the Context stanza does. Any tools referenced in the Context stanza will be called and their output will be added to the chat context. As the name suggests, this gives the LLM additional context for subsequent messages. Sometimes, an LLM needs extra instructions or context in order to achieve the desired results. There's no hard or fast rule here for when you should include context; it's best discovered through trial-and-error. +This tool has a `Name` field. We named this tool `learn-gh` so that it matches the `Context: learn-gh` line from the entrypoint tool. - We didn't need extra context for the Kubernetes tool because we found our default LLM knows kubectl (and Kubernetes) quite well. However, our same testing showed that our default LLM doesn't know the gh cli as well. 
Specifically, the LLM would sometimes hallucinate invalid combinations of flags and parameters. Without this context, the LLM often takes several tries to get the gh command correct. +The body of this tool is a bash script, rather than a prompt. +This context tool will be run by GPTScript automatically at the start of execution, and its output will be provided to the LLM. +We're running a bunch of `--help` commands in the `gh` CLI so that the LLM can understand how to use it. +GPTScript knows that this tool body is a script rather than a prompt because it begins with `#!`. -:::tip -Did you catch that "takes several tries to get the command correct" part? One useful feature of GPTScript is that it will feed error messages back to the LLM, which allows the LLM to learn from its mistake and try again. -::: +## Running the tool -And that's the GitHub Agent. You can try it out now: +Now try running the tool: ``` -gptscript --sub-tool github-agent cli-demo.gpt +gptscript cli-demo.gpt ``` -Once you're chatting, try asking it do something like "Open an issue in gptscript-ai/gptscript with a title and body that says Hi from me and states how wonderful gptscript is but jazz it up and make it unique" - -## Your CLI Assistant - -Right now if you were to launch this script, you'd be dropped right into the Kubernetes agent. Let's create a new entrypoint whose job it is to handle your initial conversation and route to the appropriate agent. Add this to the **TOP** of your file: - -``` -Name: Your CLI Assistant -Description: An assistant to help you with local cli-based tasks for GitHub and Kubernetes -Agents: k8s-agent, github-agent -Context: shared-context -Chat: true - -Help the user acomplish their tasks using the tools you have. When the user starts this chat, just say hello and ask what you can help with. You don't need to start off by guiding them. -``` - -By being at the top of the file, this tool will serve as the script's entrypoint. 
Here are the parts of this tool that are worth additional explanation: - -**Agents: k8s-agent, github-agent** puts these two agents into a group that can hand-off to each other. So, you can ask a GitHub question, then a Kubernetes question, and then a GitHub question again and the chat conversation will get transferred to the proper agent each time. - -Next is **Context: shared-context**. You're already familiar with contexts, but in the next section we'll explain what's unique about this one. - -### The shared-context tool - -Drop the shared-context tool in at the very bottom of the page: - -``` ---- -Name: shared-context -Share Context: github.com/gptscript-ai/context/history - -#!sys.echo -Always delegate to the best tool for the users request. -Ask the user for information needed to complete a task. -Provide the user with the exact action you will be taking and get the users confirmation when creating or updating resources. -ALWAYS ask the user to confirm deletions, provide as much detail about the action as possible. -``` - -and do one more thing: add it as a context tool to both the k8s-agent and github-agent. For k8s-agent, that means adding this line: `Context: shared-context` and for github-agent, it means modifying the existing Context line to: `Context: learn-gh, shared-context`. - -**Share Context: github.com/gptscript-ai/context/history** - In this line, "Share Context" means that the specified tool(s) will be part of the context for any tools that references this tool in their Context stanza. It's a way to compose and aggregate contexts. - - The specific tool referenced here - github.com/gptscript-ai/context/history - makes it so that when you transition from one agent to the next, your chat history is carried across. Using this file as an example, this would allow you to have a history of all the Kubernetes information you gathered available when talking to the GitHub tool. 
- -The **#!sys.echo** body is a simple way to directly output whatever text follows it. This is useful if you just have a static set of instructions you need to inject into the context. The actual text should make sense if you read it. We're telling the agents how we want them to behave and interact. - -## Putting it all together - -Let's take a look at this script as one cohesive file: - -``` -Name: Your CLI Assistant -Description: An assistant to help you with local cli-based dev tasks -Context: shared-context -Agents: k8s-agent, github-agent -Chat: true - -Help the user acomplish their tasks using the tools you have. When the user starts this chat, just say hello and ask what you can help with. You don't need to start off by guiding them. - ---- -Name: k8s-agent -Description: An agent that can help you with your Kubernetes cluster by executing kubectl commands -Context: shared-context -Tools: sys.exec -Parameter: task: The kubectl related task to accomplish -Chat: true - -You have the kubectl cli available to you. Use it to accomplish the tasks that the user asks of you. - ---- -Name: github-agent -Description: An agent to help you with GitHub related tasks using the gh cli -Context: learn-gh, shared-context -Tools: sys.exec -Parameter: task: The GitHub task to accomplish -Chat: true - -You have the gh cli available to you. Use it to accomplish the tasks that the user asks of you. - ---- -Name: learn-gh - -#!/usr/bin/env bash - -echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicate --sort flag." 
-gh --help
-gh repo --help
-gh issue --help
-gh issue list --help
-gh issue create --help
-gh issue comment --help
-gh issue delete --help
-gh issue edit --help
-gh pr --help
-gh pr create --help
-gh pr checkout --help
-gh release --help
-gh release create --help
-
-
----
-Name: shared-context
-Share Context: github.com/gptscript-ai/context/history
-
-#!sys.echo
-Always delegate to the best tool for the users request.
-Ask the user for information needed to complete a task.
-Provide the user with the exact action you will be taking and get the users confirmation when creating or updating resources.
-ALWAYS ask the user to confirm deletions, provide as much detail about the action as possible.
-```
-
-There isn't anything new to cover in this file, we just wanted you to get a holistic view of it. This script is now fully functional. You can launch it via:
-
-```
-gpscript cli-demo.gpt
-```
-
-### Adding your own CLI
-
-By now you should notice a simple pattern emerging that you can follow to add your own CLI-powered agents to a script. Here are the basics of what you need:
-
-```
-Name: {your cli}-agent
-Description: An agent to help you with {your taks} related tasks using the gh cli
-Context: {here's your biggest decision to make}, shared-context
-Tools: sys.exec
-Parameter: task: The {your task} to accomplish
-Chat: true
-
-You have the {your cli} cli available to you. Use it to accomplish the tasks that the user asks of you.
-```
+Once you're chatting, try asking it to do something like "Open an issue in gptscript-ai/gptscript with a title and body that says Hi from me and states how wonderful gptscript is but jazz it up and make it unique".
+GPTScript will ask for confirmation before it runs each command, so you can make sure that it only runs the commands you want it to.
 
-You can drop in your task and CLI and have a fairly functional CLI-based chat agent. The biggest decision you'll need to make is what and how much context to give your agent. 
For well-known CLIs/technologies like kubectl and Kubernetes, you probably won't need a custom context. For custom CLIs, you'll definitely need to help the LLM out. The best approach is to experiment and see what works best. +## A note on context tools -## Next steps +Context tools are a powerful way to provide additional information to the LLM, but they are not always necessary. +If you are working with a system that the LLM already understands well, you will probably not need to provide additional context. +When writing your own tools, it may take some trial and error to determine whether a context tool is needed. +If the LLM frequently hallucinates subcommands or arguments, it is probably worth adding a context tool to provide more information about the CLI. -Hopefully you've found this guide helpful. From here, you have several options: +## Next Steps -- You can checkout out some of our other guides available in this section of the docs -- You can dive deeper into the options available when [writing script](/tools/gpt-file-reference) +- You can check out some of our other guides available in this section of the docs +- You can dive deeper into the options available when [writing scripts](/tools/gpt-file-reference) diff --git a/docs/docs/02-examples/02-api.md b/docs/docs/02-examples/02-api.md index d84ae653..468fdd48 100644 --- a/docs/docs/02-examples/02-api.md +++ b/docs/docs/02-examples/02-api.md @@ -1,40 +1,27 @@ # Chat with an API -Interacting with cloud providers through dashboards, APIs, and CLIs is second nature to devops engineers. Using AI chat, the engineer can express a goal, and the AI can generate and execute the calls needed to achieve it. This saves the engineer time from having to look up the API calls needed themselves. GPTScript makes building a chat integration with an existing OpenAPI schema quick and easy. +GPTScript makes it easy to create a chatbot interface to interact with an API. 
-This guide will walk through the process of using the OpenAPI spec from Digital Ocean to build a chatbot capable of launching droplets and databases. The reader will be able to continue adding Digital Ocean capabilities or build their own chatbot with another OpenAPI schema. +This guide will demonstrate how to build a chatbot that interacts with the DigitalOcean API. -## Too Long; Didn't Read +## Getting Started -If you just want to try out the Digital Ocean chatbot first: - -Follow the [API credential](#api-access) settings here. - -Then you can run the following commands to get started: - -```bash -gptscript github.com/gptscript-ai/digital-ocean-agent -``` - -## Getting started - -First we will need to download a copy of the openapi.yaml. This spec technically can be accessed by URL, but initially, it is easier to download a copy and save it as openapi.yaml. - -### The Digital Ocean openapi.yaml spec - -Getting the openapi.yaml file from Digital Ocean can be done by running the following command in a terminal. +First, you will need to download a copy of DigitalOcean's OpenAPI definition. +While you can reference it by its URL, it is a bit easier to work with it locally. +You can download the file by running the following command: ```bash curl -o openapi.yaml -L https://api-engineering.nyc3.cdn.digitaloceanspaces.com/spec-ci/DigitalOcean-public.v2.yaml ``` -This will download a copy of the openapi yaml file to the local directory. +This will download a copy of the OpenAPI definition to the current directory. -Lets take a look at the spec file a little bit. The integration in GPTScript creates a tool named after each operationId in the OpenAPI spec. You can see what these tools would be by running the following. +Let's examine this OpenAPI file. GPTScript will create a tool named after each operationId in the file. +You can see the operationIds by running the following command: ```bash grep operationId openapi.yaml -# … +# ... 
# operationId: domains_delete_record # operationId: droplets_list # operationId: droplets_create @@ -50,189 +37,72 @@ grep operationId openapi.yaml # operationId: droplets_list_kernels # operationId: droplets_list_firewalls # operationId: droplets_list_neighbors -# … -``` - -If we look at the operationIds, you’ll notice they are structured around an object like droplet, database, or project. Each object has a collection of verb like list, get, delete, create, etc. Each tool in GPTScript has it’s own set of tools. So we can create agents, tools with chat enabled, that are experts in a specific set of objects and have access to all of the object_verb tools available to them. This allows us to fan out tools from a main entrypoint to multiple experts that can solve the users tasks. - -Lets explore this design pattern. - -## Creating Main Entrypoint - -Lets start by creating our main entrypoint to the Digital Ocean chatbot. The main tool in a GPTScript chat program is usually named agent.gpt. Let’s first setup the agents by giving it a name, the ability to chat, basic instructions, and the main greeting prompt. Create an agent.gpt file with the following contents. - -agent.gpt - -``` -Name: Digital Ocean Bot -Chat: true - -You are a helpful DevOps assistant that is an expert in Digital Ocean. -Using only the tools available, do not answer without using a tool, respond to the user task. -Greet the User with: "Hello! How can I help you with Digital Ocean?" -``` - -This file when run will show the following. - -![screenshot](/img/chat-api.png) - -In the current form, the chatbot will not be able to do anything since it doesn’t have access to any APIs. Let’s address that now, open our tool.gpt file and add the following. - -agent.gpt - +# ... ``` -Name: Digital Ocean Bot -Chat: true -Agents: droplets.gpt -You are a helpful DevOps assistant that is an expert in Digital Ocean -Using only the tools available, do not answer without using a tool, respond to the user task. 
-Greet the User with: "Hello! How can I help you with Digital Ocean?" -``` +The operationIds generally follow a pattern of `object_verb`. +This will be helpful for us, because we can use wildcard matching to refer to a subset of the operations. -Now lets create a droplets.gpt file to bring in the droplet tools. +## Creating the Script -droplets.gpt +Create a `tool.gpt` file with the following contents: ``` -Name: Droplet Agent +Name: DigitalOcean Bot Chat: true -Tools: droplets* from ./openapi.yaml -Description: Use this tool to work with droplets -Args: request: the task requested by the user +Tools: droplets* from openapi.yaml +Tools: databases* from openapi.yaml +Tools: images* from openapi.yaml +Tools: regions_list from openapi.yaml +Tools: tags* from openapi.yaml +Tools: sizes_list from openapi.yaml +Tools: sshKeys_list from openapi.yaml +Tools: sys.time.now -Help the user complete their Droplet operation requests using the tools available. -When creating droplets, always ask if the user would like to access via password or via SSHkey. +You are a helpful assistant with access to the DigitalOcean API to manage droplets and databases. +Before creating, updating, or deleting anything, tell the user about the exact action you are going to take, and get their confirmation. +Start the conversation by asking the user how you can help. ``` -Here we have defined the Droplet Agent, and enabled chat. We have also brought in an subset of the openapi.yaml tools that relate to droplets. By using droplets* we are making available everything droplet related into the available tools for this agent. We also provided the description to the main agent, and any other agent that has access to it, when to utilize this tool. We also have an argument called “request”, this is used when the LLM decides to call the agent it can smoothly pass off the user request without the Droplet Agent having to ask again. 
- -## Chat with Digital Ocean +This chatbot has access to several tools that correspond to various operations in the DigitalOcean OpenAPI file. +We give it access to all tools related to droplets and databases, since those are the main things we want it to work with. +In order to support this, we also need to give it access to images, regions, tags, etc. so that it can get the information it needs to create new droplets and databases. +Lastly, the `sys.time.now` tool is a tool that is built-in to GPTScript that provides the current date and time. -### API Access +:::note +We cannot give the entire `openapi.yaml` file to the tool because it contains too many API operations. +Most LLM providers, such as OpenAI, have a limit on the number of tools that you can provide to the model at one time. +The OpenAPI file contains over 300 operations, which is too many for most LLMs to handle at once. +::: -Now that we have brought in our first tool using the OpenAPI spec, we will need to setup authentication. Defined in the openapi.yaml is how the Digital Ocean API expects authenticated requests to work. If you look in the spec file path of components.securitySchemes you will see that Digital Ocean expects bearer_auth. So you will need to create an API key in the Digital Ocean dashboard with the access you want the LLM to be able to interact with Digital Ocean. For instance, you can do a read only key that will allow you to just query information, or you can provide it full access and the operator can work with the LLM to do anything in the project. It is up to you. For this example, we will be using a full access token, but you can adjust for your needs. You can create your API key by going to this link [Apps & API](https://cloud.digitalocean.com/account/api/tokens) section in your account. +## Creating an API Token -Once you have an API key, you will need to set an environment variable with that value stored. 
- -```bash -export GPTSCRIPT_API_DIGITALOCEAN_COM_BEARER_AUTH=****** -``` +Before you run this script, you need to have a DigitalOcean API token. -Where the *** is the API key created in the dashboard. +Go to [Applications & API](https://cloud.digitalocean.com/account/api/tokens) in the DigitalOcean dashboard and create a new token. +You can select whichever scopes you want, but you should at least give it the ability to read droplets and databases. -### Chatting with Digital Ocean APIs +## Running the Script -Now you can run gptscript to start your conversation with Digital Ocean. +Let's run the script and start chatting with it: ```bash -gptscript agent.gpt -``` - -You should now be able to ask how many droplets are running? - -And get an output from the chatbot. This is great, but not quite ready to use just yet. Lets keep adding some functionality. - -## Adding Database Support - -Now that we can do droplets, we can add support for databases just as easy. Lets create a databases.gpt file with the following contents. - -Ddtabases.gpt - -``` -Name: Database Agent -Chat: true -Tools: databases* from ./openapi.yaml -Description: Call this tool to manage databases on digital ocean -Args: request: the task requested by the user - -Help the user complete database operation requests with the tools available. -``` - -Here again, we are essentially scoping our agent to handle database calls with the Digital Ocean API. Now in order for this to be used, we need to add it to our agent list in the main agent.gpt file. - -Agent.gpt - -``` -Name: Digital Ocean Bot -Chat: true -Agents: droplets.gpt, databases.gpt - -You are a helpful DevOps assistant that is an expert in Digital Ocean -Using only the tools available, do not answer without using a tool, respond to the user task. -Greet the User with: "Hello! How can I help you with Digital Ocean?" -``` - -Now when we test it out we can ask how many databases are running? And it should give back the appropriate response. 
- -Now, when it comes to creating a database or droplet, we are missing some APIs to gather the correct information. We don’t have access to size information, regions, SSH Keys, etc. Since these are common tools, it would be a bit of a hassle to add lines to both the databases.gpt and droplets.gpt files. To avoid this, we can make use of the GPTScript Context to provide a common set of tools and instructions. - -## Context - -Context is a powerful concept in GPTScript that provides information to the system prompt, and provide a mechanism to compose a common set of tools reducing duplication in your GPTScript application. Lets add a context.gpt file to our chatbot here with the following contents. - -context.gpt - -``` -Share Tools: sys.time.now - -Share Tools: images* from ./openapi.yaml -Share Tools: regions_list from ./openapi.yaml -Share Tools: tags* from openapi.yaml -Share Tools: sizes_list from ./openapi.yaml -Share Tools: sshKeys_list from ./openapi.yaml - - -#!sys.echo -Always delegate to the best tool for the users request. -Ask the user for information needed to complete a task. -Provide the user with the exact action you will be taking and get the users confirmation when creating or updating resources. -ALWAYS ask the user to confirm deletions, provide as much detail about the action as possible. -``` - -There is quite a bit going on here, so lets break it down. Anywhere you see Share Tools it is making that tool available to anything uses the context. In this case, it is providing access to the time now tool so you can ask what was created yesterday and the LLM can get a frame of reference. Additionally, it provides a common set of Digital Ocean APIs that are needed for placement, organization(tags), sizes, and images, etc. Since multiple components in Digital Ocean use these values, it is useful to only need to define it once. Last we are providing a set of common instructions for how we want the chatbot to behave overall. 
This way, we do not need to provide this information in each agent. Also, since this is in the system prompt, it is given a higher weight to the instructions in the individual agents. - -Now lets add this to our agents. You will need to add the line: - -``` -Context: context.gpt -``` - -To each of our agents, so the droplets.gpt, agent.gpt, and databases.gpt will have this line. - -## Wrapping up - -Provided you have given API access through your token, you should now be able to run the chatbot and create a database or a droplet and be walked through the process. You should also be able to ask quesitons like What VMs were created this week? - -You now know how to add additional capabilities through agents to the chatbots. You can follow the same patterns outlined above to add more capabilities or you can checkout the chat bot repository to see additional functionality. - -### Use your own OpenAPI schema - -If you have your own OpenAPI schema, you can follow the same pattern to build a chatbot for your own APIs. The simplest way to get started is to create a gptscript file with the following contents. - -``` -Name: {Your API Name} Bot -Chat: true -Tools: openapi.yaml - -You are a helpful assistant. Say "Hello, how can I help you with {Your API Name} system today?" +gptscript tool.gpt ``` -You can then run that and the LLM will be able to interact with your API. +Try asking it to list your current databases or droplets, or to help you create a new one. -#### Note on OpenAI tool limits +The first time the LLM tries to make an API call, it will ask for your API token. +Paste it into the prompt. It will be used for all future API calls as well. +The LLM will never see or store your API token. It is only used client-side, on your computer. -As we mentioned before, GPTScript creates a tool for each operationId in the OpenAPI spec. If you have a large OpenAPI spec, you may run into a limit on the number of tools that can be created. 
OpenAI, the provider of the GPT-4o model only allows a total of 200 tools to be passed in at a single time. If you exceed this limit, you will see an error message from OpenAI. If you run into this issue, you can follow the same pattern we did above to create our Digital Ocean bot. +## Next Steps -A quick check to see how many tools total would be created, you can run the following: +Feel free to modify the script to add other parts of the DigitalOcean API. +You could also try creating a chatbot for a different API with an OpenAPI definition. -```bash -grep operationId openapi.yaml|wc -l - 306 -``` +For a more advanced DigitalOcean chatbot, see our [DigitalOcean Agent](https://github.com/gptscript-ai/digital-ocean-agent) tool. -In our case, there are 306 tools that would be created in the case of our Digital Ocean spec. This would not fit into a single agent, so breaking it up into multiple agents is the best way to handle this. - -## Next Steps +To read more about OpenAPI tools in GPTScript, see the [OpenAPI Tools](../03-tools/03-openapi.md) article. -Now that you have seen how to create a chatbot with an OpenAPI schema, checkout our other guides to see how to build other ChatBots and agents. +To read more about credential storage in GPTScript, see the [Credentials](../02-credentials.md) article. diff --git a/docs/docs/02-examples/04-local-files.md b/docs/docs/02-examples/04-local-files.md index 33471afa..252ddf96 100644 --- a/docs/docs/02-examples/04-local-files.md +++ b/docs/docs/02-examples/04-local-files.md @@ -1,6 +1,9 @@ # Chat with Local Files -With GPTScript interacting with local files is simple and powerful. This can help you streamline repetitive or data-intensive tasks. In this guide, we'll build a script that can query Excel files, CSVs, and PDFs. We'll then use the script to read, transform, and utilize the data in these files. +With GPTScript, interacting with local files is simple and powerful. 
+This can help you streamline repetitive or data-intensive tasks.
+In this guide, we'll build a script that can query Excel files, CSVs, and PDFs.
+We'll then use the script to read, transform, and utilize the data in these files.
 
 ## Too Long; Didn't Read
 
@@ -14,60 +17,79 @@ gptscript --workspace=~/Documents github.com/gptscript-ai/local-files-demo
 ```
 
 ## Getting Started
-The rest of this guide will walk you through building and using a data processing assistant. We'll be explaining the how, what, and why along the way.
+
+The rest of this guide will walk you through building and using a data processing assistant.
 
 First, let's get some sample data to work with. You can clone our repo with our sample data:
+
 ```
 git clone https://github.com/gptscript-ai/local-files-demo.git
 cd local-files-demo
 ```
 
-Next, open up a new gptscript file in your favorite editor. We'll call the file data-assistant.gpt.
+Next, open up a new gptscript file in your favorite editor. We'll call the file `data-assistant.gpt`.
+
 ```
 vim data-assistant.gpt
 ```
+
 All edits below are assumed to be in this file.
 
 ### Create the Assistant
-Put this in the gpt file:
+
+Add this to the file:
+
 ```
-Name: Your Data Processing Assitant
-Description: An asistant to help you with processing data found in files on your workstation. Helpful for querying spreadsheets, CSVs, JSON files, and pdfs.
+Name: Data Processing Assistant
+Description: An assistant to help you with processing data found in files on your workstation. Helpful for querying spreadsheets, CSVs, JSON files, and PDFs.
 Tools: github.com/gptscript-ai/structured-data-querier, github.com/gptscript-ai/pdf-reader
 Context: github.com/gptscript-ai/context/workspace
 Chat: true
 
-You are a helpful data processing assistant. Your goal is to help the user with data processing. Help the user accomplish their tasks using the tools you have. When the user starts this chat, just say hi, introduce yourself, and ask what you can help with. 
+You are a helpful data processing assistant. Your goal is to help the user with data processing. +Help the user accomplish their tasks using the tools you have. +When the user starts this chat, say hi, introduce yourself, and ask what you can help with. ``` -This is actually the entirety of the script. We're packing a lot of power into just a handful of lines here. Let's talk through them. -**Name and Description** help the LLM understand the purpose of this tool. You should always have meaningful names and descriptions. +This is the entire script. Here's what each part does: + +`Name and Description` help the LLM understand the purpose of this tool. You should always have meaningful names and descriptions. -The **Tools: ...** stanza pulls two useful tools into this assistant. +The `Tools: ...` line provides two useful tools to this assistant. -The [structured-data-querier](https://github.com/gptscript-ai/structured-data-querier) makes it possible to query csv, xlsx, and json files as though they were SQL databases (using an application called [DuckDB](https://duckdb.org/)). This is extremely powerful when combined with the power of LLMs because it let's you ask natural language questions that the LLM can then translate to SQL. +The [structured-data-querier](https://github.com/gptscript-ai/structured-data-querier) makes it possible to query CSV, XLSX, and JSON files as though they were SQL databases (using an application called [DuckDB](https://duckdb.org/)). +This is extremely powerful when combined with the power of LLMs because it allows you to ask natural language questions that the LLM can then translate to SQL. -The [pdf-reader](https://github.com/gptscript-ai/pdf-reader) isn't quite as exciting, but still useful. It parses and reads PDFs and returns the contents to the LLM. This will put the entire contents in your chat context, so it's not appropriate for extremely large PDFs, but it's handy for smaller ones. 
+The [pdf-reader](https://github.com/gptscript-ai/pdf-reader) parses and reads PDFs and returns the contents to the LLM.
+This will put the entire contents in your chat context, so it's not appropriate for extremely large PDFs, but it's handy for smaller ones.
 
-**Context: github.com/gptscript-ai/context/workspace** introduces a context tool that makes this assistant "workspace" aware. Its description reads:
+`Context: github.com/gptscript-ai/context/workspace` introduces a [context tool](../03-tools/05-context.md) that makes this assistant "workspace" aware. Its description reads:
 
 > Adds the workspace and tools needed to access the workspace to the current context
 
-That translates to telling the LLM what the workspace directory is and instructing it to use that directory for reading and writing files. As we saw above, you can specify a workspace like this:
+Basically, this context tool tells the LLM what the workspace directory is and instructs it to use that directory for reading and writing files.
+As we saw above, you can specify a workspace like this:
+
 ```
 gptscript --workspace=/Your/path/here ...
 ```
+
 If you don't specify one, a temporary directory will be created and used for the workspace.
 
-This context also shares the `sys.read`, `sys.write`, and `sys.ls` built-in tools with the assistant so that it automatically has access to them.
+This context tool also shares the `sys.read`, `sys.write`, and `sys.ls` built-in tools with the assistant.
 
-Next we have **Chat: true**, which you've seen if you looked at any of our other guides. This makes the current tool "chat-able". We refer to chatable tools as agents or assistants.
+Next we have `Chat: true`. This makes the current tool "chat-able". We refer to chat-able tools as agents or assistants.
 
 Finally, we have the prompt:
 
-> You are a helpful data processing assistant. Your goal is to help the user with data processing tasks. Help the user accomplish their tasks using the tools you have. 
When the user starts this chat, just say hi, introduce yourself, and ask what you can help with. +> You are a helpful data processing assistant. Your goal is to help the user with data processing. +> Help the user accomplish their tasks using the tools you have. +> When the user starts this chat, say hi, introduce yourself, and ask what you can help with. ## Using the Assistant -Once again, that's all there is to this assistant. You can start using it by specifying your own workspace or using our sample-data directory as the workspace. Assuming you're using our sample data and have followed these instructions, here's how you launch it: + +When you run the assistant, you can specify your own workspace folder or our sample data directory +Assuming you're using our sample data and have followed these instructions, here's how you run it: + ``` gptscript --workspace=./sample-data data-assistant.gpt ``` @@ -75,6 +97,7 @@ gptscript --workspace=./sample-data data-assistant.gpt Here's a few sample interactions with these files. ### Cleaning up data + ``` > whats in the key contacts file? @@ -114,6 +137,7 @@ Here's a few sample interactions with these files. ``` ### Identifying and fixing data gaps + ``` > is there any missing data in that csv? ... @@ -126,6 +150,7 @@ Here's a few sample interactions with these files. ``` ### Cross-referencing + ``` > what were sales like for Kevin's location? @@ -149,7 +174,9 @@ Here's a few sample interactions with these files. Is there anything else you would like to know or do with this data? ``` + ### Pulling all the info together + ``` > Let's help Kevin raise sales. What promotions do we have going on? ... @@ -234,11 +261,12 @@ Here's a few sample interactions with these files. Feel free to customize this email further to better suit your needs. Let me know if there's anything else I can assist you with! ``` + Try it out yourself and see what you can come up with. ## Next steps Hopefully you've found this guide helpful. 
From here, you have several options: -- You can checkout out some of our other guides available in this section of the docs +- You can check out some of our other guides available in this section of the docs - You can dive deeper into the options available when [writing script](/tools/gpt-file-reference) diff --git a/docs/docs/02-examples/05-workflow.md b/docs/docs/02-examples/05-workflow.md index 0aca6fc3..7f903536 100644 --- a/docs/docs/02-examples/05-workflow.md +++ b/docs/docs/02-examples/05-workflow.md @@ -1,15 +1,14 @@ # Run an Automated Workflow -Automating a sequence of tasks that integrate with one or more systems is a ubiquitous engineering problem that typically requires some degree of domain-specific knowledge up-front. However, workflows written with GPTScript all but eliminate this prerequisite, enabling developers to build their workflows by describing the high-level steps it should perform. +Automating a sequence of tasks that integrate with one or more systems is a ubiquitous engineering problem that typically requires some degree of domain-specific knowledge up-front. +However, workflows written with GPTScript all but eliminate this prerequisite, enabling developers to build their workflows by describing the high-level steps it should perform. This guide will show you how to build a GPTScript that encapsulates a workflow consisting of the following steps: -1. Get a selection of twitter posts +1. Get a selection of X (Twitter) posts 2. Summarize their content 3. Summarize the content of any links they directly reference 4. Write the results to a Markdown document -We'll be explaining the how, what, and why along the way. - ## Too long; didn't read Want to start using this script now? Just run: @@ -53,7 +52,7 @@ This tool: - imports two other tools - `sys.write` is a built-in tool which enables the entrypoint tool to write files to your system. - `summarize-tweet` is a custom tool that encapsulates how each tweet gets summarized. 
We'll define this tool in the next step. -- ensures tweets are never summarized in parallel to ensure they are summarized in the correct order +- ensures tweets are never summarized in parallel so that they are summarized in the correct order - defines the tweet URLs to summarize and the file to write them to At a high-level, it's getting the summaries for two tweets and storing them in the `tweets.md` file. @@ -87,7 +86,7 @@ This tool - imports three other tools to solve summarization sub-problems - `github.com/gptscript-ai/browser` is an external tool that is used to open the tweet URL in the browser and extract the page content - `get-hyperlinks` and `summarize-hyperlinks` are custom helper tools we'll define momentarily that extract hyperlinks from tweet text and summarize them -- describes the markdown document this tool should produce, leaving it up to the LLM to decide which of the available tools to call to make this happen +- describes the Markdown document this tool should produce, leaving it up to the LLM to decide which of the available tools to call to make this happen ## Hyperlink Summarization Tools diff --git a/docs/docs/03-tools/02-authoring.md b/docs/docs/03-tools/02-authoring.md index 3e81613a..186147a6 100644 --- a/docs/docs/03-tools/02-authoring.md +++ b/docs/docs/03-tools/02-authoring.md @@ -6,7 +6,7 @@ This file is itself a GPTScript that defines the tool's name, description, and w ## Quickstart -This is a guide for writing portable tools for GPTScript. The supported languages currently are Python, NodeJS, and Go. This guide uses Python but you can see documentation for the other language below. +This is a guide for writing portable tools for GPTScript. The supported languages currently are Python, Node.js, and Go. This guide uses Python, but you can see documentation for the other language below. ### 1. 
Write the code From f27e193499326d95d72689e73f8ec645e25ca168 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 1 Aug 2024 16:08:20 -0400 Subject: [PATCH 056/270] docs: various improvements and standardizing terms (#689) Signed-off-by: Grant Linville --- docs/docs/02-examples/02-api.md | 2 +- docs/docs/03-tools/01-using.md | 26 +++++-- docs/docs/03-tools/02-authoring.md | 27 +++++-- docs/docs/03-tools/03-openapi.md | 2 +- docs/docs/03-tools/04-credential-tools.md | 55 ++++++++++---- docs/docs/03-tools/05-context.md | 75 +++++++++---------- docs/docs/03-tools/06-how-it-works.md | 35 +++++---- docs/docs/03-tools/07-gpt-file-reference.md | 34 ++++----- docs/docs/05-alternative-model-providers.md | 20 +++-- .../{02-credentials.md => 06-credentials.md} | 4 +- docs/docs/09-faqs.md | 52 +++++++++---- 11 files changed, 200 insertions(+), 132 deletions(-) rename docs/docs/{02-credentials.md => 06-credentials.md} (96%) diff --git a/docs/docs/02-examples/02-api.md b/docs/docs/02-examples/02-api.md index 468fdd48..6c312c23 100644 --- a/docs/docs/02-examples/02-api.md +++ b/docs/docs/02-examples/02-api.md @@ -105,4 +105,4 @@ For a more advanced DigitalOcean chatbot, see our [DigitalOcean Agent](https://g To read more about OpenAPI tools in GPTScript, see the [OpenAPI Tools](../03-tools/03-openapi.md) article. -To read more about credential storage in GPTScript, see the [Credentials](../02-credentials.md) article. +To read more about credential storage in GPTScript, see the [Credentials](../06-credentials.md) article. diff --git a/docs/docs/03-tools/01-using.md b/docs/docs/03-tools/01-using.md index a2c8326b..27a59037 100644 --- a/docs/docs/03-tools/01-using.md +++ b/docs/docs/03-tools/01-using.md @@ -1,5 +1,9 @@ # Using Tools -In GPTScript, tools are used to extend the capabilities of a script. The idea behind them is that AI performs better when it has very specific instructions for a given task. 
Tools are a way to break-up the problem into smaller and more focused pieces where each tool is responsible for a specific task. A typical flow like this is to have a main script that imports a set of tools it can use to accomplish its goal. + +In GPTScript, tools are used to extend the capabilities of a script. +The idea behind them is that AI performs better when it has very specific instructions for a given task. +Tools are a way to break up the problem into smaller and more focused pieces where each tool is responsible for a specific task. +A typical pattern is to have a main script that imports a set of tools it can use to accomplish its goal. GPTScripts can utilize tools in one of three ways: 1. Built-in system tools @@ -7,6 +11,7 @@ GPTScripts can utilize tools in one of three ways: 3. External tools ### System Tools + All GPTScripts have access to system tools, like `sys.read` and `sys.write`, that can be used without any additional configuration. ```yaml @@ -16,11 +21,14 @@ Read all of the files in my current directory, do not recurse over any subdirect ``` System tools are a set of core tools that come packaged with GPTScript by default. +To see a list of the system tools, run `gptscript --list-tools`. ### In-Script Tools -Things get more interesting when you start to use custom tools. -The most basic example of this is an in-script tool that is defined in the same file as the main script. This is useful for breaking up a large script into smaller, more manageable pieces. +Things get more interesting when you start to write your own tools. + +The most basic example of this is an in-script tool that is defined in the same file as the main script. +This is useful for breaking up a large script into smaller, more manageable pieces. ```yaml tools: random-number @@ -35,7 +43,9 @@ Select a number at random between 1 and 100 and return only the number. ``` ### External Tools -You can refer to GPTScript tool files that are served on the web or stored locally. 
Take this image and write a description of it in the style of a pirate.
+This file is a GPTScript that defines the tool's name, description, and what it should do. ## Quickstart -This is a guide for writing portable tools for GPTScript. The supported languages currently are Python, Node.js, and Go. This guide uses Python, but you can see documentation for the other language below. +This is a guide for writing portable tools for GPTScript. The supported languages currently are Python, Node.js, and Go. +This guide uses Python, but you can see documentation for the other languages below. ### 1. Write the code @@ -65,7 +66,11 @@ gptscript github.com// '{"url": "https://github.com"}' ## Sharing Tools -GPTScript is designed to easily export and import tools. Doing this is currently based entirely around the use of GitHub repositories. You can export a tool by creating a GitHub repository and ensuring you have the `tool.gpt` file in the root of the repository. You can then import the tool into a GPTScript by specifying the URL of the repository in the `tools` section of the script. For example, we can leverage the `image-generation` tool by adding the following line to a GPTScript: +GPTScript is designed to easily export and import tools. +Doing this is currently based entirely around the use of GitHub repositories. +You can export a tool by creating a GitHub repository and ensuring you have the `tool.gpt` file in the root of the repository. +You can then import the tool into a GPTScript by specifying the URL of the repository in the `tools` section of the script. +For example, we can leverage the `image-generation` tool by adding the following line to a GPTScript: ```yaml tools: github.com/gptscript-ai/dalle-image-generation @@ -73,9 +78,12 @@ tools: github.com/gptscript-ai/dalle-image-generation Generate an image of a city skyline at night. ``` -### Supported Languages +## Supported Languages -GPTScript can execute any binary that you ask it to. However, it can also manage the installation of a language runtime and dependencies for you. 
Currently this is only supported for a few languages. Here are the supported languages and examples of tools written in those languages: +GPTScript can execute any binary that you ask it to. +However, it can also manage the installation of a language runtime and dependencies for you. +Currently, this is only supported for a few languages. +Here are the supported languages and examples of tools written in those languages: | Language | Example | |-----------|----------------------------------------------------------------------------------------------------------------| @@ -84,10 +92,13 @@ GPTScript can execute any binary that you ask it to. However, it can also manage | `Golang` | [Search](https://github.com/gptscript-ai/search) - Use various providers to search the internet | -### Automatic Documentation +## Automatic Documentation -Each GPTScript tool is self-documented using the `tool.gpt` file. You can automatically generate documentation for your tools by visiting `tools.gptscript.ai/`. This documentation site allows others to easily search and explore the tools that have been created. +Each GPTScript tool is self-documented using the `tool.gpt` file. +You can automatically generate documentation for your tools by visiting `https://tools.gptscript.ai/`. +This documentation site allows others to easily search and explore the tools that have been created. -You can add more information about how to use your tool by adding an `examples` directory to your repository and adding a collection of `.gpt` files that demonstrate how to use your tool. These examples will be automatically included in the documentation. +You can add more information about how to use your tool by adding an `examples` directory to your repository and adding a collection of `.gpt` files that demonstrate how to use your tool. +These examples will be automatically included in the documentation. 
HTTP is only OK if it's on localhost/127.0.0.1.
If GPTScript has called the credential provider tool in the same context (more on that later), then it will use the stored credential instead of fetching it again. +To delete the credential that just got stored, run `gptscript credential delete myCred`. + You can also specify multiple credential tools for the same script, but they must be on separate lines: ```yaml -credentials: credential-tool-1.gpt -credentials: credential-tool-2.gpt +Credentials: credential-tool-1.gpt +Credentials: credential-tool-2.gpt (tool stuff here) ``` @@ -56,7 +62,7 @@ GPTScript also provides a generic credential tool (`github.com/gptscript-ai/cred where you only need to set one environment variable. Here is an example of how to use it: ```yaml -credentials: github.com/gptscript-ai/credential as myCredentialName with MY_ENV_VAR as env and "this message will be displayed to the user" as message and key as field +Credentials: github.com/gptscript-ai/credential as myCredentialName with MY_ENV_VAR as env and "this message will be displayed to the user" as message and key as field (tool stuff here) ``` @@ -66,24 +72,24 @@ the environment variable `MY_ENV_VAR` and stored in a credential called `myCrede See [the repo](https://github.com/gptscript-ai/credential) for more information. -## Credential Tool Arguments +## Credential Tool Parameters -A credential tool may define arguments. Here is an example: +A credential tool may define parameters. 
Here is an example: ```yaml -name: my-credential-tool -args: env: the environment variable to set -args: val: the value to set it to +Name: my-credential-tool +Parameter: env: the environment variable to set +Parameter: val: the value to set it to #!/usr/bin/env bash echo "{\"env\":{\"$ENV\":\"$VAL\"}}" ``` -When you reference this credential tool in another file, you can use syntax like this to set both arguments: +When you reference this credential tool in another file, you can use syntax like this to set both parameters: ```yaml -credential: my-credential-tool.gpt with MY_ENV_VAR as env and "my value" as val +Credential: my-credential-tool.gpt with MY_ENV_VAR as env and "my value" as val (tool stuff here) ``` @@ -92,7 +98,7 @@ In this example, the tool's output would be `{"env":{"MY_ENV_VAR":"my value"}}` ## Storing Credentials -By default, credentials are automatically stored in the credential store. Read the [main credentials page](../02-credentials.md) +By default, credentials are automatically stored in the credential store. Read the [main credentials page](../06-credentials.md) for more information about the credential store. :::note @@ -105,7 +111,7 @@ will not be stored in the credentials store. When you reference a credential tool in your script, you can give it an alias using the `as` keyword like this: ```yaml -credentials: my-credential-tool.gpt as myAlias +Credentials: my-credential-tool.gpt as myAlias (tool stuff here) ``` @@ -121,8 +127,7 @@ A credential context is basically a namespace for credentials. If you have multi you can switch between them by defining them in different credential contexts. The default context is called `default`, and this is used if none is specified. -You can set the credential context to use with the `--credential-context` flag when running GPTScript. For -example: +You can set the credential context to use with the `--credential-context` flag when running GPTScript. 
For example: ```bash gptscript --credential-context my-azure-workspace my-azure-script.gpt @@ -181,3 +186,21 @@ In this example, `toolA` provides the variables `ENV_VAR_1` and `ENV_VAR_2`, This will read the values of `ENV_VAR_1` through `ENV_VAR_4` from the current environment and set them for the credential. This is a direct mapping of environment variable names. **This is not recommended when overriding credentials for multiple tools that use the same environment variable names.** + +## Credential Refresh (Advanced) + +Some use cases (such as OAuth) may involve the need to refresh expired credentials. +To support this, your credential tool can return other fields besides `env` in its JSON output. +This is the full list of supported fields in the credential tool output: + +- `env` (type: object) - The environment variables to set. +- `expiresAt` (type: string, timestamp in RFC3339 format) - The time when the credential expires. +- `refreshToken` (type: string) - The refresh token to use to refresh the credential. + +When GPTScript tries to use a credential that has a defined `expiresAt` time, it will check if the credential has expired. +If the credential has expired, it will run the credential tool again, and the current value of the credential will be +set to the environment variable `GPTSCRIPT_EXISTING_CREDENTIAL` as a JSON string. This way, the credential tool can check for +that environment variable, and if it is set, get the refresh token from the existing credential and use it to refresh and return a new credential, +typically without user interaction. + +For an example of a tool that uses the refresh feature, see the [Gateway OAuth2 tool](https://github.com/gptscript-ai/gateway-oauth2). 
diff --git a/docs/docs/03-tools/05-context.md b/docs/docs/03-tools/05-context.md index 6dd22ed1..15e600e4 100644 --- a/docs/docs/03-tools/05-context.md +++ b/docs/docs/03-tools/05-context.md @@ -1,97 +1,94 @@ # Context -GPTScript provides a mechanism to share prompt information across many tools using the tool parameter `context`. It is used to provide additional information to the calling tool on when to use a specific tool by prepending the `context` to the instruction of the calling tool. +GPTScript provides a mechanism to share prompt information across many tools using the tool directive `Context`. +It is used to provide additional information to the calling tool on when to use a specific tool by prepending the context to the instruction of the calling tool. - Context can point to a static text or a GPTScript. -- Context tools are just regular GPTScript tools, and any valid gptscript field can be used. -- Exported tools from a context tool are made available to the calling tool. +- Context tools are just regular GPTScript tools, and any valid GPTScript fields can be used in them. +- Shared tools from a context tool are made available to the calling tool. - When context points to a GPTScript tool, output from the context tool gets prepended to the instruction of the calling tool. ## Writing a Context Provider Tool as static text ```yaml -# my-search-context.txt +# my-context.txt -You are an expert web researcher with access to the Search tool.If the search tool fails to return any information stop execution of the script with message "Sorry! Search did not return any results". Feel free to get the contents of the returned URLs in order to get more information. Provide as much detail as you can. Also return the source of the search results. +You have access to run commands on the user's system. Please ask for confirmation from the user before running a command. 
``` -## Using a Context Provider Tool +## Using a Context Tool -Continuing with the above example, this is how you can use the same context in tools that uses different search providers: +Continuing with the above example, this is how you can use the same context in different tools: ```yaml -# my-search-duduckgo.gpt -context: ./my-search-context.txt -tools: github.com/gptscript-ai/search/duckduckgo,sys.http.html2text - -What are some of the most popular tourist destinations in Scotland, and how many people visit them each year? +Context: ./my-context.txt +Tools: sys.exec, sys.write +Which processes on my system are using the most memory? Write their PIDs to a file called pids.txt. ``` ```yaml -# my-search-brave.gpt -context: ./my-search-context.txt -tools: github.com/gptscript-ai/search/brave,sys.http.html2text - -List out some of the main actors in the Christopher Nolan movie Inception, as well as the names of the other Christopher Nolan movies they have appeared in. +Context: ./my-context.txt +Tools: sys.exec +Which file in my current directory is the largest? ``` - ## Context Provider Tool with exported tools Here is a simple example of a context provider tool that provides additional context to search tool: ```yaml -# my-search-context-tool.gpt -share tools: sys.http.html2text? +# my-context-tool.gpt +Share Tools: sys.exec -#!/bin/bash -echo You are an expert web researcher with access to the Search tool.If the search tool fails to return any information stop execution of the script with message "Sorry! Search did not return any results". Feel free to get the contents of the returned URLs in order to get more information. Provide as much detail as you can. Also return the source of the search results. +#!sys.echo +You have access to run commands on the user's system. Please ask for confirmation from the user before running a command. ``` +The `#!sys.echo` at the start of the tool body tells GPTScript to return everything after it as the output of the tool. 
+ Continuing with the above example, this is how you can use it in a script: ```yaml -context: ./my-search-context-tool.gpt -tools: github.com/gptscript-ai/search/duckduckgo - -What are some of the most popular tourist destinations in Scotland, and how many people visit them each year? +Context: ./my-context-tool.gpt +Tools: sys.write +Which processes on my system are using the most memory? Write their PIDs to a file called pids.txt. ``` When you run this script, GPTScript will use the output from the context tool and add it to the user message along with the existing prompt in this tool to provide additional context to LLM. -## Context Provider Tool with args +## Context Provider Tool with Parameters -Here is an example of a context provider tool that uses args to decide which search tool to use when answering the user provided queries: +Here is an example of a context provider tool that takes a parameter: ```yaml -# context_with_arg.gpt -share tools: github.com/gptscript-ai/search/duckduckgo, github.com/gptscript-ai/search/brave, sys.http.html2text? -args: search_tool: tool to search with +# context_with_param.gpt +Param: tone: the tone to use when responding to the user's request #!/bin/bash -echo You are an expert web researcher with access to the ${search_tool} Search tool.If the search tool fails to return any information stop execution of the script with message "Sorry! Search did not return any results". Feel free to get the contents of the returned URLs in order to get more information. Provide as much detail as you can. Also return the source of the search results. +echo "Respond to the user's request in a ${tone} tone." 
``` Continuing with the above example, this is how you can use it in a script: ```yaml -# my_context_with_arg.gpt -context: ./context_with_arg.gpt with ${search} as search_tool -args: search: Search tool to use +# tool.gpt +Context: ./context_with_param.gpt with ${tone} as tone +Param: tone: the tone to use when responding to the user's request +Tools: sys.http.html2text -What are some of the most popular tourist destinations in Scotland, and how many people visit them each year? +What are the top stories on Hacker News right now? ``` -This script can be used to search with `brave` or `duckduckdb` tools depending on the search parameter passed to the tool. -Example usage for using brave search tool: +Here's how you can run the script and define the tone parameter: + ```yaml -gptscript --disable-cache my_context_with_arg.gpt '{"search": "brave"}' +gptscript tool.gpt '{"tone": "obnoxious"}' ``` diff --git a/docs/docs/03-tools/06-how-it-works.md b/docs/docs/03-tools/06-how-it-works.md index 29bf764d..31dd17ce 100644 --- a/docs/docs/03-tools/06-how-it-works.md +++ b/docs/docs/03-tools/06-how-it-works.md @@ -1,32 +1,33 @@ # How it works -**_GPTScript is composed of tools._** Each tool performs a series of actions similar to a function. Tools have other tools available to them that can be invoked similar to a function call. While similar to a function, the tools are -primarily implemented with a natural language prompt. **_The interaction of the tools is determined by the AI model_**, -the model determines if the tool needs to be invoked and what arguments to pass. Tools are intended to be implemented -with a natural language prompt but can also be implemented with a command or HTTP call. +**_GPTScript is fundamentally composed of tools._** Each tool is either a natural language prompt for the LLM, or is +programmatic (i.e. a command, script, or program to be run). Tools that use a natural language prompt can also invoke +other tools, similar to function calls. 
The LLM decides when a tool needs to be invoked and sets the parameters to pass to it. ## Example -Below are two tool definitions, separated by `---`. The first tool does not require a name or description, but -every tool after name and description are required. The first tool, has the parameter `tools: bob` meaning that the tool named `bob` is available to be called if needed. +Below are two tool definitions, separated by `---`. +The first tool in the file (often referred to as the "entrypoint tool") does not need a name and description, +but a name is required for all other tools in the file, and a description is recommended. +The entrypoint tool also has the line `Tools: bob` meaning that the tool named `bob` is available to be called if needed. ```yaml -tools: bob +Tools: bob Ask Bob how he is doing and let me know exactly what he said. --- -name: bob -description: I'm Bob, a friendly guy. -args: question: The question to ask Bob. +Name: bob +Description: I'm Bob, a friendly guy. +Param: question: The question to ask Bob. When asked how I am doing, respond with "Thanks for asking "${question}", I'm doing great fellow friendly AI tool!" ``` Put the above content in a file named `bob.gpt` and run the following command: -```shell -$ gptscript bob.gpt +```bash +gptscript bob.gpt ``` ``` @@ -35,8 +36,8 @@ OUTPUT: Bob said, "Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!" ``` -Tools can be implemented by invoking a program instead of a natural language prompt. The below -example is the same as the previous example but implements Bob using python. +Tools can be implemented by invoking a program instead of a natural language prompt. +The below example is the same as the previous example but implements Bob using Python. ```yaml Tools: bob @@ -46,7 +47,7 @@ Ask Bob how he is doing and let me know exactly what he said. --- Name: bob Description: I'm Bob, a friendly guy. -Args: question: The question to ask Bob. 
+Param: question: The question to ask Bob. #!python3 @@ -55,6 +56,4 @@ import os print(f"Thanks for asking {os.environ['question']}, I'm doing great fellow friendly AI tool!") ``` -With these basic building blocks you can create complex scripts with AI interacting with AI, your local system, data, -or external services. - +With these basic building blocks you can create complex scripts with AI interacting with AI, your local system, data, or external services. diff --git a/docs/docs/03-tools/07-gpt-file-reference.md b/docs/docs/03-tools/07-gpt-file-reference.md index 6734bc59..fdc3b363 100644 --- a/docs/docs/03-tools/07-gpt-file-reference.md +++ b/docs/docs/03-tools/07-gpt-file-reference.md @@ -23,10 +23,10 @@ Do more sample tool stuff. ## Tool Definition -A tool starts with a preamble that defines the tool's name, description, args, available tools and additional parameters. +A tool starts with a preamble that defines the tool's name, description, parameters, available tools, and additional directives. The preamble is followed by the tool's body, which contains the instructions for the tool. Comments in the preamble are lines starting with `#` and are ignored by the parser. Comments are not really encouraged -as the text is typically more useful in the description, argument descriptions or instructions. +as the text is typically more useful in the description, parameter descriptions, or body. ```yaml Name: tool-name @@ -34,14 +34,15 @@ Name: tool-name Description: Tool description # This tool can invoke tool1 or tool2 if needed Tools: tool1, tool2 -Args: arg1: The description of arg1 +Param: param1: The description of param1 Tool instructions go here. ``` -## Tool Parameters +## Tool Directives -Tool parameters are key-value pairs defined at the beginning of a tool block, before any instructional text. They are specified in the format `key: value`. 
The parser recognizes the following keys (case-insensitive and spaces are ignored): +Tool directives are key-value pairs defined at the beginning of a tool block, before the tool body. +They are specified in the format `Key: value`. The parser recognizes the following keys (case-insensitive and spaces are ignored): | Key | Description | |----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| @@ -52,7 +53,7 @@ Tool parameters are key-value pairs defined at the beginning of a tool block, be | `Internal Prompt` | Setting this to `false` will disable the built-in system prompt for this tool. | | `Tools` | A comma-separated list of tools that are available to be called by this tool. | | `Global Tools` | A comma-separated list of tools that are available to be called by all tools. | -| `Parameter` / `Args` | Arguments for the tool. Each argument is defined in the format `arg-name: description`. | +| `Parameter` / `Args` | Parameters for the tool. Each parameter is defined in the format `param-name: description`. | | `Max Tokens` | Set to a number if you wish to limit the maximum number of tokens that can be generated by the LLM. | | `JSON Response` | Setting to `true` will cause the LLM to respond in a JSON format. If you set true you must also include instructions in the tool. | | `Temperature` | A floating-point number representing the temperature parameter. By default, the temperature is 0. Set to a higher number for more creativity. | @@ -63,26 +64,23 @@ Tool parameters are key-value pairs defined at the beginning of a tool block, be | `Context` | A comma-separated list of context tools available to the tool. | | `Share Context` | A comma-separated list of context tools shared by this tool with any tool including this tool in its context. 
| - - ## Tool Body -The tool body contains the instructions for the tool which can be a natural language prompt or -a command to execute. Commands must start with `#!` followed by the interpreter (e.g. `#!/bin/bash`, `#!python3`) -a text that will be placed in a file and passed to the interpreter. Arguments can be references in the instructions -using the format `${arg1}`. +The tool body contains the instructions for the tool. It can be a natural language prompt or +a command to execute. Commands must start with `#!` followed by the interpreter (e.g. `#!/bin/bash`, `#!python3`). +Parameters can be referenced in the body using the format `${param1}`. ```yaml -name: echo-ai -description: A tool that echos the input -args: input: The input +Name: echo-ai +Description: A tool that echos the input +Parameter: input: The input Just return only "${input}" --- -name: echo-command -description: A tool that echos the input -args: input: The input +Name: echo-command +Description: A tool that echos the input +Parameter: input: The input #!/bin/bash diff --git a/docs/docs/05-alternative-model-providers.md b/docs/docs/05-alternative-model-providers.md index 51818546..2c05e7dd 100644 --- a/docs/docs/05-alternative-model-providers.md +++ b/docs/docs/05-alternative-model-providers.md @@ -2,7 +2,7 @@ ## Usage -GPTScript can be used against alternative models that expose an OpenAI compatible API or have a provider shim available. +GPTScript can be used against alternative models that expose an OpenAI-compatible API or have a provider available. ### Using a model with an OpenAI compatible API @@ -13,9 +13,7 @@ Say hello world ``` :::note - - Mistral's La Plateforme has an OpenAI compatible API, but the model does not behave identically to gpt-4. For that reason, we also have a provider for it that might get better results in some cases. - +Mistral's La Plateforme has an OpenAI-compatible API, but the model does not behave identically to gpt-4. 
For that reason, we also have a provider for it that might get better results in some cases. ::: ### Using a model that requires a provider @@ -29,15 +27,15 @@ Say hello world For OpenAI compatible providers, GPTScript will look for an API key to be configured with the prefix `GPTSCRIPT_PROVIDER_`, the base domain converted to environment variable format, and a suffix of `_API_KEY`. -As an example if you are using `mistral-large-latest from https://api.mistral.ai/v1`, the environment variable would -be `GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY` +For example, if you are using `mistral-large-latest from https://api.mistral.ai/v1`, the environment variable would +be `GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY`. -Each provider shim has different requirements for authentication. Please check the readme for the provider you are +Each provider has different requirements for authentication. Please check the readme for the provider you are trying to use. ## Available Model Providers -The following shims are currently available: +The following providers are currently available: * [github.com/gptscript-ai/azure-openai-provider](https://github.com/gptscript-ai/azure-openai-provider) * [github.com/gptscript-ai/azure-other-provider](https://github.com/gptscript-ai/azure-other-provider) @@ -52,16 +50,16 @@ The following shims are currently available: For any provider that supports listing models, you can use this command: ```bash -# With a shim +# With a provider gptscript --list-models github.com/gptscript-ai/claude3-anthropic-provider -# To OpenAI compatible endpoint +# With an OpenAI-compatible endpoint gptscript --list-models https://api.mistral.ai/v1 ``` ## Compatibility -While the shims provide support for using GPTScript with other models, the effectiveness of using a +While the providers allow GPTScript to work with other models, the effectiveness of using a different model will depend on a combination of prompt engineering and the quality of the model. 
You may need to change wording or add more description if you are not getting the results you want. In some cases, the model might not be capable of intelligently handling the complex function calls. diff --git a/docs/docs/02-credentials.md b/docs/docs/06-credentials.md similarity index 96% rename from docs/docs/02-credentials.md rename to docs/docs/06-credentials.md index 5ba349b9..1d3431de 100644 --- a/docs/docs/02-credentials.md +++ b/docs/docs/06-credentials.md @@ -18,7 +18,9 @@ The configuration file is located in the following location based on your operat - macOS: `$HOME/Library/Application Support/gptscript/config.json` - Linux: `$XDG_CONFIG_HOME/gptscript/config.json` -(Note: if you set the `XDG_CONFIG_HOME` environment variable on macOS, then the same path as Linux will be used.) +:::note +If you set the `XDG_CONFIG_HOME` environment variable on macOS, then the same path as Linux will be used. +::: The configured credential store will be automatically downloaded and compiled from the [gptscript-ai/gptscript-credential-helpers](https://github.com/gptscript-ai/gptscript-credential-helpers) repository, other than the `file` store, which is built-in to GPTScript itself. diff --git a/docs/docs/09-faqs.md b/docs/docs/09-faqs.md index 00f26700..e8e15cb5 100644 --- a/docs/docs/09-faqs.md +++ b/docs/docs/09-faqs.md @@ -2,14 +2,15 @@ ### I don't have Homebrew, how can I install GPTScript? -On MacOS and Linux, you can alternatively install via: `curl https://get.gptscript.ai/install.sh | sh` +On macOS and Linux, you can alternatively install via: `curl https://get.gptscript.ai/install.sh | sh` On all supported systems, you download and install the archive for your platform and architecture from the [releases page](https://github.com/gptscript-ai/gptscript/releases). - ### Does GPTScript have an SDK or API I can program against? 
-Currently, there are three SDKs being maintained: [Python](https://github.com/gptscript-ai/py-gptscript), [Node](https://github.com/gptscript-ai/node-gptscript), and [Go](https://github.com/gptscript-ai/go-gptscript). They are currently under development and are being iterated on relatively rapidly. The READMEs in each repository contain the most up-to-date documentation for the functionality of each. +Currently, there are three SDKs being maintained: [Python](https://github.com/gptscript-ai/py-gptscript), [Node](https://github.com/gptscript-ai/node-gptscript), and [Go](https://github.com/gptscript-ai/go-gptscript). +They are under development and are being iterated on relatively rapidly. +The READMEs in each repository contain the most up-to-date documentation for the functionality of each. ### I see there's a --disable-cache flag. How does caching working in GPTScript? @@ -17,27 +18,43 @@ GPTScript leverages caching to speed up execution and reduce LLM costs. There ar - Git commit hash lookups for tools - LLM responses -Caching is enabled for both of these by default. It can be disabled via the `--disable-cache` flag. Below is an explanation of how these areas behave when caching is enabled and disabled. +Caching is enabled for both of these by default. It can be disabled via the `--disable-cache` flag. +Below is an explanation of how these areas behave when caching is enabled and disabled. #### Git commit hash lookups for tools -When a remote tool or context is included in your script (like so: `Tools: github.com/gptscript-ai/browser`) and then invoked during script execution, GPTScript will pull the Git repo for that tool and build it. The tool’s repo and build will be stored in your system’s cache directory (at [$XDG_CACHE_HOME](https://pkg.go.dev/os#UserCacheDir)/gptscript/repos). Subsequent invocations of the tool leverage that cache. 
When the cache is enabled, GPTScript will only check for a newer version of the tool once an hour; if an hour hasn’t passed since the last check, it will just use the one it has. If this is the first invocation and the tool doesn’t yet exist in the cache, it will be pulled and built as normal. +When a remote tool or context is included in your script (like so: `Tools: github.com/gptscript-ai/browser`) and then invoked during script execution, +GPTScript will pull the Git repo for that tool and build it. +The tool's repo and build will be stored in your system's cache directory (at [$XDG_CACHE_HOME](https://pkg.go.dev/os#UserCacheDir)/gptscript/repos). +Subsequent invocations of the tool leverage that cache. +When the cache is enabled, GPTScript will only check for a newer version of the tool once an hour; +if an hour hasn't passed since the last check, it will just use the one it has. +If this is the first invocation and the tool doesn't yet exist in the cache, it will be pulled and built as normal. -When the cache is disabled, GPTScript will check that it has the latest version of the tool (meaning the latest git commit for the repo) on every single invocation of the tool. If GPTScript determines it already has the latest version, that build will be used as-is. In other words, disabling the cache DOES NOT force GPTScript to rebuild the tool, it only forces GPTScript to always check if it has the latest version. +When the cache is disabled, GPTScript will check that it has the latest version of the tool (meaning the latest git commit for the repo) on every single invocation of the tool. +If GPTScript determines it already has the latest version, that build will be used as-is. +In other words, disabling the cache DOES NOT force GPTScript to rebuild the tool, it only forces GPTScript to always check if it has the latest version. 
#### LLM responses -With regards to LLM responses, when the cache is enabled GPTScript will cache the LLM’s response to a chat completion request. Each response is stored as a gob-encoded file in $XDG_CACHE_HOME/gptscript, where the file name is a hash of the chat completion request. +In regard to LLM responses, when the cache is enabled, GPTScript will cache the LLM's response to a chat completion request. +Each response is stored as a gob-encoded file in $XDG_CACHE_HOME/gptscript, where the file name is a hash of the chat completion request. -It is important to note that all [messages in chat completion request](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) are used to generate the hash that is used as the file name. This means that every message between user and LLM affects the cache lookup. So, when using GPTScript in chat mode, it is very unlikely you’ll receive a cached LLM response. Conversely, non-chat GPTScript automations are much more likely to be consistent and thus make use of cached LLM responses. +It is important to note that all [messages in chat completion request](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) are used to generate the hash that is used as the file name. +This means that every message between user and LLM affects the cache lookup. +So, when using GPTScript in chat mode, it is very unlikely you'll receive a cached LLM response. +Conversely, non-chat GPTScript automations are much more likely to be consistent and thus make use of cached LLM responses. ### I see there's a --workspace flag. How do I make use of that? -Every invocation of GPTScript has a workspace directory available to it. By default, this directory is a one-off temp directory, but you can override this and explicitly set a workspace using the `--workspace` flag, like so: +Every invocation of GPTScript has a workspace directory available to it. 
+By default, this directory is a one-off temp directory, but you can override this and explicitly set a workspace using the `--workspace` flag, like so: + ``` gptscript --workspace . my-script.gpt ``` -In the above example, the user’s current directory (denoted by `.`) will be set as the workspace. Both absolute and relative paths are supported. + +In the above example, the user's current directory (denoted by `.`) will be set as the workspace. Both absolute and relative paths are supported. Regardless of whether it is set implicitly or explicitly, the workspace is then made available to the script execution as the `GPTSCRIPT_WORKSPACE_DIR` environment variable. @@ -45,17 +62,24 @@ Regardless of whether it is set implicitly or explicitly, the workspace is then GPTScript does not force scripts or tools to write to, read from, or otherwise use the workspace. The tools must decide to make use of the workspace environment variable. ::: -To make prompt-based tools workspace aware, you can add our workspace context, like so: +To make prompt-based tools workspace aware, you can reference our workspace context tool, like so: + ``` Context: github.com/gptscript-ai/context/workspace ``` -This tells the LLM (by way of a [system message](https://platform.openai.com/docs/guides/text-generation/chat-completions-api)) what the workspace directory is, what its initial contents are, and that if it decides to create a file or directory, it should do so in the workspace directory. This will not, however, have any impact on code-based tools (ie python, bash, or go tools). Such tools will have the `GPTSCRIPT_WORKSPACE_DIR` environment variable available to them, but they must be written in such a way that they make use of it. -This context also automatically shares the `sys.ls`, `sys.read`, and `sys.write` tools with the tool that is using it as a context. This is because if a tool intends to interact with the workspace, it minimally needs these tools. 
+This tells the LLM (by way of a [system message](https://platform.openai.com/docs/guides/text-generation/chat-completions-api)) what the workspace directory is, what its initial contents are, and that if it decides to create a file or directory, it should do so in the workspace directory. +This will not, however, have any impact on code-based tools (ie python, bash, or go tools). +Such tools will have the `GPTSCRIPT_WORKSPACE_DIR` environment variable available to them, but they must be written in such a way that they make use of it. + +This context also automatically shares the `sys.ls`, `sys.read`, and `sys.write` tools with the tool that is using it as a context. +This is because if a tool intends to interact with the workspace, it minimally needs these tools. ### I'm hitting GitHub's rate limit for unauthenticated requests when using GPTScript. -By default, GPTScript makes unauthenticated requests to GitHub when pulling tools. Since GitHub's rate limits for unauthenticated requests are fairly low, running into them when developing with GPTScript is a common issue. To avoid this, you can get GPTScript to make authenticated requests -- which have higher rate limits -- by setting the `GITHUB_AUTH_TOKEN` environment variable to your github account's PAT (Personal Access Token). +By default, GPTScript makes unauthenticated requests to GitHub when pulling tools. +Since GitHub's rate limits for unauthenticated requests are fairly low, running into them when developing with GPTScript is a common issue. +To avoid this, you can get GPTScript to make authenticated requests -- which have higher rate limits -- by setting the `GITHUB_AUTH_TOKEN` environment variable to your github account's PAT (Personal Access Token). 
If you're already authenticated with the `gh` CLI, you can use its token by running: ```bash From d28649a8282ca8400b5c873b361ff25cf50e4911 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 1 Aug 2024 20:28:02 -0700 Subject: [PATCH 057/270] chore: compress env vars that are too large --- .../04-command-line-reference/gptscript.md | 1 + .../gptscript_getenv.md | 48 +++++++++++++++ pkg/cli/getenv.go | 60 +++++++++++++++++++ pkg/cli/getenv_test.go | 57 ++++++++++++++++++ pkg/cli/gptscript.go | 1 + pkg/engine/cmd.go | 25 +++++++- pkg/tests/runner_test.go | 32 ++++++++++ .../testdata/TestEnvOverflow/context.json | 1 + pkg/tests/testdata/TestEnvOverflow/test.gpt | 14 +++++ 9 files changed, 237 insertions(+), 2 deletions(-) create mode 100644 docs/docs/04-command-line-reference/gptscript_getenv.md create mode 100644 pkg/cli/getenv.go create mode 100644 pkg/cli/getenv_test.go create mode 100644 pkg/tests/testdata/TestEnvOverflow/context.json create mode 100644 pkg/tests/testdata/TestEnvOverflow/test.gpt diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md index 6cd3feb6..de29a97f 100644 --- a/docs/docs/04-command-line-reference/gptscript.md +++ b/docs/docs/04-command-line-reference/gptscript.md @@ -51,5 +51,6 @@ gptscript [flags] PROGRAM_FILE [INPUT...] 
* [gptscript credential](gptscript_credential.md) - List stored credentials * [gptscript eval](gptscript_eval.md) - * [gptscript fmt](gptscript_fmt.md) - +* [gptscript getenv](gptscript_getenv.md) - Looks up an environment variable for use in GPTScript tools * [gptscript parse](gptscript_parse.md) - diff --git a/docs/docs/04-command-line-reference/gptscript_getenv.md b/docs/docs/04-command-line-reference/gptscript_getenv.md new file mode 100644 index 00000000..80fea614 --- /dev/null +++ b/docs/docs/04-command-line-reference/gptscript_getenv.md @@ -0,0 +1,48 @@ +--- +title: "gptscript getenv" +--- +## gptscript getenv + +Looks up an environment variable for use in GPTScript tools + +``` +gptscript getenv [flags] KEY [DEFAULT] +``` + +### Options + +``` + -h, --help help for getenv +``` + +### Options inherited from parent commands + +``` + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + 
--dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) +``` + +### SEE ALSO + +* [gptscript](gptscript.md) - + diff --git a/pkg/cli/getenv.go b/pkg/cli/getenv.go new file mode 100644 index 00000000..6d81944d --- /dev/null +++ b/pkg/cli/getenv.go @@ -0,0 +1,60 @@ +package cli + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "fmt" + "io" + "os" + "strings" + + "github.com/spf13/cobra" +) + +type Getenv struct { +} + +func (e *Getenv) Customize(cmd *cobra.Command) { + cmd.Use = "getenv [flags] KEY [DEFAULT]" + cmd.Short = "Looks up an environment variable for use in GPTScript tools" + cmd.Args = cobra.RangeArgs(1, 2) +} + +func (e *Getenv) Run(_ *cobra.Command, args []string) error { + var ( + key = args[0] + def string + ) + if len(args) > 1 { + def = args[1] + } + value := getEnv(key, def) + fmt.Print(value) + return nil +} + +func getEnv(key, def string) string { + v := os.Getenv(key) + if v == "" { + return def + } + + if strings.HasPrefix(v, `{"_gz":"`) && strings.HasSuffix(v, `"}`) { + data, err := base64.StdEncoding.DecodeString(v[8 : len(v)-2]) + if err != nil { + return v + } + gz, err := 
gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return v + } + strBytes, err := io.ReadAll(gz) + if err != nil { + return v + } + return string(strBytes) + } + + return v +} diff --git a/pkg/cli/getenv_test.go b/pkg/cli/getenv_test.go new file mode 100644 index 00000000..8cc9e05f --- /dev/null +++ b/pkg/cli/getenv_test.go @@ -0,0 +1,57 @@ +package cli + +import ( + "os" + "testing" +) + +func TestGetEnv(t *testing.T) { + // Cleaning up + defer func(currentEnvValue string) { + os.Setenv("testKey", currentEnvValue) + }(os.Getenv("testKey")) + + // Tests + testCases := []struct { + name string + key string + def string + envValue string + expectedResult string + }{ + { + name: "NoValueUseDefault", + key: "testKey", + def: "defaultValue", + envValue: "", + expectedResult: "defaultValue", + }, + { + name: "ValueExistsNoCompress", + key: "testKey", + def: "defaultValue", + envValue: "testValue", + expectedResult: "testValue", + }, + { + name: "ValueExistsCompressed", + key: "testKey", + def: "defaultValue", + envValue: `{"_gz":"H4sIAEosrGYC/ytJLS5RKEvMKU0FACtB3ewKAAAA"}`, + + expectedResult: "test value", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + os.Setenv(test.key, test.envValue) + + result := getEnv(test.key, test.def) + + if result != test.expectedResult { + t.Errorf("expected: %s, got: %s", test.expectedResult, result) + } + }) + } +} diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 4b2fab92..4458d87b 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -85,6 +85,7 @@ func New() *cobra.Command { &Credential{root: root}, &Parse{}, &Fmt{}, + &Getenv{}, &SDKServer{ GPTScript: root, }, diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 9e4b94fc..7089f664 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -2,7 +2,9 @@ package engine import ( "bytes" + "compress/gzip" "context" + "encoding/base64" "encoding/json" "fmt" "io" @@ -44,6 +46,25 @@ func (o *outputWriter) Write(p []byte) 
(n int, err error) { return len(p), nil } +func compressEnv(envs []string) (result []string) { + for _, env := range envs { + k, v, ok := strings.Cut(env, "=") + if !ok || len(v) < 40_000 { + result = append(result, env) + continue + } + + out := bytes.NewBuffer(nil) + b64 := base64.NewEncoder(base64.StdEncoding, out) + gz := gzip.NewWriter(b64) + _, _ = gz.Write([]byte(v)) + _ = gz.Close() + _ = b64.Close() + result = append(result, k+`={"_gz":"`+out.String()+`"}`) + } + return +} + func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCategory ToolCategory) (cmdOut string, cmdErr error) { id := counter.Next() @@ -95,10 +116,10 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate for _, inputContext := range ctx.InputContext { instructions = append(instructions, inputContext.Content) } + var extraEnv = []string{ strings.TrimSpace("GPTSCRIPT_CONTEXT=" + strings.Join(instructions, "\n")), } - cmd, stop, err := e.newCommand(ctx.Ctx, extraEnv, tool, input) if err != nil { return "", err @@ -277,6 +298,6 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T } cmd := exec.CommandContext(ctx, env.Lookup(envvars, args[0]), cmdArgs...) 
- cmd.Env = envvars + cmd.Env = compressEnv(envvars) return cmd, stop, nil } diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 70d5346c..a9a01510 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -1,8 +1,11 @@ package tests import ( + "bytes" + "compress/gzip" "context" "encoding/json" + "io" "os" "runtime" "testing" @@ -919,6 +922,35 @@ func TestOutput(t *testing.T) { autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step3")) } +func TestEnvOverflow(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + } + + r := tester.NewRunner(t) + + out := r.RunDefault() + autogold.Expect(`{"_gz":"H4sIAAAAAAAA/+zAgQAAAADCMNb8JQK4wjYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgHgAA//+94pKFQBkBAA=="} +`).Equal(t, out) + + data, err := os.ReadFile("testdata/TestEnvOverflow/context.json") + require.NoError(t, err) + + compressed := struct { + Data []byte `json:"_gz"` + }{} + err = json.Unmarshal(data, &compressed) + require.NoError(t, err) + + gunzip, err := gzip.NewReader(bytes.NewReader(compressed.Data)) + require.NoError(t, err) + + content, err := io.ReadAll(gunzip) + require.NoError(t, err) + + 
autogold.Expect("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaa").Equal(t, string(content)) +} + func TestSysContext(t *testing.T) { if runtime.GOOS == "windows" { t.Skip() diff --git a/pkg/tests/testdata/TestEnvOverflow/context.json b/pkg/tests/testdata/TestEnvOverflow/context.json new file mode 100644 index 00000000..eb99ddb6 --- /dev/null +++ b/pkg/tests/testdata/TestEnvOverflow/context.json @@ -0,0 +1 @@ +{"_gz":"H4sIAAAAAAAA/+zAgQAAAADCMNb8JQK4wjYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgHgAA//+94pKFQBkBAA=="} diff --git a/pkg/tests/testdata/TestEnvOverflow/test.gpt b/pkg/tests/testdata/TestEnvOverflow/test.gpt new file mode 100644 index 00000000..406e6de7 --- /dev/null +++ b/pkg/tests/testdata/TestEnvOverflow/test.gpt @@ -0,0 +1,14 @@ +context: c + +#!/bin/bash + +echo "${GPTSCRIPT_CONTEXT}" +echo "${GPTSCRIPT_CONTEXT}" > ${GPTSCRIPT_TOOL_DIR}/context.json + +--- +name: c + +#!/bin/bash + +string=$(printf 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa%.0s' {1..1000}) +echo "$string" \ No newline at end of file From 6e554b95f7a62ac60aef67ceb41486df506029b1 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 1 Aug 2024 22:24:51 -0700 Subject: [PATCH 058/270] chore: capture stderr by default and return to LLM, even on success --- pkg/engine/cmd.go | 19 +++++++++++-------- pkg/tests/testdata/TestCwd/subtool/test.gpt | 2 +- pkg/tests/testdata/TestCwd/test.gpt | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 7089f664..ec4949ce 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -134,24 +134,27 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate }, } - output := &bytes.Buffer{} - all := &bytes.Buffer{} - cmd.Stderr = io.MultiWriter(all, os.Stderr) - cmd.Stdout = io.MultiWriter(all, output, &outputWriter{ + result := &bytes.Buffer{} + all := io.MultiWriter(result, &outputWriter{ id: id, progress: e.Progress, }) + cmd.Stdout = 
all + cmd.Stderr = all + if log.IsDebug() { + cmd.Stderr = io.MultiWriter(all, os.Stderr) + } + if err := cmd.Run(); err != nil { if toolCategory == NoCategory { - return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, all), nil + return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, result), nil } - _, _ = os.Stderr.Write(output.Bytes()) log.Errorf("failed to run tool [%s] cmd %v: %v", tool.Parameters.Name, cmd.Args, err) - return "", fmt.Errorf("ERROR: %s: %w", all, err) + return "", fmt.Errorf("ERROR: %s: %w", result, err) } - return output.String(), IsChatFinishMessage(output.String()) + return result.String(), IsChatFinishMessage(result.String()) } func (e *Engine) getRuntimeEnv(ctx context.Context, tool types.Tool, cmd, env []string) ([]string, error) { diff --git a/pkg/tests/testdata/TestCwd/subtool/test.gpt b/pkg/tests/testdata/TestCwd/subtool/test.gpt index 41314bbe..29cf1ed0 100644 --- a/pkg/tests/testdata/TestCwd/subtool/test.gpt +++ b/pkg/tests/testdata/TestCwd/subtool/test.gpt @@ -1,6 +1,6 @@ # #!/usr/bin/env X=${GPTSCRIPT_TOOL_DIR} /bin/bash -set -e -x +set -e [ ${X} = ${GPTSCRIPT_TOOL_DIR} ] cd $X diff --git a/pkg/tests/testdata/TestCwd/test.gpt b/pkg/tests/testdata/TestCwd/test.gpt index 5185635a..e053571c 100644 --- a/pkg/tests/testdata/TestCwd/test.gpt +++ b/pkg/tests/testdata/TestCwd/test.gpt @@ -6,7 +6,7 @@ noop name: local #!/bin/bash -set -e -x +set -e [ "" = "${TOOL_DIR}" ] P=$(pwd) From df204dfbc1a1d109af4f9c6a91f08934fa379530 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 1 Aug 2024 22:42:39 -0700 Subject: [PATCH 059/270] chore: bump tui --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 858bfa65..dd2c9d95 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 
github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d - github.com/gptscript-ai/go-gptscript v0.9.3-0.20240731222146-b67275f3fa69 - github.com/gptscript-ai/tui v0.0.0-20240731002102-544a80108f89 + github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 + github.com/gptscript-ai/tui v0.0.0-20240802053758-038d3eed9502 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 diff --git a/go.sum b/go.sum index 53f72056..42a404f2 100644 --- a/go.sum +++ b/go.sum @@ -171,10 +171,10 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf037 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6AVbvD9pKDVewykkwSAPwEpmIEQIR/4= github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240731222146-b67275f3fa69 h1:c+Tf6I8jUg8hDgfP8jKs93UcC9dDIGxClWGZUL36Hd0= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240731222146-b67275f3fa69/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240731002102-544a80108f89 h1:1G8OhXzCqCe/LARec8Qb7XkpQiEWoRYE/2UfohD+Do4= -github.com/gptscript-ai/tui v0.0.0-20240731002102-544a80108f89/go.mod h1:Llh3vi87gyry6j/sgJxhkHHvgv9uQRzEiMWuQtmpW1w= +github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 h1:BTfJ6ls31Roq42lznlZnuPzRf0wrT8jT+tWcvq7wDXY= +github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= +github.com/gptscript-ai/tui v0.0.0-20240802053758-038d3eed9502 h1:172DJthLtMUqXrODC/wGd0Vmj6adn9YiP9KnlrZpD+4= +github.com/gptscript-ai/tui v0.0.0-20240802053758-038d3eed9502/go.mod 
h1:KGtCo7cjH6qR6Wp6AyI1dL1R8bln8wVpdDEoopRUckY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= From 0e6722e74d429e2b558cf17031564afad4cfb6e0 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 2 Aug 2024 08:07:57 -0400 Subject: [PATCH 060/270] feat: include fields in prompt only when there are fields This essentially allows tools to "display" info to a user, even when using SDKs. Signed-off-by: Donnie Adams --- pkg/prompt/prompt.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/prompt/prompt.go b/pkg/prompt/prompt.go index c1b693be..44cb20f1 100644 --- a/pkg/prompt/prompt.go +++ b/pkg/prompt/prompt.go @@ -61,9 +61,13 @@ func SysPrompt(ctx context.Context, envs []string, input string, _ chan<- string for _, env := range envs { if url, ok := strings.CutPrefix(env, types.PromptURLEnvVar+"="); ok { + var fields []string + if params.Fields != "" { + fields = strings.Split(params.Fields, ",") + } httpPrompt := types.Prompt{ Message: params.Message, - Fields: strings.Split(params.Fields, ","), + Fields: fields, Sensitive: params.Sensitive == "true", } return sysPromptHTTP(ctx, envs, url, httpPrompt) @@ -76,7 +80,7 @@ func SysPrompt(ctx context.Context, envs []string, input string, _ chan<- string func sysPrompt(ctx context.Context, req types.Prompt) (_ string, err error) { defer context2.GetPauseFuncFromCtx(ctx)()() - if req.Message != "" && len(req.Fields) == 1 && strings.TrimSpace(req.Fields[0]) == "" { + if req.Message != "" && len(req.Fields) == 0 { var errs []error _, err := fmt.Fprintln(os.Stderr, req.Message) errs = append(errs, err) From 97e9bcc82a0338b72bf2bfd780ccb1cd78be2ba8 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 2 Aug 2024 16:10:38 -0700 Subject: [PATCH 061/270] bug: don't also abort the 
program on double ctrl+c --- go.mod | 2 +- go.sum | 4 ++-- pkg/cli/main.go | 6 +++++- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index dd2c9d95..2682876c 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 - github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d + github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 github.com/gptscript-ai/tui v0.0.0-20240802053758-038d3eed9502 github.com/hexops/autogold/v2 v2.2.1 diff --git a/go.sum b/go.sum index 42a404f2..34ece36f 100644 --- a/go.sum +++ b/go.sum @@ -169,8 +169,8 @@ github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 h1:m9yLtI github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 h1:vYnXoIyCXzaCEw0sYifQ4bDpsv3/fO/dZ2suEsTwCIo= github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= -github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6AVbvD9pKDVewykkwSAPwEpmIEQIR/4= -github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= +github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= +github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 h1:BTfJ6ls31Roq42lznlZnuPzRf0wrT8jT+tWcvq7wDXY= github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17/go.mod 
h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= github.com/gptscript-ai/tui v0.0.0-20240802053758-038d3eed9502 h1:172DJthLtMUqXrODC/wGd0Vmj6adn9YiP9KnlrZpD+4= diff --git a/pkg/cli/main.go b/pkg/cli/main.go index d06f614f..b607281b 100644 --- a/pkg/cli/main.go +++ b/pkg/cli/main.go @@ -1,7 +1,9 @@ package cli import ( + "context" "os" + "os/signal" "github.com/gptscript-ai/cmd" "github.com/gptscript-ai/gptscript/pkg/daemon" @@ -18,5 +20,7 @@ func Main() { } os.Exit(0) } - cmd.Main(New()) + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + cmd.MainCtx(ctx, New()) } From 632c2cf7b8079675f515d79c2cfceea76f9914b7 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 2 Aug 2024 16:35:11 -0700 Subject: [PATCH 062/270] bug: don't capture stderr for cred and provider tools --- pkg/engine/cmd.go | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index ec4949ce..d62aad2e 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -134,21 +134,28 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate }, } - result := &bytes.Buffer{} - all := io.MultiWriter(result, &outputWriter{ - id: id, - progress: e.Progress, - }) - - cmd.Stdout = all - cmd.Stderr = all - if log.IsDebug() { - cmd.Stderr = io.MultiWriter(all, os.Stderr) + var ( + stdout = &bytes.Buffer{} + stdoutAndErr = &bytes.Buffer{} + progressOut = &outputWriter{ + id: id, + progress: e.Progress, + } + result *bytes.Buffer + ) + + cmd.Stdout = io.MultiWriter(stdout, stdoutAndErr, progressOut) + if toolCategory == NoCategory || toolCategory == ContextToolCategory { + cmd.Stderr = io.MultiWriter(stdoutAndErr, progressOut) + result = stdoutAndErr + } else { + cmd.Stderr = io.MultiWriter(stdoutAndErr, progressOut, os.Stderr) + result = stdout } if err := cmd.Run(); err != nil { if toolCategory == NoCategory { - return fmt.Sprintf("ERROR: got (%v) while running tool, 
OUTPUT: %s", err, result), nil + return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, stdoutAndErr), nil } log.Errorf("failed to run tool [%s] cmd %v: %v", tool.Parameters.Name, cmd.Args, err) return "", fmt.Errorf("ERROR: %s: %w", result, err) From d0092f977ac3cc4ba43fde7fb8a3973ef80d5bfb Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 2 Aug 2024 22:40:00 -0700 Subject: [PATCH 063/270] chore: bump tui --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2682876c..b09b2665 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 - github.com/gptscript-ai/tui v0.0.0-20240802053758-038d3eed9502 + github.com/gptscript-ai/tui v0.0.0-20240803053736-0360a5a6339b github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 diff --git a/go.sum b/go.sum index 34ece36f..5a84c667 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7J github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 h1:BTfJ6ls31Roq42lznlZnuPzRf0wrT8jT+tWcvq7wDXY= github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240802053758-038d3eed9502 h1:172DJthLtMUqXrODC/wGd0Vmj6adn9YiP9KnlrZpD+4= -github.com/gptscript-ai/tui v0.0.0-20240802053758-038d3eed9502/go.mod h1:KGtCo7cjH6qR6Wp6AyI1dL1R8bln8wVpdDEoopRUckY= +github.com/gptscript-ai/tui v0.0.0-20240803053736-0360a5a6339b 
h1:KyjSzximDwyFDAze7UVGlgU8P76mNvtVyPG9dH5Cd/A= +github.com/gptscript-ai/tui v0.0.0-20240803053736-0360a5a6339b/go.mod h1:KGtCo7cjH6qR6Wp6AyI1dL1R8bln8wVpdDEoopRUckY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= From d6f6096f1147a0df266201422251f0cac09579a0 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Sat, 3 Aug 2024 10:42:47 -0400 Subject: [PATCH 064/270] docs: rearrange model provider doc (#705) Signed-off-by: Grant Linville --- docs/docs/05-alternative-model-providers.md | 70 ++++++++++++--------- 1 file changed, 42 insertions(+), 28 deletions(-) diff --git a/docs/docs/05-alternative-model-providers.md b/docs/docs/05-alternative-model-providers.md index 2c05e7dd..8e32ca21 100644 --- a/docs/docs/05-alternative-model-providers.md +++ b/docs/docs/05-alternative-model-providers.md @@ -1,34 +1,24 @@ # Supported Models and Platforms -## Usage - GPTScript can be used against alternative models that expose an OpenAI-compatible API or have a provider available. +Here is an example using Claude: -### Using a model with an OpenAI compatible API - -```gptscript -model: mistral-large-latest from https://api.mistral.ai/v1 - -Say hello world -``` - -:::note -Mistral's La Plateforme has an OpenAI-compatible API, but the model does not behave identically to gpt-4. For that reason, we also have a provider for it that might get better results in some cases. 
-::: - -### Using a model that requires a provider ```gptscript model: claude-3-haiku-20240307 from github.com/gptscript-ai/claude3-anthropic-provider Say hello world ``` -### Authentication +A note on model compatibility: -For OpenAI compatible providers, GPTScript will look for an API key to be configured with the -prefix `GPTSCRIPT_PROVIDER_`, the base domain converted to environment variable format, and a suffix of `_API_KEY`. -For example, if you are using `mistral-large-latest from https://api.mistral.ai/v1`, the environment variable would -be `GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY`. +:::important +While the providers allow GPTScript to work with other models, the effectiveness of using a +different model will depend on a combination of prompt engineering and the quality of the model. You may need to change +wording or add more description if you are not getting the results you want. In some cases, the model might not be +capable of intelligently handling the complex function calls. +::: + +## Authentication Each provider has different requirements for authentication. Please check the readme for the provider you are trying to use. @@ -50,16 +40,40 @@ The following providers are currently available: For any provider that supports listing models, you can use this command: ```bash -# With a provider gptscript --list-models github.com/gptscript-ai/claude3-anthropic-provider +``` -# With an OpenAI-compatible endpoint -gptscript --list-models https://api.mistral.ai/v1 +## OpenAI-Compatible APIs (Advanced) + +:::warning +Even if a non-OpenAI service has an API that claims to be OpenAI-compatible, there are usually subtle differences that cause things to break. +The approach described in this section often does not work. 
+::: + +You can use a model from an OpenAI-compatible API like this: + +```gptscript +model: mistral-large-latest from https://api.mistral.ai/v1 + +Say hello world ``` -## Compatibility +:::note +Mistral's La Plateforme has an OpenAI-compatible API, but the model does not behave identically to gpt-4. +For that reason, we also have a provider for it that might get better results in some cases. +::: -While the providers allow GPTScript to work with other models, the effectiveness of using a -different model will depend on a combination of prompt engineering and the quality of the model. You may need to change -wording or add more description if you are not getting the results you want. In some cases, the model might not be -capable of intelligently handling the complex function calls. +### Authentication + +For OpenAI-compatible providers, GPTScript will look for an API key to be configured with the +prefix `GPTSCRIPT_PROVIDER_`, the base domain converted to environment variable format, and a suffix of `_API_KEY`. +For example, if you are using `mistral-large-latest from https://api.mistral.ai/v1`, the environment variable would +be `GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY`. 
+ +### Listing available models + +You can list models from an OpenAI-compatible API like this: + +```bash +gptscript --list-models https://api.mistral.ai/v1 +``` From 7b53087968b49b1799b02eb0ff38980f76ef5241 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Sat, 3 Aug 2024 11:37:35 -0400 Subject: [PATCH 065/270] docs: document more features (#694) Signed-off-by: Grant Linville Co-authored-by: Donnie Adams --- docs/docs/03-tools/08-workspace.md | 37 +++++ docs/docs/03-tools/09-code-tool-guidelines.md | 135 ++++++++++++++++++ docs/docs/03-tools/10-daemon.md | 108 ++++++++++++++ docs/docs/09-faqs.md | 35 ++--- 4 files changed, 295 insertions(+), 20 deletions(-) create mode 100644 docs/docs/03-tools/08-workspace.md create mode 100644 docs/docs/03-tools/09-code-tool-guidelines.md create mode 100644 docs/docs/03-tools/10-daemon.md diff --git a/docs/docs/03-tools/08-workspace.md b/docs/docs/03-tools/08-workspace.md new file mode 100644 index 00000000..8756cb2a --- /dev/null +++ b/docs/docs/03-tools/08-workspace.md @@ -0,0 +1,37 @@ +# Workspace + +One concept in GPTScript is the workspace directory. +This is a directory meant to be used by tools that need to interact with the local file system. +By default, the workspace directory is a one-off temporary directory. +The workspace directory can be set with the `--workspace` argument when running GPTScript, like this: + +```bash +gptscript --workspace . my-script.gpt +``` + +In the above example, the user’s current directory (denoted by `.`) will be set as the workspace. +The workspace directory is no longer temporary if it is explicitly set, and everything in it will persist after the script has finished running. +Both absolute and relative paths are supported. + +Regardless of whether it is set implicitly or explicitly, the workspace is then made available to the script execution as the `GPTSCRIPT_WORKSPACE_DIR` environment variable. 
+ +:::info +GPTScript does not force scripts or tools to write to, read from, or otherwise use the workspace. +The tools must decide to make use of the workspace environment variable. +::: + +## The Workspace Context Tool + +To make a non-code tool that uses the LLM aware of the workspace, you can reference the workspace context tool: + +``` +Context: github.com/gptscript-ai/context/workspace +``` + +This tells the LLM (by way of a [system message](https://platform.openai.com/docs/guides/text-generation/chat-completions-api)) what the workspace directory is, +what its initial contents are, and that if it decides to create a file or directory, it should do so in the workspace directory. +This will not, however, have any impact on code-based tools (i.e. Python, Bash, or Go tools). +Such tools will have the `GPTSCRIPT_WORKSPACE_DIR` environment variable available to them, but they must be written in such a way that they make use of it. + +This context tool also automatically shares the `sys.ls`, `sys.read`, and `sys.write` tools with the tool that is using it as a context. +This is because if a tool intends to interact with the workspace, it minimally needs these tools. diff --git a/docs/docs/03-tools/09-code-tool-guidelines.md b/docs/docs/03-tools/09-code-tool-guidelines.md new file mode 100644 index 00000000..0f13ac7f --- /dev/null +++ b/docs/docs/03-tools/09-code-tool-guidelines.md @@ -0,0 +1,135 @@ +# Code Tool Guidelines + +GPTScript can handle the packaging and distribution of code-based tools via GitHub repos. +For more information on how this works, see the [authoring guide](02-authoring.md#sharing-tools). + +This guide provides guidelines for setting up GitHub repos for proper tool distribution. + +## Common Guidelines + +### `tool.gpt` or `agent.gpt` file + +Every repo should have a `tool.gpt` or `agent.gpt` file. This is the main logic of the tool. +If both files exist, GPTScript will use the `agent.gpt` file and ignore the `tool.gpt` file. 
+Your repo can have other `.gpt` files that are referenced by the main file, but there must be a `tool.gpt` or `agent.gpt` file present. + +Under most circumstances, this file should live in the root of the repo. +If you are using a single repo for the distribution of multiple tools (see [gptscript-ai/context](https://github.com/gptscript-ai/context) for an example), +then you can have the `tool.gpt`/`agent.gpt` file in a subdirectory, and the tool will now be able to be referenced as `github.com///`. + +### Name and Description directives + +We recommend including a `Name` and `Description` directive for your tool. +This is useful for both people and LLMs to understand what the tool will do and when to use it. + +### Parameters + +Any parameters specified in the tool will be available as environment variables in your code. +We recommend handling parameters that way, rather than using command-line arguments. + +## Python Guidelines + +### Calling Python in the tool body + +The body of the `tool.gpt`/`agent.gpt` file needs to call Python. This can be done as an inline script like this: + +``` +Name: my-python-tool + +#!python3 + +print('hello world') +``` + +An inline script like this is only recommended for simple use cases that don't need external dependencies. + +If your use case is more complex or requires external dependencies, you can reference a Python script in your repo, like this: + +``` +Name: my-python-tool + +#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/tool.py +``` + +(This example assumes that your entrypoint to your Python program is in a file called `tool.py`. You can call it what you want.) + +### `requirements.txt` file + +If your Python program needs any external dependencies, you can create a `requirements.txt` file at the same level as +your `tool.gpt`/`agent.gpt` file. GPTScript will handle downloading the dependencies before it runs the tool. + +The file structure should look something like this: + +``` +. 
+├── requirements.txt +├── tool.py +└── tool.gpt +``` + +## JavaScript (Node.js) Guidelines + +### Calling Node.js in the tool body + +The body of the `tool.gpt`/`agent.gpt` file needs to call Node. This can be done as an inline script like this: + +``` +Name: my-node-tool + +#!node + +console.log('hello world') +``` + +An inline script like this is only recommended for simple use cases that don't need external dependencies. + +If your use case is more complex or requires external dependencies, you can reference a Node script in your repo, like this: + +``` +Name: my-node-tool + +#!/usr/bin/env node ${GPTSCRIPT_TOOL_DIR}/tool.js +``` + +(This example assumes that your entrypoint to your Node program is in a file called `tool.js`. You can call it what you want.) + +### `package.json` file + +If your Node program needs any external dependencies, you can create a `package.json` file at the same level as +your `tool.gpt`/`agent.gpt` file. GPTScript will handle downloading the dependencies before it runs the tool. + +The file structure should look something like this: + +``` +. +├── package.json +├── tool.js +└── tool.gpt +``` + +## Go Guidelines + +GPTScript does not support inline code for Go, so you must call to an external program from the tool body like this: + +``` +Name: my-go-tool + +#!${GPTSCRIPT_TOOL_DIR}/bin/gptscript-go-tool +``` + +:::important +Unlike the Python and Node cases above where you can name the file anything you want, Go tools must be `#!${GPTSCRIPT_TOOL_DIR}/bin/gptscript-go-tool`. +::: + +GPTScript will build the Go program located at `./main.go` to a file called `./bin/gptscript-go-tool` before running the tool. +All of your dependencies need to be properly specified in a `go.mod` file. + +The file structure should look something like this: + +``` +. 
+├── go.mod +├── go.sum +├── main.go +└── tool.gpt +``` diff --git a/docs/docs/03-tools/10-daemon.md b/docs/docs/03-tools/10-daemon.md new file mode 100644 index 00000000..128c161a --- /dev/null +++ b/docs/docs/03-tools/10-daemon.md @@ -0,0 +1,108 @@ +# Daemon Tools (Advanced) + +One advanced use case that GPTScript supports is daemon tools. +A daemon tool is a tool that starts a long-running HTTP server in the background, that will continue running until GPTScript is done executing. +Other tools can easily send HTTP POST requests to the daemon tool. + +## Example + +Here is an example of a daemon tool with a simple echo server written in an inline Node.js script: + +``` +Tools: my-daemon +Param: first: the first parameter +Param: second: the second parameter + +#!http://my-daemon.daemon.gptscript.local/myPath + +--- +Name: my-daemon + +#!sys.daemon node + +const http = require('http'); + +const server = http.createServer((req, res) => { + if (req.method === 'GET' || req.method === 'POST') { + // Extract the path from the request URL + const path = req.url; + + let body = ''; + + req.on('data', chunk => { + body += chunk.toString(); + }) + + // Respond with the path and body + req.on('end', () => { + res.writeHead(200, { 'Content-Type': 'text/plain' }); + res.write(`Body: ${body}\n`); + res.end(`Path: ${path}`); + }) + } else { + res.writeHead(405, { 'Content-Type': 'text/plain' }); + res.end('Method Not Allowed'); + } +}); + +const PORT = process.env.PORT || 3000; +server.listen(PORT, () => { + console.log(`Server is listening on port ${PORT}`); +}); +``` + +Let's talk about the daemon tool, called `my-daemon`, first. + +### The Daemon Tool + +The body of this tool begins with `#!sys.daemon`. This tells GPTScript to take the rest of the body as a command to be +run in the background that will listen for HTTP requests. GPTScript will run this command (in this case, a Node script). 
+GPTScript will assign a port number for the server and set the `PORT` environment variable to that number, so the +server needs to check that variable and listen on the proper port. + +After GPTScript runs the daemon, it will send it an HTTP GET request to make sure that it is running properly. +The daemon needs to respond with a 200 OK to this request. +By default, the request goes to `/`, but this can be configured with the following syntax: + +``` +#!sys.daemon (path=/api/ready) node + +// (node script here) +``` + +### The Entrypoint Tool + +The entrypoint tool at the top of this script sends an HTTP request to the daemon tool. +There are a few important things to note here: + +- The `Tools: my-daemon` directive is needed to show that this tool requires the `my-daemon` tool to already be running. + - When the entrypoint tool runs, GPTScript will check if `my-daemon` is already running. If it is not, GPTScript will start it. +- The `#!http://my-daemon.daemon.gptscript.local/myPath` in the body tells GPTScript to send an HTTP request to the daemon tool. + - The request will be a POST request, with the body of the request being a JSON string of the parameters passed to the entrypoint tool. + - For example, if the script is run like `gptscript script.gpt '{"first":"hello","second":"world"}'`, then the body of the request will be `{"first":"hello","second":"world"}`. + - The path of the request will be `/myPath`. + - The hostname is `my-daemon.daemon.gptscript.local`. When sending a request to a daemon tool, the hostname must always start with the daemon tool's name, followed by `.daemon.gptscript.local`. + - GPTScript recognizes this hostname and determines the correct port number to send the request to, on localhost. + +### Running the Example + +Now let's try running it: + +```bash +gptscript script.gpt '{"first":"hello","second":"world"}' +``` + +``` +OUTPUT: + +Body: {"first":"hello","second":"world"} +Path: /myPath +``` + +This is exactly what we expected. 
This is a silly, small example just to demonstrate how this feature works. +A real-world situation would involve several different tools sending different HTTP requests to the daemon tool, +likely with an LLM determining when to call which tool. + +## Real-World Example + +To see a real-world example of a daemon tool, check out the [GPTScript Browser tool](https://github.com/gptscript-ai/browser). diff --git a/docs/docs/09-faqs.md b/docs/docs/09-faqs.md index e8e15cb5..20196011 100644 --- a/docs/docs/09-faqs.md +++ b/docs/docs/09-faqs.md @@ -54,26 +54,7 @@ By default, this directory is a one-off temp directory, but you can override thi gptscript --workspace . my-script.gpt ``` -In the above example, the user's current directory (denoted by `.`) will be set as the workspace. Both absolute and relative paths are supported. - -Regardless of whether it is set implicitly or explicitly, the workspace is then made available to the script execution as the `GPTSCRIPT_WORKSPACE_DIR` environment variable. - -:::info -GPTScript does not force scripts or tools to write to, read from, or otherwise use the workspace. The tools must decide to make use of the workspace environment variable. -::: - -To make prompt-based tools workspace aware, you can reference our workspace context tool, like so: - -``` -Context: github.com/gptscript-ai/context/workspace -``` - -This tells the LLM (by way of a [system message](https://platform.openai.com/docs/guides/text-generation/chat-completions-api)) what the workspace directory is, what its initial contents are, and that if it decides to create a file or directory, it should do so in the workspace directory. -This will not, however, have any impact on code-based tools (ie python, bash, or go tools). -Such tools will have the `GPTSCRIPT_WORKSPACE_DIR` environment variable available to them, but they must be written in such a way that they make use of it. 
- -This context also automatically shares the `sys.ls`, `sys.read`, and `sys.write` tools with the tool that is using it as a context. -This is because if a tool intends to interact with the workspace, it minimally needs these tools. +For more info, see the [Workspace](03-tools/08-workspace.md) page. ### I'm hitting GitHub's rate limit for unauthenticated requests when using GPTScript. @@ -85,3 +66,17 @@ If you're already authenticated with the `gh` CLI, you can use its token by runn ```bash export GITHUB_AUTH_TOKEN="$(gh auth token)" ``` + +### Can I save my chat and resume it later? + +Yes! When you run GPTScript, be sure to specify the `--save-chat-state-file` argument like this: + +```bash +gptscript --save-chat-state-file chat-state.json my-script.gpt +``` + +Then, when you want to resume your chat, you can use the `--chat-state` argument to specify the file you saved: + +```bash +gptscript --chat-state chat-state.json my-script.gpt +``` From a7509b0d1e59ccc26431b3fb6c6ba98087d10dc4 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Sat, 3 Aug 2024 17:44:07 -0700 Subject: [PATCH 066/270] chore: bump tui --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b09b2665..783e638b 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 - github.com/gptscript-ai/tui v0.0.0-20240803053736-0360a5a6339b + github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 diff --git a/go.sum b/go.sum index 5a84c667..fd165d3f 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7J 
github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 h1:BTfJ6ls31Roq42lznlZnuPzRf0wrT8jT+tWcvq7wDXY= github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240803053736-0360a5a6339b h1:KyjSzximDwyFDAze7UVGlgU8P76mNvtVyPG9dH5Cd/A= -github.com/gptscript-ai/tui v0.0.0-20240803053736-0360a5a6339b/go.mod h1:KGtCo7cjH6qR6Wp6AyI1dL1R8bln8wVpdDEoopRUckY= +github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e h1:OO/b8gGQi3jIpDoII+jf7fc4ssqOZdFcb9zB+QjsxRQ= +github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e/go.mod h1:KGtCo7cjH6qR6Wp6AyI1dL1R8bln8wVpdDEoopRUckY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= From 89312473f72ad7d845927d605ce99320738a0031 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Sun, 4 Aug 2024 22:31:32 -0700 Subject: [PATCH 067/270] chore: add type field to tools This allows the tools field to reference tools, context tools, or agent tools without using the context:, agent: fields. 
--- pkg/parser/parser.go | 2 + pkg/runner/runner.go | 2 +- pkg/tests/runner_test.go | 5 + .../testdata/TestToolRefAll/call1-resp.golden | 9 ++ .../testdata/TestToolRefAll/call1.golden | 61 +++++++++ pkg/tests/testdata/TestToolRefAll/test.gpt | 30 +++++ pkg/types/tool.go | 118 ++++++++++++------ pkg/types/tool_test.go | 5 + 8 files changed, 191 insertions(+), 41 deletions(-) create mode 100644 pkg/tests/testdata/TestToolRefAll/call1-resp.golden create mode 100644 pkg/tests/testdata/TestToolRefAll/call1.golden create mode 100644 pkg/tests/testdata/TestToolRefAll/test.gpt diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index d12f838e..ff5d1374 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -150,6 +150,8 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { tool.Parameters.Credentials = append(tool.Parameters.Credentials, value) case "sharecredentials", "sharecreds", "sharecredential", "sharecred", "sharedcredentials", "sharedcreds", "sharedcredential", "sharedcred": tool.Parameters.ExportCredentials = append(tool.Parameters.ExportCredentials, value) + case "type": + tool.Type = types.ToolType(strings.ToLower(value)) default: return false, nil } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 9e8695a7..3a33c720 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -332,7 +332,7 @@ func getToolRefInput(prg *types.Program, ref types.ToolReference, input string) } func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monitor, env []string, input string) (result []engine.InputContext, _ *State, _ error) { - toolRefs, err := callCtx.Program.GetContextToolRefs(callCtx.Tool.ID) + toolRefs, err := callCtx.Tool.GetContextTools(*callCtx.Program) if err != nil { return nil, nil, err } diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index a9a01510..12eff23a 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -995,3 +995,8 @@ func TestMissingTool(t *testing.T) 
{ r.AssertResponded(t) autogold.Expect("TEST RESULT CALL: 2").Equal(t, resp) } + +func TestToolRefAll(t *testing.T) { + r := tester.NewRunner(t) + r.RunDefault() +} diff --git a/pkg/tests/testdata/TestToolRefAll/call1-resp.golden b/pkg/tests/testdata/TestToolRefAll/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestToolRefAll/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestToolRefAll/call1.golden b/pkg/tests/testdata/TestToolRefAll/call1.golden new file mode 100644 index 00000000..4957014d --- /dev/null +++ b/pkg/tests/testdata/TestToolRefAll/call1.golden @@ -0,0 +1,61 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestToolRefAll/test.gpt:tool", + "name": "tool", + "parameters": { + "properties": { + "toolArg": { + "description": "stuff", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "testdata/TestToolRefAll/test.gpt:none", + "name": "none", + "parameters": { + "properties": { + "noneArg": { + "description": "stuff", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "testdata/TestToolRefAll/test.gpt:agentAssistant", + "name": "agent", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the tool. 
This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nContext Body\nMain tool" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestToolRefAll/test.gpt b/pkg/tests/testdata/TestToolRefAll/test.gpt new file mode 100644 index 00000000..93c4ea05 --- /dev/null +++ b/pkg/tests/testdata/TestToolRefAll/test.gpt @@ -0,0 +1,30 @@ +tools: tool, agentAssistant, context, none + +Main tool + +--- +name: agentAssistant +type: agent + +Agent body + +--- +name: context +type: context + +#!sys.echo + +Context Body + +--- +name: none +param: noneArg: stuff + +Default type + +--- +name: tool +type: Tool +param: toolArg: stuff + +Typed tool \ No newline at end of file diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 82effad4..54d5d817 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -26,6 +26,20 @@ var ( DefaultFiles = []string{"agent.gpt", "tool.gpt"} ) +type ToolType string + +const ( + ToolTypeContext = ToolType("context") + ToolTypeAgent = ToolType("agent") + ToolTypeOutput = ToolType("output") + ToolTypeInput = ToolType("input") + ToolTypeAssistant = ToolType("assistant") + ToolTypeTool = ToolType("tool") + ToolTypeCredential = ToolType("credential") + ToolTypeProvider = ToolType("provider") + ToolTypeDefault = ToolType("") +) + type ErrToolNotFound struct { ToolName string } @@ -77,28 +91,6 @@ type ToolReference struct { ToolID string `json:"toolID,omitempty"` } -func (p Program) GetContextToolRefs(toolID string) ([]ToolReference, error) { - return p.ToolSet[toolID].GetContextTools(p) -} - -func (p Program) GetCompletionTools() (result []CompletionTool, err error) { - return Tool{ - ToolDef: ToolDef{ - Parameters: Parameters{ - Tools: []string{"main"}, - }, - }, - ToolMapping: map[string][]ToolReference{ - "main": { - { - Reference: "main", - ToolID: p.EntryToolID, - }, - }, - }, - }.GetCompletionTools(p) -} - func (p 
Program) TopLevelTools() (result []Tool) { for _, tool := range p.ToolSet[p.EntryToolID].LocalTools { if target, ok := p.ToolSet[tool]; ok { @@ -145,6 +137,7 @@ type Parameters struct { OutputFilters []string `json:"outputFilters,omitempty"` ExportOutputFilters []string `json:"exportOutputFilters,omitempty"` Blocking bool `json:"-"` + Type ToolType `json:"type,omitempty"` } func (p Parameters) ToolRefNames() []string { @@ -347,6 +340,13 @@ func (t Tool) GetAgents(prg Program) (result []ToolReference, _ error) { return nil, err } + genericToolRefs, err := t.getCompletionToolRefs(prg, nil, ToolTypeAgent) + if err != nil { + return nil, err + } + + toolRefs = append(toolRefs, genericToolRefs...) + // Agent Tool refs must be named for i, toolRef := range toolRefs { if toolRef.Named != "" { @@ -358,7 +358,9 @@ func (t Tool) GetAgents(prg Program) (result []ToolReference, _ error) { name = toolRef.Reference } normed := ToolNormalizer(name) - normed = strings.TrimSuffix(strings.TrimSuffix(normed, "Agent"), "Assistant") + if trimmed := strings.TrimSuffix(strings.TrimSuffix(normed, "Agent"), "Assistant"); trimmed != "" { + normed = trimmed + } toolRefs[i].Named = normed } @@ -404,6 +406,9 @@ func (t ToolDef) String() string { if t.Parameters.Description != "" { _, _ = fmt.Fprintf(buf, "Description: %s\n", t.Parameters.Description) } + if t.Parameters.Type != ToolTypeDefault { + _, _ = fmt.Fprintf(buf, "Type: %s\n", strings.ToUpper(string(t.Type[0]))+string(t.Type[1:])) + } if len(t.Parameters.Agents) != 0 { _, _ = fmt.Fprintf(buf, "Agents: %s\n", strings.Join(t.Parameters.Agents, ", ")) } @@ -486,7 +491,7 @@ func (t ToolDef) String() string { return buf.String() } -func (t Tool) GetExportedContext(prg Program) ([]ToolReference, error) { +func (t Tool) getExportedContext(prg Program) ([]ToolReference, error) { result := &toolRefSet{} exportRefs, err := t.GetToolRefsFromNames(t.ExportContext) @@ -498,13 +503,13 @@ func (t Tool) GetExportedContext(prg Program) 
([]ToolReference, error) { result.Add(exportRef) tool := prg.ToolSet[exportRef.ToolID] - result.AddAll(tool.GetExportedContext(prg)) + result.AddAll(tool.getExportedContext(prg)) } return result.List() } -func (t Tool) GetExportedTools(prg Program) ([]ToolReference, error) { +func (t Tool) getExportedTools(prg Program) ([]ToolReference, error) { result := &toolRefSet{} exportRefs, err := t.GetToolRefsFromNames(t.Export) @@ -514,7 +519,7 @@ func (t Tool) GetExportedTools(prg Program) ([]ToolReference, error) { for _, exportRef := range exportRefs { result.Add(exportRef) - result.AddAll(prg.ToolSet[exportRef.ToolID].GetExportedTools(prg)) + result.AddAll(prg.ToolSet[exportRef.ToolID].getExportedTools(prg)) } return result.List() @@ -524,6 +529,15 @@ func (t Tool) GetExportedTools(prg Program) ([]ToolReference, error) { // contexts that are exported by the context tools. This will recurse all exports. func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { result := &toolRefSet{} + result.AddAll(t.getDirectContextToolRefs(prg)) + result.AddAll(t.getCompletionToolRefs(prg, nil, ToolTypeContext)) + return result.List() +} + +// getDirectContextToolRefs returns only the context tools listed directly on the tool, +// plus the contexts exported by those context tools. This will recurse all exports. 
+func (t Tool) getDirectContextToolRefs(prg Program) ([]ToolReference, error) { + result := &toolRefSet{} contextRefs, err := t.GetToolRefsFromNames(t.Context) if err != nil { @@ -531,7 +545,7 @@ func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { } for _, contextRef := range contextRefs { - result.AddAll(prg.ToolSet[contextRef.ToolID].GetExportedContext(prg)) + result.AddAll(prg.ToolSet[contextRef.ToolID].getExportedContext(prg)) result.Add(contextRef) } @@ -550,7 +564,9 @@ func (t Tool) GetOutputFilterTools(program Program) ([]ToolReference, error) { result.Add(outputFilterRef) } - contextRefs, err := t.GetContextTools(program) + result.AddAll(t.getCompletionToolRefs(program, nil, ToolTypeOutput)) + + contextRefs, err := t.getDirectContextToolRefs(program) if err != nil { return nil, err } @@ -575,7 +591,9 @@ func (t Tool) GetInputFilterTools(program Program) ([]ToolReference, error) { result.Add(inputFilterRef) } - contextRefs, err := t.GetContextTools(program) + result.AddAll(t.getCompletionToolRefs(program, nil, ToolTypeInput)) + + contextRefs, err := t.getDirectContextToolRefs(program) if err != nil { return nil, err } @@ -602,11 +620,28 @@ func (t Tool) GetNextAgentGroup(prg Program, agentGroup []ToolReference, toolID return agentGroup, nil } +func filterRefs(prg Program, refs []ToolReference, types ...ToolType) (result []ToolReference) { + for _, ref := range refs { + if slices.Contains(types, prg.ToolSet[ref.ToolID].Type) { + result = append(result, ref) + } + } + return +} + func (t Tool) GetCompletionTools(prg Program, agentGroup ...ToolReference) (result []CompletionTool, err error) { - refs, err := t.getCompletionToolRefs(prg, agentGroup) + toolSet := &toolRefSet{} + toolSet.AddAll(t.getCompletionToolRefs(prg, agentGroup, ToolTypeDefault, ToolTypeTool)) + + if err := t.addAgents(prg, toolSet); err != nil { + return nil, err + } + + refs, err := toolSet.List() if err != nil { return nil, err } + return toolRefsToCompletionTools(refs, 
prg), nil } @@ -638,26 +673,30 @@ func (t Tool) addReferencedTools(prg Program, result *toolRefSet) error { result.Add(subToolRef) // Get all tools exports - result.AddAll(prg.ToolSet[subToolRef.ToolID].GetExportedTools(prg)) + result.AddAll(prg.ToolSet[subToolRef.ToolID].getExportedTools(prg)) } return nil } func (t Tool) addContextExportedTools(prg Program, result *toolRefSet) error { - contextTools, err := t.GetContextTools(prg) + contextTools, err := t.getDirectContextToolRefs(prg) if err != nil { return err } for _, contextTool := range contextTools { - result.AddAll(prg.ToolSet[contextTool.ToolID].GetExportedTools(prg)) + result.AddAll(prg.ToolSet[contextTool.ToolID].getExportedTools(prg)) } return nil } -func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference) ([]ToolReference, error) { +func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference, types ...ToolType) ([]ToolReference, error) { + if len(types) == 0 { + types = []ToolType{ToolTypeDefault, ToolTypeTool} + } + result := toolRefSet{} if t.Chat { @@ -677,11 +716,8 @@ func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference) ([] return nil, err } - if err := t.addAgents(prg, &result); err != nil { - return nil, err - } - - return result.List() + refs, err := result.List() + return filterRefs(prg, refs, types...), err } func (t Tool) GetCredentialTools(prg Program, agentGroup []ToolReference) ([]ToolReference, error) { @@ -689,6 +725,8 @@ func (t Tool) GetCredentialTools(prg Program, agentGroup []ToolReference) ([]Too result.AddAll(t.GetToolRefsFromNames(t.Credentials)) + result.AddAll(t.getCompletionToolRefs(prg, nil, ToolTypeCredential)) + toolRefs, err := t.getCompletionToolRefs(prg, agentGroup) if err != nil { return nil, err diff --git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index 43af6cee..a47014a1 100644 --- a/pkg/types/tool_test.go +++ b/pkg/types/tool_test.go @@ -33,6 +33,8 @@ func TestToolDef_String(t *testing.T) { 
ExportInputFilters: []string{"SharedFilter1", "SharedFilter2"}, OutputFilters: []string{"Filter1", "Filter2"}, ExportOutputFilters: []string{"SharedFilter1", "SharedFilter2"}, + ExportCredentials: []string{"ExportCredential1", "ExportCredential2"}, + Type: ToolTypeContext, }, Instructions: "This is a sample instruction", } @@ -41,6 +43,7 @@ func TestToolDef_String(t *testing.T) { Global Tools: GlobalTool1, GlobalTool2 Name: Tool Sample Description: This is a sample tool +Type: Context Agents: Agent1, Agent2 Tools: Tool1, Tool2 Share Tools: Export1, Export2 @@ -60,6 +63,8 @@ Parameter: arg2: desc2 Internal Prompt: true Credential: Credential1 Credential: Credential2 +Share Credential: ExportCredential1 +Share Credential: ExportCredential2 Chat: true This is a sample instruction From 160a7334bafc5cde435cfd8a0932c2b5558b02e2 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 5 Aug 2024 12:25:45 -0400 Subject: [PATCH 068/270] docs: input and output filters (#700) Signed-off-by: Grant Linville --- docs/docs/03-tools/11-input-output-filters.md | 137 ++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 docs/docs/03-tools/11-input-output-filters.md diff --git a/docs/docs/03-tools/11-input-output-filters.md b/docs/docs/03-tools/11-input-output-filters.md new file mode 100644 index 00000000..1db8f937 --- /dev/null +++ b/docs/docs/03-tools/11-input-output-filters.md @@ -0,0 +1,137 @@ +# Input and Output Filters (Advanced) + +GPTScript supports input and output filters, which are tools that can modify the input to a tool or the output from a tool. +These are best explained with examples. + +## Input Filter Example + +In this example, the entrypoint tool uses an input filter to modify the `message` parameter, before calling the subtool. +Then, the subtool uses another input filter to modify the message, then writes it to a file. 
+ +``` +# File name: script.gpt +Param: message: the message from the user +Tools: subtool +Input Filter: appleToOrange + +Take the message and give it to the subtool. Then say "Done". + +--- +Name: subtool +Param: message: the message from the user +Input Filter: orangeToBanana + +#!python3 + +import os + +message = os.getenv("message", "") +with open("gptscript_output.txt", "w") as f: + f.write(message) + +--- +Name: appleToOrange + +#!python3 + +import os + +def output(input: str): + return input.replace("apple", "orange") + +print(output(os.getenv("INPUT", ""))) + +--- +Name: orangeToBanana + +#!python3 + +import os + +def output(input: str): + return input.replace("orange", "banana") + +print(output(os.getenv("INPUT", ""))) +``` + +Try running this tool with the following command: + +```bash +gptscript script.gpt '{"message":"apple is great"}' + +# Then view the output: +cat gptscript_output.txt +``` + +The output should say "banana is great". +This matches what we expect, because the input filter `appleToOrange` changes "apple" to "orange", +and the input filter `orangeToBanana` changes "orange" to "banana". +If we run the tool again with a different message, like "hello world", the final message will be unmodified, +since it did not include the words "apple" or "orange". + +The input filter tools both read the input from the environment variable `INPUT`. +They write their modified input to stdout. +This variable is set by GPTScript before running the input filter tool. + +### Input Filter Real-World Example + +For a real-world example of an input filter tool, check out the [gptscript-ai/context/at-syntax](https://github.com/gptscript-ai/context/tree/main/at-syntax) tool. + +## Output Filter Example + +In this example, the tool is asked to write a poem about apples. +The output filter then replaces all references to apples with oranges. + +``` +Output Filter: applesToOranges + +Write a poem about apples. 
+ +--- +Name: applesToOranges + +#!python3 + +import os + +replacements = { + "Apples": "Oranges", + "apples": "oranges", + "apple": "orange", + "Apple": "Orange", +} + +def applesToOranges(input: str) -> str: + for key, value in replacements.items(): + if input.startswith(key): + # This approach doesn't maintain whitespace, but it's good enough for this example + input = input.replace(key, value) + return input + +output: str = os.getenv("OUTPUT", "") +new_output: str = "" +for i in output.split(): + new_output += applesToOranges(i) + " " +print(new_output.strip()) +``` + +``` +OUTPUT: + +In orchards where the sunlight gleams, Among the leaves, in golden beams, The oranges hang on branches high, A feast for both the heart and eye. +Their skins, a palette rich and bright, In hues of red and green delight, With every bite, a crisp surprise, A taste of autumn, pure and wise. +From pies to cider, sweet and bold, Their stories through the seasons told, In every crunch, a memory, Of nature's gift, so wild and free. +Oh, oranges, treasures of the earth, In every form, you bring us mirth, A simple fruit, yet so profound, In you, a world of joy is found. +``` + +The output tool reads the output from the environment variable `OUTPUT`. +It can then modify the output as needed, and print the new output to stdout. + +Output filter tools can also access the following environment variables if needed: + +- `CHAT` (boolean): indicates whether the current script is being run in chat mode or not +- `CONTINUATION` (boolean): if `CHAT` is true, indicates whether the current chat will continue executing, or if this is the final message + +### Output Filter Real-World Example + +For a real-world example of an output filter tool, check out the [gptscript-ai/context/chat-summary](https://github.com/gptscript-ai/context/tree/main/chat-summary) tool. 
From b77cd130a99f9abb4720b13c52dac18ee5cad2c8 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Tue, 6 Aug 2024 09:39:59 -0400 Subject: [PATCH 069/270] enhance: credentials: add GPTSCRIPT_CREDENTIAL_EXPIRATION (#709) Signed-off-by: Grant Linville --- docs/docs/03-tools/04-credential-tools.md | 18 ++++++++ integration/cred_test.go | 24 ++++++++-- integration/scripts/cred_expiration.gpt | 46 +++++++++++++++++++ .../{credscopes.gpt => cred_scopes.gpt} | 0 pkg/config/cliconfig.go | 2 + pkg/credentials/credential.go | 1 + pkg/runner/runner.go | 9 ++++ 7 files changed, 97 insertions(+), 3 deletions(-) create mode 100644 integration/scripts/cred_expiration.gpt rename integration/scripts/{credscopes.gpt => cred_scopes.gpt} (100%) diff --git a/docs/docs/03-tools/04-credential-tools.md b/docs/docs/03-tools/04-credential-tools.md index 3e6a678a..1911dc34 100644 --- a/docs/docs/03-tools/04-credential-tools.md +++ b/docs/docs/03-tools/04-credential-tools.md @@ -204,3 +204,21 @@ that environment variable, and if it is set, get the refresh token from the exis typically without user interaction. For an example of a tool that uses the refresh feature, see the [Gateway OAuth2 tool](https://github.com/gptscript-ai/gateway-oauth2). + +### GPTSCRIPT_CREDENTIAL_EXPIRATION environment variable + +When a tool references a credential tool, GPTScript will add the environment variables from the credential to the tool's +environment before executing the tool. If at least one of the credentials has an `expiresAt` field, GPTScript will also +set the environment variable `GPTSCRIPT_CREDENTIAL_EXPIRATION`, which contains the nearest expiration time out of all +credentials referenced by the tool, in RFC 3339 format. That way, it can be referenced in the tool body if needed. 
+Here is an example: + +``` +Credential: my-credential-tool.gpt as myCred + +#!python3 + +import os + +print("myCred expires at " + os.getenv("GPTSCRIPT_CREDENTIAL_EXPIRATION", "")) +``` diff --git a/integration/cred_test.go b/integration/cred_test.go index 67298ef8..d77f096c 100644 --- a/integration/cred_test.go +++ b/integration/cred_test.go @@ -1,7 +1,9 @@ package integration import ( + "strings" "testing" + "time" "github.com/stretchr/testify/require" ) @@ -15,15 +17,31 @@ func TestGPTScriptCredential(t *testing.T) { // TestCredentialScopes makes sure that environment variables set by credential tools and shared credential tools // are only available to the correct tools. See scripts/credscopes.gpt for more details. func TestCredentialScopes(t *testing.T) { - out, err := RunScript("scripts/credscopes.gpt", "--sub-tool", "oneOne") + out, err := RunScript("scripts/cred_scopes.gpt", "--sub-tool", "oneOne") require.NoError(t, err) require.Contains(t, out, "good") - out, err = RunScript("scripts/credscopes.gpt", "--sub-tool", "twoOne") + out, err = RunScript("scripts/cred_scopes.gpt", "--sub-tool", "twoOne") require.NoError(t, err) require.Contains(t, out, "good") - out, err = RunScript("scripts/credscopes.gpt", "--sub-tool", "twoTwo") + out, err = RunScript("scripts/cred_scopes.gpt", "--sub-tool", "twoTwo") require.NoError(t, err) require.Contains(t, out, "good") } + +// TestCredentialExpirationEnv tests a GPTScript with two credentials that expire at different times. +// One expires after two hours, and the other expires after one hour. +// This test makes sure that the GPTSCRIPT_CREDENTIAL_EXPIRATION environment variable is set to the nearer expiration time (1h). 
+func TestCredentialExpirationEnv(t *testing.T) { + out, err := RunScript("scripts/cred_expiration.gpt") + require.NoError(t, err) + + for _, line := range strings.Split(out, "\n") { + if timestamp, found := strings.CutPrefix(line, "Expires: "); found { + expiresTime, err := time.Parse(time.RFC3339, timestamp) + require.NoError(t, err) + require.True(t, time.Until(expiresTime) < time.Hour) + } + } +} diff --git a/integration/scripts/cred_expiration.gpt b/integration/scripts/cred_expiration.gpt new file mode 100644 index 00000000..da535df0 --- /dev/null +++ b/integration/scripts/cred_expiration.gpt @@ -0,0 +1,46 @@ +cred: credentialTool with 2 as hours +cred: credentialTool with 1 as hours + +#!python3 + +import os + +print("Expires: " + os.getenv("GPTSCRIPT_CREDENTIAL_EXPIRATION", ""), end="") + +--- +name: credentialTool +args: hours: the number of hours from now to expire + +#!python3 + +import os +import json +from datetime import datetime, timedelta, timezone + +class Output: + def __init__(self, env, expires_at): + self.env = env + self.expiresAt = expires_at + + def to_dict(self): + return { + "env": self.env, + "expiresAt": self.expiresAt.isoformat() + } + +hours_str = os.getenv("HOURS") +if hours_str is None: + print("HOURS environment variable is not set") + os._exit(1) + +try: + hours = int(hours_str) +except ValueError: + print("failed to parse HOURS") + os._exit(1) + +expires_at = datetime.now(timezone.utc) + timedelta(hours=hours) +out = Output(env={"yeet": "yote"}, expires_at=expires_at) +out_json = json.dumps(out.to_dict()) + +print(out_json) diff --git a/integration/scripts/credscopes.gpt b/integration/scripts/cred_scopes.gpt similarity index 100% rename from integration/scripts/credscopes.gpt rename to integration/scripts/cred_scopes.gpt diff --git a/pkg/config/cliconfig.go b/pkg/config/cliconfig.go index e4aa49ab..7970415f 100644 --- a/pkg/config/cliconfig.go +++ b/pkg/config/cliconfig.go @@ -55,6 +55,8 @@ type CLIConfig struct { Auths 
map[string]AuthConfig `json:"auths,omitempty"` CredentialsStore string `json:"credsStore,omitempty"` GPTScriptConfigFile string `json:"gptscriptConfig,omitempty"` + GatewayURL string `json:"gatewayURL,omitempty"` + Integrations map[string]string `json:"integrations,omitempty"` auths map[string]types.AuthConfig authsLock *sync.Mutex diff --git a/pkg/credentials/credential.go b/pkg/credentials/credential.go index 605208a0..3d1e2192 100644 --- a/pkg/credentials/credential.go +++ b/pkg/credentials/credential.go @@ -16,6 +16,7 @@ const ( CredentialTypeTool CredentialType = "tool" CredentialTypeModelProvider CredentialType = "modelProvider" ExistingCredential = "GPTSCRIPT_EXISTING_CREDENTIAL" + CredentialExpiration = "GPTSCRIPT_CREDENTIAL_EXPIRATION" ) type Credential struct { diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 3a33c720..c2137bea 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -865,6 +865,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } } + var nearestExpiration *time.Time for _, ref := range credToolRefs { toolName, credentialAlias, args, err := types.ParseCredentialArgs(ref.Reference, callCtx.Input) if err != nil { @@ -967,11 +968,19 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } else { log.Warnf("Not saving credential for tool %s - credentials will only be saved for tools from GitHub, or tools that use aliases.", toolName) } + + if c.ExpiresAt != nil && (nearestExpiration == nil || nearestExpiration.After(*c.ExpiresAt)) { + nearestExpiration = c.ExpiresAt + } } for k, v := range c.Env { env = append(env, fmt.Sprintf("%s=%s", k, v)) } + + if nearestExpiration != nil { + env = append(env, fmt.Sprintf("%s=%s", credentials.CredentialExpiration, nearestExpiration.Format(time.RFC3339))) + } } return env, nil From 0f820879b595fe317a659dd53f67b8fd2826a958 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Sun, 2 Jun 2024 22:46:37 -0700 Subject: [PATCH 
070/270] feat: add basic bash support for windows --- go.mod | 12 +-- go.sum | 24 ++--- pkg/engine/cmd.go | 5 + pkg/repos/download/extract.go | 14 +++ pkg/repos/get.go | 43 ++++++-- pkg/repos/runtimes/busybox/SHASUMS256.txt | 1 + pkg/repos/runtimes/busybox/busybox.go | 107 ++++++++++++++++++++ pkg/repos/runtimes/busybox/busybox_test.go | 41 ++++++++ pkg/repos/runtimes/busybox/log.go | 5 + pkg/repos/runtimes/default.go | 2 + pkg/tests/runner_test.go | 3 - pkg/tests/testdata/TestContextArg/other.gpt | 4 +- pkg/tests/testdata/TestContextArg/test.gpt | 2 +- pkg/tests/tester/runner.go | 12 ++- 14 files changed, 243 insertions(+), 32 deletions(-) create mode 100644 pkg/repos/runtimes/busybox/SHASUMS256.txt create mode 100644 pkg/repos/runtimes/busybox/busybox.go create mode 100644 pkg/repos/runtimes/busybox/busybox_test.go create mode 100644 pkg/repos/runtimes/busybox/log.go diff --git a/go.mod b/go.mod index 783e638b..c84dae97 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc golang.org/x/sync v0.7.0 - golang.org/x/term v0.20.0 + golang.org/x/term v0.22.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.5.1 sigs.k8s.io/yaml v1.4.0 @@ -108,10 +108,10 @@ require ( github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect go4.org v0.0.0-20200411211856-f5505b9728dd // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect - golang.org/x/tools v0.20.0 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.27.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/tools v0.23.0 // indirect mvdan.cc/gofumpt v0.6.0 // indirect ) diff --git a/go.sum b/go.sum index fd165d3f..7e2d7b75 100644 --- a/go.sum +++ b/go.sum @@ -396,8 +396,8 @@ golang.org/x/mod v0.7.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -419,8 +419,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -474,8 +474,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -485,8 +485,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -498,8 +498,8 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -531,8 +531,8 @@ golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index d62aad2e..8e7b234e 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -10,6 +10,7 @@ import 
( "io" "os" "os/exec" + "path" "path/filepath" "runtime" "sort" @@ -269,6 +270,10 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T }) } + if runtime.GOOS == "windows" && (args[0] == "/bin/bash" || args[0] == "/bin/sh") { + args[0] = path.Base(args[0]) + } + if runtime.GOOS == "windows" && (args[0] == "/usr/bin/env" || args[0] == "/bin/env") { args = args[1:] } diff --git a/pkg/repos/download/extract.go b/pkg/repos/download/extract.go index 95a82d74..4cf09f0c 100644 --- a/pkg/repos/download/extract.go +++ b/pkg/repos/download/extract.go @@ -9,7 +9,9 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" + "strings" "time" "github.com/mholt/archiver/v4" @@ -60,6 +62,18 @@ func Extract(ctx context.Context, downloadURL, digest, targetDir string) error { return err } + bin := path.Base(parsedURL.Path) + if strings.HasSuffix(bin, ".exe") { + dst, err := os.Create(filepath.Join(targetDir, bin)) + if err != nil { + return err + } + defer dst.Close() + + _, err = io.Copy(dst, tmpFile) + return err + } + format, input, err := archiver.Identify(filepath.Base(parsedURL.Path), tmpFile) if err != nil { return err diff --git a/pkg/repos/get.go b/pkg/repos/get.go index 2f96d8b3..bead4a7a 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -15,6 +15,7 @@ import ( "github.com/BurntSushi/locker" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/credentials" + "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/loader/github" "github.com/gptscript-ai/gptscript/pkg/repos/git" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes/golang" @@ -51,6 +52,7 @@ type Manager struct { credHelperDirs credentials.CredentialHelperDirs runtimes []Runtime credHelperConfig *credHelperConfig + supportLocal bool } type credHelperConfig struct { @@ -60,6 +62,10 @@ type credHelperConfig struct { env []string } +func (m *Manager) SetSupportLocal() { + m.supportLocal = true +} + func 
New(cacheDir string, runtimes ...Runtime) *Manager { root := filepath.Join(cacheDir, "repos") return &Manager{ @@ -200,8 +206,14 @@ func (m *Manager) setup(ctx context.Context, runtime Runtime, tool types.Tool, e _ = os.RemoveAll(doneFile) _ = os.RemoveAll(target) - if err := git.Checkout(ctx, m.gitDir, tool.Source.Repo.Root, tool.Source.Repo.Revision, target); err != nil { - return "", nil, err + if tool.Source.Repo.VCS == "git" { + if err := git.Checkout(ctx, m.gitDir, tool.Source.Repo.Root, tool.Source.Repo.Revision, target); err != nil { + return "", nil, err + } + } else { + if err := os.MkdirAll(target, 0755); err != nil { + return "", nil, err + } } newEnv, err := runtime.Setup(ctx, m.runtimeDir, targetFinal, env) @@ -227,12 +239,25 @@ func (m *Manager) setup(ctx context.Context, runtime Runtime, tool types.Tool, e } func (m *Manager) GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) { - if tool.Source.Repo == nil { - return tool.WorkingDir, env, nil - } + var isLocal bool + if !m.supportLocal { + if tool.Source.Repo == nil { + return tool.WorkingDir, env, nil + } - if tool.Source.Repo.VCS != "git" { - return "", nil, fmt.Errorf("only git is supported, found VCS %s for %s", tool.Source.Repo.VCS, tool.ID) + if tool.Source.Repo.VCS != "git" { + return "", nil, fmt.Errorf("only git is supported, found VCS %s for %s", tool.Source.Repo.VCS, tool.ID) + } + } else if tool.Source.Repo == nil { + isLocal = true + id := hash.Digest(tool)[:12] + tool.Source.Repo = &types.Repo{ + VCS: "", + Root: id, + Path: "/", + Name: id, + Revision: id, + } } for _, runtime := range m.runtimes { @@ -242,5 +267,9 @@ func (m *Manager) GetContext(ctx context.Context, tool types.Tool, cmd, env []st } } + if isLocal { + return tool.WorkingDir, env, nil + } + return m.setup(ctx, &noopRuntime{}, tool, env) } diff --git a/pkg/repos/runtimes/busybox/SHASUMS256.txt b/pkg/repos/runtimes/busybox/SHASUMS256.txt new file mode 100644 index 
00000000..7da1aff6 --- /dev/null +++ b/pkg/repos/runtimes/busybox/SHASUMS256.txt @@ -0,0 +1 @@ +6d2dfd1c1412c3550a89071a1b36a6f6073844320e687343d1dfc72719ecb8d9 FRP-5301-gda71f7c57/busybox-w64-FRP-5301-gda71f7c57.exe \ No newline at end of file diff --git a/pkg/repos/runtimes/busybox/busybox.go b/pkg/repos/runtimes/busybox/busybox.go new file mode 100644 index 00000000..b0c00a0c --- /dev/null +++ b/pkg/repos/runtimes/busybox/busybox.go @@ -0,0 +1,107 @@ +package busybox + +import ( + "bufio" + "bytes" + "context" + _ "embed" + "errors" + "fmt" + "io/fs" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + + runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" + "github.com/gptscript-ai/gptscript/pkg/hash" + "github.com/gptscript-ai/gptscript/pkg/repos/download" +) + +//go:embed SHASUMS256.txt +var releasesData []byte + +const downloadURL = "https://github.com/gptscript-ai/busybox-w32/releases/download/%s" + +type Runtime struct { +} + +func (r *Runtime) ID() string { + return "busybox" +} + +func (r *Runtime) Supports(cmd []string) bool { + if runtime.GOOS != "windows" { + return false + } + for _, bin := range []string{"bash", "sh", "/bin/sh", "/bin/bash"} { + if runtimeEnv.Matches(cmd, bin) { + return true + } + } + return false +} + +func (r *Runtime) Setup(ctx context.Context, dataRoot, _ string, env []string) ([]string, error) { + binPath, err := r.getRuntime(ctx, dataRoot) + if err != nil { + return nil, err + } + + newEnv := runtimeEnv.AppendPath(env, binPath) + return newEnv, nil +} + +func (r *Runtime) getReleaseAndDigest() (string, string, error) { + scanner := bufio.NewScanner(bytes.NewReader(releasesData)) + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + return fmt.Sprintf(downloadURL, fields[1]), fields[0], nil + } + + return "", "", fmt.Errorf("failed to find %s release", r.ID()) +} + +func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { + url, sha, err := 
r.getReleaseAndDigest() + if err != nil { + return "", err + } + + target := filepath.Join(cwd, "busybox", hash.ID(url, sha)) + if _, err := os.Stat(target); err == nil { + return target, nil + } else if !errors.Is(err, fs.ErrNotExist) { + return "", err + } + + log.Infof("Downloading Busybox") + tmp := target + ".download" + defer os.RemoveAll(tmp) + + if err := os.MkdirAll(tmp, 0755); err != nil { + return "", err + } + + if err := download.Extract(ctx, url, sha, tmp); err != nil { + return "", err + } + + bbExe := filepath.Join(tmp, path.Base(url)) + + cmd := exec.Command(bbExe, "--install", ".") + cmd.Dir = filepath.Dir(bbExe) + + if err := cmd.Run(); err != nil { + return "", err + } + + if err := os.Rename(tmp, target); err != nil { + return "", err + } + + return target, nil +} diff --git a/pkg/repos/runtimes/busybox/busybox_test.go b/pkg/repos/runtimes/busybox/busybox_test.go new file mode 100644 index 00000000..f3add18a --- /dev/null +++ b/pkg/repos/runtimes/busybox/busybox_test.go @@ -0,0 +1,41 @@ +package busybox + +import ( + "context" + "errors" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/adrg/xdg" + "github.com/samber/lo" + "github.com/stretchr/testify/require" +) + +var ( + testCacheHome = lo.Must(xdg.CacheFile("gptscript-test-cache/runtime")) +) + +func firstPath(s []string) string { + _, p, _ := strings.Cut(s[0], "=") + return strings.Split(p, string(os.PathListSeparator))[0] +} + +func TestRuntime(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip() + } + + r := Runtime{} + + s, err := r.Setup(context.Background(), testCacheHome, "testdata", os.Environ()) + require.NoError(t, err) + _, err = os.Stat(filepath.Join(firstPath(s), "busybox.exe")) + if errors.Is(err, fs.ErrNotExist) { + _, err = os.Stat(filepath.Join(firstPath(s), "busybox")) + } + require.NoError(t, err) +} diff --git a/pkg/repos/runtimes/busybox/log.go b/pkg/repos/runtimes/busybox/log.go new file mode 100644 index 00000000..b7e486f1 
--- /dev/null +++ b/pkg/repos/runtimes/busybox/log.go @@ -0,0 +1,5 @@ +package busybox + +import "github.com/gptscript-ai/gptscript/pkg/mvl" + +var log = mvl.Package() diff --git a/pkg/repos/runtimes/default.go b/pkg/repos/runtimes/default.go index d37cca8f..3782e26e 100644 --- a/pkg/repos/runtimes/default.go +++ b/pkg/repos/runtimes/default.go @@ -3,12 +3,14 @@ package runtimes import ( "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/repos" + "github.com/gptscript-ai/gptscript/pkg/repos/runtimes/busybox" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes/golang" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes/node" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes/python" ) var Runtimes = []repos.Runtime{ + &busybox.Runtime{}, &python.Runtime{ Version: "3.12", Default: true, diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 12eff23a..60f1cfc3 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -751,9 +751,6 @@ func TestGlobalErr(t *testing.T) { } func TestContextArg(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip() - } runner := tester.NewRunner(t) x, err := runner.Run("", `{ "file": "foo.db" diff --git a/pkg/tests/testdata/TestContextArg/other.gpt b/pkg/tests/testdata/TestContextArg/other.gpt index b1acd66a..f97b4ba6 100644 --- a/pkg/tests/testdata/TestContextArg/other.gpt +++ b/pkg/tests/testdata/TestContextArg/other.gpt @@ -2,5 +2,5 @@ name: fromcontext args: first: an arg args: second: an arg -#!/bin/bash -echo this is from other context ${first} and then ${second} \ No newline at end of file +#!/usr/bin/env bash +echo this is from other context ${FIRST} and then ${SECOND} \ No newline at end of file diff --git a/pkg/tests/testdata/TestContextArg/test.gpt b/pkg/tests/testdata/TestContextArg/test.gpt index 9569aaf9..50d2ccf2 100644 --- a/pkg/tests/testdata/TestContextArg/test.gpt +++ b/pkg/tests/testdata/TestContextArg/test.gpt @@ -9,4 +9,4 @@ name: fromcontext 
args: first: an arg #!/bin/bash -echo this is from context -- ${first} \ No newline at end of file +echo this is from context -- ${FIRST} \ No newline at end of file diff --git a/pkg/tests/tester/runner.go b/pkg/tests/tester/runner.go index 775f0248..ef75c0f5 100644 --- a/pkg/tests/tester/runner.go +++ b/pkg/tests/tester/runner.go @@ -8,8 +8,11 @@ import ( "path/filepath" "testing" + "github.com/adrg/xdg" "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/repos" + "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/hexops/autogold/v2" @@ -171,8 +174,15 @@ func NewRunner(t *testing.T) *Runner { t: t, } + cacheDir, err := xdg.CacheFile("gptscript-test-cache/runtime") + require.NoError(t, err) + + rm := runtimes.Default(cacheDir) + rm.(*repos.Manager).SetSupportLocal() + run, err := runner.New(c, credentials.NoopStore{}, runner.Options{ - Sequential: true, + Sequential: true, + RuntimeManager: rm, }) require.NoError(t, err) From 4c3da3a352b5a5f26553685fb07938529d7ce766 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Fri, 2 Aug 2024 16:02:37 -0700 Subject: [PATCH 071/270] chore: uppercase env variable names Signed-off-by: Taylor Price --- integration/scripts/cred_scopes.gpt | 4 ++-- pkg/engine/cmd.go | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/integration/scripts/cred_scopes.gpt b/integration/scripts/cred_scopes.gpt index 7319f163..dc8e24e7 100644 --- a/integration/scripts/cred_scopes.gpt +++ b/integration/scripts/cred_scopes.gpt @@ -149,8 +149,8 @@ name: getcred import os import json -var = os.getenv('var') -val = os.getenv('val') +var = os.getenv('VAR') +val = os.getenv('VAL') output = { "env": { diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 8e7b234e..869e67dc 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -204,10 +204,9 
@@ var ignoreENV = map[string]struct{}{ } func appendEnv(envs []string, k, v string) []string { - for _, k := range []string{k, env.ToEnvLike(k)} { - if _, ignore := ignoreENV[k]; !ignore { - envs = append(envs, k+"="+v) - } + //fmt.Printf("%s=%s\n", k, v) + if _, ignore := ignoreENV[k]; !ignore { + envs = append(envs, strings.ToUpper(k)+"="+v) } return envs } From 9b8dc494ac29a921ea274b12814b2117514b655f Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Mon, 5 Aug 2024 10:05:35 -0700 Subject: [PATCH 072/270] fix: correct small comment typo --- pkg/tests/smoke/smoke_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/tests/smoke/smoke_test.go b/pkg/tests/smoke/smoke_test.go index 66374ef1..a6d0ab2c 100644 --- a/pkg/tests/smoke/smoke_test.go +++ b/pkg/tests/smoke/smoke_test.go @@ -83,7 +83,7 @@ func TestSmoke(t *testing.T) { actualEvents, ` - disregard differences in timestamps, generated IDs, natural language verbiage, and event order -- omit callProgress events from the comparision +- omit callProgress events from the comparison - the overall stream of events and set of tools called should roughly match - arguments passed in tool calls should be roughly the same - the final callFinish event should be semantically similar From ae328ac395ecfc710399a5b50fbc0aaa799c4168 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Mon, 5 Aug 2024 14:05:16 -0700 Subject: [PATCH 073/270] chore: ensure key is env-like Signed-off-by: Taylor Price --- pkg/engine/cmd.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 869e67dc..484b00cb 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -204,9 +204,8 @@ var ignoreENV = map[string]struct{}{ } func appendEnv(envs []string, k, v string) []string { - //fmt.Printf("%s=%s\n", k, v) if _, ignore := ignoreENV[k]; !ignore { - envs = append(envs, strings.ToUpper(k)+"="+v) + envs = append(envs, strings.ToUpper(env.ToEnvLike(k))+"="+v) } return envs } 
From 806fdc38b48658cb5bb68cb492f411f4fc1d55c0 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 5 Aug 2024 20:56:56 -0700 Subject: [PATCH 074/270] feat: support inline package.json and requirements.txt --- pkg/engine/cmd.go | 2 +- pkg/env/env.go | 9 ++ pkg/parser/parser.go | 40 +++++- pkg/parser/parser_test.go | 29 ++++ pkg/repos/get.go | 30 ++-- pkg/repos/runtimes/busybox/busybox.go | 5 +- pkg/repos/runtimes/busybox/busybox_test.go | 3 +- pkg/repos/runtimes/golang/golang.go | 8 +- pkg/repos/runtimes/golang/golang_test.go | 3 +- pkg/repos/runtimes/node/node.go | 26 +++- pkg/repos/runtimes/node/node_test.go | 5 +- pkg/repos/runtimes/python/python.go | 29 +++- pkg/repos/runtimes/python/python_test.go | 3 +- pkg/tests/runner_test.go | 21 +++ .../testdata/TestRuntimes/call1-resp.golden | 16 +++ pkg/tests/testdata/TestRuntimes/call1.golden | 37 +++++ .../testdata/TestRuntimes/call2-resp.golden | 16 +++ pkg/tests/testdata/TestRuntimes/call2.golden | 70 +++++++++ .../testdata/TestRuntimes/call3-resp.golden | 16 +++ pkg/tests/testdata/TestRuntimes/call3.golden | 103 +++++++++++++ .../testdata/TestRuntimes/call4-resp.golden | 9 ++ pkg/tests/testdata/TestRuntimes/call4.golden | 136 ++++++++++++++++++ pkg/tests/testdata/TestRuntimes/test.gpt | 58 ++++++++ pkg/tests/tester/runner.go | 2 - pkg/types/tool.go | 5 + 25 files changed, 635 insertions(+), 46 deletions(-) create mode 100644 pkg/tests/testdata/TestRuntimes/call1-resp.golden create mode 100644 pkg/tests/testdata/TestRuntimes/call1.golden create mode 100644 pkg/tests/testdata/TestRuntimes/call2-resp.golden create mode 100644 pkg/tests/testdata/TestRuntimes/call2.golden create mode 100644 pkg/tests/testdata/TestRuntimes/call3-resp.golden create mode 100644 pkg/tests/testdata/TestRuntimes/call3.golden create mode 100644 pkg/tests/testdata/TestRuntimes/call4-resp.golden create mode 100644 pkg/tests/testdata/TestRuntimes/call4.golden create mode 100644 pkg/tests/testdata/TestRuntimes/test.gpt diff --git 
a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 484b00cb..57dfd477 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -282,7 +282,7 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T ) if strings.TrimSpace(rest) != "" { - f, err := os.CreateTemp("", version.ProgramName+requiredFileExtensions[args[0]]) + f, err := os.CreateTemp(env.Getenv("GPTSCRIPT_TMPDIR", envvars), version.ProgramName+requiredFileExtensions[args[0]]) if err != nil { return nil, nil, err } diff --git a/pkg/env/env.go b/pkg/env/env.go index bedd5f9d..2994825b 100644 --- a/pkg/env/env.go +++ b/pkg/env/env.go @@ -26,6 +26,15 @@ func ToEnvLike(v string) string { return strings.ToUpper(v) } +func Getenv(key string, envs []string) string { + for i := len(envs) - 1; i >= 0; i-- { + if k, v, ok := strings.Cut(envs[i], "="); ok && k == key { + return v + } + } + return "" +} + func Matches(cmd []string, bin string) bool { switch len(cmd) { case 0: diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index ff5d1374..b57cb658 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -16,7 +16,7 @@ import ( var ( sepRegex = regexp.MustCompile(`^\s*---+\s*$`) strictSepRegex = regexp.MustCompile(`^---\n$`) - skipRegex = regexp.MustCompile(`^![-\w]+\s*$`) + skipRegex = regexp.MustCompile(`^![-.:\w]+\s*$`) ) func normalize(key string) string { @@ -308,6 +308,8 @@ func Parse(input io.Reader, opts ...Options) (Document, error) { } } + nodes = assignMetadata(nodes) + if !opt.AssignGlobals { return Document{ Nodes: nodes, @@ -359,6 +361,42 @@ func Parse(input io.Reader, opts ...Options) (Document, error) { }, nil } +func assignMetadata(nodes []Node) (result []Node) { + metadata := map[string]map[string]string{} + result = make([]Node, 0, len(nodes)) + for _, node := range nodes { + if node.TextNode != nil { + body, ok := strings.CutPrefix(node.TextNode.Text, "!metadata:") + if ok { + line, rest, ok := strings.Cut(body, "\n") + if ok { + toolName, metaKey, ok := 
strings.Cut(strings.TrimSpace(line), ":") + if ok { + d, ok := metadata[toolName] + if !ok { + d = map[string]string{} + metadata[toolName] = d + } + d[metaKey] = strings.TrimSpace(rest) + } + } + } + } + } + if len(metadata) == 0 { + return nodes + } + + for _, node := range nodes { + if node.ToolNode != nil { + node.ToolNode.Tool.MetaData = metadata[node.ToolNode.Tool.Name] + } + result = append(result, node) + } + + return +} + func isGPTScriptHashBang(line string) bool { if !strings.HasPrefix(line, "#!") { return false diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index 9f682efa..3967ebd5 100644 --- a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -6,6 +6,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/types" "github.com/hexops/autogold/v2" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -239,3 +240,31 @@ share output filters: shared }}, }}).Equal(t, out) } + +func TestParseMetaData(t *testing.T) { + input := ` +name: first + +body +--- +!metadata:first:package.json +foo=base +f + +--- +!metadata:first2:requirements.txt +asdf2 + +--- +!metadata:first:requirements.txt +asdf +` + tools, err := ParseTools(strings.NewReader(input)) + require.NoError(t, err) + + assert.Len(t, tools, 1) + autogold.Expect(map[string]string{ + "package.json": "foo=base\nf", + "requirements.txt": "asdf", + }).Equal(t, tools[0].MetaData) +} diff --git a/pkg/repos/get.go b/pkg/repos/get.go index bead4a7a..fc675c58 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -26,8 +26,8 @@ const credentialHelpersRepo = "github.com/gptscript-ai/gptscript-credential-help type Runtime interface { ID() string - Supports(cmd []string) bool - Setup(ctx context.Context, dataRoot, toolSource string, env []string) ([]string, error) + Supports(tool types.Tool, cmd []string) bool + Setup(ctx context.Context, tool types.Tool, dataRoot, toolSource string, env []string) ([]string, error) } type noopRuntime struct { @@ -37,11 +37,11 @@ 
func (n noopRuntime) ID() string { return "none" } -func (n noopRuntime) Supports(_ []string) bool { +func (n noopRuntime) Supports(_ types.Tool, _ []string) bool { return false } -func (n noopRuntime) Setup(_ context.Context, _, _ string, _ []string) ([]string, error) { +func (n noopRuntime) Setup(_ context.Context, _ types.Tool, _, _ string, _ []string) ([]string, error) { return nil, nil } @@ -52,7 +52,6 @@ type Manager struct { credHelperDirs credentials.CredentialHelperDirs runtimes []Runtime credHelperConfig *credHelperConfig - supportLocal bool } type credHelperConfig struct { @@ -62,10 +61,6 @@ type credHelperConfig struct { env []string } -func (m *Manager) SetSupportLocal() { - m.supportLocal = true -} - func New(cacheDir string, runtimes ...Runtime) *Manager { root := filepath.Join(cacheDir, "repos") return &Manager{ @@ -216,7 +211,7 @@ func (m *Manager) setup(ctx context.Context, runtime Runtime, tool types.Tool, e } } - newEnv, err := runtime.Setup(ctx, m.runtimeDir, targetFinal, env) + newEnv, err := runtime.Setup(ctx, tool, m.runtimeDir, targetFinal, env) if err != nil { return "", nil, err } @@ -240,17 +235,10 @@ func (m *Manager) setup(ctx context.Context, runtime Runtime, tool types.Tool, e func (m *Manager) GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) { var isLocal bool - if !m.supportLocal { - if tool.Source.Repo == nil { - return tool.WorkingDir, env, nil - } - - if tool.Source.Repo.VCS != "git" { - return "", nil, fmt.Errorf("only git is supported, found VCS %s for %s", tool.Source.Repo.VCS, tool.ID) - } - } else if tool.Source.Repo == nil { + if tool.Source.Repo == nil { isLocal = true - id := hash.Digest(tool)[:12] + d, _ := json.Marshal(tool) + id := hash.Digest(d)[:12] tool.Source.Repo = &types.Repo{ VCS: "", Root: id, @@ -261,7 +249,7 @@ func (m *Manager) GetContext(ctx context.Context, tool types.Tool, cmd, env []st } for _, runtime := range m.runtimes { - if runtime.Supports(cmd) { + if 
runtime.Supports(tool, cmd) { log.Debugf("Runtime %s supports %v", runtime.ID(), cmd) return m.setup(ctx, runtime, tool, env) } diff --git a/pkg/repos/runtimes/busybox/busybox.go b/pkg/repos/runtimes/busybox/busybox.go index b0c00a0c..542ba94a 100644 --- a/pkg/repos/runtimes/busybox/busybox.go +++ b/pkg/repos/runtimes/busybox/busybox.go @@ -18,6 +18,7 @@ import ( runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/repos/download" + "github.com/gptscript-ai/gptscript/pkg/types" ) //go:embed SHASUMS256.txt @@ -32,7 +33,7 @@ func (r *Runtime) ID() string { return "busybox" } -func (r *Runtime) Supports(cmd []string) bool { +func (r *Runtime) Supports(_ types.Tool, cmd []string) bool { if runtime.GOOS != "windows" { return false } @@ -44,7 +45,7 @@ func (r *Runtime) Supports(cmd []string) bool { return false } -func (r *Runtime) Setup(ctx context.Context, dataRoot, _ string, env []string) ([]string, error) { +func (r *Runtime) Setup(ctx context.Context, _ types.Tool, dataRoot, _ string, env []string) ([]string, error) { binPath, err := r.getRuntime(ctx, dataRoot) if err != nil { return nil, err diff --git a/pkg/repos/runtimes/busybox/busybox_test.go b/pkg/repos/runtimes/busybox/busybox_test.go index f3add18a..77bfae59 100644 --- a/pkg/repos/runtimes/busybox/busybox_test.go +++ b/pkg/repos/runtimes/busybox/busybox_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/adrg/xdg" + "github.com/gptscript-ai/gptscript/pkg/types" "github.com/samber/lo" "github.com/stretchr/testify/require" ) @@ -31,7 +32,7 @@ func TestRuntime(t *testing.T) { r := Runtime{} - s, err := r.Setup(context.Background(), testCacheHome, "testdata", os.Environ()) + s, err := r.Setup(context.Background(), types.Tool{}, testCacheHome, "testdata", os.Environ()) require.NoError(t, err) _, err = os.Stat(filepath.Join(firstPath(s), "busybox.exe")) if errors.Is(err, fs.ErrNotExist) { diff --git 
a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index 28300439..b19cfe90 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -18,6 +18,7 @@ import ( runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/repos/download" + "github.com/gptscript-ai/gptscript/pkg/types" ) //go:embed digests.txt @@ -34,11 +35,12 @@ func (r *Runtime) ID() string { return "go" + r.Version } -func (r *Runtime) Supports(cmd []string) bool { - return len(cmd) > 0 && cmd[0] == "${GPTSCRIPT_TOOL_DIR}/bin/gptscript-go-tool" +func (r *Runtime) Supports(tool types.Tool, cmd []string) bool { + return tool.Source.IsGit() && + len(cmd) > 0 && cmd[0] == "${GPTSCRIPT_TOOL_DIR}/bin/gptscript-go-tool" } -func (r *Runtime) Setup(ctx context.Context, dataRoot, toolSource string, env []string) ([]string, error) { +func (r *Runtime) Setup(ctx context.Context, _ types.Tool, dataRoot, toolSource string, env []string) ([]string, error) { binPath, err := r.getRuntime(ctx, dataRoot) if err != nil { return nil, err diff --git a/pkg/repos/runtimes/golang/golang_test.go b/pkg/repos/runtimes/golang/golang_test.go index 5f71fb50..56098a51 100644 --- a/pkg/repos/runtimes/golang/golang_test.go +++ b/pkg/repos/runtimes/golang/golang_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/adrg/xdg" + "github.com/gptscript-ai/gptscript/pkg/types" "github.com/samber/lo" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,7 +28,7 @@ func TestRuntime(t *testing.T) { Version: "1.22.1", } - s, err := r.Setup(context.Background(), testCacheHome, "testdata", os.Environ()) + s, err := r.Setup(context.Background(), types.Tool{}, testCacheHome, "testdata", os.Environ()) require.NoError(t, err) p, v, _ := strings.Cut(s[0], "=") v, _, _ = strings.Cut(v, string(filepath.ListSeparator)) diff --git a/pkg/repos/runtimes/node/node.go 
b/pkg/repos/runtimes/node/node.go index 575e3b23..fde5103d 100644 --- a/pkg/repos/runtimes/node/node.go +++ b/pkg/repos/runtimes/node/node.go @@ -17,12 +17,16 @@ import ( runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/repos/download" + "github.com/gptscript-ai/gptscript/pkg/types" ) //go:embed SHASUMS256.txt.asc var releasesData []byte -const downloadURL = "https://nodejs.org/dist/%s/" +const ( + downloadURL = "https://nodejs.org/dist/%s/" + packageJSON = "package.json" +) type Runtime struct { // version something like "3.12" @@ -35,7 +39,10 @@ func (r *Runtime) ID() string { return "node" + r.Version } -func (r *Runtime) Supports(cmd []string) bool { +func (r *Runtime) Supports(tool types.Tool, cmd []string) bool { + if _, hasPackageJSON := tool.MetaData[packageJSON]; !hasPackageJSON && !tool.Source.IsGit() { + return false + } for _, testCmd := range []string{"node", "npx", "npm"} { if r.supports(testCmd, cmd) { return true @@ -54,17 +61,21 @@ func (r *Runtime) supports(testCmd string, cmd []string) bool { return runtimeEnv.Matches(cmd, testCmd) } -func (r *Runtime) Setup(ctx context.Context, dataRoot, toolSource string, env []string) ([]string, error) { +func (r *Runtime) Setup(ctx context.Context, tool types.Tool, dataRoot, toolSource string, env []string) ([]string, error) { binPath, err := r.getRuntime(ctx, dataRoot) if err != nil { return nil, err } newEnv := runtimeEnv.AppendPath(env, binPath) - if err := r.runNPM(ctx, toolSource, binPath, append(env, newEnv...)); err != nil { + if err := r.runNPM(ctx, tool, toolSource, binPath, append(env, newEnv...)); err != nil { return nil, err } + if _, ok := tool.MetaData[packageJSON]; ok { + newEnv = append(newEnv, "GPTSCRIPT_TMPDIR="+toolSource) + } + return newEnv, nil } @@ -100,11 +111,16 @@ func (r *Runtime) getReleaseAndDigest() (string, string, error) { return "", "", fmt.Errorf("failed to find %s release for os=%s 
arch=%s", r.ID(), osName(), arch()) } -func (r *Runtime) runNPM(ctx context.Context, toolSource, binDir string, env []string) error { +func (r *Runtime) runNPM(ctx context.Context, tool types.Tool, toolSource, binDir string, env []string) error { log.InfofCtx(ctx, "Running npm in %s", toolSource) cmd := debugcmd.New(ctx, filepath.Join(binDir, "npm"), "install") cmd.Env = env cmd.Dir = toolSource + if contents, ok := tool.MetaData[packageJSON]; ok { + if err := os.WriteFile(filepath.Join(toolSource, packageJSON), []byte(contents+"\n"), 0644); err != nil { + return err + } + } return cmd.Run() } diff --git a/pkg/repos/runtimes/node/node_test.go b/pkg/repos/runtimes/node/node_test.go index 50ef1e0a..014619c8 100644 --- a/pkg/repos/runtimes/node/node_test.go +++ b/pkg/repos/runtimes/node/node_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/adrg/xdg" + "github.com/gptscript-ai/gptscript/pkg/types" "github.com/samber/lo" "github.com/stretchr/testify/require" ) @@ -28,7 +29,7 @@ func TestRuntime(t *testing.T) { Version: "20", } - s, err := r.Setup(context.Background(), testCacheHome, "testdata", os.Environ()) + s, err := r.Setup(context.Background(), types.Tool{}, testCacheHome, "testdata", os.Environ()) require.NoError(t, err) _, err = os.Stat(filepath.Join(firstPath(s), "node.exe")) if errors.Is(err, fs.ErrNotExist) { @@ -42,7 +43,7 @@ func TestRuntime21(t *testing.T) { Version: "21", } - s, err := r.Setup(context.Background(), testCacheHome, "testdata", os.Environ()) + s, err := r.Setup(context.Background(), types.Tool{}, testCacheHome, "testdata", os.Environ()) require.NoError(t, err) _, err = os.Stat(filepath.Join(firstPath(s), "node.exe")) if errors.Is(err, fs.ErrNotExist) { diff --git a/pkg/repos/runtimes/python/python.go b/pkg/repos/runtimes/python/python.go index c031cb16..ae24f92a 100644 --- a/pkg/repos/runtimes/python/python.go +++ b/pkg/repos/runtimes/python/python.go @@ -17,12 +17,16 @@ import ( runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" 
"github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/repos/download" + "github.com/gptscript-ai/gptscript/pkg/types" ) //go:embed python.json var releasesData []byte -const uvVersion = "uv==0.2.27" +const ( + uvVersion = "uv==0.2.33" + requirementsTxt = "requirements.txt" +) type Release struct { OS string `json:"os,omitempty"` @@ -43,7 +47,10 @@ func (r *Runtime) ID() string { return "python" + r.Version } -func (r *Runtime) Supports(cmd []string) bool { +func (r *Runtime) Supports(tool types.Tool, cmd []string) bool { + if _, hasRequirements := tool.MetaData[requirementsTxt]; !hasRequirements && !tool.Source.IsGit() { + return false + } if runtimeEnv.Matches(cmd, r.ID()) { return true } @@ -112,7 +119,7 @@ func (r *Runtime) copyPythonForWindows(binDir string) error { return nil } -func (r *Runtime) Setup(ctx context.Context, dataRoot, toolSource string, env []string) ([]string, error) { +func (r *Runtime) Setup(ctx context.Context, tool types.Tool, dataRoot, toolSource string, env []string) ([]string, error) { binPath, err := r.getRuntime(ctx, dataRoot) if err != nil { return nil, err @@ -145,7 +152,7 @@ func (r *Runtime) Setup(ctx context.Context, dataRoot, toolSource string, env [] } } - if err := r.runPip(ctx, toolSource, binPath, append(env, newEnv...)); err != nil { + if err := r.runPip(ctx, tool, toolSource, binPath, append(env, newEnv...)); err != nil { return nil, err } @@ -170,9 +177,19 @@ func (r *Runtime) getReleaseAndDigest() (string, string, error) { return "", "", fmt.Errorf("failed to find an python runtime for %s", r.Version) } -func (r *Runtime) runPip(ctx context.Context, toolSource, binDir string, env []string) error { +func (r *Runtime) runPip(ctx context.Context, tool types.Tool, toolSource, binDir string, env []string) error { log.InfofCtx(ctx, "Running pip in %s", toolSource) - for _, req := range []string{"requirements-gptscript.txt", "requirements.txt"} { + if content, ok := tool.MetaData[requirementsTxt]; 
ok { + reqFile := filepath.Join(toolSource, requirementsTxt) + if err := os.WriteFile(reqFile, []byte(content+"\n"), 0644); err != nil { + return err + } + cmd := debugcmd.New(ctx, uvBin(binDir), "pip", "install", "-r", reqFile) + cmd.Env = env + return cmd.Run() + } + + for _, req := range []string{"requirements-gptscript.txt", requirementsTxt} { reqFile := filepath.Join(toolSource, req) if s, err := os.Stat(reqFile); err == nil && !s.IsDir() { cmd := debugcmd.New(ctx, uvBin(binDir), "pip", "install", "-r", reqFile) diff --git a/pkg/repos/runtimes/python/python_test.go b/pkg/repos/runtimes/python/python_test.go index fc2ededc..0e483305 100644 --- a/pkg/repos/runtimes/python/python_test.go +++ b/pkg/repos/runtimes/python/python_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/adrg/xdg" + "github.com/gptscript-ai/gptscript/pkg/types" "github.com/samber/lo" "github.com/stretchr/testify/require" ) @@ -27,7 +28,7 @@ func TestRuntime(t *testing.T) { Version: "3.12", } - s, err := r.Setup(context.Background(), testCacheHome, "testdata", os.Environ()) + s, err := r.Setup(context.Background(), types.Tool{}, testCacheHome, "testdata", os.Environ()) require.NoError(t, err) _, err = os.Stat(filepath.Join(firstPath(s), "python.exe")) if errors.Is(err, os.ErrNotExist) { diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 60f1cfc3..424c84c1 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -997,3 +997,24 @@ func TestToolRefAll(t *testing.T) { r := tester.NewRunner(t) r.RunDefault() } + +func TestRuntimes(t *testing.T) { + r := tester.NewRunner(t) + r.RespondWith(tester.Result{ + Func: types.CompletionFunctionCall{ + Name: "py", + Arguments: "{}", + }, + }, tester.Result{ + Func: types.CompletionFunctionCall{ + Name: "node", + Arguments: "{}", + }, + }, tester.Result{ + Func: types.CompletionFunctionCall{ + Name: "bash", + Arguments: "{}", + }, + }) + r.RunDefault() +} diff --git a/pkg/tests/testdata/TestRuntimes/call1-resp.golden 
b/pkg/tests/testdata/TestRuntimes/call1-resp.golden new file mode 100644 index 00000000..1d53670a --- /dev/null +++ b/pkg/tests/testdata/TestRuntimes/call1-resp.golden @@ -0,0 +1,16 @@ +`{ + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + } + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestRuntimes/call1.golden b/pkg/tests/testdata/TestRuntimes/call1.golden new file mode 100644 index 00000000..67c7d9f7 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimes/call1.golden @@ -0,0 +1,37 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:py", + "name": "py", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:node", + "name": "node", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:bash", + "name": "bash", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Dummy" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestRuntimes/call2-resp.golden b/pkg/tests/testdata/TestRuntimes/call2-resp.golden new file mode 100644 index 00000000..4806793c --- /dev/null +++ b/pkg/tests/testdata/TestRuntimes/call2-resp.golden @@ -0,0 +1,16 @@ +`{ + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + } + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestRuntimes/call2.golden b/pkg/tests/testdata/TestRuntimes/call2.golden new file mode 100644 index 00000000..da456a5d --- /dev/null +++ b/pkg/tests/testdata/TestRuntimes/call2.golden @@ -0,0 +1,70 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:py", + "name": "py", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:node", + 
"name": "node", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:bash", + "name": "bash", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Dummy" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "py worked\r\n" + } + ], + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + }, + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestRuntimes/call3-resp.golden b/pkg/tests/testdata/TestRuntimes/call3-resp.golden new file mode 100644 index 00000000..1103f824 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimes/call3-resp.golden @@ -0,0 +1,16 @@ +`{ + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 2, + "id": "call_3", + "function": { + "name": "bash", + "arguments": "{}" + } + } + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestRuntimes/call3.golden b/pkg/tests/testdata/TestRuntimes/call3.golden new file mode 100644 index 00000000..f0792540 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimes/call3.golden @@ -0,0 +1,103 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:py", + "name": "py", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:node", + "name": "node", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:bash", + "name": "bash", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Dummy" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + } + } + ], + 
"usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "py worked\r\n" + } + ], + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + }, + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "node worked\n" + } + ], + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + }, + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestRuntimes/call4-resp.golden b/pkg/tests/testdata/TestRuntimes/call4-resp.golden new file mode 100644 index 00000000..8135a8c9 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimes/call4-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 4" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestRuntimes/call4.golden b/pkg/tests/testdata/TestRuntimes/call4.golden new file mode 100644 index 00000000..04ac31f5 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimes/call4.golden @@ -0,0 +1,136 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:py", + "name": "py", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:node", + "name": "node", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimes/test.gpt:bash", + "name": "bash", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Dummy" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "py worked\r\n" + } + ], + "toolCall": { + "index": 0, + "id": 
"call_1", + "function": { + "name": "py", + "arguments": "{}" + } + }, + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "node worked\n" + } + ], + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + }, + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 2, + "id": "call_3", + "function": { + "name": "bash", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "bash works\n" + } + ], + "toolCall": { + "index": 2, + "id": "call_3", + "function": { + "name": "bash", + "arguments": "{}" + } + }, + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestRuntimes/test.gpt b/pkg/tests/testdata/TestRuntimes/test.gpt new file mode 100644 index 00000000..db3ede64 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimes/test.gpt @@ -0,0 +1,58 @@ +name: first +tools: py, node, bash + +Dummy + +--- +name: py + +#!/usr/bin/env python3 + +import requests +import platform + +# this is dumb hack to get the line endings to always be \r\n so the golden files match +# on both linux and windows +if platform.system() == 'Windows': + print('py worked') +else: + print('py worked\r') + +--- +!metadata:py:requirements.txt + +requests + +--- +name: node + +#!/usr/bin/env node + +import chalk from 'chalk'; +console.log("node worked") + +--- +!metadata:node:package.json + +{ + "name": "chalk-example", + "version": "1.0.0", + "type": "module", + "description": "A simple example project to demonstrate the use of chalk", + "main": "example.js", + "scripts": { + "start": "node example.js" + }, + "author": "Your Name", + "license": "MIT", + "dependencies": { + "chalk": "^5.0.0" + } + } + +--- +name: bash + +#!/bin/bash + +echo bash works \ No newline at 
end of file diff --git a/pkg/tests/tester/runner.go b/pkg/tests/tester/runner.go index ef75c0f5..a36c5e91 100644 --- a/pkg/tests/tester/runner.go +++ b/pkg/tests/tester/runner.go @@ -11,7 +11,6 @@ import ( "github.com/adrg/xdg" "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/loader" - "github.com/gptscript-ai/gptscript/pkg/repos" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/gptscript-ai/gptscript/pkg/types" @@ -178,7 +177,6 @@ func NewRunner(t *testing.T) *Runner { require.NoError(t, err) rm := runtimes.Default(cacheDir) - rm.(*repos.Manager).SetSupportLocal() run, err := runner.New(c, credentials.NoopStore{}, runner.Options{ Sequential: true, diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 54d5d817..61a67fa4 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -166,6 +166,7 @@ type Tool struct { ID string `json:"id,omitempty"` ToolMapping map[string][]ToolReference `json:"toolMapping,omitempty"` + MetaData map[string]string `json:"metaData,omitempty"` LocalTools map[string]string `json:"localTools,omitempty"` Source ToolSource `json:"source,omitempty"` WorkingDir string `json:"workingDir,omitempty"` @@ -793,6 +794,10 @@ type ToolSource struct { Repo *Repo `json:"repo,omitempty"` } +func (t ToolSource) IsGit() bool { + return t.Repo != nil && t.Repo.VCS == "git" +} + func (t ToolSource) String() string { return fmt.Sprintf("%s:%d", t.Location, t.LineNo) } From d8b1ea856423ce9f8b8b8e98f039a24c0ea24b79 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Tue, 6 Aug 2024 13:34:30 -0700 Subject: [PATCH 075/270] fix: windows absolute path logic --- pkg/loader/loader.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index 3d2ae8ed..1dfaa0e2 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -68,10 +68,8 @@ func openFile(path string) (io.ReadCloser, bool, error) { } func 
loadLocal(base *source, name string) (*source, bool, error) { - // We want to keep all strings in / format, and only convert to platform specific when reading - // This is why we use path instead of filepath. filePath := name - if !path.IsAbs(name) { + if !filepath.IsAbs(name) { filePath = path.Join(base.Path, name) } From 4fd8e8a2ad24f48e48f50c65368488c1ea48046b Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Tue, 6 Aug 2024 15:28:08 -0700 Subject: [PATCH 076/270] fix: add comment back in correct place, right above the place we mutate path Signed-off-by: Taylor Price --- pkg/loader/loader.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index 1dfaa0e2..80342f2b 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -70,6 +70,8 @@ func openFile(path string) (io.ReadCloser, bool, error) { func loadLocal(base *source, name string) (*source, bool, error) { filePath := name if !filepath.IsAbs(name) { + // We want to keep all strings in / format, and only convert to platform specific when reading + // This is why we use path instead of filepath. filePath = path.Join(base.Path, name) } From c0507a2c32dd543a43e46526710fab12b81c04f8 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 6 Aug 2024 21:02:19 -0400 Subject: [PATCH 077/270] feat: allow providers to be restarted if they stop By not caching the client, gptscript is able to restart the provider daemon if it stops. If the daemon is still running, then there is little overhead because the daemon URL is cached and the tool will not be completely reprocessed. The model to provider mapping is still cached so that the client can be recreated when necessary. 
Signed-off-by: Donnie Adams --- pkg/remote/remote.go | 52 +++++++++++++++----------------------------- 1 file changed, 18 insertions(+), 34 deletions(-) diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 6d83e6cc..89863529 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -22,10 +22,9 @@ import ( ) type Client struct { - clientsLock sync.Mutex + modelsLock sync.Mutex cache *cache.Client - clients map[string]*openai.Client - models map[string]*openai.Client + modelToProvider map[string]string runner *runner.Runner envs []string credStore credentials.CredentialStore @@ -43,14 +42,19 @@ func New(r *runner.Runner, envs []string, cache *cache.Client, credStore credent } func (c *Client) Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { - c.clientsLock.Lock() - client, ok := c.models[messageRequest.Model] - c.clientsLock.Unlock() + c.modelsLock.Lock() + provider, ok := c.modelToProvider[messageRequest.Model] + c.modelsLock.Unlock() if !ok { return nil, fmt.Errorf("failed to find remote model %s", messageRequest.Model) } + client, err := c.load(ctx, provider) + if err != nil { + return nil, err + } + toolName, modelName := types.SplitToolRef(messageRequest.Model) if modelName == "" { // modelName is empty, then the messageRequest.Model is not of the form 'modelName from provider' @@ -96,19 +100,19 @@ func (c *Client) Supports(ctx context.Context, modelString string) (bool, error) return false, nil } - client, err := c.load(ctx, providerName) + _, err := c.load(ctx, providerName) if err != nil { return false, err } - c.clientsLock.Lock() - defer c.clientsLock.Unlock() + c.modelsLock.Lock() + defer c.modelsLock.Unlock() - if c.models == nil { - c.models = map[string]*openai.Client{} + if c.modelToProvider == nil { + c.modelToProvider = map[string]string{} } - c.models[modelString] = client + c.modelToProvider[modelString] = providerName return true, nil } @@ 
-141,24 +145,11 @@ func (c *Client) clientFromURL(ctx context.Context, apiURL string) (*openai.Clie } func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, error) { - c.clientsLock.Lock() - defer c.clientsLock.Unlock() - - client, ok := c.clients[toolName] - if ok { - return client, nil - } - - if c.clients == nil { - c.clients = make(map[string]*openai.Client) - } - if isHTTPURL(toolName) { remoteClient, err := c.clientFromURL(ctx, toolName) if err != nil { return nil, err } - c.clients[toolName] = remoteClient return remoteClient, nil } @@ -174,14 +165,8 @@ func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, err return nil, err } - if strings.HasSuffix(url, "/") { - url += "v1" - } else { - url += "/v1" - } - - client, err = openai.NewClient(ctx, c.credStore, openai.Options{ - BaseURL: url, + client, err := openai.NewClient(ctx, c.credStore, openai.Options{ + BaseURL: strings.TrimSuffix(url, "/") + "/v1", Cache: c.cache, CacheKey: prg.EntryToolID, }) @@ -189,7 +174,6 @@ func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, err return nil, err } - c.clients[toolName] = client return client, nil } From 8fc57e02c84adf3a1f6e68cd9d7bfd7d9023023a Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 7 Aug 2024 11:19:58 -0400 Subject: [PATCH 078/270] feat: allow disabling of the cache when parsing tools Signed-off-by: Donnie Adams --- Makefile | 6 ++++-- pkg/cli/gptscript.go | 2 +- pkg/cli/parse.go | 3 ++- pkg/input/input.go | 4 ++-- pkg/loader/url.go | 6 ++++-- pkg/sdkserver/routes.go | 2 +- pkg/sdkserver/types.go | 3 ++- 7 files changed, 16 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index e0d93a1d..5b1b6309 100644 --- a/Makefile +++ b/Makefile @@ -52,12 +52,14 @@ init-docs: docker run --rm --workdir=/docs -v $${PWD}/docs:/docs node:18-buster yarn install # Ensure docs build without errors. Makes sure generated docs are in-sync with CLI. 
-validate-docs: +validate-docs: gen-docs docker run --rm --workdir=/docs -v $${PWD}/docs:/docs node:18-buster yarn build - go run tools/gendocs/main.go if [ -n "$$(git status --porcelain --untracked-files=no)" ]; then \ git status --porcelain --untracked-files=no; \ echo "Encountered dirty repo!"; \ git diff; \ exit 1 \ ;fi + +gen-docs: + go run tools/gendocs/main.go \ No newline at end of file diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 4458d87b..536e339b 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -83,7 +83,7 @@ func New() *cobra.Command { root, &Eval{gptscript: root}, &Credential{root: root}, - &Parse{}, + &Parse{gptscript: root}, &Fmt{}, &Getenv{}, &SDKServer{ diff --git a/pkg/cli/parse.go b/pkg/cli/parse.go index 8599bd97..081116c1 100644 --- a/pkg/cli/parse.go +++ b/pkg/cli/parse.go @@ -12,6 +12,7 @@ import ( type Parse struct { PrettyPrint bool `usage:"Indent the json output" short:"p"` + gptscript *GPTScript } func (e *Parse) Customize(cmd *cobra.Command) { @@ -26,7 +27,7 @@ func locationName(l string) string { } func (e *Parse) Run(_ *cobra.Command, args []string) error { - content, err := input.FromLocation(args[0]) + content, err := input.FromLocation(args[0], e.gptscript.DisableCache) if err != nil { return err } diff --git a/pkg/input/input.go b/pkg/input/input.go index 3d480431..f930753f 100644 --- a/pkg/input/input.go +++ b/pkg/input/input.go @@ -55,13 +55,13 @@ func FromFile(file string) (string, error) { } // FromLocation takes a string that can be a file path or a URL to a file and returns the content of that file. -func FromLocation(s string) (string, error) { +func FromLocation(s string, disableCache bool) (string, error) { // Attempt to read the file first, if that fails, try to load the URL. Finally, // return an error if both fail. 
content, err := FromFile(s) if err != nil { log.Debugf("failed to read file %s (due to %v) attempting to load the URL...", s, err) - content, err = loader.ContentFromURL(s) + content, err = loader.ContentFromURL(s, disableCache) if err != nil { return "", err } diff --git a/pkg/loader/url.go b/pkg/loader/url.go index 41400790..72970546 100644 --- a/pkg/loader/url.go +++ b/pkg/loader/url.go @@ -207,8 +207,10 @@ func getWithDefaults(req *http.Request) ([]byte, string, error) { panic("unreachable") } -func ContentFromURL(url string) (string, error) { - cache, err := cache.New() +func ContentFromURL(url string, disableCache bool) (string, error) { + cache, err := cache.New(cache.Options{ + DisableCache: disableCache, + }) if err != nil { return "", fmt.Errorf("failed to create cache: %w", err) } diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 2e709e3f..c0d7a41b 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -227,7 +227,7 @@ func (s *server) parse(w http.ResponseWriter, r *http.Request) { if reqObject.Content != "" { out, err = parser.Parse(strings.NewReader(reqObject.Content), reqObject.Options) } else { - content, loadErr := input.FromLocation(reqObject.File) + content, loadErr := input.FromLocation(reqObject.File, reqObject.DisableCache) if loadErr != nil { logger.Errorf(loadErr.Error()) writeError(logger, w, http.StatusInternalServerError, loadErr) diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 9736f045..ade035b2 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -86,7 +86,8 @@ type parseRequest struct { parser.Options `json:",inline"` content `json:",inline"` - File string `json:"file"` + DisableCache bool `json:"disableCache"` + File string `json:"file"` } type modelsRequest struct { From dc4dcfce04772d5dae360b5602d3670e9cc5217d Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 7 Aug 2024 12:37:51 -0400 Subject: [PATCH 079/270] fix: always set GPTSCRIPT_CREDENTIAL_EXPIRATION 
env var when credentials are used (#727) Signed-off-by: Grant Linville --- pkg/runner/runner.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index c2137bea..a8d88fee 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -968,19 +968,19 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } else { log.Warnf("Not saving credential for tool %s - credentials will only be saved for tools from GitHub, or tools that use aliases.", toolName) } + } - if c.ExpiresAt != nil && (nearestExpiration == nil || nearestExpiration.After(*c.ExpiresAt)) { - nearestExpiration = c.ExpiresAt - } + if c.ExpiresAt != nil && (nearestExpiration == nil || nearestExpiration.After(*c.ExpiresAt)) { + nearestExpiration = c.ExpiresAt } for k, v := range c.Env { env = append(env, fmt.Sprintf("%s=%s", k, v)) } + } - if nearestExpiration != nil { - env = append(env, fmt.Sprintf("%s=%s", credentials.CredentialExpiration, nearestExpiration.Format(time.RFC3339))) - } + if nearestExpiration != nil { + env = append(env, fmt.Sprintf("%s=%s", credentials.CredentialExpiration, nearestExpiration.Format(time.RFC3339))) } return env, nil From 40cc9785f3e19e7998e3f4fa001c246372917064 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 7 Aug 2024 09:43:54 -0700 Subject: [PATCH 080/270] bug: change quoting behavior When reading the #! interpreter line variables in quotes "${FOO}" will be evaluated and passed as a single argument to the command. Unquoted variables like ${FOO} may result in multiple argument. 
For example, if FOO="Hello World" that will result in two arguments "Hello" and "World", where "${FOO}" will result in one argument "Hello World" --- pkg/engine/cmd.go | 93 ++++++++++++++++++++++++++++ pkg/engine/cmd_test.go | 135 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 228 insertions(+) create mode 100644 pkg/engine/cmd_test.go diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 57dfd477..311c743a 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -123,6 +123,9 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate } cmd, stop, err := e.newCommand(ctx.Ctx, extraEnv, tool, input) if err != nil { + if toolCategory == NoCategory { + return fmt.Sprintf("ERROR: got (%v) while parsing command", err), nil + } return "", err } defer stop() @@ -268,6 +271,12 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T }) } + // After we determined the interpreter we again interpret the args by env vars + args, err = replaceVariablesForInterpreter(interpreter, envMap) + if err != nil { + return nil, nil, err + } + if runtime.GOOS == "windows" && (args[0] == "/bin/bash" || args[0] == "/bin/sh") { args[0] = path.Base(args[0]) } @@ -314,3 +323,87 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T cmd.Env = compressEnv(envvars) return cmd, stop, nil } + +func replaceVariablesForInterpreter(interpreter string, envMap map[string]string) ([]string, error) { + var parts []string + for i, part := range splitByQuotes(interpreter) { + if i%2 == 0 { + part = os.Expand(part, func(s string) string { + return envMap[s] + }) + // We protect newly resolved env vars from getting replaced when we do the second Expand + // after shlex. Yeah, crazy. I'm guessing this isn't secure, but just trying to avoid a foot gun. 
+ part = os.Expand(part, func(s string) string { + return "${__" + s + "}" + }) + } + parts = append(parts, part) + } + + parts, err := shlex.Split(strings.Join(parts, "")) + if err != nil { + return nil, err + } + + for i, part := range parts { + parts[i] = os.Expand(part, func(s string) string { + if strings.HasPrefix(s, "__") { + return "${" + s[2:] + "}" + } + return envMap[s] + }) + } + + return parts, nil +} + +// splitByQuotes will split a string by parsing matching double quotes (with \ as the escape character). +// The return value conforms to the following properties +// 1. s == strings.Join(result, "") +// 2. Even indexes are strings that were not in quotes. +// 3. Odd indexes are strings that were quoted. +// +// Example: s = `In a "quoted string" quotes can be escaped with \"` +// +// result = [`In a `, `"quoted string"`, ` quotes can be escaped with \"`] +func splitByQuotes(s string) (result []string) { + var ( + buf strings.Builder + inEscape, inQuote bool + ) + + for _, c := range s { + if inEscape { + buf.WriteRune(c) + inEscape = false + continue + } + + switch c { + case '"': + if inQuote { + buf.WriteRune(c) + } + result = append(result, buf.String()) + buf.Reset() + if !inQuote { + buf.WriteRune(c) + } + inQuote = !inQuote + case '\\': + inEscape = true + buf.WriteRune(c) + default: + buf.WriteRune(c) + } + } + + if buf.Len() > 0 { + if inQuote { + result = append(result, "") + } + result = append(result, buf.String()) + } + + return +} diff --git a/pkg/engine/cmd_test.go b/pkg/engine/cmd_test.go new file mode 100644 index 00000000..15f72036 --- /dev/null +++ b/pkg/engine/cmd_test.go @@ -0,0 +1,135 @@ +// File: cmd_test.go +package engine + +import "testing" + +func TestSplitByQuotes(t *testing.T) { + tests := []struct { + name string + input string + expected []string + }{ + { + name: "NoQuotes", + input: "Hello World", + expected: []string{"Hello World"}, + }, + { + name: "ValidQuote", + input: `"Hello" "World"`, + expected: []string{``, 
`"Hello"`, ` `, `"World"`}, + }, + { + name: "ValidQuoteWithEscape", + input: `"Hello\" World"`, + expected: []string{``, `"Hello\" World"`}, + }, + { + name: "Nothing", + input: "", + expected: []string{}, + }, + { + name: "SpaceInsideQuote", + input: `"Hello World"`, + expected: []string{``, `"Hello World"`}, + }, + { + name: "SingleChar", + input: "H", + expected: []string{"H"}, + }, + { + name: "SingleQuote", + input: `"Hello`, + expected: []string{``, ``, `"Hello`}, + }, + { + name: "ThreeQuotes", + input: `Test "Hello "World" End\"`, + expected: []string{`Test `, `"Hello "`, `World`, ``, `" End\"`}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := splitByQuotes(tt.input) + if !equal(got, tt.expected) { + t.Errorf("splitByQuotes() = %v, want %v", got, tt.expected) + } + }) + } +} + +// Helper function to assert equality of two string slices. +func equal(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +// Testing for replaceVariablesForInterpreter +func TestReplaceVariablesForInterpreter(t *testing.T) { + tests := []struct { + name string + interpreter string + envMap map[string]string + expected []string + shouldFail bool + }{ + { + name: "No quotes", + interpreter: "/bin/bash -c ${COMMAND} tail", + envMap: map[string]string{"COMMAND": "echo Hello!"}, + expected: []string{"/bin/bash", "-c", "echo", "Hello!", "tail"}, + }, + { + name: "Quotes Variables", + interpreter: `/bin/bash -c "${COMMAND}" tail`, + envMap: map[string]string{"COMMAND": "Hello, World!"}, + expected: []string{"/bin/bash", "-c", "Hello, World!", "tail"}, + }, + { + name: "Double escape", + interpreter: `/bin/bash -c "${COMMAND}" ${TWO} tail`, + envMap: map[string]string{ + "COMMAND": "Hello, World!", + "TWO": "${COMMAND}", + }, + expected: []string{"/bin/bash", "-c", "Hello, World!", "${COMMAND}", "tail"}, + }, + { + name: "aws cli issue", + interpreter: 
"aws ${ARGS}", + envMap: map[string]string{ + "ARGS": `ec2 describe-instances --region us-east-1 --query 'Reservations[*].Instances[*].{Instance:InstanceId,State:State.Name}'`, + }, + expected: []string{ + `aws`, + `ec2`, + `describe-instances`, + `--region`, `us-east-1`, + `--query`, `Reservations[*].Instances[*].{Instance:InstanceId,State:State.Name}`, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := replaceVariablesForInterpreter(tt.interpreter, tt.envMap) + if (err != nil) != tt.shouldFail { + t.Errorf("replaceVariablesForInterpreter() error = %v, want %v", err, tt.shouldFail) + return + } + if !equal(got, tt.expected) { + t.Errorf("replaceVariablesForInterpreter() = %v, want %v", got, tt.expected) + } + }) + } +} From ab1768a2177282a1a9b4bd4f2b62066ce42c2087 Mon Sep 17 00:00:00 2001 From: Daishan Peng Date: Wed, 7 Aug 2024 11:20:38 -0700 Subject: [PATCH 081/270] Fix: Fix query not added if there are multple entries Signed-off-by: Daishan Peng --- pkg/openapi/run.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/openapi/run.go b/pkg/openapi/run.go index 17199851..2efc2309 100644 --- a/pkg/openapi/run.go +++ b/pkg/openapi/run.go @@ -177,6 +177,7 @@ func HandleAuths(req *http.Request, envMap map[string]string, infoSets [][]Secur // We're using this info set, because no environment variables were missing. // Set up the request as needed. 
+ v := url.Values{} for _, info := range infoSet { envNames := maps.Values(info.getCredentialNamesAndEnvVars(req.URL.Hostname())) switch info.Type { @@ -185,9 +186,7 @@ func HandleAuths(req *http.Request, envMap map[string]string, infoSets [][]Secur case "header": req.Header.Set(info.APIKeyName, envMap[envNames[0]]) case "query": - v := url.Values{} v.Add(info.APIKeyName, envMap[envNames[0]]) - req.URL.RawQuery = v.Encode() case "cookie": req.AddCookie(&http.Cookie{ Name: info.APIKeyName, @@ -203,6 +202,9 @@ func HandleAuths(req *http.Request, envMap map[string]string, infoSets [][]Secur } } } + if len(v) > 0 { + req.URL.RawQuery = v.Encode() + } return nil } From bb2340bd53e483c7cb9c2ae2b8f6c4919a7fcd5c Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 7 Aug 2024 16:14:24 -0400 Subject: [PATCH 082/270] fix: make vars capital in smoke tests A recent change in gptscript got rid of the lowercase env vars because they don't work on Windows. This change updates the smoke tests. Signed-off-by: Donnie Adams --- pkg/tests/smoke/testdata/Bob/test.gpt | 2 +- pkg/tests/smoke/testdata/BobAsShell/test.gpt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/tests/smoke/testdata/Bob/test.gpt b/pkg/tests/smoke/testdata/Bob/test.gpt index fe8ffb62..20f533e2 100644 --- a/pkg/tests/smoke/testdata/Bob/test.gpt +++ b/pkg/tests/smoke/testdata/Bob/test.gpt @@ -7,4 +7,4 @@ name: bob description: I'm Bob, a friendly guy. args: question: The question to ask Bob. -When asked how I am doing, respond with exactly "Thanks for asking "${question}", I'm doing great fellow friendly AI tool!" +When asked how I am doing, respond with exactly "Thanks for asking "${QUESTION}", I'm doing great fellow friendly AI tool!" 
diff --git a/pkg/tests/smoke/testdata/BobAsShell/test.gpt b/pkg/tests/smoke/testdata/BobAsShell/test.gpt index f04920bf..a0edb9c4 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/test.gpt +++ b/pkg/tests/smoke/testdata/BobAsShell/test.gpt @@ -10,4 +10,4 @@ args: question: The question to ask Bob. #!/bin/bash -echo "Thanks for asking ${question}, I'm doing great fellow friendly AI tool!" +echo "Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!" From 04f42cdc4b1aacf82241eb62b1e545bba014269c Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Wed, 7 Aug 2024 23:25:33 -0400 Subject: [PATCH 083/270] chore: pin ui tool version to v0.9.4 Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/cli/gptscript.go | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 536e339b..d7221715 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -386,7 +386,9 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // The UI must run in daemon mode. r.Daemon = true // Use the UI tool as the first argument. - args = append([]string{uiTool()}, args...) + args = append([]string{ + env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui@v0.9.4"), + }, args...) } ctx := cmd.Context() @@ -503,15 +505,3 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { return r.PrintOutput(toolInput, s) } - -// uiTool returns the versioned UI tool reference for the current GPTScript version. -// For release versions, a reference with a matching release tag is returned. -// For all other versions, a reference to main is returned. 
-func uiTool() string { - ref := "github.com/gptscript-ai/ui" - if tag := version.Tag; !strings.Contains(tag, "v0.0.0-dev") { - ref = fmt.Sprintf("%s@%s", ref, tag) - } - - return env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", ref) -} From 26476d27a34513b123301ba382e07372d1bc6a18 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 8 Aug 2024 12:43:50 -0700 Subject: [PATCH 084/270] chore: move metadata field to tooldef struct --- pkg/types/tool.go | 21 ++++++++++++++++++--- pkg/types/tool_test.go | 16 ++++++++++++++++ 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 61a67fa4..bb49e6f1 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -157,8 +157,9 @@ func (p Parameters) ToolRefNames() []string { type ToolDef struct { Parameters `json:",inline"` - Instructions string `json:"instructions,omitempty"` - BuiltinFunc BuiltinFunc `json:"-"` + Instructions string `json:"instructions,omitempty"` + BuiltinFunc BuiltinFunc `json:"-"` + MetaData map[string]string `json:"metaData,omitempty"` } type Tool struct { @@ -166,7 +167,6 @@ type Tool struct { ID string `json:"id,omitempty"` ToolMapping map[string][]ToolReference `json:"toolMapping,omitempty"` - MetaData map[string]string `json:"metaData,omitempty"` LocalTools map[string]string `json:"localTools,omitempty"` Source ToolSource `json:"source,omitempty"` WorkingDir string `json:"workingDir,omitempty"` @@ -489,6 +489,21 @@ func (t ToolDef) String() string { _, _ = fmt.Fprintln(buf, t.Instructions) } + if t.Name != "" { + keys := maps.Keys(t.MetaData) + sort.Strings(keys) + for _, key := range keys { + buf.WriteString("---\n") + buf.WriteString("!metadata:") + buf.WriteString(t.Name) + buf.WriteString(":") + buf.WriteString(key) + buf.WriteString("\n") + buf.WriteString(t.MetaData[key]) + buf.WriteString("\n") + } + } + return buf.String() } diff --git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index a47014a1..e95c2248 100644 --- a/pkg/types/tool_test.go 
+++ b/pkg/types/tool_test.go @@ -36,6 +36,13 @@ func TestToolDef_String(t *testing.T) { ExportCredentials: []string{"ExportCredential1", "ExportCredential2"}, Type: ToolTypeContext, }, + MetaData: map[string]string{ + "package.json": `{ +// blah blah some ugly JSON +} +`, + "requirements.txt": `requests=5`, + }, Instructions: "This is a sample instruction", } @@ -68,6 +75,15 @@ Share Credential: ExportCredential2 Chat: true This is a sample instruction +--- +!metadata:Tool Sample:package.json +{ +// blah blah some ugly JSON +} + +--- +!metadata:Tool Sample:requirements.txt +requests=5 `).Equal(t, tool.String()) } From 227d8530ad3a4490145a61eea9eeeefb6043f625 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Thu, 8 Aug 2024 21:13:25 -0400 Subject: [PATCH 085/270] chore: remove ui dispatch from release workflow The UI must be updated independently from now on for electron releases to work properly. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .github/workflows/release.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 4c1c22bf..f710e953 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,13 +12,6 @@ jobs: release-tag: runs-on: ubuntu-22.04 steps: - - name: trigger ui repo tag workflow - uses: peter-evans/repository-dispatch@v3 - with: - token: ${{ secrets.DISPATCH_PAT }} - repository: gptscript-ai/ui - event-type: release - client-payload: '{"tag": "${{ github.ref_name }}"}' - name: Checkout uses: actions/checkout@v4 with: From 2e51afe0ac4ee2875c7693c1d8d5c439e29abf59 Mon Sep 17 00:00:00 2001 From: John Engelman Date: Mon, 24 Jun 2024 13:57:26 +0530 Subject: [PATCH 086/270] feat: Add support for tools from github enterprise. 
--- .../04-command-line-reference/gptscript.md | 65 +++++++------- pkg/cli/gptscript.go | 44 ++++++---- pkg/loader/github/github.go | 87 ++++++++++++++----- pkg/loader/github/github_test.go | 57 ++++++++++++ 4 files changed, 178 insertions(+), 75 deletions(-) diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md index de29a97f..b7de5e86 100644 --- a/docs/docs/04-command-line-reference/gptscript.md +++ b/docs/docs/04-command-line-reference/gptscript.md @@ -12,38 +12,39 @@ gptscript [flags] PROGRAM_FILE [INPUT...] ### Options ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - --chat-state string The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --disable-tui Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI) - --dump-state string Dump the internal execution state to 
a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - --force-chat Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT) - --force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL) - -h, --help help for gptscript - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS) - --list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --save-chat-state-file string A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE) - --sub-tool string Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL) - --ui Launch the UI ($GPTSCRIPT_UI) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + --chat-state string The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt 
before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --disable-tui Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + --force-chat Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT) + --force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL) + --github-enterprise-hostname string The host name for a Github Enterprise instance to enable for remote loading ($GPTSCRIPT_GITHUB_ENTERPRISE_HOSTNAME) + -h, --help help for gptscript + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS) + --list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --save-chat-state-file string A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE) + --sub-tool string Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL) + --ui Launch the UI ($GPTSCRIPT_UI) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 4458d87b..aafdeacf 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -23,6 +23,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/gptscript" "github.com/gptscript-ai/gptscript/pkg/input" "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/loader/github" "github.com/gptscript-ai/gptscript/pkg/monitor" 
"github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/openai" @@ -54,25 +55,26 @@ type GPTScript struct { Output string `usage:"Save output to a file, or - for stdout" short:"o"` EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\\\.\\pipe\\my-pipe)" name:"events-stream-to"` // Input should not be using GPTSCRIPT_INPUT env var because that is the same value that is set in tool executions - Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"` - SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` - Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` - ListModels bool `usage:"List the models available and exit" local:"true"` - ListTools bool `usage:"List built-in tools and exit" local:"true"` - ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"` - Chdir string `usage:"Change current working directory" short:"C"` - Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"` - Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"` - CredentialContext string `usage:"Context name in which to store credentials" default:"default"` - CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"` - ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"` - ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"` - ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"` - Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"` - UI bool `usage:"Launch the UI" 
local:"true" name:"ui"` - DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"` - SaveChatStateFile string `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"` - DefaultModelProvider string `usage:"Default LLM model provider to use, this will override OpenAI settings"` + Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"` + SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` + Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` + ListModels bool `usage:"List the models available and exit" local:"true"` + ListTools bool `usage:"List built-in tools and exit" local:"true"` + ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"` + Chdir string `usage:"Change current working directory" short:"C"` + Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"` + Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"` + CredentialContext string `usage:"Context name in which to store credentials" default:"default"` + CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"` + ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"` + ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"` + ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"` + Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"` + UI bool `usage:"Launch the UI" local:"true" name:"ui"` + DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"` + 
SaveChatStateFile string `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"` + DefaultModelProvider string `usage:"Default LLM model provider to use, this will override OpenAI settings"` + GithubEnterpriseHostname string `usage:"The host name for a Github Enterprise instance to enable for remote loading" local:"true"` readData []byte } @@ -334,6 +336,10 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { return err } + if r.GithubEnterpriseHostname != "" { + loader.AddVSC(github.LoaderForPrefix(r.GithubEnterpriseHostname)) + } + // If the user is trying to launch the chat-builder UI, then set up the tool and options here. if r.UI { if os.Getenv(system.BinEnvVar) == "" { diff --git a/pkg/loader/github/github.go b/pkg/loader/github/github.go index 2fb01c3d..7b6e79ec 100644 --- a/pkg/loader/github/github.go +++ b/pkg/loader/github/github.go @@ -2,6 +2,7 @@ package github import ( "context" + "crypto/tls" "encoding/json" "fmt" "io" @@ -18,52 +19,63 @@ import ( "github.com/gptscript-ai/gptscript/pkg/types" ) -const ( - GithubPrefix = "github.com/" - githubRepoURL = "https://github.com/%s/%s.git" - githubDownloadURL = "https://raw.githubusercontent.com/%s/%s/%s/%s" - githubCommitURL = "https://api.github.com/repos/%s/%s/commits/%s" -) +type Config struct { + Prefix string + RepoURL string + DownloadURL string + CommitURL string + AuthToken string +} var ( - githubAuthToken = os.Getenv("GITHUB_AUTH_TOKEN") - log = mvl.Package() + log = mvl.Package() + defaultGithubConfig = &Config{ + Prefix: "github.com/", + RepoURL: "https://github.com/%s/%s.git", + DownloadURL: "https://raw.githubusercontent.com/%s/%s/%s/%s", + CommitURL: "https://api.github.com/repos/%s/%s/commits/%s", + AuthToken: os.Getenv("GITHUB_AUTH_TOKEN"), + } ) func init() { loader.AddVSC(Load) } -func getCommitLsRemote(ctx context.Context, account, repo, ref string) (string, error) { - url := fmt.Sprintf(githubRepoURL, 
account, repo) +func getCommitLsRemote(ctx context.Context, account, repo, ref string, config *Config) (string, error) { + url := fmt.Sprintf(config.RepoURL, account, repo) return git.LsRemote(ctx, url, ref) } // regexp to match a git commit id var commitRegexp = regexp.MustCompile("^[a-f0-9]{40}$") -func getCommit(ctx context.Context, account, repo, ref string) (string, error) { +func getCommit(ctx context.Context, account, repo, ref string, config *Config) (string, error) { if commitRegexp.MatchString(ref) { return ref, nil } - url := fmt.Sprintf(githubCommitURL, account, repo, ref) + url := fmt.Sprintf(config.CommitURL, account, repo, ref) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return "", fmt.Errorf("failed to create request of %s/%s at %s: %w", account, repo, url, err) } - if githubAuthToken != "" { - req.Header.Add("Authorization", "Bearer "+githubAuthToken) + if config.AuthToken != "" { + req.Header.Add("Authorization", "Bearer "+config.AuthToken) } - resp, err := http.DefaultClient.Do(req) + client := http.DefaultClient + if req.Host == config.Prefix && strings.ToLower(os.Getenv("GH_ENTERPRISE_SKIP_VERIFY")) == "true" { + client = &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}} + } + resp, err := client.Do(req) if err != nil { return "", err } else if resp.StatusCode != http.StatusOK { c, _ := io.ReadAll(resp.Body) resp.Body.Close() - commit, fallBackErr := getCommitLsRemote(ctx, account, repo, ref) + commit, fallBackErr := getCommitLsRemote(ctx, account, repo, ref, config) if fallBackErr == nil { return commit, nil } @@ -88,8 +100,28 @@ func getCommit(ctx context.Context, account, repo, ref string) (string, error) { return commit.SHA, nil } -func Load(ctx context.Context, _ *cache.Client, urlName string) (string, string, *types.Repo, bool, error) { - if !strings.HasPrefix(urlName, GithubPrefix) { +func LoaderForPrefix(prefix string) func(context.Context, 
*cache.Client, string) (string, string, *types.Repo, bool, error) { + return func(ctx context.Context, c *cache.Client, urlName string) (string, string, *types.Repo, bool, error) { + return LoadWithConfig(ctx, c, urlName, NewGithubEnterpriseConfig(prefix)) + } +} + +func Load(ctx context.Context, c *cache.Client, urlName string) (string, string, *types.Repo, bool, error) { + return LoadWithConfig(ctx, c, urlName, defaultGithubConfig) +} + +func NewGithubEnterpriseConfig(prefix string) *Config { + return &Config{ + Prefix: prefix, + RepoURL: fmt.Sprintf("https://%s/%%s/%%s.git", prefix), + DownloadURL: fmt.Sprintf("https://raw.%s/%%s/%%s/%%s/%%s", prefix), + CommitURL: fmt.Sprintf("https://%s/api/v3/repos/%%s/%%s/commits/%%s", prefix), + AuthToken: os.Getenv("GH_ENTERPRISE_TOKEN"), + } +} + +func LoadWithConfig(ctx context.Context, _ *cache.Client, urlName string, config *Config) (string, string, *types.Repo, bool, error) { + if !strings.HasPrefix(urlName, config.Prefix) { return "", "", nil, false, nil } @@ -107,12 +139,12 @@ func Load(ctx context.Context, _ *cache.Client, urlName string) (string, string, account, repo := parts[1], parts[2] path := strings.Join(parts[3:], "/") - ref, err := getCommit(ctx, account, repo, ref) + ref, err := getCommit(ctx, account, repo, ref, config) if err != nil { return "", "", nil, false, err } - downloadURL := fmt.Sprintf(githubDownloadURL, account, repo, ref, path) + downloadURL := fmt.Sprintf(config.DownloadURL, account, repo, ref, path) if path == "" || path == "/" || !strings.Contains(parts[len(parts)-1], ".") { var ( testPath string @@ -124,13 +156,20 @@ func Load(ctx context.Context, _ *cache.Client, urlName string) (string, string, } else { testPath = path + "/" + ext } - testURL = fmt.Sprintf(githubDownloadURL, account, repo, ref, testPath) + testURL = fmt.Sprintf(config.DownloadURL, account, repo, ref, testPath) if i == len(types.DefaultFiles)-1 { // no reason to test the last one, we are just going to use it. 
Being that the default list is only // two elements this loop could have been one check, but hey over-engineered code ftw. break } - if resp, err := http.Head(testURL); err == nil { + headReq, err := http.NewRequest("HEAD", testURL, nil) + if err != nil { + break + } + if config.AuthToken != "" { + headReq.Header.Add("Authorization", "Bearer "+config.AuthToken) + } + if resp, err := http.DefaultClient.Do(headReq); err == nil { _ = resp.Body.Close() if resp.StatusCode == 200 { break @@ -141,9 +180,9 @@ func Load(ctx context.Context, _ *cache.Client, urlName string) (string, string, path = testPath } - return downloadURL, githubAuthToken, &types.Repo{ + return downloadURL, config.AuthToken, &types.Repo{ VCS: "git", - Root: fmt.Sprintf(githubRepoURL, account, repo), + Root: fmt.Sprintf(config.RepoURL, account, repo), Path: gpath.Dir(path), Name: gpath.Base(path), Revision: ref, diff --git a/pkg/loader/github/github_test.go b/pkg/loader/github/github_test.go index d627ee5e..483722bc 100644 --- a/pkg/loader/github/github_test.go +++ b/pkg/loader/github/github_test.go @@ -2,6 +2,10 @@ package github import ( "context" + "fmt" + "net/http" + "net/http/httptest" + "os" "testing" "github.com/gptscript-ai/gptscript/pkg/types" @@ -44,3 +48,56 @@ func TestLoad(t *testing.T) { Revision: "172dfb00b48c6adbbaa7e99270933f95887d1b91", }).Equal(t, repo) } + +func TestLoad_GithubEnterprise(t *testing.T) { + gheToken := "mytoken" + os.Setenv("GH_ENTERPRISE_SKIP_VERIFY", "true") + os.Setenv("GH_ENTERPRISE_TOKEN", gheToken) + s := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v3/repos/gptscript-ai/gptscript/commits/172dfb0": + _, _ = w.Write([]byte(`{"sha": "172dfb00b48c6adbbaa7e99270933f95887d1b91"}`)) + default: + w.WriteHeader(404) + } + })) + defer s.Close() + + serverAddr := s.Listener.Addr().String() + + url, token, repo, ok, err := LoadWithConfig(context.Background(), nil, 
fmt.Sprintf("%s/gptscript-ai/gptscript/pkg/loader/testdata/tool@172dfb0", serverAddr), NewGithubEnterpriseConfig(serverAddr)) + require.NoError(t, err) + assert.True(t, ok) + autogold.Expect(fmt.Sprintf("https://raw.%s/gptscript-ai/gptscript/172dfb00b48c6adbbaa7e99270933f95887d1b91/pkg/loader/testdata/tool/tool.gpt", serverAddr)).Equal(t, url) + autogold.Expect(&types.Repo{ + VCS: "git", Root: fmt.Sprintf("https://%s/gptscript-ai/gptscript.git", serverAddr), + Path: "pkg/loader/testdata/tool", + Name: "tool.gpt", + Revision: "172dfb00b48c6adbbaa7e99270933f95887d1b91", + }).Equal(t, repo) + autogold.Expect(gheToken).Equal(t, token) + + url, token, repo, ok, err = Load(context.Background(), nil, "github.com/gptscript-ai/gptscript/pkg/loader/testdata/agent@172dfb0") + require.NoError(t, err) + assert.True(t, ok) + autogold.Expect("https://raw.githubusercontent.com/gptscript-ai/gptscript/172dfb00b48c6adbbaa7e99270933f95887d1b91/pkg/loader/testdata/agent/agent.gpt").Equal(t, url) + autogold.Expect(&types.Repo{ + VCS: "git", Root: "https://github.com/gptscript-ai/gptscript.git", + Path: "pkg/loader/testdata/agent", + Name: "agent.gpt", + Revision: "172dfb00b48c6adbbaa7e99270933f95887d1b91", + }).Equal(t, repo) + autogold.Expect("").Equal(t, token) + + url, token, repo, ok, err = Load(context.Background(), nil, "github.com/gptscript-ai/gptscript/pkg/loader/testdata/bothtoolagent@172dfb0") + require.NoError(t, err) + assert.True(t, ok) + autogold.Expect("https://raw.githubusercontent.com/gptscript-ai/gptscript/172dfb00b48c6adbbaa7e99270933f95887d1b91/pkg/loader/testdata/bothtoolagent/agent.gpt").Equal(t, url) + autogold.Expect(&types.Repo{ + VCS: "git", Root: "https://github.com/gptscript-ai/gptscript.git", + Path: "pkg/loader/testdata/bothtoolagent", + Name: "agent.gpt", + Revision: "172dfb00b48c6adbbaa7e99270933f95887d1b91", + }).Equal(t, repo) + autogold.Expect("").Equal(t, token) +} From b8071a888f1bd376da00ffc911db5ec5fce89f2d Mon Sep 17 00:00:00 2001 From: Donnie 
Adams Date: Fri, 9 Aug 2024 21:45:38 -0400 Subject: [PATCH 087/270] fix: use the default model provider when listing models Signed-off-by: Donnie Adams --- pkg/cli/gptscript.go | 3 +++ pkg/sdkserver/routes.go | 4 ++++ pkg/server/server.go | 3 ++- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 4926c6fd..2d7e90d9 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -406,6 +406,9 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { defer gptScript.Close(true) if r.ListModels { + if r.DefaultModelProvider != "" { + args = append(args, r.DefaultModelProvider) + } return r.listModels(ctx, gptScript, args) } diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index c0d7a41b..98957624 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -127,6 +127,10 @@ func (s *server) listModels(w http.ResponseWriter, r *http.Request) { providers = reqObject.Providers } + if s.gptscriptOpts.DefaultModelProvider != "" { + providers = append(providers, s.gptscriptOpts.DefaultModelProvider) + } + out, err := s.client.ListModels(r.Context(), providers...) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to list models: %w", err)) diff --git a/pkg/server/server.go b/pkg/server/server.go index d1c57100..00734c38 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -24,5 +24,6 @@ func ContextWithNewRunID(ctx context.Context) context.Context { } func RunIDFromContext(ctx context.Context) string { - return ctx.Value(execKey{}).(string) + runID, _ := ctx.Value(execKey{}).(string) + return runID } From a383d4f513400b5af0ce69217373dafe72ef058a Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 9 Aug 2024 12:09:21 -0700 Subject: [PATCH 088/270] bug: fix more path issues on windows Stop evaluating env vars locally, but instead use the shell/cmd.exe. 
--- pkg/engine/cmd.go | 145 ++++++++++------------------------------- pkg/engine/cmd_test.go | 135 -------------------------------------- pkg/engine/daemon.go | 1 + 3 files changed, 37 insertions(+), 244 deletions(-) delete mode 100644 pkg/engine/cmd_test.go diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 311c743a..14b41183 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -11,7 +11,6 @@ import ( "os" "os/exec" "path" - "path/filepath" "runtime" "sort" "strings" @@ -121,7 +120,7 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate var extraEnv = []string{ strings.TrimSpace("GPTSCRIPT_CONTEXT=" + strings.Join(instructions, "\n")), } - cmd, stop, err := e.newCommand(ctx.Ctx, extraEnv, tool, input) + cmd, stop, err := e.newCommand(ctx.Ctx, extraEnv, tool, input, true) if err != nil { if toolCategory == NoCategory { return fmt.Sprintf("ERROR: got (%v) while parsing command", err), nil @@ -244,7 +243,11 @@ func appendInputAsEnv(env []string, input string) []string { return env } -func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.Tool, input string) (*exec.Cmd, func(), error) { +func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.Tool, input string, useShell bool) (*exec.Cmd, func(), error) { + if runtime.GOOS == "windows" { + useShell = false + } + envvars := append(e.Env[:], extraEnv...) 
envvars = appendInputAsEnv(envvars, input) if log.IsDebug() { @@ -254,9 +257,17 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T interpreter, rest, _ := strings.Cut(tool.Instructions, "\n") interpreter = strings.TrimSpace(interpreter)[2:] - args, err := shlex.Split(interpreter) - if err != nil { - return nil, nil, err + var ( + args []string + err error + ) + if useShell { + args = strings.Fields(interpreter) + } else { + args, err = shlex.Split(interpreter) + if err != nil { + return nil, nil, err + } } envvars, err = e.getRuntimeEnv(ctx, tool, args, envvars) @@ -265,17 +276,6 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T } envvars, envMap := envAsMapAndDeDup(envvars) - for i, arg := range args { - args[i] = os.Expand(arg, func(s string) string { - return envMap[s] - }) - } - - // After we determined the interpreter we again interpret the args by env vars - args, err = replaceVariablesForInterpreter(interpreter, envMap) - if err != nil { - return nil, nil, err - } if runtime.GOOS == "windows" && (args[0] == "/bin/bash" || args[0] == "/bin/sh") { args[0] = path.Base(args[0]) @@ -286,8 +286,7 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T } var ( - cmdArgs = args[1:] - stop = func() {} + stop = func() {} ) if strings.TrimSpace(rest) != "" { @@ -305,105 +304,33 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T stop() return nil, nil, err } - cmdArgs = append(cmdArgs, f.Name()) - } - - // This is a workaround for Windows, where the command interpreter is constructed with unix style paths - // It converts unix style paths to windows style paths - if runtime.GOOS == "windows" { - parts := strings.Split(args[0], "/") - if parts[len(parts)-1] == "gptscript-go-tool" { - parts[len(parts)-1] = "gptscript-go-tool.exe" - } - - args[0] = filepath.Join(parts...) 
+ args = append(args, f.Name()) } - cmd := exec.CommandContext(ctx, env.Lookup(envvars, args[0]), cmdArgs...) - cmd.Env = compressEnv(envvars) - return cmd, stop, nil -} - -func replaceVariablesForInterpreter(interpreter string, envMap map[string]string) ([]string, error) { - var parts []string - for i, part := range splitByQuotes(interpreter) { - if i%2 == 0 { - part = os.Expand(part, func(s string) string { + // Expand and/or normalize env references + for i, arg := range args { + args[i] = os.Expand(arg, func(s string) string { + if strings.HasPrefix(s, "!") { + return envMap[s[1:]] + } + if !useShell { return envMap[s] - }) - // We protect newly resolved env vars from getting replaced when we do the second Expand - // after shlex. Yeah, crazy. I'm guessing this isn't secure, but just trying to avoid a foot gun. - part = os.Expand(part, func(s string) string { - return "${__" + s + "}" - }) - } - parts = append(parts, part) - } - - parts, err := shlex.Split(strings.Join(parts, "")) - if err != nil { - return nil, err - } - - for i, part := range parts { - parts[i] = os.Expand(part, func(s string) string { - if strings.HasPrefix(s, "__") { - return "${" + s[2:] + "}" } - return envMap[s] + return "${" + s + "}" }) } - return parts, nil -} - -// splitByQuotes will split a string by parsing matching double quotes (with \ as the escape character). -// The return value conforms to the following properties -// 1. s == strings.Join(result, "") -// 2. Even indexes are strings that were not in quotes. -// 3. Odd indexes are strings that were quoted. 
-// -// Example: s = `In a "quoted string" quotes can be escaped with \"` -// -// result = [`In a `, `"quoted string"`, ` quotes can be escaped with \"`] -func splitByQuotes(s string) (result []string) { - var ( - buf strings.Builder - inEscape, inQuote bool - ) - - for _, c := range s { - if inEscape { - buf.WriteRune(c) - inEscape = false - continue - } - - switch c { - case '"': - if inQuote { - buf.WriteRune(c) - } - result = append(result, buf.String()) - buf.Reset() - if !inQuote { - buf.WriteRune(c) - } - inQuote = !inQuote - case '\\': - inEscape = true - buf.WriteRune(c) - default: - buf.WriteRune(c) - } + if runtime.GOOS == "windows" { + args[0] = strings.ReplaceAll(args[0], "/", "\\") } - if buf.Len() > 0 { - if inQuote { - result = append(result, "") - } - result = append(result, buf.String()) + if useShell { + args = append([]string{"/bin/sh", "-c"}, strings.Join(args, " ")) + } else { + args[0] = env.Lookup(envvars, args[0]) } - return + cmd := exec.CommandContext(ctx, args[0], args[1:]...) 
+ cmd.Env = compressEnv(envvars) + return cmd, stop, nil } diff --git a/pkg/engine/cmd_test.go b/pkg/engine/cmd_test.go deleted file mode 100644 index 15f72036..00000000 --- a/pkg/engine/cmd_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// File: cmd_test.go -package engine - -import "testing" - -func TestSplitByQuotes(t *testing.T) { - tests := []struct { - name string - input string - expected []string - }{ - { - name: "NoQuotes", - input: "Hello World", - expected: []string{"Hello World"}, - }, - { - name: "ValidQuote", - input: `"Hello" "World"`, - expected: []string{``, `"Hello"`, ` `, `"World"`}, - }, - { - name: "ValidQuoteWithEscape", - input: `"Hello\" World"`, - expected: []string{``, `"Hello\" World"`}, - }, - { - name: "Nothing", - input: "", - expected: []string{}, - }, - { - name: "SpaceInsideQuote", - input: `"Hello World"`, - expected: []string{``, `"Hello World"`}, - }, - { - name: "SingleChar", - input: "H", - expected: []string{"H"}, - }, - { - name: "SingleQuote", - input: `"Hello`, - expected: []string{``, ``, `"Hello`}, - }, - { - name: "ThreeQuotes", - input: `Test "Hello "World" End\"`, - expected: []string{`Test `, `"Hello "`, `World`, ``, `" End\"`}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := splitByQuotes(tt.input) - if !equal(got, tt.expected) { - t.Errorf("splitByQuotes() = %v, want %v", got, tt.expected) - } - }) - } -} - -// Helper function to assert equality of two string slices. 
-func equal(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} - -// Testing for replaceVariablesForInterpreter -func TestReplaceVariablesForInterpreter(t *testing.T) { - tests := []struct { - name string - interpreter string - envMap map[string]string - expected []string - shouldFail bool - }{ - { - name: "No quotes", - interpreter: "/bin/bash -c ${COMMAND} tail", - envMap: map[string]string{"COMMAND": "echo Hello!"}, - expected: []string{"/bin/bash", "-c", "echo", "Hello!", "tail"}, - }, - { - name: "Quotes Variables", - interpreter: `/bin/bash -c "${COMMAND}" tail`, - envMap: map[string]string{"COMMAND": "Hello, World!"}, - expected: []string{"/bin/bash", "-c", "Hello, World!", "tail"}, - }, - { - name: "Double escape", - interpreter: `/bin/bash -c "${COMMAND}" ${TWO} tail`, - envMap: map[string]string{ - "COMMAND": "Hello, World!", - "TWO": "${COMMAND}", - }, - expected: []string{"/bin/bash", "-c", "Hello, World!", "${COMMAND}", "tail"}, - }, - { - name: "aws cli issue", - interpreter: "aws ${ARGS}", - envMap: map[string]string{ - "ARGS": `ec2 describe-instances --region us-east-1 --query 'Reservations[*].Instances[*].{Instance:InstanceId,State:State.Name}'`, - }, - expected: []string{ - `aws`, - `ec2`, - `describe-instances`, - `--region`, `us-east-1`, - `--query`, `Reservations[*].Instances[*].{Instance:InstanceId,State:State.Name}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := replaceVariablesForInterpreter(tt.interpreter, tt.envMap) - if (err != nil) != tt.shouldFail { - t.Errorf("replaceVariablesForInterpreter() error = %v, want %v", err, tt.shouldFail) - return - } - if !equal(got, tt.expected) { - t.Errorf("replaceVariablesForInterpreter() = %v, want %v", got, tt.expected) - } - }) - } -} diff --git a/pkg/engine/daemon.go b/pkg/engine/daemon.go index 4cdab995..113aa1ba 100644 --- a/pkg/engine/daemon.go +++ 
b/pkg/engine/daemon.go @@ -133,6 +133,7 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { }, tool, "{}", + false, ) if err != nil { return url, err From cb46358ec8b92cab677856f4592bbe2010563ab8 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 9 Aug 2024 11:44:39 -0700 Subject: [PATCH 089/270] feat: manage node/python runtime for local files in dev --- pkg/repos/get.go | 18 ++- pkg/repos/runtimes/busybox/busybox.go | 4 + pkg/repos/runtimes/golang/golang.go | 4 + pkg/repos/runtimes/node/node.go | 26 +++- pkg/repos/runtimes/python/python.go | 38 +++-- pkg/tests/runner_test.go | 23 +++ .../TestRuntimesLocalDev/call1-resp.golden | 16 +++ .../TestRuntimesLocalDev/call1.golden | 37 +++++ .../TestRuntimesLocalDev/call2-resp.golden | 16 +++ .../TestRuntimesLocalDev/call2.golden | 70 +++++++++ .../TestRuntimesLocalDev/call3-resp.golden | 16 +++ .../TestRuntimesLocalDev/call3.golden | 103 +++++++++++++ .../TestRuntimesLocalDev/call4-resp.golden | 9 ++ .../TestRuntimesLocalDev/call4.golden | 136 ++++++++++++++++++ .../TestRuntimesLocalDev/package.json | 15 ++ .../TestRuntimesLocalDev/requirements.txt | 1 + .../testdata/TestRuntimesLocalDev/test.gpt | 34 +++++ 17 files changed, 552 insertions(+), 14 deletions(-) create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/call1-resp.golden create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/call1.golden create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/call2-resp.golden create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/call2.golden create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/call3-resp.golden create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/call3.golden create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/call4-resp.golden create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/call4.golden create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/package.json create mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/requirements.txt create 
mode 100644 pkg/tests/testdata/TestRuntimesLocalDev/test.gpt diff --git a/pkg/repos/get.go b/pkg/repos/get.go index fc675c58..b43bc63b 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -28,6 +28,7 @@ type Runtime interface { ID() string Supports(tool types.Tool, cmd []string) bool Setup(ctx context.Context, tool types.Tool, dataRoot, toolSource string, env []string) ([]string, error) + GetHash(tool types.Tool) (string, error) } type noopRuntime struct { @@ -37,6 +38,10 @@ func (n noopRuntime) ID() string { return "none" } +func (n noopRuntime) GetHash(_ types.Tool) (string, error) { + return "", nil +} + func (n noopRuntime) Supports(_ types.Tool, _ []string) bool { return false } @@ -183,8 +188,13 @@ func (m *Manager) setup(ctx context.Context, runtime Runtime, tool types.Tool, e locker.Lock(tool.ID) defer locker.Unlock(tool.ID) + runtimeHash, err := runtime.GetHash(tool) + if err != nil { + return "", nil, err + } + target := filepath.Join(m.storageDir, tool.Source.Repo.Revision, tool.Source.Repo.Path, tool.Source.Repo.Name, runtime.ID()) - targetFinal := filepath.Join(target, tool.Source.Repo.Path) + targetFinal := filepath.Join(target, tool.Source.Repo.Path+runtimeHash) doneFile := targetFinal + ".done" envData, err := os.ReadFile(doneFile) if err == nil { @@ -251,7 +261,11 @@ func (m *Manager) GetContext(ctx context.Context, tool types.Tool, cmd, env []st for _, runtime := range m.runtimes { if runtime.Supports(tool, cmd) { log.Debugf("Runtime %s supports %v", runtime.ID(), cmd) - return m.setup(ctx, runtime, tool, env) + wd, env, err := m.setup(ctx, runtime, tool, env) + if isLocal { + wd = tool.WorkingDir + } + return wd, env, err } } diff --git a/pkg/repos/runtimes/busybox/busybox.go b/pkg/repos/runtimes/busybox/busybox.go index 542ba94a..481ed1fe 100644 --- a/pkg/repos/runtimes/busybox/busybox.go +++ b/pkg/repos/runtimes/busybox/busybox.go @@ -33,6 +33,10 @@ func (r *Runtime) ID() string { return "busybox" } +func (r *Runtime) GetHash(_ types.Tool) 
(string, error) { + return "", nil +} + func (r *Runtime) Supports(_ types.Tool, cmd []string) bool { if runtime.GOOS != "windows" { return false diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index b19cfe90..882e8a0b 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -35,6 +35,10 @@ func (r *Runtime) ID() string { return "go" + r.Version } +func (r *Runtime) GetHash(_ types.Tool) (string, error) { + return "", nil +} + func (r *Runtime) Supports(tool types.Tool, cmd []string) bool { return tool.Source.IsGit() && len(cmd) > 0 && cmd[0] == "${GPTSCRIPT_TOOL_DIR}/bin/gptscript-go-tool" diff --git a/pkg/repos/runtimes/node/node.go b/pkg/repos/runtimes/node/node.go index fde5103d..d0a9d8cb 100644 --- a/pkg/repos/runtimes/node/node.go +++ b/pkg/repos/runtimes/node/node.go @@ -39,10 +39,7 @@ func (r *Runtime) ID() string { return "node" + r.Version } -func (r *Runtime) Supports(tool types.Tool, cmd []string) bool { - if _, hasPackageJSON := tool.MetaData[packageJSON]; !hasPackageJSON && !tool.Source.IsGit() { - return false - } +func (r *Runtime) Supports(_ types.Tool, cmd []string) bool { for _, testCmd := range []string{"node", "npx", "npm"} { if r.supports(testCmd, cmd) { return true @@ -61,6 +58,15 @@ func (r *Runtime) supports(testCmd string, cmd []string) bool { return runtimeEnv.Matches(cmd, testCmd) } +func (r *Runtime) GetHash(tool types.Tool) (string, error) { + if !tool.Source.IsGit() && tool.WorkingDir != "" { + if s, err := os.Stat(filepath.Join(tool.WorkingDir, packageJSON)); err == nil { + return hash.Digest(tool.WorkingDir + s.ModTime().String())[:7], nil + } + } + return "", nil +} + func (r *Runtime) Setup(ctx context.Context, tool types.Tool, dataRoot, toolSource string, env []string) ([]string, error) { binPath, err := r.getRuntime(ctx, dataRoot) if err != nil { @@ -74,6 +80,8 @@ func (r *Runtime) Setup(ctx context.Context, tool types.Tool, dataRoot, toolSour if _, ok 
:= tool.MetaData[packageJSON]; ok { newEnv = append(newEnv, "GPTSCRIPT_TMPDIR="+toolSource) + } else if !tool.Source.IsGit() && tool.WorkingDir != "" { + newEnv = append(newEnv, "GPTSCRIPT_TMPDIR="+tool.WorkingDir, "GPTSCRIPT_RUNTIME_DEV=true") } return newEnv, nil @@ -120,6 +128,16 @@ func (r *Runtime) runNPM(ctx context.Context, tool types.Tool, toolSource, binDi if err := os.WriteFile(filepath.Join(toolSource, packageJSON), []byte(contents+"\n"), 0644); err != nil { return err } + } else if !tool.Source.IsGit() { + if tool.WorkingDir == "" { + return nil + } + if _, err := os.Stat(filepath.Join(tool.WorkingDir, packageJSON)); errors.Is(fs.ErrNotExist, err) { + return nil + } else if err != nil { + return err + } + cmd.Dir = tool.WorkingDir } return cmd.Run() } diff --git a/pkg/repos/runtimes/python/python.go b/pkg/repos/runtimes/python/python.go index ae24f92a..87b072e5 100644 --- a/pkg/repos/runtimes/python/python.go +++ b/pkg/repos/runtimes/python/python.go @@ -24,8 +24,9 @@ import ( var releasesData []byte const ( - uvVersion = "uv==0.2.33" - requirementsTxt = "requirements.txt" + uvVersion = "uv==0.2.33" + requirementsTxt = "requirements.txt" + gptscriptRequirementsTxt = "requirements-gptscript.txt" ) type Release struct { @@ -47,10 +48,7 @@ func (r *Runtime) ID() string { return "python" + r.Version } -func (r *Runtime) Supports(tool types.Tool, cmd []string) bool { - if _, hasRequirements := tool.MetaData[requirementsTxt]; !hasRequirements && !tool.Source.IsGit() { - return false - } +func (r *Runtime) Supports(_ types.Tool, cmd []string) bool { if runtimeEnv.Matches(cmd, r.ID()) { return true } @@ -177,6 +175,22 @@ func (r *Runtime) getReleaseAndDigest() (string, string, error) { return "", "", fmt.Errorf("failed to find an python runtime for %s", r.Version) } +func (r *Runtime) GetHash(tool types.Tool) (string, error) { + if !tool.Source.IsGit() && tool.WorkingDir != "" { + if _, ok := tool.MetaData[requirementsTxt]; ok { + return "", nil + } + for _, 
req := range []string{gptscriptRequirementsTxt, requirementsTxt} { + reqFile := filepath.Join(tool.WorkingDir, req) + if s, err := os.Stat(reqFile); err == nil && !s.IsDir() { + return hash.Digest(tool.WorkingDir + s.ModTime().String())[:7], nil + } + } + } + + return "", nil +} + func (r *Runtime) runPip(ctx context.Context, tool types.Tool, toolSource, binDir string, env []string) error { log.InfofCtx(ctx, "Running pip in %s", toolSource) if content, ok := tool.MetaData[requirementsTxt]; ok { @@ -189,8 +203,16 @@ func (r *Runtime) runPip(ctx context.Context, tool types.Tool, toolSource, binDi return cmd.Run() } - for _, req := range []string{"requirements-gptscript.txt", requirementsTxt} { - reqFile := filepath.Join(toolSource, req) + reqPath := toolSource + if !tool.Source.IsGit() { + if tool.WorkingDir == "" { + return nil + } + reqPath = tool.WorkingDir + } + + for _, req := range []string{gptscriptRequirementsTxt, requirementsTxt} { + reqFile := filepath.Join(reqPath, req) if s, err := os.Stat(reqFile); err == nil && !s.IsDir() { cmd := debugcmd.New(ctx, uvBin(binDir), "pip", "install", "-r", reqFile) cmd.Env = env diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 424c84c1..141e6aff 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -1018,3 +1018,26 @@ func TestRuntimes(t *testing.T) { }) r.RunDefault() } + +func TestRuntimesLocalDev(t *testing.T) { + r := tester.NewRunner(t) + r.RespondWith(tester.Result{ + Func: types.CompletionFunctionCall{ + Name: "py", + Arguments: "{}", + }, + }, tester.Result{ + Func: types.CompletionFunctionCall{ + Name: "node", + Arguments: "{}", + }, + }, tester.Result{ + Func: types.CompletionFunctionCall{ + Name: "bash", + Arguments: "{}", + }, + }) + r.RunDefault() + _ = os.RemoveAll("testdata/TestRuntimesLocalDev/node_modules") + _ = os.RemoveAll("testdata/TestRuntimesLocalDev/package-lock.json") +} diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/call1-resp.golden 
b/pkg/tests/testdata/TestRuntimesLocalDev/call1-resp.golden new file mode 100644 index 00000000..1d53670a --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/call1-resp.golden @@ -0,0 +1,16 @@ +`{ + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + } + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/call1.golden b/pkg/tests/testdata/TestRuntimesLocalDev/call1.golden new file mode 100644 index 00000000..7e775029 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/call1.golden @@ -0,0 +1,37 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:py", + "name": "py", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:node", + "name": "node", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:bash", + "name": "bash", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Dummy" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/call2-resp.golden b/pkg/tests/testdata/TestRuntimesLocalDev/call2-resp.golden new file mode 100644 index 00000000..4806793c --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/call2-resp.golden @@ -0,0 +1,16 @@ +`{ + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + } + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/call2.golden b/pkg/tests/testdata/TestRuntimesLocalDev/call2.golden new file mode 100644 index 00000000..cc1fd1b7 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/call2.golden @@ -0,0 +1,70 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:py", + 
"name": "py", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:node", + "name": "node", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:bash", + "name": "bash", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Dummy" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "py worked\r\n" + } + ], + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + }, + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/call3-resp.golden b/pkg/tests/testdata/TestRuntimesLocalDev/call3-resp.golden new file mode 100644 index 00000000..1103f824 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/call3-resp.golden @@ -0,0 +1,16 @@ +`{ + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 2, + "id": "call_3", + "function": { + "name": "bash", + "arguments": "{}" + } + } + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/call3.golden b/pkg/tests/testdata/TestRuntimesLocalDev/call3.golden new file mode 100644 index 00000000..7c928c07 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/call3.golden @@ -0,0 +1,103 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:py", + "name": "py", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:node", + "name": "node", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:bash", + "name": "bash", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + 
"text": "Dummy" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "py worked\r\n" + } + ], + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + }, + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "node worked\n" + } + ], + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + }, + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/call4-resp.golden b/pkg/tests/testdata/TestRuntimesLocalDev/call4-resp.golden new file mode 100644 index 00000000..8135a8c9 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/call4-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 4" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/call4.golden b/pkg/tests/testdata/TestRuntimesLocalDev/call4.golden new file mode 100644 index 00000000..b95b880d --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/call4.golden @@ -0,0 +1,136 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:py", + "name": "py", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:node", + "name": "node", + "parameters": null + } + }, + { + "function": { + "toolID": "testdata/TestRuntimesLocalDev/test.gpt:bash", + "name": "bash", + "parameters": null + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Dummy" + } + ], + "usage": {} + }, + { + "role": 
"assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "py worked\r\n" + } + ], + "toolCall": { + "index": 0, + "id": "call_1", + "function": { + "name": "py", + "arguments": "{}" + } + }, + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "node worked\n" + } + ], + "toolCall": { + "index": 1, + "id": "call_2", + "function": { + "name": "node", + "arguments": "{}" + } + }, + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 2, + "id": "call_3", + "function": { + "name": "bash", + "arguments": "{}" + } + } + } + ], + "usage": {} + }, + { + "role": "tool", + "content": [ + { + "text": "bash works\n" + } + ], + "toolCall": { + "index": 2, + "id": "call_3", + "function": { + "name": "bash", + "arguments": "{}" + } + }, + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/package.json b/pkg/tests/testdata/TestRuntimesLocalDev/package.json new file mode 100644 index 00000000..d5f400a1 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/package.json @@ -0,0 +1,15 @@ +{ + "name": "chalk-example", + "version": "1.0.0", + "type": "module", + "description": "A simple example project to demonstrate the use of chalk", + "main": "example.js", + "scripts": { + "start": "node example.js" + }, + "author": "Your Name", + "license": "MIT", + "dependencies": { + "chalk": "^5.0.0" + } +} diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/requirements.txt b/pkg/tests/testdata/TestRuntimesLocalDev/requirements.txt new file mode 100644 index 00000000..f2293605 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/requirements.txt @@ -0,0 +1 @@ 
+requests diff --git a/pkg/tests/testdata/TestRuntimesLocalDev/test.gpt b/pkg/tests/testdata/TestRuntimesLocalDev/test.gpt new file mode 100644 index 00000000..454ffce0 --- /dev/null +++ b/pkg/tests/testdata/TestRuntimesLocalDev/test.gpt @@ -0,0 +1,34 @@ +name: first +tools: py, node, bash + +Dummy + +--- +name: py + +#!/usr/bin/env python3 + +import requests +import platform + +# this is dumb hack to get the line endings to always be \r\n so the golden files match +# on both linux and windows +if platform.system() == 'Windows': + print('py worked') +else: + print('py worked\r') + +--- +name: node + +#!/usr/bin/env node + +import chalk from 'chalk'; +console.log("node worked") + +--- +name: bash + +#!/bin/bash + +echo bash works \ No newline at end of file From ca8ab3aedb8ed535c9512f23acd01fbaaa494c75 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Sat, 10 Aug 2024 22:47:30 -0700 Subject: [PATCH 090/270] feat: enable github release binary downloads for go tools --- pkg/repos/get.go | 34 +++-- pkg/repos/runtimes/busybox/busybox.go | 4 + pkg/repos/runtimes/golang/golang.go | 181 ++++++++++++++++++++++++++ pkg/repos/runtimes/node/node.go | 4 + pkg/repos/runtimes/python/python.go | 4 + 5 files changed, 217 insertions(+), 10 deletions(-) diff --git a/pkg/repos/get.go b/pkg/repos/get.go index b43bc63b..416f4c61 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -27,6 +27,7 @@ const credentialHelpersRepo = "github.com/gptscript-ai/gptscript-credential-help type Runtime interface { ID() string Supports(tool types.Tool, cmd []string) bool + Binary(ctx context.Context, tool types.Tool, dataRoot, toolSource string, env []string) (bool, []string, error) Setup(ctx context.Context, tool types.Tool, dataRoot, toolSource string, env []string) ([]string, error) GetHash(tool types.Tool) (string, error) } @@ -46,6 +47,10 @@ func (n noopRuntime) Supports(_ types.Tool, _ []string) bool { return false } +func (n noopRuntime) Binary(_ context.Context, _ types.Tool, _, _ string, 
_ []string) (bool, []string, error) { + return false, nil, nil +} + func (n noopRuntime) Setup(_ context.Context, _ types.Tool, _, _ string, _ []string) ([]string, error) { return nil, nil } @@ -211,21 +216,30 @@ func (m *Manager) setup(ctx context.Context, runtime Runtime, tool types.Tool, e _ = os.RemoveAll(doneFile) _ = os.RemoveAll(target) - if tool.Source.Repo.VCS == "git" { - if err := git.Checkout(ctx, m.gitDir, tool.Source.Repo.Root, tool.Source.Repo.Revision, target); err != nil { - return "", nil, err + var ( + newEnv []string + isBinary bool + ) + + if isBinary, newEnv, err = runtime.Binary(ctx, tool, m.runtimeDir, targetFinal, env); err != nil { + return "", nil, err + } else if !isBinary { + if tool.Source.Repo.VCS == "git" { + if err := git.Checkout(ctx, m.gitDir, tool.Source.Repo.Root, tool.Source.Repo.Revision, target); err != nil { + return "", nil, err + } + } else { + if err := os.MkdirAll(target, 0755); err != nil { + return "", nil, err + } } - } else { - if err := os.MkdirAll(target, 0755); err != nil { + + newEnv, err = runtime.Setup(ctx, tool, m.runtimeDir, targetFinal, env) + if err != nil { return "", nil, err } } - newEnv, err := runtime.Setup(ctx, tool, m.runtimeDir, targetFinal, env) - if err != nil { - return "", nil, err - } - out, err := os.Create(doneFile + ".tmp") if err != nil { return "", nil, err diff --git a/pkg/repos/runtimes/busybox/busybox.go b/pkg/repos/runtimes/busybox/busybox.go index 481ed1fe..e4604b06 100644 --- a/pkg/repos/runtimes/busybox/busybox.go +++ b/pkg/repos/runtimes/busybox/busybox.go @@ -49,6 +49,10 @@ func (r *Runtime) Supports(_ types.Tool, cmd []string) bool { return false } +func (r *Runtime) Binary(_ context.Context, _ types.Tool, _, _ string, _ []string) (bool, []string, error) { + return false, nil, nil +} + func (r *Runtime) Setup(ctx context.Context, _ types.Tool, dataRoot, _ string, env []string) ([]string, error) { binPath, err := r.getRuntime(ctx, dataRoot) if err != nil { diff --git 
a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index 882e8a0b..9e472e90 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -4,10 +4,14 @@ import ( "bufio" "bytes" "context" + "crypto/sha256" _ "embed" + "encoding/hex" "errors" "fmt" + "io" "io/fs" + "net/http" "os" "path/filepath" "runtime" @@ -44,6 +48,183 @@ func (r *Runtime) Supports(tool types.Tool, cmd []string) bool { len(cmd) > 0 && cmd[0] == "${GPTSCRIPT_TOOL_DIR}/bin/gptscript-go-tool" } +type release struct { + account, repo, label string +} + +func (r release) checksumTxt() string { + return fmt.Sprintf( + "https://github.com/%s/%s/releases/download/%s/checksums.txt", + r.account, + r.repo, + r.label) +} + +func (r release) binURL() string { + return fmt.Sprintf( + "https://github.com/%s/%s/releases/download/%s/%s", + r.account, + r.repo, + r.label, + r.srcBinName()) +} + +func (r release) targetBinName() string { + suffix := "" + if runtime.GOOS == "windows" { + suffix = ".exe" + } + + return "gptscript-go-tool" + suffix +} + +func (r release) srcBinName() string { + suffix := "" + if runtime.GOOS == "windows" { + suffix = ".exe" + } + + return r.repo + "-" + + runtime.GOOS + "-" + + runtime.GOARCH + suffix +} + +func getLatestRelease(tool types.Tool) (*release, bool) { + if tool.Source.Repo == nil || !strings.HasPrefix(tool.Source.Repo.Root, "https://github.com/") { + return nil, false + } + + parts := strings.Split(strings.TrimPrefix(strings.TrimSuffix(tool.Source.Repo.Root, ".git"), "https://"), "/") + if len(parts) != 3 { + return nil, false + } + + client := http.Client{ + CheckRedirect: func(_ *http.Request, _ []*http.Request) error { + return http.ErrUseLastResponse + }, + } + + resp, err := client.Get(fmt.Sprintf("https://github.com/%s/%s/releases/latest", parts[1], parts[2])) + if err != nil || resp.StatusCode != http.StatusFound { + // ignore error + return nil, false + } + defer resp.Body.Close() + + target := 
resp.Header.Get("Location") + if target == "" { + return nil, false + } + + account, repo := parts[1], parts[2] + parts = strings.Split(target, "/") + label := parts[len(parts)-1] + + return &release{ + account: account, + repo: repo, + label: label, + }, true +} + +func get(ctx context.Context, url string) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } else if resp.StatusCode != http.StatusOK { + _ = resp.Body.Close() + return nil, fmt.Errorf("bad HTTP status code: %d", resp.StatusCode) + } + + return resp, nil +} + +func downloadBin(ctx context.Context, checksum, src, url, bin string) error { + resp, err := get(ctx, url) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := os.MkdirAll(filepath.Join(src, "bin"), 0755); err != nil { + return err + } + + targetFile, err := os.Create(filepath.Join(src, "bin", bin)) + if err != nil { + return err + } + + digest := sha256.New() + + if _, err := io.Copy(io.MultiWriter(targetFile, digest), resp.Body); err != nil { + return err + } + + if err := targetFile.Close(); err != nil { + return nil + } + + if got := hex.EncodeToString(digest.Sum(nil)); got != checksum { + return fmt.Errorf("checksum mismatch %s != %s", got, checksum) + } + + if err := os.Chmod(targetFile.Name(), 0755); err != nil { + return err + } + + return nil +} + +func getChecksum(ctx context.Context, rel *release) string { + resp, err := get(ctx, rel.checksumTxt()) + if err != nil { + // ignore error + return "" + } + defer resp.Body.Close() + + scan := bufio.NewScanner(resp.Body) + for scan.Scan() { + fields := strings.Fields(scan.Text()) + if len(fields) != 2 || fields[1] != rel.srcBinName() { + continue + } + return fields[0] + } + + return "" +} + +func (r *Runtime) Binary(ctx context.Context, tool types.Tool, _, toolSource string, env []string) (bool, 
[]string, error) { + if !tool.Source.IsGit() { + return false, nil, nil + } + + rel, ok := getLatestRelease(tool) + if !ok { + return false, nil, nil + } + + checksum := getChecksum(ctx, rel) + if checksum == "" { + return false, nil, nil + } + + if err := downloadBin(ctx, checksum, toolSource, rel.binURL(), rel.targetBinName()); err != nil { + // ignore error + return false, nil, nil + } + + return true, env, nil +} + func (r *Runtime) Setup(ctx context.Context, _ types.Tool, dataRoot, toolSource string, env []string) ([]string, error) { binPath, err := r.getRuntime(ctx, dataRoot) if err != nil { diff --git a/pkg/repos/runtimes/node/node.go b/pkg/repos/runtimes/node/node.go index d0a9d8cb..01a752e6 100644 --- a/pkg/repos/runtimes/node/node.go +++ b/pkg/repos/runtimes/node/node.go @@ -39,6 +39,10 @@ func (r *Runtime) ID() string { return "node" + r.Version } +func (r *Runtime) Binary(_ context.Context, _ types.Tool, _, _ string, _ []string) (bool, []string, error) { + return false, nil, nil +} + func (r *Runtime) Supports(_ types.Tool, cmd []string) bool { for _, testCmd := range []string{"node", "npx", "npm"} { if r.supports(testCmd, cmd) { diff --git a/pkg/repos/runtimes/python/python.go b/pkg/repos/runtimes/python/python.go index 87b072e5..ee4bf571 100644 --- a/pkg/repos/runtimes/python/python.go +++ b/pkg/repos/runtimes/python/python.go @@ -175,6 +175,10 @@ func (r *Runtime) getReleaseAndDigest() (string, string, error) { return "", "", fmt.Errorf("failed to find an python runtime for %s", r.Version) } +func (r *Runtime) Binary(_ context.Context, _ types.Tool, _, _ string, _ []string) (bool, []string, error) { + return false, nil, nil +} + func (r *Runtime) GetHash(tool types.Tool) (string, error) { if !tool.Source.IsGit() && tool.WorkingDir != "" { if _, ok := tool.MetaData[requirementsTxt]; ok { From b05ceb7a5d55deb9bf5ca13872e8706e781a28de Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 12 Aug 2024 09:43:50 -0400 Subject: [PATCH 091/270] fix: 
support share credentials in context tools (#782) Signed-off-by: Grant Linville --- pkg/types/tool.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/types/tool.go b/pkg/types/tool.go index bb49e6f1..7e08f604 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -752,6 +752,16 @@ func (t Tool) GetCredentialTools(prg Program, agentGroup []ToolReference) ([]Too result.AddAll(referencedTool.GetToolRefsFromNames(referencedTool.ExportCredentials)) } + contextToolRefs, err := t.getDirectContextToolRefs(prg) + if err != nil { + return nil, err + } + + for _, contextToolRef := range contextToolRefs { + contextTool := prg.ToolSet[contextToolRef.ToolID] + result.AddAll(contextTool.GetToolRefsFromNames(contextTool.ExportCredentials)) + } + return result.List() } From f356013661be6858d27f0b85338bafbec3ad42f2 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 12 Aug 2024 09:43:45 -0400 Subject: [PATCH 092/270] fix: stop running tool for providers that are runnning A previous change stopped caching clients so that they would be restarted whenever needed. However, the running of the tool produces unwanted output if the provider is already running. This change includes a way to tell whether the provider is running or needs to be restarted. 
Signed-off-by: Donnie Adams --- pkg/engine/daemon.go | 15 +++++++++++++-- pkg/remote/remote.go | 42 +++++++++++++++++++++++++++++++----------- 2 files changed, 44 insertions(+), 13 deletions(-) diff --git a/pkg/engine/daemon.go b/pkg/engine/daemon.go index 113aa1ba..f0a1c10c 100644 --- a/pkg/engine/daemon.go +++ b/pkg/engine/daemon.go @@ -18,8 +18,9 @@ import ( var ports Ports type Ports struct { - daemonPorts map[string]int64 - daemonLock sync.Mutex + daemonPorts map[string]int64 + daemonsRunning map[string]struct{} + daemonLock sync.Mutex startPort, endPort int64 usedPorts map[int64]struct{} @@ -28,6 +29,13 @@ type Ports struct { daemonWG sync.WaitGroup } +func IsDaemonRunning(url string) bool { + ports.daemonLock.Lock() + defer ports.daemonLock.Unlock() + _, ok := ports.daemonsRunning[url] + return ok +} + func SetPorts(start, end int64) { ports.daemonLock.Lock() defer ports.daemonLock.Unlock() @@ -164,8 +172,10 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { if ports.daemonPorts == nil { ports.daemonPorts = map[string]int64{} + ports.daemonsRunning = map[string]struct{}{} } ports.daemonPorts[tool.ID] = port + ports.daemonsRunning[url] = struct{}{} killedCtx, cancel := context.WithCancelCause(ctx) defer cancel(nil) @@ -185,6 +195,7 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { defer ports.daemonLock.Unlock() delete(ports.daemonPorts, tool.ID) + delete(ports.daemonsRunning, url) ports.daemonWG.Done() }() diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 89863529..6a21413b 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -22,8 +22,9 @@ import ( ) type Client struct { - modelsLock sync.Mutex + clientsLock sync.Mutex cache *cache.Client + clients map[string]clientInfo modelToProvider map[string]string runner *runner.Runner envs []string @@ -38,13 +39,15 @@ func New(r *runner.Runner, envs []string, cache *cache.Client, credStore credent envs: envs, credStore: credStore, defaultProvider: 
defaultProvider, + modelToProvider: make(map[string]string), + clients: make(map[string]clientInfo), } } func (c *Client) Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { - c.modelsLock.Lock() + c.clientsLock.Lock() provider, ok := c.modelToProvider[messageRequest.Model] - c.modelsLock.Unlock() + c.clientsLock.Unlock() if !ok { return nil, fmt.Errorf("failed to find remote model %s", messageRequest.Model) @@ -105,12 +108,8 @@ func (c *Client) Supports(ctx context.Context, modelString string) (bool, error) return false, err } - c.modelsLock.Lock() - defer c.modelsLock.Unlock() - - if c.modelToProvider == nil { - c.modelToProvider = map[string]string{} - } + c.clientsLock.Lock() + defer c.clientsLock.Unlock() c.modelToProvider[modelString] = providerName return true, nil @@ -145,11 +144,23 @@ func (c *Client) clientFromURL(ctx context.Context, apiURL string) (*openai.Clie } func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, error) { + c.clientsLock.Lock() + defer c.clientsLock.Unlock() + + client, ok := c.clients[toolName] + if ok && !isHTTPURL(toolName) && engine.IsDaemonRunning(client.url) { + return client.client, nil + } + if isHTTPURL(toolName) { remoteClient, err := c.clientFromURL(ctx, toolName) if err != nil { return nil, err } + c.clients[toolName] = clientInfo{ + client: remoteClient, + url: toolName, + } return remoteClient, nil } @@ -165,7 +176,7 @@ func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, err return nil, err } - client, err := openai.NewClient(ctx, c.credStore, openai.Options{ + oClient, err := openai.NewClient(ctx, c.credStore, openai.Options{ BaseURL: strings.TrimSuffix(url, "/") + "/v1", Cache: c.cache, CacheKey: prg.EntryToolID, @@ -174,7 +185,11 @@ func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, err return nil, err } - return client, nil + c.clients[toolName] = 
clientInfo{ + client: oClient, + url: url, + } + return client.client, nil } func (c *Client) retrieveAPIKey(ctx context.Context, env, url string) (string, error) { @@ -185,3 +200,8 @@ func isLocalhost(url string) bool { return strings.HasPrefix(url, "http://localhost") || strings.HasPrefix(url, "http://127.0.0.1") || strings.HasPrefix(url, "https://localhost") || strings.HasPrefix(url, "https://127.0.0.1") } + +type clientInfo struct { + client *openai.Client + url string +} From 9696f8d64d06aa4809eccb0a7b018fea63cbd39a Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 12 Aug 2024 13:45:28 -0400 Subject: [PATCH 093/270] fix: share credentials in context tools (#785) Signed-off-by: Grant Linville --- pkg/types/tool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 7e08f604..789215b6 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -752,7 +752,7 @@ func (t Tool) GetCredentialTools(prg Program, agentGroup []ToolReference) ([]Too result.AddAll(referencedTool.GetToolRefsFromNames(referencedTool.ExportCredentials)) } - contextToolRefs, err := t.getDirectContextToolRefs(prg) + contextToolRefs, err := t.GetContextTools(prg) if err != nil { return nil, err } From e1e200226466f7069b8dec0fa45c7782d970b43d Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 12 Aug 2024 11:15:47 -0700 Subject: [PATCH 094/270] chore: support looking up go binary releases by tag --- pkg/repos/runtimes/golang/golang.go | 34 +++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index 9e472e90..f82b628a 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -7,6 +7,7 @@ import ( "crypto/sha256" _ "embed" "encoding/hex" + "encoding/json" "errors" "fmt" "io" @@ -89,6 +90,13 @@ func (r release) srcBinName() string { runtime.GOARCH + suffix } +type tag struct { + Name 
string `json:"name,omitempty"` + Commit struct { + Sha string `json:"sha,omitempty"` + } `json:"commit"` +} + func getLatestRelease(tool types.Tool) (*release, bool) { if tool.Source.Repo == nil || !strings.HasPrefix(tool.Source.Repo.Root, "https://github.com/") { return nil, false @@ -105,7 +113,30 @@ func getLatestRelease(tool types.Tool) (*release, bool) { }, } - resp, err := client.Get(fmt.Sprintf("https://github.com/%s/%s/releases/latest", parts[1], parts[2])) + account, repo := parts[1], parts[2] + + resp, err := client.Get(fmt.Sprintf("https://api.github.com/repos/%s/%s/tags", account, repo)) + if err != nil || resp.StatusCode != http.StatusOK { + // ignore error + return nil, false + } + defer resp.Body.Close() + + var tags []tag + if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { + return nil, false + } + for _, tag := range tags { + if tag.Commit.Sha == tool.Source.Repo.Revision { + return &release{ + account: account, + repo: repo, + label: tag.Name, + }, true + } + } + + resp, err = client.Get(fmt.Sprintf("https://github.com/%s/%s/releases/latest", account, repo)) if err != nil || resp.StatusCode != http.StatusFound { // ignore error return nil, false @@ -117,7 +148,6 @@ func getLatestRelease(tool types.Tool) (*release, bool) { return nil, false } - account, repo := parts[1], parts[2] parts = strings.Split(target, "/") label := parts[len(parts)-1] From bacc628a26a97b1874f2d9e9c73a61d32e113c51 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 12 Aug 2024 12:37:07 -0700 Subject: [PATCH 095/270] chore: fallback to pure go git if git isn't found --- go.mod | 21 ++++++++- go.sum | 72 +++++++++++++++++++++++++++++-- pkg/repos/git/cmd.go | 4 ++ pkg/repos/git/git.go | 8 +++- pkg/repos/git/git_go.go | 91 +++++++++++++++++++++++++++++++++++++++ pkg/repos/git/git_test.go | 2 +- 6 files changed, 190 insertions(+), 8 deletions(-) create mode 100644 pkg/repos/git/git_go.go diff --git a/go.mod b/go.mod index c84dae97..4cf0ba73 100644 --- 
a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/docker/docker-credential-helpers v0.8.1 github.com/fatih/color v1.17.0 github.com/getkin/kin-openapi v0.124.0 + github.com/go-git/go-git/v5 v5.12.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 @@ -28,7 +29,7 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/tidwall/gjson v1.17.1 github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc @@ -43,6 +44,9 @@ require ( atomicgo.dev/cursor v0.2.0 // indirect atomicgo.dev/keyboard v0.2.9 // indirect atomicgo.dev/schedule v0.1.0 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/ProtonMail/go-crypto v1.0.0 // indirect github.com/alecthomas/chroma/v2 v2.8.0 // indirect github.com/andybalholm/brotli v1.0.4 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect @@ -53,14 +57,20 @@ require ( github.com/charmbracelet/glamour v0.7.0 // indirect github.com/charmbracelet/lipgloss v0.11.0 // indirect github.com/charmbracelet/x/ansi v0.1.1 // indirect + github.com/cloudflare/circl v1.3.7 // indirect github.com/connesc/cipherio v0.2.1 // indirect github.com/containerd/console v1.0.4 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dlclark/regexp2 v1.4.0 // indirect github.com/dsnet/compress v0.0.1 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/swag v0.22.8 // indirect + 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/gookit/color v1.5.4 // indirect @@ -71,8 +81,10 @@ require ( github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/yaml v0.2.0 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/klauspost/compress v1.16.5 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/lithammer/fuzzysearch v1.1.8 // indirect @@ -91,27 +103,32 @@ require ( github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/pierrec/lz4/v4 v4.1.15 // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pterm/pterm v0.12.79 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/skeema/knownhosts v1.2.2 // indirect github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e // indirect github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/ulikunitz/xz v0.5.10 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xo/terminfo 
v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect go4.org v0.0.0-20200411211856-f5505b9728dd // indirect + golang.org/x/crypto v0.25.0 // indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.27.0 // indirect golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/tools v0.23.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect mvdan.cc/gofumpt v0.6.0 // indirect ) diff --git a/go.sum b/go.sum index 7e2d7b75..1b518130 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,8 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= @@ -38,8 +40,13 @@ github.com/MarvinJWendt/testza v0.3.0/go.mod h1:eFcL4I0idjtIx8P9C6KkAuLgATNKpX4/ github.com/MarvinJWendt/testza v0.4.2/go.mod h1:mSdhXiKH8sg/gQehJ63bINcCKp7RtYewEjXsvsVUPbE= github.com/MarvinJWendt/testza v0.5.2 h1:53KDo64C1z/h/d/stCYCPY69bt/OSwjq5KpFNwi+zB4= github.com/MarvinJWendt/testza v0.5.2/go.mod h1:xu53QFE5sCdjtMCKk8YMQ2MnymimEctc4n3EjyIYvEY= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio 
v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= +github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= github.com/alecthomas/assert/v2 v2.2.1 h1:XivOgYcduV98QCahG8T5XTezV5bylXe+lBxLG2K2ink= @@ -50,6 +57,10 @@ github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= @@ -61,6 +72,7 @@ github.com/bodgit/sevenzip v1.3.0 h1:1ljgELgtHqvgIp8W8kgeEGHIWP4ch3xGI8uOBZgLVKY github.com/bodgit/sevenzip v1.3.0/go.mod 
h1:omwNcgZTEooWM8gA/IJ2Nk/+ZQ94+GsytRzOJJ8FBlM= github.com/bodgit/windows v1.0.0 h1:rLQ/XjsleZvx4fR1tB/UxQrK+SJ2OFHzfPjLWWOhDIA= github.com/bodgit/windows v1.0.0/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/charmbracelet/glamour v0.7.0 h1:2BtKGZ4iVJCDfMF229EzbeR1QRKLWztO9dMtjmqZSng= github.com/charmbracelet/glamour v0.7.0/go.mod h1:jUMh5MeihljJPQbJ/wf4ldw2+yBP59+ctV36jASy7ps= @@ -78,6 +90,9 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/connesc/cipherio v0.2.1 h1:FGtpTPMbKNNWByNrr9aEBtaJtXjqOzkIXNYJp6OEycw= github.com/connesc/cipherio v0.2.1/go.mod h1:ukY0MWJDFnJEbXMQtOcn2VmTpRfzcTz4OoVrWGGJZcA= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= @@ -88,6 +103,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod 
h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -100,6 +117,10 @@ github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRK github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= @@ -112,6 +133,16 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/getkin/kin-openapi v0.124.0 h1:VSFNMB9C9rTKBnQ/fpyDU8ytMTr4dWI9QovSKj9kz/M= github.com/getkin/kin-openapi v0.124.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= +github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/go-git/gcfg 
v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= @@ -124,6 +155,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -200,12 +233,16 @@ github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 h1:iCHtR9CQyktQ5+f3dMVZfwD2KWJUgm7M0gdL9NGr8KA= github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= @@ -268,10 +305,14 @@ github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod 
h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77 h1:3bMMZ1f+GPXFQ1uNaYbO/uECWvSfqEA+ZEXn1rFAT88= github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77/go.mod h1:8Hf+pH6thup1sPZPD+NLg7d6vbpsdilu9CPIeikvgMQ= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -304,10 +345,14 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod 
h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e h1:H+jDTUeF+SVd4ApwnSFoew8ZwGNRfgb9EsZc7LcocAg= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e/go.mod h1:VsUklG6OQo7Ctunu0gS3AtEOCEc2kMB6r5rKzxAes58= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= @@ -317,13 +362,14 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf h1:pvbZ0lM0XWPBqUKqFU8cmavspvIl9nulOYwdy6IFRRo= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= @@ -337,6 +383,8 @@ github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95 github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -363,7 +411,12 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod 
h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -413,9 +466,12 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= @@ -448,6 +504,7 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -456,6 +513,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -468,6 +526,7 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -480,8 +539,10 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= @@ -492,10 +553,12 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= @@ -575,8 +638,11 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/repos/git/cmd.go b/pkg/repos/git/cmd.go index ad6d7350..3cdcff09 100644 --- a/pkg/repos/git/cmd.go +++ b/pkg/repos/git/cmd.go @@ -17,6 +17,10 @@ func newGitCommand(ctx context.Context, args ...string) *debugcmd.WrappedCmd { } func LsRemote(ctx context.Context, repo, ref string) (string, error) { + if usePureGo() { + return lsRemotePureGo(ctx, repo, ref) + } + cmd := newGitCommand(ctx, "ls-remote", repo, ref) if err := cmd.Run(); err != nil { return "", err diff --git a/pkg/repos/git/git.go b/pkg/repos/git/git.go index 
978f3a6d..0c9c22be 100644 --- a/pkg/repos/git/git.go +++ b/pkg/repos/git/git.go @@ -29,7 +29,11 @@ func Checkout(ctx context.Context, base, repo, commit, toDir string) error { return err } - if err := Fetch(ctx, base, repo, commit); err != nil { + if usePureGo() { + return checkoutPureGo(ctx, base, repo, commit, toDir) + } + + if err := fetch(ctx, base, repo, commit); err != nil { return err } @@ -41,7 +45,7 @@ func gitDir(base, repo string) string { return filepath.Join(base, "repos", hash.Digest(repo)) } -func Fetch(ctx context.Context, base, repo, commit string) error { +func fetch(ctx context.Context, base, repo, commit string) error { gitDir := gitDir(base, repo) if found, err := exists(gitDir); err != nil { return err diff --git a/pkg/repos/git/git_go.go b/pkg/repos/git/git_go.go new file mode 100644 index 00000000..aa76c765 --- /dev/null +++ b/pkg/repos/git/git_go.go @@ -0,0 +1,91 @@ +package git + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "sync" + + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/storage/memory" +) + +var ( + gitCheck sync.Once + externalGit bool +) + +func usePureGo() bool { + if os.Getenv("GPTSCRIPT_PURE_GO_GIT") == "true" { + return true + } + gitCheck.Do(func() { + _, err := exec.LookPath("git") + externalGit = err == nil + }) + return !externalGit +} + +func lsRemotePureGo(_ context.Context, repo, ref string) (string, error) { + // Clone the repository in memory + r := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{ + Name: "origin", + URLs: []string{repo}, + }) + + refs, err := r.List(&git.ListOptions{ + PeelingOption: git.AppendPeeled, + }) + if err != nil { + return "", fmt.Errorf("failed to list remote refs: %w", err) + } + + for _, checkRef := range refs { + if checkRef.Name().Short() == ref { + return checkRef.Hash().String(), nil + } + } + + return "", fmt.Errorf("failed to find remote ref %q", ref) +} + 
+func checkoutPureGo(ctx context.Context, _, repo, commit, toDir string) error { + log.InfofCtx(ctx, "Checking out %s to %s", commit, toDir) + // Clone the repository + r, err := git.PlainCloneContext(ctx, toDir, false, &git.CloneOptions{ + URL: repo, + NoCheckout: true, + }) + if err != nil { + return fmt.Errorf("failed to clone the repo: %w", err) + } + + // Fetch the specific commit + err = r.Fetch(&git.FetchOptions{ + RefSpecs: []config.RefSpec{ + config.RefSpec(fmt.Sprintf("+%s:%s", commit, commit)), + }, + }) + if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) { + return fmt.Errorf("failed to fetch the commit: %w", err) + } + + // Checkout the specific commit + w, err := r.Worktree() + if err != nil { + return fmt.Errorf("failed to get worktree: %w", err) + } + + err = w.Checkout(&git.CheckoutOptions{ + Hash: plumbing.NewHash(commit), + }) + if err != nil { + return fmt.Errorf("failed to checkout the commit: %w", err) + } + + return nil +} diff --git a/pkg/repos/git/git_test.go b/pkg/repos/git/git_test.go index 5b66d49f..573bf0bb 100644 --- a/pkg/repos/git/git_test.go +++ b/pkg/repos/git/git_test.go @@ -17,7 +17,7 @@ var ( ) func TestFetch(t *testing.T) { - err := Fetch(context.Background(), testCacheHome, + err := fetch(context.Background(), testCacheHome, "https://github.com/gptscript-ai/dalle-image-generation.git", testCommit) require.NoError(t, err) From 4197b39ed68fad8f631b8c124e71dbda0691a47b Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 12 Aug 2024 15:56:11 -0400 Subject: [PATCH 096/270] fix: address panic when listing models with default model provider Apparently, this code path is exercised by listing models and not by using the provider with LLM calls. 
Signed-off-by: Donnie Adams --- pkg/remote/remote.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 6a21413b..baa54677 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -189,7 +189,7 @@ func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, err client: oClient, url: url, } - return client.client, nil + return oClient, nil } func (c *Client) retrieveAPIKey(ctx context.Context, env, url string) (string, error) { From 071c5f2284bd3d09607086e1a1866eedae11cb48 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 12 Aug 2024 13:00:29 -0700 Subject: [PATCH 097/270] bug: check for git on mac by using xcode-select --- pkg/repos/git/git_go.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/repos/git/git_go.go b/pkg/repos/git/git_go.go index aa76c765..8f6517a2 100644 --- a/pkg/repos/git/git_go.go +++ b/pkg/repos/git/git_go.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "os/exec" + "runtime" "sync" "github.com/go-git/go-git/v5" @@ -24,8 +25,13 @@ func usePureGo() bool { return true } gitCheck.Do(func() { - _, err := exec.LookPath("git") - externalGit = err == nil + if runtime.GOOS == "darwin" { + if exec.Command("xcode-select", "-p").Run() == nil { + externalGit = true + } + } else if _, err := exec.LookPath("git"); err == nil { + externalGit = true + } }) return !externalGit } From 707d48380f6ed55566828c64f098d3603327370b Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 12 Aug 2024 16:04:29 -0700 Subject: [PATCH 098/270] bug: include shared context from context tools referenced by "tools:" --- pkg/tests/testdata/TestToolRefAll/call1.golden | 2 +- pkg/tests/testdata/TestToolRefAll/test.gpt | 8 ++++++++ pkg/types/tool.go | 12 +++++++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/pkg/tests/testdata/TestToolRefAll/call1.golden b/pkg/tests/testdata/TestToolRefAll/call1.golden index 4957014d..ef36e3fb 100644 --- 
a/pkg/tests/testdata/TestToolRefAll/call1.golden +++ b/pkg/tests/testdata/TestToolRefAll/call1.golden @@ -52,7 +52,7 @@ "role": "system", "content": [ { - "text": "\nContext Body\nMain tool" + "text": "\nShared context\n\nContext Body\nMain tool" } ], "usage": {} diff --git a/pkg/tests/testdata/TestToolRefAll/test.gpt b/pkg/tests/testdata/TestToolRefAll/test.gpt index 93c4ea05..423cf766 100644 --- a/pkg/tests/testdata/TestToolRefAll/test.gpt +++ b/pkg/tests/testdata/TestToolRefAll/test.gpt @@ -11,11 +11,19 @@ Agent body --- name: context type: context +share context: sharedcontext #!sys.echo Context Body +--- +name: sharedcontext + +#!sys.echo + +Shared context + --- name: none param: noneArg: stuff diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 789215b6..b59a1953 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -546,7 +546,17 @@ func (t Tool) getExportedTools(prg Program) ([]ToolReference, error) { func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { result := &toolRefSet{} result.AddAll(t.getDirectContextToolRefs(prg)) - result.AddAll(t.getCompletionToolRefs(prg, nil, ToolTypeContext)) + + contextRefs, err := t.getCompletionToolRefs(prg, nil, ToolTypeContext) + if err != nil { + return nil, err + } + + for _, contextRef := range contextRefs { + result.AddAll(prg.ToolSet[contextRef.ToolID].getExportedContext(prg)) + result.Add(contextRef) + } + return result.List() } From 89cff8726f180fe0511980ff1ae1ae162b914f64 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 12 Aug 2024 22:13:23 -0700 Subject: [PATCH 099/270] feat: add sys.model.provider.credential --- go.mod | 2 +- go.sum | 4 +- pkg/builtin/builtin.go | 42 +++++++++++--- pkg/credentials/credential.go | 1 + pkg/engine/cmd.go | 2 +- pkg/engine/engine.go | 8 ++- pkg/llm/proxy.go | 104 ++++++++++++++++++++++++++++++++++ pkg/llm/registry.go | 57 ++++++++++++++++++- pkg/openai/client.go | 7 +++ pkg/runner/runner.go | 27 ++++++--- pkg/tests/tester/runner.go | 4 ++ 
pkg/types/tool.go | 11 +++- pkg/types/toolstring.go | 2 +- 13 files changed, 245 insertions(+), 26 deletions(-) create mode 100644 pkg/llm/proxy.go diff --git a/go.mod b/go.mod index 4cf0ba73..3cfbc98e 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 - github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 + github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e diff --git a/go.sum b/go.sum index 1b518130..85a3f76e 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 h1:m9yLtIEd0z1ia8qFjq3u0Ozb6QKwidyL856JLJp6nbA= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= -github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 h1:vYnXoIyCXzaCEw0sYifQ4bDpsv3/fO/dZ2suEsTwCIo= -github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= +github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3 h1:EQiFTZv+BnOWJX2B9XdF09fL2Zj7h19n1l23TpWCafc= +github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd 
v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 h1:BTfJ6ls31Roq42lznlZnuPzRf0wrT8jT+tWcvq7wDXY= diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index f6811549..23db5152 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -26,14 +26,15 @@ import ( ) var SafeTools = map[string]struct{}{ - "sys.abort": {}, - "sys.chat.finish": {}, - "sys.chat.history": {}, - "sys.chat.current": {}, - "sys.echo": {}, - "sys.prompt": {}, - "sys.time.now": {}, - "sys.context": {}, + "sys.abort": {}, + "sys.chat.finish": {}, + "sys.chat.history": {}, + "sys.chat.current": {}, + "sys.echo": {}, + "sys.prompt": {}, + "sys.time.now": {}, + "sys.context": {}, + "sys.model.provider.credential": {}, } var tools = map[string]types.Tool{ @@ -248,6 +249,15 @@ var tools = map[string]types.Tool{ BuiltinFunc: SysContext, }, }, + "sys.model.provider.credential": { + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Description: "A credential tool to set the OPENAI_API_KEY and OPENAI_BASE_URL to give access to the default model provider", + Arguments: types.ObjectSchema(), + }, + BuiltinFunc: SysModelProviderCredential, + }, + }, } func ListTools() (result []types.Tool) { @@ -678,6 +688,22 @@ func invalidArgument(input string, err error) string { return fmt.Sprintf("Failed to parse arguments %s: %v", input, err) } +func SysModelProviderCredential(ctx context.Context, _ []string, _ string, _ chan<- string) (string, error) { + engineContext, _ := engine.FromContext(ctx) + auth, url, err := engineContext.Engine.Model.ProxyInfo() + if err != nil { + return "", err + } + data, err := json.Marshal(map[string]any{ + "env": map[string]string{ + "OPENAI_API_KEY": auth, + "OPENAI_BASE_URL": url, + }, + "ephemeral": true, + }) + return string(data), err +} + func SysContext(ctx context.Context, _ []string, _ string, _ chan<- string) (string, error) { 
engineContext, _ := engine.FromContext(ctx) diff --git a/pkg/credentials/credential.go b/pkg/credentials/credential.go index 3d1e2192..f589a065 100644 --- a/pkg/credentials/credential.go +++ b/pkg/credentials/credential.go @@ -24,6 +24,7 @@ type Credential struct { ToolName string `json:"toolName"` Type CredentialType `json:"type"` Env map[string]string `json:"env"` + Ephemeral bool `json:"ephemeral,omitempty"` ExpiresAt *time.Time `json:"expiresAt"` RefreshToken string `json:"refreshToken"` } diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 14b41183..960bcfe8 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -109,7 +109,7 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate } }() - return tool.BuiltinFunc(ctx.WrappedContext(), e.Env, input, progress) + return tool.BuiltinFunc(ctx.WrappedContext(e), e.Env, input, progress) } var instructions []string diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index d3daa674..20ca43a9 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -16,6 +16,7 @@ import ( type Model interface { Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) + ProxyInfo() (string, string, error) } type RuntimeManager interface { @@ -79,6 +80,7 @@ type Context struct { Parent *Context LastReturn *Return CurrentReturn *Return + Engine *Engine Program *types.Program // Input is saved only so that we can render display text, don't use otherwise Input string @@ -250,8 +252,10 @@ func FromContext(ctx context.Context) (*Context, bool) { return c, ok } -func (c *Context) WrappedContext() context.Context { - return context.WithValue(c.Ctx, engineContext{}, c) +func (c *Context) WrappedContext(e *Engine) context.Context { + cp := *c + cp.Engine = e + return context.WithValue(c.Ctx, engineContext{}, &cp) } func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { diff --git a/pkg/llm/proxy.go 
b/pkg/llm/proxy.go new file mode 100644 index 00000000..7c3091b3 --- /dev/null +++ b/pkg/llm/proxy.go @@ -0,0 +1,104 @@ +package llm + +import ( + "bytes" + "encoding/json" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + "path" + "strings" + + "github.com/gptscript-ai/gptscript/pkg/builtin" + "github.com/gptscript-ai/gptscript/pkg/openai" +) + +func (r *Registry) ProxyInfo() (string, string, error) { + r.proxyLock.Lock() + defer r.proxyLock.Unlock() + + if r.proxyURL != "" { + return r.proxyToken, r.proxyURL, nil + } + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return "", "", err + } + + go func() { + _ = http.Serve(l, r) + r.proxyLock.Lock() + defer r.proxyLock.Unlock() + _ = l.Close() + r.proxyURL = "" + }() + + r.proxyURL = "http://" + l.Addr().String() + return r.proxyToken, r.proxyURL, nil +} + +func (r *Registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if r.proxyToken != strings.TrimPrefix(req.Header.Get("Authorization"), "Bearer ") { + http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) + return + } + + inBytes, err := io.ReadAll(req.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + var ( + model string + data = map[string]any{} + ) + + if json.Unmarshal(inBytes, &data) == nil { + model, _ = data["model"].(string) + } + + if model == "" { + model = builtin.GetDefaultModel() + } + + c, err := r.getClient(req.Context(), model) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + oai, ok := c.(*openai.Client) + if !ok { + http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) + return + } + + auth, targetURL := oai.ProxyInfo() + if targetURL == "" { + http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) + return + } + + newURL, err := url.Parse(targetURL) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + 
newURL.Path = path.Join(newURL.Path, req.URL.Path) + + rp := httputil.ReverseProxy{ + Director: func(proxyReq *http.Request) { + proxyReq.Body = io.NopCloser(bytes.NewReader(inBytes)) + proxyReq.URL = newURL + proxyReq.Header.Del("Authorization") + proxyReq.Header.Add("Authorization", "Bearer "+auth) + proxyReq.Host = newURL.Hostname() + }, + } + rp.ServeHTTP(w, req) +} diff --git a/pkg/llm/registry.go b/pkg/llm/registry.go index c568b43c..8129c788 100644 --- a/pkg/llm/registry.go +++ b/pkg/llm/registry.go @@ -5,7 +5,10 @@ import ( "errors" "fmt" "sort" + "sync" + "github.com/google/uuid" + "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/openai" "github.com/gptscript-ai/gptscript/pkg/remote" "github.com/gptscript-ai/gptscript/pkg/types" @@ -18,11 +21,16 @@ type Client interface { } type Registry struct { - clients []Client + proxyToken string + proxyURL string + proxyLock sync.Mutex + clients []Client } func NewRegistry() *Registry { - return &Registry{} + return &Registry{ + proxyToken: env.VarOrDefault("GPTSCRIPT_INTERNAL_PROXY_TOKEN", uuid.New().String()), + } } func (r *Registry) AddClient(client Client) error { @@ -44,6 +52,10 @@ func (r *Registry) ListModels(ctx context.Context, providers ...string) (result func (r *Registry) fastPath(modelName string) Client { // This is optimization hack to avoid doing List Models + if len(r.clients) == 1 { + return r.clients[0] + } + if len(r.clients) != 2 { return nil } @@ -66,6 +78,47 @@ func (r *Registry) fastPath(modelName string) Client { return r.clients[0] } +func (r *Registry) getClient(ctx context.Context, modelName string) (Client, error) { + if c := r.fastPath(modelName); c != nil { + return c, nil + } + + var errs []error + var oaiClient *openai.Client + for _, client := range r.clients { + ok, err := client.Supports(ctx, modelName) + if err != nil { + // If we got an OpenAI invalid auth error back, store the OpenAI client for later. 
+ if errors.Is(err, openai.InvalidAuthError{}) { + oaiClient = client.(*openai.Client) + } + + errs = append(errs, err) + } else if ok { + return client, nil + } + } + + if len(errs) > 0 && oaiClient != nil { + // Prompt the user to enter their OpenAI API key and try again. + if err := oaiClient.RetrieveAPIKey(ctx); err != nil { + return nil, err + } + ok, err := oaiClient.Supports(ctx, modelName) + if err != nil { + return nil, err + } else if ok { + return oaiClient, nil + } + } + + if len(errs) == 0 { + return nil, fmt.Errorf("failed to find a model provider for model [%s]", modelName) + } + + return nil, errors.Join(errs...) +} + func (r *Registry) Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { if messageRequest.Model == "" { return nil, fmt.Errorf("model is required") diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 53252895..42a1a39e 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -130,6 +130,13 @@ func NewClient(ctx context.Context, credStore credentials.CredentialStore, opts }, nil } +func (c *Client) ProxyInfo() (token, urlBase string) { + if c.invalidAuth { + return "", "" + } + return c.c.GetAPIKeyAndBaseURL() +} + func (c *Client) ValidAuth() error { if c.invalidAuth { return InvalidAuthError{} diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index a8d88fee..f92b0705 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -872,6 +872,11 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env return nil, fmt.Errorf("failed to parse credential tool %q: %w", ref.Reference, err) } + if callCtx.Program.ToolSet[ref.ToolID].IsNoop() { + // ignore empty tools + continue + } + credName := toolName if credentialAlias != "" { credName = credentialAlias @@ -944,6 +949,10 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env return nil, fmt.Errorf("invalid state: credential 
tool [%s] can not result in a continuation", ref.Reference) } + if *res.Result == "" { + continue + } + if err := json.Unmarshal([]byte(*res.Result), &c); err != nil { return nil, fmt.Errorf("failed to unmarshal credential tool %s response: %w", ref.Reference, err) } @@ -958,15 +967,17 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } } - // Only store the credential if the tool is on GitHub or has an alias, and the credential is non-empty. - if (isGitHubTool(toolName) && callCtx.Program.ToolSet[ref.ToolID].Source.Repo != nil) || credentialAlias != "" { - if isEmpty { - log.Warnf("Not saving empty credential for tool %s", toolName) - } else if err := r.credStore.Add(callCtx.Ctx, *c); err != nil { - return nil, fmt.Errorf("failed to add credential for tool %s: %w", toolName, err) + if !c.Ephemeral { + // Only store the credential if the tool is on GitHub or has an alias, and the credential is non-empty. + if (isGitHubTool(toolName) && callCtx.Program.ToolSet[ref.ToolID].Source.Repo != nil) || credentialAlias != "" { + if isEmpty { + log.Warnf("Not saving empty credential for tool %s", toolName) + } else if err := r.credStore.Add(callCtx.Ctx, *c); err != nil { + return nil, fmt.Errorf("failed to add credential for tool %s: %w", toolName, err) + } + } else { + log.Warnf("Not saving credential for tool %s - credentials will only be saved for tools from GitHub, or tools that use aliases.", toolName) } - } else { - log.Warnf("Not saving credential for tool %s - credentials will only be saved for tools from GitHub, or tools that use aliases.", toolName) } } diff --git a/pkg/tests/tester/runner.go b/pkg/tests/tester/runner.go index a36c5e91..66337ff5 100644 --- a/pkg/tests/tester/runner.go +++ b/pkg/tests/tester/runner.go @@ -31,6 +31,10 @@ type Result struct { Err error } +func (c *Client) ProxyInfo() (string, string, error) { + return "test-auth", "test-url", nil +} + func (c *Client) Call(_ context.Context, messageRequest 
types.CompletionRequest, _ chan<- types.CompletionStatus) (resp *types.CompletionMessage, respErr error) { msgData, err := json.MarshalIndent(messageRequest, "", " ") require.NoError(c.t, err) diff --git a/pkg/types/tool.go b/pkg/types/tool.go index b59a1953..57ce3fbf 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -753,7 +753,16 @@ func (t Tool) GetCredentialTools(prg Program, agentGroup []ToolReference) ([]Too result.AddAll(t.getCompletionToolRefs(prg, nil, ToolTypeCredential)) - toolRefs, err := t.getCompletionToolRefs(prg, agentGroup) + toolRefs, err := result.List() + if err != nil { + return nil, err + } + for _, toolRef := range toolRefs { + referencedTool := prg.ToolSet[toolRef.ToolID] + result.AddAll(referencedTool.GetToolRefsFromNames(referencedTool.ExportCredentials)) + } + + toolRefs, err = t.getCompletionToolRefs(prg, agentGroup) if err != nil { return nil, err } diff --git a/pkg/types/toolstring.go b/pkg/types/toolstring.go index 64f53638..2be6d0fc 100644 --- a/pkg/types/toolstring.go +++ b/pkg/types/toolstring.go @@ -74,7 +74,7 @@ func ToSysDisplayString(id string, args map[string]string) (string, error) { return fmt.Sprintf("Removing `%s`", args["location"]), nil case "sys.write": return fmt.Sprintf("Writing `%s`", args["filename"]), nil - case "sys.context", "sys.stat", "sys.getenv", "sys.abort", "sys.chat.current", "sys.chat.finish", "sys.chat.history", "sys.echo", "sys.prompt", "sys.time.now": + case "sys.context", "sys.stat", "sys.getenv", "sys.abort", "sys.chat.current", "sys.chat.finish", "sys.chat.history", "sys.echo", "sys.prompt", "sys.time.now", "sys.model.provider.credential": return "", nil default: return "", fmt.Errorf("unknown tool for display string: %s", id) From a4f3253870cd9d55ca1a3c5f42eca67098d5b56d Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 13 Aug 2024 09:40:50 -0700 Subject: [PATCH 100/270] bug: allow asterick on go binary checksum files --- pkg/repos/runtimes/golang/golang.go | 5 ++--- 1 file changed, 
2 insertions(+), 3 deletions(-) diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index f82b628a..3514fadb 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -223,10 +223,9 @@ func getChecksum(ctx context.Context, rel *release) string { scan := bufio.NewScanner(resp.Body) for scan.Scan() { fields := strings.Fields(scan.Text()) - if len(fields) != 2 || fields[1] != rel.srcBinName() { - continue + if len(fields) == 2 && (fields[1] == rel.srcBinName() || fields[1] == "*"+rel.srcBinName()) { + return fields[0] } - return fields[0] } return "" From 5c608125e1af9b9a0bba0d34079eebecc576fbac Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 13 Aug 2024 14:43:19 -0400 Subject: [PATCH 101/270] feat: download binaries for cred helpers Signed-off-by: Donnie Adams --- pkg/cli/credential.go | 2 +- pkg/cli/credential_delete.go | 2 +- pkg/cli/credential_show.go | 2 +- pkg/credentials/util.go | 4 +- pkg/engine/engine.go | 2 +- pkg/gptscript/gptscript.go | 2 +- pkg/repos/get.go | 74 ++++++++++++++--------------- pkg/repos/runtimes/golang/golang.go | 55 ++++++++++++--------- pkg/runner/runtimemanager.go | 2 +- 9 files changed, 77 insertions(+), 68 deletions(-) diff --git a/pkg/cli/credential.go b/pkg/cli/credential.go index b0c4a30a..cb000125 100644 --- a/pkg/cli/credential.go +++ b/pkg/cli/credential.go @@ -58,7 +58,7 @@ func (c *Credential) Run(cmd *cobra.Command, _ []string) error { opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) } - if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg, opts.Env); err != nil { + if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg); err != nil { return err } diff --git a/pkg/cli/credential_delete.go b/pkg/cli/credential_delete.go index 9c986c54..4e9919df 100644 --- a/pkg/cli/credential_delete.go +++ b/pkg/cli/credential_delete.go @@ -40,7 +40,7 @@ func (c *Delete) Run(cmd *cobra.Command, args 
[]string) error { opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) } - if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg, opts.Env); err != nil { + if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg); err != nil { return err } diff --git a/pkg/cli/credential_show.go b/pkg/cli/credential_show.go index ccfe3675..fac1b719 100644 --- a/pkg/cli/credential_show.go +++ b/pkg/cli/credential_show.go @@ -42,7 +42,7 @@ func (c *Show) Run(cmd *cobra.Command, args []string) error { opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) } - if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg, opts.Env); err != nil { + if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg); err != nil { return err } diff --git a/pkg/credentials/util.go b/pkg/credentials/util.go index 367b4d1d..70f31e97 100644 --- a/pkg/credentials/util.go +++ b/pkg/credentials/util.go @@ -5,7 +5,7 @@ import ( ) type CredentialHelperDirs struct { - RevisionFile, LastCheckedFile, BinDir, RepoDir, HelperDir string + RevisionFile, LastCheckedFile, BinDir string } func GetCredentialHelperDirs(cacheDir string) CredentialHelperDirs { @@ -13,7 +13,5 @@ func GetCredentialHelperDirs(cacheDir string) CredentialHelperDirs { RevisionFile: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "revision"), LastCheckedFile: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "last-checked"), BinDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "bin"), - RepoDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "repo"), - HelperDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers"), } } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index d3daa674..cb8fe273 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -21,7 +21,7 @@ type Model interface { type RuntimeManager interface { GetContext(ctx 
context.Context, tool types.Tool, cmd, env []string) (string, []string, error) EnsureCredentialHelpers(ctx context.Context) error - SetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig, env []string) error + SetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig) error } type Engine struct { diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 43f429fc..755fe632 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -99,7 +99,7 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { opts.Runner.RuntimeManager = runtimes.Default(cacheClient.CacheDir()) } - if err := opts.Runner.RuntimeManager.SetUpCredentialHelpers(context.Background(), cliCfg, opts.Env); err != nil { + if err := opts.Runner.RuntimeManager.SetUpCredentialHelpers(context.Background(), cliCfg); err != nil { return nil, err } diff --git a/pkg/repos/get.go b/pkg/repos/get.go index 416f4c61..8981d1fa 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -8,6 +8,7 @@ import ( "io/fs" "os" "path/filepath" + "runtime" "strings" "sync" "time" @@ -15,15 +16,13 @@ import ( "github.com/BurntSushi/locker" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/credentials" + runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/hash" - "github.com/gptscript-ai/gptscript/pkg/loader/github" "github.com/gptscript-ai/gptscript/pkg/repos/git" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes/golang" "github.com/gptscript-ai/gptscript/pkg/types" ) -const credentialHelpersRepo = "github.com/gptscript-ai/gptscript-credential-helpers" - type Runtime interface { ID() string Supports(tool types.Tool, cmd []string) bool @@ -68,7 +67,6 @@ type credHelperConfig struct { lock sync.Mutex initialized bool cliCfg *config.CLIConfig - env []string } func New(cacheDir string, runtimes ...Runtime) *Manager { @@ -90,7 +88,7 @@ func (m *Manager) EnsureCredentialHelpers(ctx 
context.Context) error { defer m.credHelperConfig.lock.Unlock() if !m.credHelperConfig.initialized { - if err := m.deferredSetUpCredentialHelpers(ctx, m.credHelperConfig.cliCfg, m.credHelperConfig.env); err != nil { + if err := m.deferredSetUpCredentialHelpers(ctx, m.credHelperConfig.cliCfg); err != nil { return err } m.credHelperConfig.initialized = true @@ -99,27 +97,28 @@ func (m *Manager) EnsureCredentialHelpers(ctx context.Context) error { return nil } -func (m *Manager) SetUpCredentialHelpers(_ context.Context, cliCfg *config.CLIConfig, env []string) error { +func (m *Manager) SetUpCredentialHelpers(_ context.Context, cliCfg *config.CLIConfig) error { m.credHelperConfig = &credHelperConfig{ cliCfg: cliCfg, - env: env, } return nil } -func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig, env []string) error { +func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig) error { var ( - helperName = cliCfg.CredentialsStore - suffix string + helperName = cliCfg.CredentialsStore + distInfo, suffix string ) - if helperName == "wincred" { - suffix = ".exe" - } - - // The file helper is built-in and does not need to be compiled. + // The file helper is built-in and does not need to be downloaded. if helperName == "file" { return nil } + switch helperName { + case "wincred": + suffix = ".exe" + default: + distInfo = fmt.Sprintf("-%s-%s", runtime.GOOS, runtime.GOARCH) + } locker.Lock("gptscript-credential-helpers") defer locker.Unlock("gptscript-credential-helpers") @@ -137,13 +136,7 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co } } - // Load the credential helpers repo information. 
- _, _, repo, _, err := github.Load(ctx, nil, credentialHelpersRepo) - if err != nil { - return err - } - - if err := os.MkdirAll(m.credHelperDirs.HelperDir, 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(m.credHelperDirs.LastCheckedFile), 0755); err != nil { return err } @@ -152,37 +145,44 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co return err } - var needsBuild bool + tool := types.Tool{ + Source: types.ToolSource{ + Repo: &types.Repo{ + Root: runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_HELPERS_ROOT", "https://github.com/gptscript-ai/gptscript-credential-helpers.git"), + }, + }, + } + tag, err := golang.GetLatestTag(tool) + if err != nil { + return err + } + var needsDownloaded bool // Check the last revision shasum and see if it is different from the current one. lastRevision, err := os.ReadFile(m.credHelperDirs.RevisionFile) - if (err == nil && strings.TrimSpace(string(lastRevision)) != repo.Revision) || errors.Is(err, fs.ErrNotExist) { + if (err == nil && strings.TrimSpace(string(lastRevision)) != tool.Source.Repo.Root+tag) || errors.Is(err, fs.ErrNotExist) { // Need to pull the latest version. - needsBuild = true - if err := git.Checkout(ctx, m.gitDir, repo.Root, repo.Revision, filepath.Join(m.credHelperDirs.RepoDir, repo.Revision)); err != nil { - return err - } + needsDownloaded = true // Update the revision file to the new revision. - if err := os.WriteFile(m.credHelperDirs.RevisionFile, []byte(repo.Revision), 0644); err != nil { + if err = os.WriteFile(m.credHelperDirs.RevisionFile, []byte(tool.Source.Repo.Root+tag), 0644); err != nil { return err } } else if err != nil { return err } - if !needsBuild { - // Check for the existence of the gptscript-credential-osxkeychain binary. - // If it's there, we have no need to build it and can just return. 
- if _, err := os.Stat(filepath.Join(m.credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix)); err == nil { + if !needsDownloaded { + // Check for the existence of the credential helper binary. + // If it's there, we have no need to download it and can just return. + if _, err = os.Stat(filepath.Join(m.credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix)); err == nil { return nil } } // Find the Go runtime and use it to build the credential helper. - for _, runtime := range m.runtimes { - if strings.HasPrefix(runtime.ID(), "go") { - goRuntime := runtime.(*golang.Runtime) - return goRuntime.BuildCredentialHelper(ctx, helperName, m.credHelperDirs, m.runtimeDir, repo.Revision, env) + for _, rt := range m.runtimes { + if strings.HasPrefix(rt.ID(), "go") { + return rt.(*golang.Runtime).DownloadCredentialHelper(ctx, tool, helperName, distInfo, suffix, m.credHelperDirs.BinDir) } } diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index 3514fadb..52e8fe0b 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -18,7 +18,6 @@ import ( "runtime" "strings" - "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/debugcmd" runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/hash" @@ -97,6 +96,14 @@ type tag struct { } `json:"commit"` } +func GetLatestTag(tool types.Tool) (string, error) { + r, ok := getLatestRelease(tool) + if !ok { + return "", fmt.Errorf("failed to get latest release for %s", tool.Name) + } + return r.label, nil +} + func getLatestRelease(tool types.Tool) (*release, bool) { if tool.Source.Repo == nil || !strings.HasPrefix(tool.Source.Repo.Root, "https://github.com/") { return nil, false @@ -116,11 +123,14 @@ func getLatestRelease(tool types.Tool) (*release, bool) { account, repo := parts[1], parts[2] resp, err := client.Get(fmt.Sprintf("https://api.github.com/repos/%s/%s/tags", 
account, repo)) - if err != nil || resp.StatusCode != http.StatusOK { + if err != nil { // ignore error return nil, false } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, false + } var tags []tag if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { @@ -137,11 +147,14 @@ func getLatestRelease(tool types.Tool) (*release, bool) { } resp, err = client.Get(fmt.Sprintf("https://github.com/%s/%s/releases/latest", account, repo)) - if err != nil || resp.StatusCode != http.StatusFound { + if err != nil { // ignore error return nil, false } defer resp.Body.Close() + if resp.StatusCode != http.StatusFound { + return nil, false + } target := resp.Header.Get("Location") if target == "" { @@ -212,7 +225,7 @@ func downloadBin(ctx context.Context, checksum, src, url, bin string) error { return nil } -func getChecksum(ctx context.Context, rel *release) string { +func getChecksum(ctx context.Context, rel *release, artifactName string) string { resp, err := get(ctx, rel.checksumTxt()) if err != nil { // ignore error @@ -223,7 +236,7 @@ func getChecksum(ctx context.Context, rel *release) string { scan := bufio.NewScanner(resp.Body) for scan.Scan() { fields := strings.Fields(scan.Text()) - if len(fields) == 2 && (fields[1] == rel.srcBinName() || fields[1] == "*"+rel.srcBinName()) { + if len(fields) == 2 && (fields[1] == artifactName || fields[1] == "*"+artifactName) { return fields[0] } } @@ -241,7 +254,7 @@ func (r *Runtime) Binary(ctx context.Context, tool types.Tool, _, toolSource str return false, nil, nil } - checksum := getChecksum(ctx, rel) + checksum := getChecksum(ctx, rel, rel.srcBinName()) if checksum == "" { return false, nil, nil } @@ -268,30 +281,28 @@ func (r *Runtime) Setup(ctx context.Context, _ types.Tool, dataRoot, toolSource return newEnv, nil } -func (r *Runtime) BuildCredentialHelper(ctx context.Context, helperName string, credHelperDirs credentials.CredentialHelperDirs, dataRoot, revision string, env []string) error { 
+func (r *Runtime) DownloadCredentialHelper(ctx context.Context, tool types.Tool, helperName, distInfo, suffix string, binDir string) error { if helperName == "file" { return nil } - var suffix string - if helperName == "wincred" { - suffix = ".exe" + rel, ok := getLatestRelease(tool) + if !ok { + return fmt.Errorf("failed to find %s release", r.ID()) + } + binaryName := "gptscript-credential-" + helperName + checksum := getChecksum(ctx, rel, binaryName+distInfo+suffix) + if checksum == "" { + return fmt.Errorf("failed to find %s release checksum for os=%s arch=%s", r.ID(), runtime.GOOS, runtime.GOARCH) } - binPath, err := r.getRuntime(ctx, dataRoot) - if err != nil { - return err + url, _ := strings.CutSuffix(rel.binURL(), rel.srcBinName()) + url += binaryName + distInfo + suffix + if err := downloadBin(ctx, checksum, strings.TrimSuffix(binDir, "bin"), url, binaryName+suffix); err != nil { + return fmt.Errorf("failed to download %s release for os=%s arch=%s: %w", r.ID(), runtime.GOOS, runtime.GOARCH, err) } - newEnv := runtimeEnv.AppendPath(env, binPath) - log.InfofCtx(ctx, "Building credential helper %s", helperName) - cmd := debugcmd.New(ctx, filepath.Join(binPath, "go"), - "build", "-buildvcs=false", "-o", - filepath.Join(credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix), - fmt.Sprintf("./%s/cmd/", helperName)) - cmd.Env = stripGo(append(env, newEnv...)) - cmd.Dir = filepath.Join(credHelperDirs.RepoDir, revision) - return cmd.Run() + return nil } func (r *Runtime) getReleaseAndDigest() (string, string, error) { diff --git a/pkg/runner/runtimemanager.go b/pkg/runner/runtimemanager.go index e1c5a4c6..ed191d15 100644 --- a/pkg/runner/runtimemanager.go +++ b/pkg/runner/runtimemanager.go @@ -45,6 +45,6 @@ func (r runtimeManagerLogger) EnsureCredentialHelpers(ctx context.Context) error return r.rm.EnsureCredentialHelpers(mvl.WithInfo(ctx, r)) } -func (r runtimeManagerLogger) SetUpCredentialHelpers(_ context.Context, _ *config.CLIConfig, _ []string) 
error { +func (r runtimeManagerLogger) SetUpCredentialHelpers(_ context.Context, _ *config.CLIConfig) error { panic("not implemented") } From 983cd8c9e58342e520c91a7a2c4a1c556ec85fec Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 13 Aug 2024 15:07:17 -0700 Subject: [PATCH 102/270] chore: add load method to sdk server --- pkg/repos/runtimes/default.go | 2 +- pkg/repos/runtimes/node/SHASUMS256.txt.asc | 165 +++++++-------------- pkg/repos/runtimes/node/node_test.go | 14 -- pkg/sdkserver/routes.go | 38 +++++ pkg/sdkserver/types.go | 9 ++ 5 files changed, 102 insertions(+), 126 deletions(-) diff --git a/pkg/repos/runtimes/default.go b/pkg/repos/runtimes/default.go index 3782e26e..d4eb4db5 100644 --- a/pkg/repos/runtimes/default.go +++ b/pkg/repos/runtimes/default.go @@ -22,7 +22,7 @@ var Runtimes = []repos.Runtime{ Version: "3.10", }, &node.Runtime{ - Version: "21", + Version: "20", Default: true, }, &golang.Runtime{ diff --git a/pkg/repos/runtimes/node/SHASUMS256.txt.asc b/pkg/repos/runtimes/node/SHASUMS256.txt.asc index 093da96b..faaeab97 100644 --- a/pkg/repos/runtimes/node/SHASUMS256.txt.asc +++ b/pkg/repos/runtimes/node/SHASUMS256.txt.asc @@ -1,117 +1,60 @@ -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA256 -43a881788549e1b3425eb5f2b92608f438f146e08213de09c5bd5ff841cae7ae node-v20.11.1-aix-ppc64.tar.gz -3f8e77b775372c0b27d2b85ce899d80339691f480e64dde43d4eb01504a58679 node-v20.11.1-arm64.msi -e0065c61f340e85106a99c4b54746c5cee09d59b08c5712f67f99e92aa44995d node-v20.11.1-darwin-arm64.tar.gz -fd771bf3881733bfc0622128918ae6baf2ed1178146538a53c30ac2f7006af5b node-v20.11.1-darwin-arm64.tar.xz -c52e7fb0709dbe63a4cbe08ac8af3479188692937a7bd8e776e0eedfa33bb848 node-v20.11.1-darwin-x64.tar.gz -ed69f1f300beb75fb4cad45d96aacd141c3ddca03b6d77c76b42cb258202363d node-v20.11.1-darwin-x64.tar.xz -0aa42c91b441e945ff43bd3a837759c58b436de57dcd033d02e5cbcd2fba1f87 node-v20.11.1-headers.tar.gz -edce238817acf5adce3123366b55304aff2a1f0849231d1b49f42370e454b6f8 
node-v20.11.1-headers.tar.xz -e34ab2fc2726b4abd896bcbff0250e9b2da737cbd9d24267518a802ed0606f3b node-v20.11.1-linux-arm64.tar.gz -c957f29eb4e341903520caf362534f0acd1db7be79c502ae8e283994eed07fe1 node-v20.11.1-linux-arm64.tar.xz -e42791f76ece283c7a4b97fbf716da72c5128c54a9779f10f03ae74a4bcfb8f6 node-v20.11.1-linux-armv7l.tar.gz -28e0120d2d150a8f41717899d33167b8b32053778665583d49ff971bfd188d1b node-v20.11.1-linux-armv7l.tar.xz -9823305ac3a66925a9b61d8032f6bbb4c3e33c28e7f957ebb27e49732feffb23 node-v20.11.1-linux-ppc64le.tar.gz -51343cacf5cdf5c4b5e93e919d19dd373d6ef43d5f2c666eae299f26e31d08b5 node-v20.11.1-linux-ppc64le.tar.xz -4c66b2f247fdd8720853321526d7cda483018fcb32014b75c30f3a54ecacaea7 node-v20.11.1-linux-s390x.tar.gz -b32616b705cd0ddbb230b95c693e3d7a37becc2ced9bcadea8dc824cceed6be0 node-v20.11.1-linux-s390x.tar.xz -bf3a779bef19452da90fb88358ec2c57e0d2f882839b20dc6afc297b6aafc0d7 node-v20.11.1-linux-x64.tar.gz -d8dab549b09672b03356aa2257699f3de3b58c96e74eb26a8b495fbdc9cf6fbe node-v20.11.1-linux-x64.tar.xz -f1cd449fcbeb1b948e8498cb8edd9655fa319d109a7f4c5bd96a9b122b91538a node-v20.11.1-win-arm64.7z -e85461ec124956a2853c4ee6e13c4f4889d63c88beb3d530c1ee0c4b51dc10e7 node-v20.11.1-win-arm64.zip -fb9b5348259988a562a48eed7349e7e716c0bec78d98ad0a336b2993a8b3bf34 node-v20.11.1-win-x64.7z -bc032628d77d206ffa7f133518a6225a9c5d6d9210ead30d67e294ff37044bda node-v20.11.1-win-x64.zip -c2b1863d8979546804a39fc63d0a9bc9c6e49cb2f6c9d1e52844a24629b24765 node-v20.11.1-win-x86.7z -b98e95f78416d1359b647cfa09ba2a48b76d41b56a776df822bf36ffe8e76a2d node-v20.11.1-win-x86.zip -c54f5f7e2416e826fd84e878f28e3b53363ae9c3f60a140af4434b2453b5ae89 node-v20.11.1-x64.msi -63e2aed4dabb96eed6903a3974e006d3c29c218472aac60ae3c3c7de00df13b1 node-v20.11.1-x86.msi -c46019a095a1549d000e85da13f17972a448e0be5854a51786ecccde7278a012 node-v20.11.1.pkg -4af1ba6ea848cc05908b8a62b02fb27684dd52b2a7988ee82b0cfa72deb90b94 node-v20.11.1.tar.gz -77813edbf3f7f16d2d35d3353443dee4e61d5ee84d9e3138c7538a3c0ca5209e 
node-v20.11.1.tar.xz -a5a9d30a8f7d56e00ccb27c1a7d24c8d0bc96a2689ebba8eb7527698793496f1 win-arm64/node.exe -93529170cebe57c0f4830a4cc6a261b6cc9bcf0cd8b3e88ac4995a5015031d79 win-arm64/node.lib -c14c6e927406b8683cbfb8a67ca4c8fd5093ca7812b5b1627e3d6a53d3674565 win-arm64/node_pdb.7z -68034cd09d8dfaa755d1b280da13e20388cc486ac57b037b3e11dfe2d6b74284 win-arm64/node_pdb.zip -bc585910690318aaebe3c57669cb83ca9d1e5791efd63195e238f54686e6c2ec win-x64/node.exe -53a982d490cb9fcc4b231a8b95147de423b36186bc6f4ba5697b20117fdcbd5d win-x64/node.lib -ccac9f2f5219ed858aeddb306d6493478ba9675c7cbf009e83742437d6752c4f win-x64/node_pdb.7z -bec5da4035c84580843978a59ef9bcc1c0eaca881cf9e1c94e63a1862cf14421 win-x64/node_pdb.zip -3829137e062b1e2eb9947ef05e4b717ae578a8fce1c5c60fe4f6ae7ef2ec0240 win-x86/node.exe -c5321bb65dcecb3989f9b8f6ec56369c16627ca4bade0c78afb6b88f7dde50e4 win-x86/node.lib -20ca60ced1fc21f15ea952b4406aec6bde39d20eab11cf042040628841b2249e win-x86/node_pdb.7z -bef05cebedce5949ae35e87e7d4789c16fa73caf478483fcf92e5dbb9ba5d774 win-x86/node_pdb.zip +5eb1b7ea405c86be0a21ec3850997c89df238d6e4659a0b990aa793a8cbfd9cf node-v20.16.0-aix-ppc64.tar.gz +f366fe5903dcb3b6cd495c8add77c87a32772085718a672d52ad17d9d91d2018 node-v20.16.0-arm64.msi +fc7355e778b181575153b7dea4879e8021776eeb376c43c50f65893d2ea70aa3 node-v20.16.0-darwin-arm64.tar.gz +5043e98cdf859963b1a0aff54c1f1813a2a8059e4179403171860d664ca090f2 node-v20.16.0-darwin-arm64.tar.xz +e18942cd706e4d69a4845ddacee2f1c17a72e853a229e3d2623d2edeb7efde72 node-v20.16.0-darwin-x64.tar.gz +9df751ac5edbb2181335200060dff14de25f828eaed70d8b48459d2c203aeedc node-v20.16.0-darwin-x64.tar.xz +6cc5690a67b9b1e1fa8cedaeca41f1bdb5e1af1f7948761c798d33d99f789a5c node-v20.16.0-headers.tar.gz +a1464c304980d3ab41922cda7025ebc2ec0dc2a0b89d9b9183c589560810feaa node-v20.16.0-headers.tar.xz +551588f8f5ca05c04efb53f1b2bb7d9834603327bdc82d60a944d385569866e1 node-v20.16.0-linux-arm64.tar.gz +1d9929e72f692179f884cd676b2dfabd879cb77defa7869dc8cfc802619277fb 
node-v20.16.0-linux-arm64.tar.xz +1c77c52ab507ddee479012f0b4bf523dd8400df4504447d623632353076e2e27 node-v20.16.0-linux-armv7l.tar.gz +a23a49029e8c7788c701eb3ace553260b7676a5a2ea9965ba92e4817008fbefe node-v20.16.0-linux-armv7l.tar.xz +80b515595e46afb9bae77f61083a4ca7c21bbdb627f69ff53fd5dca3a26773fb node-v20.16.0-linux-ppc64le.tar.gz +86cf6e8c93a9e517bfcfdfb4ad2774105312679ad21e03da75ab516ebc10e2dc node-v20.16.0-linux-ppc64le.tar.xz +ae7a9f6e631a0bede76a501d8b1d806f56b97acfa5a1d6833bab5ce90a404e5e node-v20.16.0-linux-s390x.tar.gz +6c38ac5c516a6a36ee6e0426975e6466795db30b9ced04e59f0f33fe6b3d657e node-v20.16.0-linux-s390x.tar.xz +b3f874ea84e440d69ed02ca92429d0eccd17737fde86db69c1c153d16ec654f2 node-v20.16.0-linux-x64.tar.gz +c30af7dfea46de7d8b9b370fa33b8b15440bc93f0a686af8601bbb48b82f16c0 node-v20.16.0-linux-x64.tar.xz +55852a420ca41db9f128f97e0dd8751199c23d63f5a7978432fd7c9e0c74c323 node-v20.16.0.pkg +8f24bf9abe455a09ab30f9ae8edda1e945ed678a4b1c3b07ee0f901fdc0ff4fd node-v20.16.0.tar.gz +cd6c8fc3ff2606aadbc7155db6f7e77247d2d0065ac18e2f7f049095584b8b46 node-v20.16.0.tar.xz +52e5666a379acd8533d9ccab66c2321a6ffc83766248419bfbd41ba8bc071244 node-v20.16.0-win-arm64.7z +af5a85ea299fcebd34c3c726a47a926e73171f9b657a6eaa796c011597241bf8 node-v20.16.0-win-arm64.zip +1b3961054a484476872715d9ca04bc491d797fde6336db514b6e6fcbb71fae9d node-v20.16.0-win-x64.7z +4e88373ac5ae859ad4d50cc3c5fa86eb3178d089b72e64c4dbe6eeac5d7b5979 node-v20.16.0-win-x64.zip +76f1806fde0b09ed4044f29ea140fb2bea9bce745b9892ec4aeb6537344db6f1 node-v20.16.0-win-x86.7z +1adc1f086595ecbc98da40eccb42fa1691b6c6c0658ff875dda19e4e02b1d5f0 node-v20.16.0-win-x86.zip +813306c94e6f5f061a5789f037d48f57d52240284a679e5ace4a0f73f8f2feeb node-v20.16.0-x64.msi +2bb8c3084384c95c47c4191c38098d5ecf55c0f02c1de5e0968730dec957ea15 node-v20.16.0-x86.msi +7e773fba3a19eac5ccbe85c1f87a05d7b112ecf41440076e6b6de1c7bffa0fdf win-arm64/node.exe +a4f01329c1c211082ac3ed387ff6651530040bbf7250ec419ce8f95b10d7804a win-arm64/node.lib 
+e1bec70ae9529cc637a21de850c070125f8016070451094d72f96408001200a2 win-arm64/node_pdb.7z +bc5b60eecd3b6c92b35755adef2e4aad01e021a3c434d46c2555a49056c5bcf7 win-arm64/node_pdb.zip +ba221658a3b68bd583e3068903eb675b5206d86a883c084ed95502e8f634b82a win-x64/node.exe +87056190b7cd06f40058f8e059efd328cdcc7600b825afa102c0aa5039865af5 win-x64/node.lib +bf2ad1e1f4e7c3853d5209fe9ef24ad7117edafc71f6401ec0121d8b681b8c3c win-x64/node_pdb.7z +5386f3c3af1af1b325b43b574043c5a7e830b3e9e7df0370ae0797ce4f39b375 win-x64/node_pdb.zip +b7b8d6b5fdd1c073b6f5f6d15bc849f4b5f92c4a66f23e77294f4bdf5f51e9f6 win-x86/node.exe +fa02ae7feca7eb6c4a0f1b929126df400719f5d18a2ec4b7d12c52fbe0b13814 win-x86/node.lib +328b2dcc91255c1c75faa8ce7eb687a8960ae09555d3bca0ae8e0dac4238c873 win-x86/node_pdb.7z +71b1e6b75c61227342ba6f1edb9014445dbee857d6cb14dce3d9b8d94c694d55 win-x86/node_pdb.zip -----BEGIN PGP SIGNATURE----- -iQGzBAEBCAAdFiEEiQwI24V5Fi/uDfnbi+q0389VXvQFAmXM+TcACgkQi+q0389V -XvQl3AwAqqm2uBMDzd+BlR1sG7y/eUtUYPVdwmCh0DeFXPHxuaIbFf0PGMEgcV8u -kn3OBF4pnSCPZNbJYJsLO1S+b/5Vk+Vlkq1WkOxqQHUHmM9GcJUuShadl0YaDNen -WXXMoYKWqMRJ6fQ3tRRh+vbMSXtsLqXT8TMVJq+Qb7a7yj4QRjw/Dd+8uKGGIhBY -U04HWsz33RJLu6AUnhF03eO1N8E1V48JptklDx5ZkY8GYa3F6jQsFld+jhmkZ9tg -4q9NDNijVpj56UsUhLAYD0J9IKS18tvQxNrKmBGUSZjFOByVhbUdLXnSMtW1i1U9 -cYhP6Q5wg/fnjqCfQ90TauoJZOblKIL/PHlf6cQGPrrRa1bz3xGyCAIve5KFhLxf -Vfj1ctk2ktzmuNhjAu5G/1VALQUNpiTm4Yz433JpoMMZ3mTHN+fuALOX4TQbdLRz -HKphTz02436348XC9bNz2cvjm74cy9fqwjQ/y84AmxiTJMFPg0XqICg4tu9rd49d -8FJc4TLZ -=r/CD ------END PGP SIGNATURE----- - ------BEGIN PGP SIGNED MESSAGE----- -Hash: SHA256 - -c31d5b60c4a284c6855bd468d4aae4436a16b351362b2971d3c0db2a471d3f24 node-v21.7.0-aix-ppc64.tar.gz -7d7dc37aa30b6dbb52d698d01cfed1d99056c82396eadd41a49fc55a873c423d node-v21.7.0-arm64.msi -f48ad51cf3c2814bbf61c8c26efd810e5e22dcac980786fd7ac5b54365233d2c node-v21.7.0-darwin-arm64.tar.gz -0805239d8a7402dae49e0033b7ad8208fed498dbeee9a3194863e52f6f3c6d7f node-v21.7.0-darwin-arm64.tar.xz 
-3f81adca80b523b413e26f03f855f4a2ae52d9af20f0cda2e259dd26a0608607 node-v21.7.0-darwin-x64.tar.gz -6a755416292854f2be38e74704ccf09edeba247718e9f047f5e1939b0dba17bd node-v21.7.0-darwin-x64.tar.xz -628d9e4f866e3828b77ce812dc99f33d3e7c673d0c499f13eadff6fa6ccb4383 node-v21.7.0-headers.tar.gz -627d6552d2120660a51f74fff0d40573f4a35d8545462250d30592ce0ba4eec7 node-v21.7.0-headers.tar.xz -520a3e5c83a05a782b1f4959f150c2fdc03e2ea056e855ef6bbb74f6ccf7aa7d node-v21.7.0-linux-arm64.tar.gz -73ce1e4e956532e0916fc7014f5b649573bd2b5870fef5cfc26cc42f58358ae7 node-v21.7.0-linux-arm64.tar.xz -723abb32135ad4baa6e9671447a72f5c9a5bfc681fc540b0e4864e965171b6ed node-v21.7.0-linux-armv7l.tar.gz -8a367a3bf667f0bb3abb9e8121326911d47a31886419ad052d5a52d8c6531d9d node-v21.7.0-linux-armv7l.tar.xz -c2290cb35b11ee2b0f0ae34ad3c8372652688ff2dc3d9a89ada46c2b84ea5dda node-v21.7.0-linux-ppc64le.tar.gz -b85348211a4d195de2f850a17cdec77aedc8fc1c402864b2bc3501608e6c9c47 node-v21.7.0-linux-ppc64le.tar.xz -90b8678ed113950613edeae5eaf298cf795c72005fac6ffd9b7fbb90ddd86738 node-v21.7.0-linux-s390x.tar.gz -99a09f4c790f3210a6d26032bf69713ba199cf2e73af43e04b1b1d9bd1c8db76 node-v21.7.0-linux-s390x.tar.xz -0fce039e2b6af00766492127a49f959ae92ed22fede4c49e9a8c2543aadbd6e2 node-v21.7.0-linux-x64.tar.gz -68510c3851133a21c6a6f9940e58c5bc8fed39f1d91a08e34c5070dd0615fef1 node-v21.7.0-linux-x64.tar.xz -d680d5c3d0b2476a97d11b30cbbdaf1d7f92ffd1cc89e5c640782a6b52480666 node-v21.7.0-win-arm64.7z -11b11b9a3f2db7b5076cf16655e05cd63dc3d8843cd4836ecb12e11315f03441 node-v21.7.0-win-arm64.zip -31c8b4721f37e30ca8e2131a4cb848fc7347f67bf87618e82959b58481f17bc4 node-v21.7.0-win-x64.7z -204de88f4073b08ae3dbe4c412b071eee565fc681e163be205d5cc88065f0322 node-v21.7.0-win-x64.zip -b17ef0c5557e61610774cae5beb0f877699ab419c4672e9c6e3bb3da3d571ed1 node-v21.7.0-win-x86.7z -6aba3fe2258d5c0c40a89e81dfe90113a67489f2a67fd05b7f216b63b4c7bb02 node-v21.7.0-win-x86.zip -512945cf8816e1e906143ea2ee6816f8744a3d114ea38f3540c3ebe685fe3e3a node-v21.7.0-x64.msi 
-4bedb6069c94a71fd6f0b8fbea280468d5ecdcf209eef6da1a45808e8b15cba6 node-v21.7.0-x86.msi -ccac99782e587c6090b6ad82979210fa0c352322636a6cf290d37eb41152d0b5 node-v21.7.0.pkg -26d6b600e1076f132d4175a90ddc1a709263e75d81967300aa1ffbd86103b991 node-v21.7.0.tar.gz -e41eefe1e59624ee7f312c38f8f7dfc11595641acb2293d21176f03d2763e9d4 node-v21.7.0.tar.xz -25511d1e05d7d0b049945c5ef1cf2a4daa5d6ad16692ccd2c1399142a1c57a65 win-arm64/node.exe -7920932f7be355dbf4568492ab7d104fc059f689ba1a46ac0d6568884c8d201a win-arm64/node.lib -40c423a2b126fc5b6858f8617f0f8537fd8f8d2fa73a5c918607f3ccd386f8c9 win-arm64/node_pdb.7z -dec9eaa91a431ea0f3c243605f0556dbe6459df5c04c10df7935d678a6b3fca4 win-arm64/node_pdb.zip -c486fe72a3663379105538e886ef9d2deacad1deaa64b338e570cb086be592d3 win-x64/node.exe -96d09c2055c2f252122c86b65d2aabd5f90b1a075844f24bf8bcdbab05baf53e win-x64/node.lib -08990dd6bcce80710d59ef76cd74ab98b5bed36b0d2584ca3acbc029f92db4fc win-x64/node_pdb.7z -1a27a25c92f6339b3aa77588063cca537af389aee26bfdf1d0ef505d790e63a3 win-x64/node_pdb.zip -4aaa5b3a95ee4ab932a80b9708c31662a9c4a99d19fea7cb1f7b0ff79d8399ed win-x86/node.exe -6e2502e84c3a0e2da643f6399b59381ade5b525f544a5bcabae923188b8f9998 win-x86/node.lib -d0cd5494364039f558c76d4fc7a1db69739149873e10a5200fb9e2a0ab12fe10 win-x86/node_pdb.7z -354031f3f9576733ebeeccbcafcc691c8326427153a48978ff5cd6f2c8ef5d36 win-x86/node_pdb.zip ------BEGIN PGP SIGNATURE----- - -iQGzBAEBCAAdFiEEiQwI24V5Fi/uDfnbi+q0389VXvQFAmXouAIACgkQi+q0389V -XvRp+wv+IPHjBUmVC6YzAxFhRD4GHVUgjckfSbP2jH/acre1mYgm9LJ//7l2GaJy -oEOO85WaHgaKCHCdv9GBc3dDbbt1n9J2IGmBqcdE8e9cRko5qhBoVUvW7p7Ki7ci -nAq5DS3YkpWAocsY/k+LyR0Ky8mW466ARAucTp9kuZmxB2FW53B0bYK57++1qGuo -tr9kJPoGYQB0cUiTSwTaMbOIdl/4CL+a9J7mIrpaDVW5g3PnNy5y1vgDvtuU7Qcn -uEucciBlOn0Ib4mBnky+NX1ThL9WNwLjaivxdioFgc0E4sMwf0CjF3vMUuEvI8qi -PJ5lYndsHI4fdh1SbcgoFNZzTkMZbTr9xcZIGLzLkMX8r+ztLTiFtiLIUSQq0jgm -fqKQghuDN2SVi7WW4KAa7K1285zmV7L27N9mnNWH4ujTqCW73Wdo2XkG/TwM3yEC -5o+YookAV6RHT1X6RPJ8rQaC0BrBgpm/MQH1kvH4vUyF2HRVZ2ZgEYorvKtOwf9D 
-f7v3IC9J -=/YNz +iQIzBAEBCAAdFiEEzGj1oxBv9EgyLkjtJ/XjjVsKIV8FAmag7tcACgkQJ/XjjVsK +IV/hlRAAjvdBRTWPfjXzTqxQODXLZps1HREXRZAa8C8bbAoagCJ1jfm6d8yVUegH +Bl5FqDAutGfTlEhXtQqBmnbQPv4Ahj156cVYtp3dFrxPF15bP+o9q+Un5+R1zcfX +kH9W26G7IvfrtFJkDClpBlPKYE5HcDrrJBNfvALf4th17bkVHMpr04oJz5IwGV2M +petLMwqFxcqQ+15tzRW42Z6EhWHvNaMveab6SM4JEqBxvqB8K+m4nsw2ER7ycU5b +Isa0bUsxtRICtSX0yzzdzEYrFXZmb9eXZRVfJ4sBpUhw0xtBmHn3c1MZH0qez+Nm +tbc6pcgGv9cUSXauBeD8rrYMzQHcrhihd51i9a3Cen3RDy/dtuNx9jEXnxfkY+n9 +wkwKb4Lask962L+yTHQCfJ+JQxgouADxqzMxhcup1iiHXCd7pSBSoeAvd5Z1AeGX +qBYrLU9mcyIuLrbtADSfnWmXWs2k1hgnP3UXMBhu/GuobQf9kJ2Gwwx5Gp0aB8z9 +4EA+oUXnkM2kJF0MYVMXL+z8VcQpHgyVPujglhNn/a4WdCVTr1jKptNqqnriH9zl +bHMZuiKbAt8RL9rQ3XuFD1sN9k1z/mj8bCHES2WVta+3kCmY9u+eKXNdXJYt+5Xh +bGxwXP5T+Z8Yzc9FVmgPzZzVddCX74Yug0j8BUyE3vPaDq32H6M= +=cZH5 -----END PGP SIGNATURE----- diff --git a/pkg/repos/runtimes/node/node_test.go b/pkg/repos/runtimes/node/node_test.go index 014619c8..ce2fcde1 100644 --- a/pkg/repos/runtimes/node/node_test.go +++ b/pkg/repos/runtimes/node/node_test.go @@ -37,17 +37,3 @@ func TestRuntime(t *testing.T) { } require.NoError(t, err) } - -func TestRuntime21(t *testing.T) { - r := Runtime{ - Version: "21", - } - - s, err := r.Setup(context.Background(), types.Tool{}, testCacheHome, "testdata", os.Environ()) - require.NoError(t, err) - _, err = os.Stat(filepath.Join(firstPath(s), "node.exe")) - if errors.Is(err, fs.ErrNotExist) { - _, err = os.Stat(filepath.Join(firstPath(s), "node")) - } - require.NoError(t, err) -} diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 98957624..e19f5708 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -50,6 +50,8 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /run", s.execHandler) mux.HandleFunc("POST /evaluate", s.execHandler) + mux.HandleFunc("POST /load", s.load) + mux.HandleFunc("POST /parse", s.parse) mux.HandleFunc("POST /fmt", s.fmtDocument) @@ -212,6 +214,42 @@ func (s *server) execHandler(w 
http.ResponseWriter, r *http.Request) { s.execAndStream(ctx, programLoader, logger, w, opts, reqObject.ChatState, reqObject.Input, reqObject.SubTool, def) } +// load will load the file and return the corresponding Program. +func (s *server) load(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + reqObject := new(loadRequest) + if err := json.NewDecoder(r.Body).Decode(reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + logger.Debugf("parsing file: file=%s, content=%s", reqObject.File, reqObject.Content) + + var ( + prg types.Program + err error + cache = s.client.Cache + ) + + if reqObject.DisableCache { + cache = nil + } + + if reqObject.Content != "" { + prg, err = loader.ProgramFromSource(r.Context(), reqObject.Content, reqObject.SubTool, loader.Options{Cache: cache}) + } else if reqObject.File != "" { + prg, err = loader.Program(r.Context(), reqObject.File, reqObject.SubTool, loader.Options{Cache: cache}) + } else { + prg, err = loader.ProgramFromSource(r.Context(), reqObject.ToolDefs.String(), reqObject.SubTool, loader.Options{Cache: cache}) + } + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": map[string]any{"program": prg}}) +} + // parse will parse the file and return the corresponding Document. 
func (s *server) parse(w http.ResponseWriter, r *http.Request) { logger := gcontext.GetLogger(r.Context()) diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index ade035b2..b24ca645 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -82,6 +82,15 @@ func (f *file) String() string { return f.File } +type loadRequest struct { + content `json:",inline"` + + ToolDefs toolDefs `json:"toolDefs,inline"` + DisableCache bool `json:"disableCache"` + SubTool string `json:"subTool,omitempty"` + File string `json:"file"` +} + type parseRequest struct { parser.Options `json:",inline"` content `json:",inline"` From b522585a9f94a37aa17449c64071809d83d9bbc9 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 14 Aug 2024 14:38:27 -0400 Subject: [PATCH 103/270] chore: upgrade Go to 1.23.0 Signed-off-by: Donnie Adams --- .github/workflows/integration.yaml | 2 +- .github/workflows/main.yaml | 4 +--- .github/workflows/release.yaml | 4 +--- .github/workflows/test.yaml | 2 +- .github/workflows/validate-docs.yaml | 2 +- Makefile | 2 +- go.mod | 3 +-- pkg/loader/openapi.go | 2 +- pkg/openapi/getschema.go | 2 +- pkg/repos/runtimes/default.go | 2 +- pkg/repos/runtimes/golang/digests.txt | 20 ++++++++++---------- pkg/repos/runtimes/golang/golang_test.go | 2 +- pkg/repos/runtimes/golang/testdata/go.mod | 2 +- pkg/repos/runtimes/node/node.go | 2 +- pkg/sdkserver/routes.go | 2 +- 15 files changed, 24 insertions(+), 29 deletions(-) diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index e0b78be2..22f4a678 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -29,7 +29,7 @@ jobs: - uses: actions/setup-go@v5 with: cache: false - go-version: "1.22" + go-version: "1.23" - name: Build if: matrix.os == 'ubuntu-22.04' run: make build diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index ae26c52c..7a2114ea 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml 
@@ -26,9 +26,7 @@ jobs: uses: actions/setup-go@v5 with: cache: false - # This can't be upgraded until the issue with sys.daemon on Windows is resolved - # After the issue is resolved, this can be set to 1.22 - go-version: "1.22.4" + go-version: "1.23" - name: Run GoReleaser uses: goreleaser/goreleaser-action@v6 with: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f710e953..8b5b0eae 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -20,9 +20,7 @@ jobs: uses: actions/setup-go@v5 with: cache: false - # This can't be upgraded until the issue with sys.daemon on Windows is resolved - # After the issue is resolved, this can be set to 1.22 - go-version: "1.22.4" + go-version: "1.23" - name: Run GoReleaser uses: goreleaser/goreleaser-action@v6 with: diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index f3829c35..f4da7e62 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -29,7 +29,7 @@ jobs: - uses: actions/setup-go@v5 with: cache: false - go-version: "1.22" + go-version: "1.23" - name: Validate if: matrix.os == 'ubuntu-22.04' run: make validate diff --git a/.github/workflows/validate-docs.yaml b/.github/workflows/validate-docs.yaml index 18368355..f7e3a016 100644 --- a/.github/workflows/validate-docs.yaml +++ b/.github/workflows/validate-docs.yaml @@ -17,6 +17,6 @@ jobs: - uses: actions/setup-go@v5 with: cache: false - go-version: "1.22" + go-version: "1.23" - run: make init-docs - run: make validate-docs diff --git a/Makefile b/Makefile index 5b1b6309..4a52694a 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ smoke: build smoke: go test -v -tags='smoke' ./pkg/tests/smoke/... -GOLANGCI_LINT_VERSION ?= v1.59.0 +GOLANGCI_LINT_VERSION ?= v1.60.1 lint: if ! 
command -v golangci-lint &> /dev/null; then \ echo "Could not find golangci-lint, installing version $(GOLANGCI_LINT_VERSION)."; \ diff --git a/go.mod b/go.mod index 4cf0ba73..0d38ca0c 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,6 @@ module github.com/gptscript-ai/gptscript -// This can't be upgraded until the issue with sys.daemon on Windows is resolved -go 1.22.4 +go 1.23.0 require ( github.com/AlecAivazis/survey/v2 v2.3.7 diff --git a/pkg/loader/openapi.go b/pkg/loader/openapi.go index bc469a4e..cf3c3f34 100644 --- a/pkg/loader/openapi.go +++ b/pkg/loader/openapi.go @@ -86,7 +86,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) pathObj := pathMap[pathString] // Handle path-level server override, if one exists pathServer := defaultServer - if pathObj.Servers != nil && len(pathObj.Servers) > 0 { + if len(pathObj.Servers) > 0 { pathServer, err = parseServer(pathObj.Servers[0]) if err != nil { return nil, err diff --git a/pkg/openapi/getschema.go b/pkg/openapi/getschema.go index 3550afcf..d2966aac 100644 --- a/pkg/openapi/getschema.go +++ b/pkg/openapi/getschema.go @@ -83,7 +83,7 @@ func GetSchema(operationID, defaultHost string, t *openapi3.T) (string, Operatio for path, pathItem := range t.Paths.Map() { // Handle path-level server override, if one exists. 
pathServer := defaultServer - if pathItem.Servers != nil && len(pathItem.Servers) > 0 { + if len(pathItem.Servers) > 0 { pathServer, err = parseServer(pathItem.Servers[0]) if err != nil { return "", OperationInfo{}, false, err diff --git a/pkg/repos/runtimes/default.go b/pkg/repos/runtimes/default.go index d4eb4db5..a93fb735 100644 --- a/pkg/repos/runtimes/default.go +++ b/pkg/repos/runtimes/default.go @@ -26,7 +26,7 @@ var Runtimes = []repos.Runtime{ Default: true, }, &golang.Runtime{ - Version: "1.22.1", + Version: "1.23.0", }, } diff --git a/pkg/repos/runtimes/golang/digests.txt b/pkg/repos/runtimes/golang/digests.txt index 8a1b82c6..df86facf 100644 --- a/pkg/repos/runtimes/golang/digests.txt +++ b/pkg/repos/runtimes/golang/digests.txt @@ -1,10 +1,10 @@ -3bc971772f4712fec0364f4bc3de06af22a00a12daab10b6f717fdcd13156cc0 go1.22.1.darwin-amd64.tar.gz -943e4f9f038239f9911c44366f52ab9202f6ee13610322a668fe42406fb3deef go1.22.1.darwin-amd64.pkg -f6a9cec6b8a002fcc9c0ee24ec04d67f430a52abc3cfd613836986bcc00d8383 go1.22.1.darwin-arm64.tar.gz -5f10b95e2678618f85ba9d87fbed506b3b87efc9d5a8cafda939055cb97949ba go1.22.1.darwin-arm64.pkg -8484df36d3d40139eaf0fe5e647b006435d826cc12f9ae72973bf7ec265e0ae4 go1.22.1.linux-386.tar.gz -aab8e15785c997ae20f9c88422ee35d962c4562212bb0f879d052a35c8307c7f go1.22.1.linux-amd64.tar.gz -e56685a245b6a0c592fc4a55f0b7803af5b3f827aaa29feab1f40e491acf35b8 go1.22.1.linux-arm64.tar.gz -8cb7a90e48c20daed39a6ac8b8a40760030ba5e93c12274c42191d868687c281 go1.22.1.linux-armv6l.tar.gz -0c5ebb7eb39b7884ec99f92b425d4c03a96a72443562aafbf6e7d15c42a3108a go1.22.1.windows-386.zip -cf9c66a208a106402a527f5b956269ca506cfe535fc388e828d249ea88ed28ba go1.22.1.windows-amd64.zip +ffd070acf59f054e8691b838f274d540572db0bd09654af851e4e76ab88403dc go1.23.0.darwin-amd64.tar.gz +bc91d2573939a01731413fac0884c329606c1c168883692131ce772669caf27b go1.23.0.darwin-amd64.pkg +b770812aef17d7b2ea406588e2b97689e9557aac7e646fe76218b216e2c51406 go1.23.0.darwin-arm64.tar.gz 
+d73ae741ed449ea842238f76f4b02935277eb867689f84ace0640965b2caf700 go1.23.0.darwin-arm64.pkg +0e8a7340c2632e6fb5088d60f95b52be1f8303143e04cd34e9b2314fafc24edd go1.23.0.linux-386.tar.gz +905a297f19ead44780548933e0ff1a1b86e8327bb459e92f9c0012569f76f5e3 go1.23.0.linux-amd64.tar.gz +62788056693009bcf7020eedc778cdd1781941c6145eab7688bd087bce0f8659 go1.23.0.linux-arm64.tar.gz +0efa1338e644d7f74064fa7f1016b5da7872b2df0070ea3b56e4fef63192e35b go1.23.0.linux-armv6l.tar.gz +09448fedec0cdf98ad12397222e0c8bfc835b1d0894c0015ced653534b8d7427 go1.23.0.windows-386.zip +d4be481ef73079ee0ad46081d278923aa3fd78db1b3cf147172592f73e14c1ac go1.23.0.windows-amd64.zip diff --git a/pkg/repos/runtimes/golang/golang_test.go b/pkg/repos/runtimes/golang/golang_test.go index 56098a51..f3d888fd 100644 --- a/pkg/repos/runtimes/golang/golang_test.go +++ b/pkg/repos/runtimes/golang/golang_test.go @@ -25,7 +25,7 @@ func TestRuntime(t *testing.T) { os.RemoveAll("testdata/bin") }) r := Runtime{ - Version: "1.22.1", + Version: "1.23.0", } s, err := r.Setup(context.Background(), types.Tool{}, testCacheHome, "testdata", os.Environ()) diff --git a/pkg/repos/runtimes/golang/testdata/go.mod b/pkg/repos/runtimes/golang/testdata/go.mod index 93eac7fe..6725cbfd 100644 --- a/pkg/repos/runtimes/golang/testdata/go.mod +++ b/pkg/repos/runtimes/golang/testdata/go.mod @@ -1,3 +1,3 @@ module example.com -go 1.22.1 +go 1.23.0 diff --git a/pkg/repos/runtimes/node/node.go b/pkg/repos/runtimes/node/node.go index 01a752e6..aa57b059 100644 --- a/pkg/repos/runtimes/node/node.go +++ b/pkg/repos/runtimes/node/node.go @@ -136,7 +136,7 @@ func (r *Runtime) runNPM(ctx context.Context, tool types.Tool, toolSource, binDi if tool.WorkingDir == "" { return nil } - if _, err := os.Stat(filepath.Join(tool.WorkingDir, packageJSON)); errors.Is(fs.ErrNotExist, err) { + if _, err := os.Stat(filepath.Join(tool.WorkingDir, packageJSON)); errors.Is(err, fs.ErrNotExist) { return nil } else if err != nil { return err diff --git 
a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index e19f5708..4309bc28 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -271,7 +271,7 @@ func (s *server) parse(w http.ResponseWriter, r *http.Request) { } else { content, loadErr := input.FromLocation(reqObject.File, reqObject.DisableCache) if loadErr != nil { - logger.Errorf(loadErr.Error()) + logger.Errorf("failed to load file: %v", loadErr) writeError(logger, w, http.StatusInternalServerError, loadErr) return } From 538323b7e19ca4d5581272591bbd5dfb3469bc15 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 14 Aug 2024 14:33:53 -0700 Subject: [PATCH 104/270] chore: allow wildcard matching in metadata key names --- pkg/parser/parser.go | 14 +++++++++++++- pkg/parser/parser_test.go | 6 ++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index b57cb658..956822dd 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -4,6 +4,8 @@ import ( "bufio" "fmt" "io" + "maps" + "path" "regexp" "slices" "strconv" @@ -16,7 +18,7 @@ import ( var ( sepRegex = regexp.MustCompile(`^\s*---+\s*$`) strictSepRegex = regexp.MustCompile(`^---\n$`) - skipRegex = regexp.MustCompile(`^![-.:\w]+\s*$`) + skipRegex = regexp.MustCompile(`^![-.:*\w]+\s*$`) ) func normalize(key string) string { @@ -390,6 +392,16 @@ func assignMetadata(nodes []Node) (result []Node) { for _, node := range nodes { if node.ToolNode != nil { node.ToolNode.Tool.MetaData = metadata[node.ToolNode.Tool.Name] + for wildcard := range metadata { + if strings.Contains(wildcard, "*") { + if m, err := path.Match(wildcard, node.ToolNode.Tool.Name); m && err == nil { + if node.ToolNode.Tool.MetaData == nil { + node.ToolNode.Tool.MetaData = map[string]string{} + } + maps.Copy(node.ToolNode.Tool.MetaData, metadata[wildcard]) + } + } + } } result = append(result, node) } diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index 3967ebd5..f98b74e2 100644 --- 
a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -258,6 +258,11 @@ asdf2 --- !metadata:first:requirements.txt asdf + +--- +!metadata:f*r*:other + +foo bar ` tools, err := ParseTools(strings.NewReader(input)) require.NoError(t, err) @@ -266,5 +271,6 @@ asdf autogold.Expect(map[string]string{ "package.json": "foo=base\nf", "requirements.txt": "asdf", + "other": "foo bar", }).Equal(t, tools[0].MetaData) } From 91a5df67edbf3e526a1e5527c89395dce2afc779 Mon Sep 17 00:00:00 2001 From: Rinor Hoxha Date: Thu, 15 Aug 2024 04:24:22 +0200 Subject: [PATCH 105/270] fix(openapi): don't panic on requestBody content mime without schema (#797) --- pkg/loader/openapi.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/loader/openapi.go b/pkg/loader/openapi.go index cf3c3f34..8dd83892 100644 --- a/pkg/loader/openapi.go +++ b/pkg/loader/openapi.go @@ -209,6 +209,11 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) } bodyMIME = mime + // requestBody content mime without schema + if content == nil || content.Schema == nil { + continue + } + arg := content.Schema.Value if arg.Description == "" { arg.Description = content.Schema.Value.Description From 2e75740d300c7a8968292043ba3c42f3b507a3e3 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 15 Aug 2024 08:43:54 -0700 Subject: [PATCH 106/270] bug: fix default model provider --- pkg/remote/remote.go | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index baa54677..fa1d40c2 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -25,7 +25,6 @@ type Client struct { clientsLock sync.Mutex cache *cache.Client clients map[string]clientInfo - modelToProvider map[string]string runner *runner.Runner envs []string credStore credentials.CredentialStore @@ -39,17 +38,13 @@ func New(r *runner.Runner, envs []string, cache *cache.Client, credStore credent envs: envs, credStore: credStore, defaultProvider: 
defaultProvider, - modelToProvider: make(map[string]string), clients: make(map[string]clientInfo), } } func (c *Client) Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { - c.clientsLock.Lock() - provider, ok := c.modelToProvider[messageRequest.Model] - c.clientsLock.Unlock() - - if !ok { + _, provider := c.parseModel(messageRequest.Model) + if provider == "" { return nil, fmt.Errorf("failed to find remote model %s", messageRequest.Model) } @@ -108,10 +103,6 @@ func (c *Client) Supports(ctx context.Context, modelString string) (bool, error) return false, err } - c.clientsLock.Lock() - defer c.clientsLock.Unlock() - - c.modelToProvider[modelString] = providerName return true, nil } From 20c983ecf452cd9d65bc1a9ab51f1fb12d71d4fa Mon Sep 17 00:00:00 2001 From: John Engelman Date: Fri, 16 Aug 2024 08:41:58 -0500 Subject: [PATCH 107/270] fix: Append all credentials for OpenAPI security infos (#661) Co-authored-by: John Engelman --- pkg/loader/openapi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/loader/openapi.go b/pkg/loader/openapi.go index 8dd83892..e62fc5ef 100644 --- a/pkg/loader/openapi.go +++ b/pkg/loader/openapi.go @@ -305,7 +305,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) if err != nil { return nil, fmt.Errorf("failed to parse operation server URL: %w", err) } - tool.Credentials = info.GetCredentialToolStrings(operationServerURL.Hostname()) + tool.Credentials = append(tool.Credentials, info.GetCredentialToolStrings(operationServerURL.Hostname())...) 
} } From 1124ed1c6952c2e8ff033a64a7b5dfc9fc877087 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 16 Aug 2024 16:34:48 -0400 Subject: [PATCH 108/270] feat: add ability to list models from other providers Signed-off-by: Donnie Adams --- pkg/sdkserver/routes.go | 41 ++++++++++++----------------------------- pkg/sdkserver/types.go | 4 +++- 2 files changed, 15 insertions(+), 30 deletions(-) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 4309bc28..6cb1e620 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -73,39 +73,13 @@ func (s *server) version(w http.ResponseWriter, r *http.Request) { // listTools will return the output of `gptscript --list-tools` func (s *server) listTools(w http.ResponseWriter, r *http.Request) { logger := gcontext.GetLogger(r.Context()) - var prg types.Program - if r.ContentLength != 0 { - reqObject := new(toolOrFileRequest) - err := json.NewDecoder(r.Body).Decode(reqObject) - if err != nil { - writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) - return - } - - if reqObject.Content != "" { - prg, err = loader.ProgramFromSource(r.Context(), reqObject.Content, reqObject.SubTool, loader.Options{Cache: s.client.Cache}) - } else if reqObject.File != "" { - prg, err = loader.Program(r.Context(), reqObject.File, reqObject.SubTool, loader.Options{Cache: s.client.Cache}) - } else { - prg, err = loader.ProgramFromSource(r.Context(), reqObject.ToolDefs.String(), reqObject.SubTool, loader.Options{Cache: s.client.Cache}) - } - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) - return - } - } - - tools := s.client.ListTools(r.Context(), prg) + tools := s.client.ListTools(r.Context(), types.Program{}) sort.Slice(tools, func(i, j int) bool { return tools[i].Name < tools[j].Name }) lines := make([]string, 0, len(tools)) for _, tool := range tools { - if tool.Name == "" { - tool.Name = prg.Name - } - 
// Don't print instructions tool.Instructions = "" @@ -118,22 +92,31 @@ func (s *server) listTools(w http.ResponseWriter, r *http.Request) { // listModels will return the output of `gptscript --list-models` func (s *server) listModels(w http.ResponseWriter, r *http.Request) { logger := gcontext.GetLogger(r.Context()) + client := s.client + var providers []string if r.ContentLength != 0 { reqObject := new(modelsRequest) - if err := json.NewDecoder(r.Body).Decode(reqObject); err != nil { + err := json.NewDecoder(r.Body).Decode(reqObject) + if err != nil { writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) return } providers = reqObject.Providers + + client, err = gptscript.New(r.Context(), s.gptscriptOpts, gptscript.Options{Env: reqObject.Env, Runner: runner.Options{CredentialOverrides: reqObject.CredentialOverrides}}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to create client: %w", err)) + return + } } if s.gptscriptOpts.DefaultModelProvider != "" { providers = append(providers, s.gptscriptOpts.DefaultModelProvider) } - out, err := s.client.ListModels(r.Context(), providers...) + out, err := client.ListModels(r.Context(), providers...) 
if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to list models: %w", err)) return diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index b24ca645..e26bbba5 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -100,7 +100,9 @@ type parseRequest struct { } type modelsRequest struct { - Providers []string `json:"providers"` + Providers []string `json:"providers"` + Env []string `json:"env"` + CredentialOverrides []string `json:"credentialOverrides"` } type runInfo struct { From 1226edbeb364745fa656cfaff9ef864054452453 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 19 Aug 2024 13:18:53 -0400 Subject: [PATCH 109/270] fix: openapi: return validation errors to the LLM; improve confirmation prompt (#805) Signed-off-by: Grant Linville --- pkg/openapi/run.go | 3 ++- pkg/types/toolstring.go | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/openapi/run.go b/pkg/openapi/run.go index 2efc2309..6c7e4ca7 100644 --- a/pkg/openapi/run.go +++ b/pkg/openapi/run.go @@ -42,7 +42,8 @@ func Run(operationID, defaultHost, args string, t *openapi3.T, envs []string) (s } if !validationResult.Valid() { - return "", false, fmt.Errorf("invalid arguments for operation %s: %s", operationID, validationResult.Errors()) + // We don't return an error here because we want the LLM to be able to maintain control and try again. + return fmt.Sprintf("invalid arguments for operation %s: %s", operationID, validationResult.Errors()), true, nil } // Construct and execute the HTTP request. 
diff --git a/pkg/types/toolstring.go b/pkg/types/toolstring.go index 2be6d0fc..9d6d765b 100644 --- a/pkg/types/toolstring.go +++ b/pkg/types/toolstring.go @@ -3,6 +3,7 @@ package types import ( "encoding/json" "fmt" + "os" "path/filepath" "strings" ) @@ -76,6 +77,11 @@ func ToSysDisplayString(id string, args map[string]string) (string, error) { return fmt.Sprintf("Writing `%s`", args["filename"]), nil case "sys.context", "sys.stat", "sys.getenv", "sys.abort", "sys.chat.current", "sys.chat.finish", "sys.chat.history", "sys.echo", "sys.prompt", "sys.time.now", "sys.model.provider.credential": return "", nil + case "sys.openapi": + if os.Getenv("GPTSCRIPT_OPENAPI_REVAMP") == "true" && args["operation"] != "" { + return fmt.Sprintf("Running API operation `%s` with arguments %s", args["operation"], args["args"]), nil + } + fallthrough default: return "", fmt.Errorf("unknown tool for display string: %s", id) } From 2b18a1d1ca85fb9a9f3b3fb7741697ed8e7a9038 Mon Sep 17 00:00:00 2001 From: Atulpriya Sharma Date: Tue, 20 Aug 2024 12:56:28 +0530 Subject: [PATCH 110/270] Create gcp-assistant.gpt Adding gcp assistant gptscript. Signed-off-by: Atulpriya Sharma --- examples/gcp-assistant.gpt | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 examples/gcp-assistant.gpt diff --git a/examples/gcp-assistant.gpt b/examples/gcp-assistant.gpt new file mode 100644 index 00000000..29b682d0 --- /dev/null +++ b/examples/gcp-assistant.gpt @@ -0,0 +1,28 @@ +Name: GCP Assistant +Description: Agent to help you interact with Google Cloud +Context: learn-gcp, learn-kubectl +Tools: sys.exec, sys.http.html2text?, sys.find, sys.read, sys.write +Chat:true +You are an assistant for Google Cloud Platform (GCP). +Rules +1. Use gcloud CLI to interact with GCP. +2. Assume the user is using Google cloud. 
+ +--- +Name: learn-gcp +Description: A tool to help you learn gcp cli +#!/bin/bash +echo "Current gcloud config:" +gcloud config list || true +--- +Name: learn-kubectl +Description: A tool to help you learn k8s and related commands +#!/bin/bash + +CMDS="kubectl helm" +echo 'The additional CLI commands are available locally, use the `exec` tool to invoke them:' +for i in $CMDS; do + if [ -e "$(command -v $i)" ]; then + echo ' ' $i + fi +done From 5b7d8b75ffad76aed2a46402051f7d48bf166a40 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 20 Aug 2024 12:03:20 -0400 Subject: [PATCH 111/270] feat: add ability to include metadata with prompts Based on the metadata, the receiver of the prompt event would be able to handle the prompt in a different way, if needed. Signed-off-by: Donnie Adams --- pkg/builtin/builtin.go | 1 + pkg/prompt/prompt.go | 12 ++++++++---- pkg/sdkserver/prompt.go | 6 +----- pkg/types/prompt.go | 7 ++++--- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index 23db5152..f972d14c 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -217,6 +217,7 @@ var tools = map[string]types.Tool{ "message", "The message to display to the user", "fields", "A comma-separated list of fields to prompt for", "sensitive", "(true or false) Whether the input should be hidden", + "metadata", "(optional) A JSON object of metadata to attach to the prompt", ), }, BuiltinFunc: prompt.SysPrompt, diff --git a/pkg/prompt/prompt.go b/pkg/prompt/prompt.go index 44cb20f1..f91a04b6 100644 --- a/pkg/prompt/prompt.go +++ b/pkg/prompt/prompt.go @@ -51,25 +51,29 @@ func sysPromptHTTP(ctx context.Context, envs []string, url string, prompt types. 
func SysPrompt(ctx context.Context, envs []string, input string, _ chan<- string) (_ string, err error) { var params struct { - Message string `json:"message,omitempty"` - Fields string `json:"fields,omitempty"` - Sensitive string `json:"sensitive,omitempty"` + Message string `json:"message,omitempty"` + Fields string `json:"fields,omitempty"` + Sensitive string `json:"sensitive,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` } if err := json.Unmarshal([]byte(input), ¶ms); err != nil { return "", err } + var fields []string for _, env := range envs { if url, ok := strings.CutPrefix(env, types.PromptURLEnvVar+"="); ok { - var fields []string if params.Fields != "" { fields = strings.Split(params.Fields, ",") } + httpPrompt := types.Prompt{ Message: params.Message, Fields: fields, Sensitive: params.Sensitive == "true", + Metadata: params.Metadata, } + return sysPromptHTTP(ctx, envs, url, httpPrompt) } } diff --git a/pkg/sdkserver/prompt.go b/pkg/sdkserver/prompt.go index 8d34fc53..a519f7b2 100644 --- a/pkg/sdkserver/prompt.go +++ b/pkg/sdkserver/prompt.go @@ -76,11 +76,7 @@ func (s *server) prompt(w http.ResponseWriter, r *http.Request) { }(id) s.events.C <- event{ - Prompt: types.Prompt{ - Message: prompt.Message, - Fields: prompt.Fields, - Sensitive: prompt.Sensitive, - }, + Prompt: prompt, Event: gserver.Event{ RunID: id, Event: runner.Event{ diff --git a/pkg/types/prompt.go b/pkg/types/prompt.go index ea17c11c..653ad066 100644 --- a/pkg/types/prompt.go +++ b/pkg/types/prompt.go @@ -6,7 +6,8 @@ const ( ) type Prompt struct { - Message string `json:"message,omitempty"` - Fields []string `json:"fields,omitempty"` - Sensitive bool `json:"sensitive,omitempty"` + Message string `json:"message,omitempty"` + Fields []string `json:"fields,omitempty"` + Sensitive bool `json:"sensitive,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` } From a387cd8f695f8b0a400376088daf353604620d2a Mon Sep 17 00:00:00 2001 From: Grant Linville Date: 
Tue, 20 Aug 2024 12:18:59 -0400 Subject: [PATCH 112/270] enhance: pretty-print JSON arguments for OpenAPI operations (#808) Signed-off-by: Grant Linville --- pkg/types/toolstring.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/types/toolstring.go b/pkg/types/toolstring.go index 9d6d765b..086ad043 100644 --- a/pkg/types/toolstring.go +++ b/pkg/types/toolstring.go @@ -79,7 +79,16 @@ func ToSysDisplayString(id string, args map[string]string) (string, error) { return "", nil case "sys.openapi": if os.Getenv("GPTSCRIPT_OPENAPI_REVAMP") == "true" && args["operation"] != "" { - return fmt.Sprintf("Running API operation `%s` with arguments %s", args["operation"], args["args"]), nil + // Pretty print the JSON by unmarshaling and marshaling it + var jsonArgs map[string]any + if err := json.Unmarshal([]byte(args["args"]), &jsonArgs); err != nil { + return "", err + } + jsonPretty, err := json.MarshalIndent(jsonArgs, "", " ") + if err != nil { + return "", err + } + return fmt.Sprintf("Running API operation `%s` with arguments %s", args["operation"], string(jsonPretty)), nil } fallthrough default: From 1202543787fe8cef7aff6bc48cdfa1ad13700c1e Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 20 Aug 2024 21:46:30 -0400 Subject: [PATCH 113/270] fix: include proper input on call events Signed-off-by: Donnie Adams --- pkg/runner/runner.go | 5 +++++ pkg/sdkserver/monitor.go | 5 +---- pkg/sdkserver/types.go | 7 +++++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index f92b0705..93e40670 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -628,11 +628,16 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s } } + var content string + if state.ResumeInput != nil { + content = *state.ResumeInput + } monitor.Event(Event{ Time: time.Now(), CallContext: callCtx.GetCallContext(), Type: EventTypeCallContinue, ToolResults: len(callResults), + 
Content: content, }) e := engine.Engine{ diff --git a/pkg/sdkserver/monitor.go b/pkg/sdkserver/monitor.go index a5b0236b..bdd88c67 100644 --- a/pkg/sdkserver/monitor.go +++ b/pkg/sdkserver/monitor.go @@ -33,6 +33,7 @@ func (s SessionFactory) Start(ctx context.Context, prg *types.Program, env []str Time: time.Now(), Type: runner.EventTypeRunStart, }, + Input: input, RunID: id, Program: prg, }, @@ -43,7 +44,6 @@ func (s SessionFactory) Start(ctx context.Context, prg *types.Program, env []str id: id, prj: prg, env: env, - input: input, events: s.events, }, nil } @@ -56,7 +56,6 @@ type Session struct { id string prj *types.Program env []string - input string events *broadcaster.Broadcaster[event] runLock sync.Mutex } @@ -68,7 +67,6 @@ func (s *Session) Event(e runner.Event) { Event: gserver.Event{ Event: e, RunID: s.id, - Input: s.input, }, } } @@ -87,7 +85,6 @@ func (s *Session) Stop(ctx context.Context, output string, err error) { Type: runner.EventTypeRunFinish, }, RunID: s.id, - Input: s.input, Output: output, }, } diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index e26bbba5..2889626b 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -144,6 +144,7 @@ func (r *runInfo) process(e event) map[string]any { r.Start = e.Time r.Program = *e.Program r.State = Running + r.Input = e.Input case runner.EventTypeRunFinish: r.End = e.Time r.Output = e.Output @@ -167,9 +168,11 @@ func (r *runInfo) process(e event) map[string]any { call.Type = e.Type switch e.Type { - case runner.EventTypeCallStart: + case runner.EventTypeCallStart, runner.EventTypeCallContinue: call.Start = e.Time - call.Input = e.Content + if e.Content != "" { + call.Input = e.Content + } case runner.EventTypeCallSubCalls: call.setSubCalls(e.ToolSubCalls) From c516f7b5a2b956312d3fd2305b2af5cc8e673e14 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 21 Aug 2024 13:35:35 -0400 Subject: [PATCH 114/270] fix: remove config file location from the config (#813) Signed-off-by: 
Grant Linville --- pkg/config/cliconfig.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/config/cliconfig.go b/pkg/config/cliconfig.go index 7970415f..43649135 100644 --- a/pkg/config/cliconfig.go +++ b/pkg/config/cliconfig.go @@ -52,14 +52,14 @@ func (a *AuthConfig) UnmarshalJSON(data []byte) error { } type CLIConfig struct { - Auths map[string]AuthConfig `json:"auths,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - GPTScriptConfigFile string `json:"gptscriptConfig,omitempty"` - GatewayURL string `json:"gatewayURL,omitempty"` - Integrations map[string]string `json:"integrations,omitempty"` + Auths map[string]AuthConfig `json:"auths,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + GatewayURL string `json:"gatewayURL,omitempty"` + Integrations map[string]string `json:"integrations,omitempty"` auths map[string]types.AuthConfig authsLock *sync.Mutex + location string } func (c *CLIConfig) Sanitize() *CLIConfig { @@ -93,7 +93,7 @@ func (c *CLIConfig) Save() error { if err != nil { return err } - return os.WriteFile(c.GPTScriptConfigFile, data, 0655) + return os.WriteFile(c.location, data, 0655) } func (c *CLIConfig) GetAuthConfigs() map[string]types.AuthConfig { @@ -113,7 +113,7 @@ func (c *CLIConfig) GetAuthConfigs() map[string]types.AuthConfig { } func (c *CLIConfig) GetFilename() string { - return c.GPTScriptConfigFile + return c.location } func ReadCLIConfig(gptscriptConfigFile string) (*CLIConfig, error) { @@ -133,8 +133,8 @@ func ReadCLIConfig(gptscriptConfigFile string) (*CLIConfig, error) { return nil, err } result := &CLIConfig{ - authsLock: &sync.Mutex{}, - GPTScriptConfigFile: gptscriptConfigFile, + authsLock: &sync.Mutex{}, + location: gptscriptConfigFile, } if err := json.Unmarshal(data, result); err != nil { return nil, err @@ -158,7 +158,7 @@ func ReadCLIConfig(gptscriptConfigFile string) (*CLIConfig, error) { default: errMsg += " (use 'file')" } - errMsg += 
fmt.Sprintf("\nPlease edit your config file at %s to fix this.", result.GPTScriptConfigFile) + errMsg += fmt.Sprintf("\nPlease edit your config file at %s to fix this.", result.location) return nil, errors.New(errMsg) } From 83622e6669d52b019a9d5d5af646ccfe4362cc15 Mon Sep 17 00:00:00 2001 From: Bill Maxwell Date: Wed, 21 Aug 2024 09:24:04 -0700 Subject: [PATCH 115/270] chore: add content length when handling json request bodies. Signed-off-by: Bill Maxwell --- pkg/openapi/run.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/openapi/run.go b/pkg/openapi/run.go index 6c7e4ca7..fb3b746c 100644 --- a/pkg/openapi/run.go +++ b/pkg/openapi/run.go @@ -107,6 +107,7 @@ func Run(operationID, defaultHost, args string, t *openapi3.T, envs []string) (s return "", false, fmt.Errorf("failed to encode JSON: %w", err) } req.Header.Set("Content-Type", "application/json") + req.ContentLength = int64(body.Len()) case "text/plain": reqBody := "" From 979908c5e3c5d24200565962d0a0647fb1934cd3 Mon Sep 17 00:00:00 2001 From: Daishan Peng Date: Thu, 22 Aug 2024 18:04:51 -0700 Subject: [PATCH 116/270] Fix: Do not return full env map after downloading releases Signed-off-by: Daishan Peng --- pkg/repos/runtimes/golang/golang.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index 52e8fe0b..2601f521 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -244,7 +244,7 @@ func getChecksum(ctx context.Context, rel *release, artifactName string) string return "" } -func (r *Runtime) Binary(ctx context.Context, tool types.Tool, _, toolSource string, env []string) (bool, []string, error) { +func (r *Runtime) Binary(ctx context.Context, tool types.Tool, _, toolSource string, _ []string) (bool, []string, error) { if !tool.Source.IsGit() { return false, nil, nil } @@ -264,7 +264,7 @@ func (r *Runtime) Binary(ctx context.Context, tool types.Tool, _, toolSource 
str return false, nil, nil } - return true, env, nil + return true, nil, nil } func (r *Runtime) Setup(ctx context.Context, _ types.Tool, dataRoot, toolSource string, env []string) ([]string, error) { From ed94de0067fa2a59d948fa73ed26799a48f3662e Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 27 Aug 2024 09:27:05 -0700 Subject: [PATCH 117/270] chore: dynamically update tools and other tool params on subsequent chats --- pkg/engine/engine.go | 49 ++++++---- pkg/tests/runner_test.go | 31 ++++++ .../TestToolsChange/call1-resp.golden | 9 ++ .../testdata/TestToolsChange/call1.golden | 70 ++++++++++++++ .../TestToolsChange/call2-resp.golden | 9 ++ .../testdata/TestToolsChange/call2.golden | 73 ++++++++++++++ .../testdata/TestToolsChange/step1.golden | 93 ++++++++++++++++++ .../testdata/TestToolsChange/step2.golden | 96 +++++++++++++++++++ 8 files changed, 410 insertions(+), 20 deletions(-) create mode 100644 pkg/tests/testdata/TestToolsChange/call1-resp.golden create mode 100644 pkg/tests/testdata/TestToolsChange/call1.golden create mode 100644 pkg/tests/testdata/TestToolsChange/call2-resp.golden create mode 100644 pkg/tests/testdata/TestToolsChange/call2.golden create mode 100644 pkg/tests/testdata/TestToolsChange/step1.golden create mode 100644 pkg/tests/testdata/TestToolsChange/step2.golden diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index f8fd8154..14b75e0a 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -258,6 +258,29 @@ func (c *Context) WrappedContext(e *Engine) context.Context { return context.WithValue(c.Ctx, engineContext{}, &cp) } +func populateMessageParams(ctx Context, completion *types.CompletionRequest, tool types.Tool) error { + completion.Model = tool.Parameters.ModelName + completion.MaxTokens = tool.Parameters.MaxTokens + completion.JSONResponse = tool.Parameters.JSONResponse + completion.Cache = tool.Parameters.Cache + completion.Chat = tool.Parameters.Chat + completion.Temperature = tool.Parameters.Temperature 
+ completion.InternalSystemPrompt = tool.Parameters.InternalPrompt + + if tool.Chat && completion.InternalSystemPrompt == nil { + completion.InternalSystemPrompt = new(bool) + } + + var err error + completion.Tools, err = tool.GetCompletionTools(*ctx.Program, ctx.AgentGroup...) + if err != nil { + return err + } + + completion.Messages = addUpdateSystem(ctx, tool, completion.Messages) + return nil +} + func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { tool := ctx.Tool @@ -290,28 +313,11 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { return nil, fmt.Errorf("credential tools cannot make calls to the LLM") } - completion := types.CompletionRequest{ - Model: tool.Parameters.ModelName, - MaxTokens: tool.Parameters.MaxTokens, - JSONResponse: tool.Parameters.JSONResponse, - Cache: tool.Parameters.Cache, - Chat: tool.Parameters.Chat, - Temperature: tool.Parameters.Temperature, - InternalSystemPrompt: tool.Parameters.InternalPrompt, - } - - if tool.Chat && completion.InternalSystemPrompt == nil { - completion.InternalSystemPrompt = new(bool) - } - - var err error - completion.Tools, err = tool.GetCompletionTools(*ctx.Program, ctx.AgentGroup...) 
- if err != nil { + var completion types.CompletionRequest + if err := populateMessageParams(ctx, &completion, tool); err != nil { return nil, err } - completion.Messages = addUpdateSystem(ctx, tool, completion.Messages) - if tool.Chat && input == "{}" { input = "" } @@ -497,6 +503,9 @@ func (e *Engine) Continue(ctx Context, state *State, results ...CallResult) (*Re return nil, fmt.Errorf("invalid continue call, no completion needed") } - state.Completion.Messages = addUpdateSystem(ctx, ctx.Tool, state.Completion.Messages) + if err := populateMessageParams(ctx, &state.Completion, ctx.Tool); err != nil { + return nil, err + } + return e.complete(ctx.Ctx, state) } diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 141e6aff..483b5b6f 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/gptscript-ai/gptscript/pkg/engine" + "github.com/gptscript-ai/gptscript/pkg/loader" "github.com/gptscript-ai/gptscript/pkg/tests/tester" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/hexops/autogold/v2" @@ -1041,3 +1042,33 @@ func TestRuntimesLocalDev(t *testing.T) { _ = os.RemoveAll("testdata/TestRuntimesLocalDev/node_modules") _ = os.RemoveAll("testdata/TestRuntimesLocalDev/package-lock.json") } + +func TestToolsChange(t *testing.T) { + r := tester.NewRunner(t) + prg, err := loader.ProgramFromSource(context.Background(), ` +chat: true +tools: sys.ls, sys.read, sys.write +`, "") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 1").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) + + prg, err = loader.ProgramFromSource(context.Background(), ` +chat: true +temperature: 0.6 +tools: sys.ls, sys.write +`, "") + require.NoError(t, err) + + resp, err = r.Chat(context.Background(), 
resp.State, prg, nil, "input 2") + require.NoError(t, err) + r.AssertResponded(t) + assert.False(t, resp.Done) + autogold.Expect("TEST RESULT CALL: 2").Equal(t, resp.Content) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step2")) +} diff --git a/pkg/tests/testdata/TestToolsChange/call1-resp.golden b/pkg/tests/testdata/TestToolsChange/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestToolsChange/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestToolsChange/call1.golden b/pkg/tests/testdata/TestToolsChange/call1.golden new file mode 100644 index 00000000..6c7c2d55 --- /dev/null +++ b/pkg/tests/testdata/TestToolsChange/call1.golden @@ -0,0 +1,70 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "sys.ls", + "name": "ls", + "description": "Lists the contents of a directory", + "parameters": { + "properties": { + "dir": { + "description": "The directory to list", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "sys.read", + "name": "read", + "description": "Reads the contents of a file", + "parameters": { + "properties": { + "filename": { + "description": "The name of the file to read", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "sys.write", + "name": "write", + "description": "Write the contents to a file", + "parameters": { + "properties": { + "content": { + "description": "The content to write", + "type": "string" + }, + "filename": { + "description": "The name of the file to write to", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git 
a/pkg/tests/testdata/TestToolsChange/call2-resp.golden b/pkg/tests/testdata/TestToolsChange/call2-resp.golden new file mode 100644 index 00000000..997ca1b9 --- /dev/null +++ b/pkg/tests/testdata/TestToolsChange/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestToolsChange/call2.golden b/pkg/tests/testdata/TestToolsChange/call2.golden new file mode 100644 index 00000000..ad86b7ce --- /dev/null +++ b/pkg/tests/testdata/TestToolsChange/call2.golden @@ -0,0 +1,73 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "sys.ls", + "name": "ls", + "description": "Lists the contents of a directory", + "parameters": { + "properties": { + "dir": { + "description": "The directory to list", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "sys.write", + "name": "write", + "description": "Write the contents to a file", + "parameters": { + "properties": { + "content": { + "description": "The content to write", + "type": "string" + }, + "filename": { + "description": "The name of the file to write to", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 2" + } + ], + "usage": {} + } + ], + "chat": true, + "temperature": 0.6 +}` diff --git a/pkg/tests/testdata/TestToolsChange/step1.golden b/pkg/tests/testdata/TestToolsChange/step1.golden new file mode 100644 index 00000000..1aae05d1 --- /dev/null +++ b/pkg/tests/testdata/TestToolsChange/step1.golden @@ -0,0 +1,93 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 1", + "toolID": "inline:", + "state": { + "continuation": { + "state": { 
+ "input": "input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "sys.ls", + "name": "ls", + "description": "Lists the contents of a directory", + "parameters": { + "properties": { + "dir": { + "description": "The directory to list", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "sys.read", + "name": "read", + "description": "Reads the contents of a file", + "parameters": { + "properties": { + "filename": { + "description": "The name of the file to read", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "sys.write", + "name": "write", + "description": "Write the contents to a file", + "parameters": { + "properties": { + "content": { + "description": "The content to write", + "type": "string" + }, + "filename": { + "description": "The name of the file to write to", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + } + ], + "chat": true + } + }, + "result": "TEST RESULT CALL: 1" + }, + "continuationToolID": "inline:" + } +}` diff --git a/pkg/tests/testdata/TestToolsChange/step2.golden b/pkg/tests/testdata/TestToolsChange/step2.golden new file mode 100644 index 00000000..9c9dbad7 --- /dev/null +++ b/pkg/tests/testdata/TestToolsChange/step2.golden @@ -0,0 +1,96 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 2", + "toolID": "inline:", + "state": { + "continuation": { + "state": { + "input": "input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "tools": [ + { + "function": { + "toolID": "sys.ls", + "name": "ls", + "description": "Lists the contents of a directory", + "parameters": { + "properties": { + "dir": { + "description": "The directory to list", + 
"type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "sys.write", + "name": "write", + "description": "Write the contents to a file", + "parameters": { + "properties": { + "content": { + "description": "The content to write", + "type": "string" + }, + "filename": { + "description": "The name of the file to write to", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 2" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} + } + ], + "chat": true, + "temperature": 0.6 + } + }, + "result": "TEST RESULT CALL: 2" + }, + "continuationToolID": "inline:" + } +}` From 04dd074694d7928ecbb124635e2ea5ccad77f0a6 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 27 Aug 2024 09:52:59 -0700 Subject: [PATCH 118/270] chore: add with * syntax to context tools Basic example chat: true context: foo with * Say hi --- name: foo #!/bin/bash echo This is the input: ${GPTSCRIPT_INPUT} --- pkg/runner/runner.go | 14 +++- pkg/tests/runner2_test.go | 34 ++++++++++ .../TestContextWithAsterick/call1-resp.golden | 9 +++ .../TestContextWithAsterick/call1.golden | 25 +++++++ .../TestContextWithAsterick/call2-resp.golden | 9 +++ .../TestContextWithAsterick/call2.golden | 43 ++++++++++++ .../TestContextWithAsterick/step1.golden | 48 ++++++++++++++ .../TestContextWithAsterick/step2.golden | 66 +++++++++++++++++++ pkg/tests/tester/runner.go | 18 ++++- 9 files changed, 262 insertions(+), 4 deletions(-) create mode 100644 pkg/tests/runner2_test.go create mode 100644 pkg/tests/testdata/TestContextWithAsterick/call1-resp.golden create mode 100644 pkg/tests/testdata/TestContextWithAsterick/call1.golden create 
mode 100644 pkg/tests/testdata/TestContextWithAsterick/call2-resp.golden create mode 100644 pkg/tests/testdata/TestContextWithAsterick/call2.golden create mode 100644 pkg/tests/testdata/TestContextWithAsterick/step1.golden create mode 100644 pkg/tests/testdata/TestContextWithAsterick/step2.golden diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 93e40670..30737a4c 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -260,6 +260,10 @@ func getToolRefInput(prg *types.Program, ref types.ToolReference, input string) targetArgs := prg.ToolSet[ref.ToolID].Arguments targetKeys := map[string]string{} + if ref.Arg == "*" { + return input, nil + } + if targetArgs == nil { return "", nil } @@ -647,13 +651,17 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s Env: env, } - var contentInput string + var contextInput string if state.Continuation != nil && state.Continuation.State != nil { - contentInput = state.Continuation.State.Input + contextInput = state.Continuation.State.Input + } + + if state.ResumeInput != nil { + contextInput = *state.ResumeInput } - callCtx.InputContext, state, err = r.getContext(callCtx, state, monitor, env, contentInput) + callCtx.InputContext, state, err = r.getContext(callCtx, state, monitor, env, contextInput) if err != nil || state.InputContextContinuation != nil { return state, err } diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go new file mode 100644 index 00000000..12ac4fa0 --- /dev/null +++ b/pkg/tests/runner2_test.go @@ -0,0 +1,34 @@ +package tests + +import ( + "context" + "testing" + + "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/tests/tester" + "github.com/stretchr/testify/require" +) + +func TestContextWithAsterick(t *testing.T) { + r := tester.NewRunner(t) + prg, err := loader.ProgramFromSource(context.Background(), ` +chat: true +context: foo with * + +Say hi + +--- +name: foo + +#!/bin/bash + +echo This is the input: 
${GPTSCRIPT_INPUT} +`, "") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") + r.AssertStep(t, resp, err) + + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "input 2") + r.AssertStep(t, resp, err) +} diff --git a/pkg/tests/testdata/TestContextWithAsterick/call1-resp.golden b/pkg/tests/testdata/TestContextWithAsterick/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestContextWithAsterick/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestContextWithAsterick/call1.golden b/pkg/tests/testdata/TestContextWithAsterick/call1.golden new file mode 100644 index 00000000..6d9538ce --- /dev/null +++ b/pkg/tests/testdata/TestContextWithAsterick/call1.golden @@ -0,0 +1,25 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "This is the input: input 1\n\nSay hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git a/pkg/tests/testdata/TestContextWithAsterick/call2-resp.golden b/pkg/tests/testdata/TestContextWithAsterick/call2-resp.golden new file mode 100644 index 00000000..997ca1b9 --- /dev/null +++ b/pkg/tests/testdata/TestContextWithAsterick/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestContextWithAsterick/call2.golden b/pkg/tests/testdata/TestContextWithAsterick/call2.golden new file mode 100644 index 00000000..f159014c --- /dev/null +++ b/pkg/tests/testdata/TestContextWithAsterick/call2.golden @@ -0,0 +1,43 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": 
"This is the input: input 2\n\nSay hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 2" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git a/pkg/tests/testdata/TestContextWithAsterick/step1.golden b/pkg/tests/testdata/TestContextWithAsterick/step1.golden new file mode 100644 index 00000000..cc42e6a6 --- /dev/null +++ b/pkg/tests/testdata/TestContextWithAsterick/step1.golden @@ -0,0 +1,48 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 1", + "toolID": "inline:", + "state": { + "continuation": { + "state": { + "input": "input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "This is the input: input 1\n\nSay hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + } + ], + "chat": true + } + }, + "result": "TEST RESULT CALL: 1" + }, + "continuationToolID": "inline:" + } +}` diff --git a/pkg/tests/testdata/TestContextWithAsterick/step2.golden b/pkg/tests/testdata/TestContextWithAsterick/step2.golden new file mode 100644 index 00000000..02bc92fe --- /dev/null +++ b/pkg/tests/testdata/TestContextWithAsterick/step2.golden @@ -0,0 +1,66 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 2", + "toolID": "inline:", + "state": { + "continuation": { + "state": { + "input": "input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "This is the input: input 2\n\nSay hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + 
"usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 2" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} + } + ], + "chat": true + } + }, + "result": "TEST RESULT CALL: 2" + }, + "continuationToolID": "inline:" + } +}` diff --git a/pkg/tests/tester/runner.go b/pkg/tests/tester/runner.go index 66337ff5..b460ce18 100644 --- a/pkg/tests/tester/runner.go +++ b/pkg/tests/tester/runner.go @@ -135,7 +135,8 @@ func (c *Client) Call(_ context.Context, messageRequest types.CompletionRequest, type Runner struct { *runner.Runner - Client *Client + Client *Client + StepAsserted int } func (r *Runner) RunDefault() string { @@ -166,6 +167,21 @@ func (r *Runner) AssertResponded(t *testing.T) { require.Len(t, r.Client.result, 0) } +func toJSONString(t *testing.T, v interface{}) string { + t.Helper() + x, err := json.MarshalIndent(v, "", " ") + require.NoError(t, err) + return string(x) +} + +func (r *Runner) AssertStep(t *testing.T, resp runner.ChatResponse, err error) { + t.Helper() + r.StepAsserted++ + require.NoError(t, err) + r.AssertResponded(t) + autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+fmt.Sprintf("/step%d", r.StepAsserted))) +} + func (r *Runner) RespondWith(result ...Result) { r.Client.result = append(r.Client.result, result...) 
} From 1c75954594d59abe8ae371bc7c96ce953eec4101 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 27 Aug 2024 10:30:30 -0700 Subject: [PATCH 119/270] chore: drop input continuations feature --- pkg/runner/runner.go | 97 ++------- pkg/tests/runner_test.go | 78 +------- .../TestContextSubChat/call10-resp.golden | 9 - .../testdata/TestContextSubChat/call10.golden | 43 ---- .../TestContextSubChat/call3-resp.golden | 16 -- .../testdata/TestContextSubChat/call3.golden | 61 ------ .../TestContextSubChat/call4-resp.golden | 9 - .../testdata/TestContextSubChat/call4.golden | 64 ------ .../TestContextSubChat/call5-resp.golden | 9 - .../testdata/TestContextSubChat/call5.golden | 25 --- .../TestContextSubChat/call6-resp.golden | 16 -- .../testdata/TestContextSubChat/call6.golden | 31 --- .../TestContextSubChat/call7-resp.golden | 9 - .../testdata/TestContextSubChat/call7.golden | 43 ---- .../TestContextSubChat/call8-resp.golden | 16 -- .../testdata/TestContextSubChat/call8.golden | 61 ------ .../TestContextSubChat/call9-resp.golden | 9 - .../testdata/TestContextSubChat/call9.golden | 64 ------ .../testdata/TestContextSubChat/step1.golden | 146 -------------- .../testdata/TestContextSubChat/step2.golden | 48 ----- .../testdata/TestContextSubChat/step3.golden | 188 ------------------ .../testdata/TestContextSubChat/step4.golden | 66 ------ 22 files changed, 20 insertions(+), 1088 deletions(-) delete mode 100644 pkg/tests/testdata/TestContextSubChat/call10-resp.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call10.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call3-resp.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call3.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call4-resp.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call4.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call5-resp.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call5.golden delete 
mode 100644 pkg/tests/testdata/TestContextSubChat/call6-resp.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call6.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call7-resp.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call7.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call8-resp.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call8.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call9-resp.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/call9.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/step1.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/step2.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/step3.golden delete mode 100644 pkg/tests/testdata/TestContextSubChat/step4.golden diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 30737a4c..3035a1d1 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -172,11 +172,7 @@ func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Progra return resp, err } - if state == nil || state.StartContinuation { - if state != nil { - state = state.WithResumeInput(&input) - input = state.InputContextContinuationInput - } + if state == nil { state, err = r.start(callCtx, state, monitor, env, input) if err != nil { return resp, err @@ -186,11 +182,9 @@ func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Progra state.ResumeInput = &input } - if !state.StartContinuation { - state, err = r.resume(callCtx, monitor, env, state) - if err != nil { - return resp, err - } + state, err = r.resume(callCtx, monitor, env, state) + if err != nil { + return resp, err } if state.Result != nil { @@ -335,24 +329,10 @@ func getToolRefInput(prg *types.Program, ref types.ToolReference, input string) return string(output), err } -func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monitor, env []string, input 
string) (result []engine.InputContext, _ *State, _ error) { +func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monitor, env []string, input string) (result []engine.InputContext, _ error) { toolRefs, err := callCtx.Tool.GetContextTools(*callCtx.Program) if err != nil { - return nil, nil, err - } - - var newState *State - if state != nil { - cp := *state - newState = &cp - if newState.InputContextContinuation != nil { - newState.InputContexts = nil - newState.InputContextContinuation = nil - newState.InputContextContinuationInput = "" - newState.ResumeInput = state.InputContextContinuationResumeInput - - input = state.InputContextContinuationInput - } + return nil, err } for i, toolRef := range toolRefs { @@ -363,29 +343,16 @@ func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monito contextInput, err := getToolRefInput(callCtx.Program, toolRef, input) if err != nil { - return nil, nil, err + return nil, err } var content *State - if state != nil && state.InputContextContinuation != nil { - content, err = r.subCallResume(callCtx.Ctx, callCtx, monitor, env, toolRef.ToolID, "", state.InputContextContinuation.WithResumeInput(state.ResumeInput), engine.ContextToolCategory) - } else { - content, err = r.subCall(callCtx.Ctx, callCtx, monitor, env, toolRef.ToolID, contextInput, "", engine.ContextToolCategory) - } + content, err = r.subCall(callCtx.Ctx, callCtx, monitor, env, toolRef.ToolID, contextInput, "", engine.ContextToolCategory) if err != nil { - return nil, nil, err + return nil, err } if content.Continuation != nil { - if newState == nil { - newState = &State{} - } - newState.InputContexts = result - newState.InputContextContinuation = content - newState.InputContextContinuationInput = input - if state != nil { - newState.InputContextContinuationResumeInput = state.ResumeInput - } - return nil, newState, nil + return nil, fmt.Errorf("invalid state: context tool [%s] can not result in a continuation", toolRef.ToolID) } 
result = append(result, engine.InputContext{ ToolID: toolRef.ToolID, @@ -393,7 +360,7 @@ func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monito }) } - return result, newState, nil + return result, nil } func (r *Runner) call(callCtx engine.Context, monitor Monitor, env []string, input string) (*State, error) { @@ -401,9 +368,6 @@ func (r *Runner) call(callCtx engine.Context, monitor Monitor, env []string, inp if err != nil { return nil, err } - if result.StartContinuation { - return result, nil - } return r.resume(callCtx, monitor, env, result) } @@ -435,15 +399,10 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en } } - var newState *State - callCtx.InputContext, newState, err = r.getContext(callCtx, state, monitor, env, input) + callCtx.InputContext, err = r.getContext(callCtx, state, monitor, env, input) if err != nil { return nil, err } - if newState != nil && newState.InputContextContinuation != nil { - newState.StartContinuation = true - return newState, nil - } e := engine.Engine{ Model: r.c, @@ -493,11 +452,7 @@ type State struct { SubCalls []SubCallResult `json:"subCalls,omitempty"` SubCallID string `json:"subCallID,omitempty"` - InputContexts []engine.InputContext `json:"inputContexts,omitempty"` - InputContextContinuation *State `json:"inputContextContinuation,omitempty"` - InputContextContinuationInput string `json:"inputContextContinuationInput,omitempty"` - InputContextContinuationResumeInput *string `json:"inputContextContinuationResumeInput,omitempty"` - StartContinuation bool `json:"startContinuation,omitempty"` + InputContexts []engine.InputContext `json:"inputContexts,omitempty"` } func (s State) WithResumeInput(input *string) *State { @@ -510,10 +465,6 @@ func (s State) ContinuationContentToolID() (string, error) { return s.ContinuationToolID, nil } - if s.InputContextContinuation != nil { - return s.InputContextContinuation.ContinuationContentToolID() - } - for _, subCall := range 
s.SubCalls { if s.SubCallID == subCall.CallID { return subCall.State.ContinuationContentToolID() @@ -527,10 +478,6 @@ func (s State) ContinuationContent() (string, error) { return *s.Continuation.Result, nil } - if s.InputContextContinuation != nil { - return s.InputContextContinuation.ContinuationContent() - } - for _, subCall := range s.SubCalls { if s.SubCallID == subCall.CallID { return subCall.State.ContinuationContent() @@ -549,10 +496,6 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s retState, retErr = r.handleOutput(callCtx, monitor, env, retState, retErr) }() - if state.StartContinuation { - return nil, fmt.Errorf("invalid state, resume should not have StartContinuation set to true") - } - if state.Continuation == nil { return nil, errors.New("invalid state, resume should have Continuation data") } @@ -651,18 +594,18 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s Env: env, } - var contextInput string + var contentInput string if state.Continuation != nil && state.Continuation.State != nil { - contextInput = state.Continuation.State.Input + contentInput = state.Continuation.State.Input } if state.ResumeInput != nil { - contextInput = *state.ResumeInput + contentInput = *state.ResumeInput } - callCtx.InputContext, state, err = r.getContext(callCtx, state, monitor, env, contextInput) - if err != nil || state.InputContextContinuation != nil { + callCtx.InputContext, err = r.getContext(callCtx, state, monitor, env, contentInput) + if err != nil { return state, err } @@ -772,10 +715,6 @@ func (r *Runner) subCalls(callCtx engine.Context, monitor Monitor, env []string, callCtx.LastReturn = state.Continuation } - if state.InputContextContinuation != nil { - return state, nil, nil - } - if state.SubCallID != "" { if state.ResumeInput == nil { return nil, nil, fmt.Errorf("invalid state, input must be set for sub call continuation on callID [%s]", state.SubCallID) diff --git 
a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 141e6aff..f21f936e 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -212,82 +212,8 @@ func TestContextSubChat(t *testing.T) { prg, err := r.Load("") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, os.Environ(), "User 1") - require.NoError(t, err) - r.AssertResponded(t) - assert.False(t, resp.Done) - autogold.Expect("Assistant Response 1 - from chatbot1").Equal(t, resp.Content) - autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) - - r.RespondWith(tester.Result{ - Content: []types.ContentPart{ - { - ToolCall: &types.CompletionToolCall{ - ID: "call_2", - Function: types.CompletionFunctionCall{ - Name: types.ToolNormalizer("sys.chat.finish"), - Arguments: "Response from context chatbot", - }, - }, - }, - }, - }, tester.Result{ - Text: "Assistant Response 2 - from context tool", - }, tester.Result{ - Text: "Assistant Response 3 - from main chat tool", - }) - resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 2") - require.NoError(t, err) - r.AssertResponded(t) - assert.False(t, resp.Done) - autogold.Expect("Assistant Response 3 - from main chat tool").Equal(t, resp.Content) - autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step2")) - - r.RespondWith(tester.Result{ - Content: []types.ContentPart{ - { - ToolCall: &types.CompletionToolCall{ - ID: "call_3", - Function: types.CompletionFunctionCall{ - Name: "chatbot", - Arguments: "Input to chatbot1 on resume", - }, - }, - }, - }, - }, tester.Result{ - Text: "Assistant Response 4 - from chatbot1", - }) - resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 3") - require.NoError(t, err) - r.AssertResponded(t) - assert.False(t, resp.Done) - autogold.Expect("Assistant Response 3 - from main chat tool").Equal(t, resp.Content) - autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step3")) - - 
r.RespondWith(tester.Result{ - Content: []types.ContentPart{ - { - ToolCall: &types.CompletionToolCall{ - ID: "call_4", - Function: types.CompletionFunctionCall{ - Name: types.ToolNormalizer("sys.chat.finish"), - Arguments: "Response from context chatbot after resume", - }, - }, - }, - }, - }, tester.Result{ - Text: "Assistant Response 5 - from context tool resume", - }, tester.Result{ - Text: "Assistant Response 6 - from main chat tool resume", - }) - resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 4") - require.NoError(t, err) - r.AssertResponded(t) - assert.False(t, resp.Done) - autogold.Expect("Assistant Response 6 - from main chat tool resume").Equal(t, resp.Content) - autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step4")) + _, err = r.Chat(context.Background(), nil, prg, os.Environ(), "User 1") + autogold.Expect("invalid state: context tool [testdata/TestContextSubChat/test.gpt:subtool] can not result in a continuation").Equal(t, err.Error()) } func TestSubChat(t *testing.T) { diff --git a/pkg/tests/testdata/TestContextSubChat/call10-resp.golden b/pkg/tests/testdata/TestContextSubChat/call10-resp.golden deleted file mode 100644 index 144ca8d9..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call10-resp.golden +++ /dev/null @@ -1,9 +0,0 @@ -`{ - "role": "assistant", - "content": [ - { - "text": "Assistant Response 6 - from main chat tool resume" - } - ], - "usage": {} -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call10.golden b/pkg/tests/testdata/TestContextSubChat/call10.golden deleted file mode 100644 index c8c98651..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call10.golden +++ /dev/null @@ -1,43 +0,0 @@ -`{ - "model": "gpt-4o", - "internalSystemPrompt": false, - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Assistant Response 5 - from context tool resume\nHello" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "User 1" - } - ], 
- "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "text": "Assistant Response 3 - from main chat tool" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "User 3" - } - ], - "usage": {} - } - ], - "chat": true -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call3-resp.golden b/pkg/tests/testdata/TestContextSubChat/call3-resp.golden deleted file mode 100644 index b116d066..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call3-resp.golden +++ /dev/null @@ -1,16 +0,0 @@ -`{ - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_2", - "function": { - "name": "chatFinish", - "arguments": "Response from context chatbot" - } - } - } - ], - "usage": {} -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call3.golden b/pkg/tests/testdata/TestContextSubChat/call3.golden deleted file mode 100644 index 55ad402f..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call3.golden +++ /dev/null @@ -1,61 +0,0 @@ -`{ - "model": "gpt-4o", - "internalSystemPrompt": false, - "tools": [ - { - "function": { - "toolID": "sys.chat.finish", - "name": "chatFinish", - "description": "Concludes the conversation. 
This can not be used to ask a question.", - "parameters": { - "properties": { - "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "This is a chatbot" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "Input to chatbot1" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "text": "Assistant Response 1 - from chatbot1" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "User 1" - } - ], - "usage": {} - } - ], - "chat": true -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call4-resp.golden b/pkg/tests/testdata/TestContextSubChat/call4-resp.golden deleted file mode 100644 index a86ae187..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call4-resp.golden +++ /dev/null @@ -1,9 +0,0 @@ -`{ - "role": "assistant", - "content": [ - { - "text": "Assistant Response 2 - from context tool" - } - ], - "usage": {} -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call4.golden b/pkg/tests/testdata/TestContextSubChat/call4.golden deleted file mode 100644 index e1fb91ea..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call4.golden +++ /dev/null @@ -1,64 +0,0 @@ -`{ - "model": "gpt-4o", - "tools": [ - { - "function": { - "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", - "name": "chatbot", - "parameters": { - "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the assistant. 
This may be an instruction or question.", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Call chatbot" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_1", - "function": { - "name": "chatbot", - "arguments": "Input to chatbot1" - } - } - } - ], - "usage": {} - }, - { - "role": "tool", - "content": [ - { - "text": "Response from context chatbot" - } - ], - "toolCall": { - "index": 0, - "id": "call_1", - "function": { - "name": "chatbot", - "arguments": "Input to chatbot1" - } - }, - "usage": {} - } - ] -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call5-resp.golden b/pkg/tests/testdata/TestContextSubChat/call5-resp.golden deleted file mode 100644 index e49a8481..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call5-resp.golden +++ /dev/null @@ -1,9 +0,0 @@ -`{ - "role": "assistant", - "content": [ - { - "text": "Assistant Response 3 - from main chat tool" - } - ], - "usage": {} -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call5.golden b/pkg/tests/testdata/TestContextSubChat/call5.golden deleted file mode 100644 index 2b8cf41e..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call5.golden +++ /dev/null @@ -1,25 +0,0 @@ -`{ - "model": "gpt-4o", - "internalSystemPrompt": false, - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Assistant Response 2 - from context tool\nHello" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "User 1" - } - ], - "usage": {} - } - ], - "chat": true -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call6-resp.golden b/pkg/tests/testdata/TestContextSubChat/call6-resp.golden deleted file mode 100644 index 6807fce9..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call6-resp.golden +++ /dev/null @@ -1,16 +0,0 @@ -`{ - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_3", 
- "function": { - "name": "chatbot", - "arguments": "Input to chatbot1 on resume" - } - } - } - ], - "usage": {} -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call6.golden b/pkg/tests/testdata/TestContextSubChat/call6.golden deleted file mode 100644 index 225401db..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call6.golden +++ /dev/null @@ -1,31 +0,0 @@ -`{ - "model": "gpt-4o", - "tools": [ - { - "function": { - "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", - "name": "chatbot", - "parameters": { - "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Call chatbot" - } - ], - "usage": {} - } - ] -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call7-resp.golden b/pkg/tests/testdata/TestContextSubChat/call7-resp.golden deleted file mode 100644 index 3e0c5f3c..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call7-resp.golden +++ /dev/null @@ -1,9 +0,0 @@ -`{ - "role": "assistant", - "content": [ - { - "text": "Assistant Response 4 - from chatbot1" - } - ], - "usage": {} -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call7.golden b/pkg/tests/testdata/TestContextSubChat/call7.golden deleted file mode 100644 index b0ef4e39..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call7.golden +++ /dev/null @@ -1,43 +0,0 @@ -`{ - "model": "gpt-4o", - "internalSystemPrompt": false, - "tools": [ - { - "function": { - "toolID": "sys.chat.finish", - "name": "chatFinish", - "description": "Concludes the conversation. 
This can not be used to ask a question.", - "parameters": { - "properties": { - "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "This is a chatbot" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "Input to chatbot1 on resume" - } - ], - "usage": {} - } - ], - "chat": true -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call8-resp.golden b/pkg/tests/testdata/TestContextSubChat/call8-resp.golden deleted file mode 100644 index 2e608b31..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call8-resp.golden +++ /dev/null @@ -1,16 +0,0 @@ -`{ - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_4", - "function": { - "name": "chatFinish", - "arguments": "Response from context chatbot after resume" - } - } - } - ], - "usage": {} -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call8.golden b/pkg/tests/testdata/TestContextSubChat/call8.golden deleted file mode 100644 index 3d0db61b..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call8.golden +++ /dev/null @@ -1,61 +0,0 @@ -`{ - "model": "gpt-4o", - "internalSystemPrompt": false, - "tools": [ - { - "function": { - "toolID": "sys.chat.finish", - "name": "chatFinish", - "description": "Concludes the conversation. 
This can not be used to ask a question.", - "parameters": { - "properties": { - "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "This is a chatbot" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "Input to chatbot1 on resume" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "text": "Assistant Response 4 - from chatbot1" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "User 4" - } - ], - "usage": {} - } - ], - "chat": true -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call9-resp.golden b/pkg/tests/testdata/TestContextSubChat/call9-resp.golden deleted file mode 100644 index 4424246d..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call9-resp.golden +++ /dev/null @@ -1,9 +0,0 @@ -`{ - "role": "assistant", - "content": [ - { - "text": "Assistant Response 5 - from context tool resume" - } - ], - "usage": {} -}` diff --git a/pkg/tests/testdata/TestContextSubChat/call9.golden b/pkg/tests/testdata/TestContextSubChat/call9.golden deleted file mode 100644 index 33768f26..00000000 --- a/pkg/tests/testdata/TestContextSubChat/call9.golden +++ /dev/null @@ -1,64 +0,0 @@ -`{ - "model": "gpt-4o", - "tools": [ - { - "function": { - "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", - "name": "chatbot", - "parameters": { - "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the assistant. 
This may be an instruction or question.", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Call chatbot" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_3", - "function": { - "name": "chatbot", - "arguments": "Input to chatbot1 on resume" - } - } - } - ], - "usage": {} - }, - { - "role": "tool", - "content": [ - { - "text": "Response from context chatbot after resume" - } - ], - "toolCall": { - "index": 0, - "id": "call_3", - "function": { - "name": "chatbot", - "arguments": "Input to chatbot1 on resume" - } - }, - "usage": {} - } - ] -}` diff --git a/pkg/tests/testdata/TestContextSubChat/step1.golden b/pkg/tests/testdata/TestContextSubChat/step1.golden deleted file mode 100644 index 2ffb138e..00000000 --- a/pkg/tests/testdata/TestContextSubChat/step1.golden +++ /dev/null @@ -1,146 +0,0 @@ -`{ - "done": false, - "content": "Assistant Response 1 - from chatbot1", - "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", - "state": { - "inputContextContinuation": { - "continuation": { - "state": { - "completion": { - "model": "gpt-4o", - "tools": [ - { - "function": { - "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", - "name": "chatbot", - "parameters": { - "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the assistant. 
This may be an instruction or question.", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Call chatbot" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_1", - "function": { - "name": "chatbot", - "arguments": "Input to chatbot1" - } - } - } - ], - "usage": {} - } - ] - }, - "pending": { - "call_1": { - "index": 0, - "id": "call_1", - "function": { - "name": "chatbot", - "arguments": "Input to chatbot1" - } - } - } - }, - "calls": { - "call_1": { - "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", - "input": "Input to chatbot1" - } - } - }, - "subCalls": [ - { - "toolId": "testdata/TestContextSubChat/test.gpt:chatbot", - "callId": "call_1", - "state": { - "continuation": { - "state": { - "input": "Input to chatbot1", - "completion": { - "model": "gpt-4o", - "internalSystemPrompt": false, - "tools": [ - { - "function": { - "toolID": "sys.chat.finish", - "name": "chatFinish", - "description": "Concludes the conversation. 
This can not be used to ask a question.", - "parameters": { - "properties": { - "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "This is a chatbot" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "Input to chatbot1" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "text": "Assistant Response 1 - from chatbot1" - } - ], - "usage": {} - } - ], - "chat": true - } - }, - "result": "Assistant Response 1 - from chatbot1" - }, - "continuationToolID": "testdata/TestContextSubChat/test.gpt:chatbot" - } - } - ], - "subCallID": "call_1" - }, - "inputContextContinuationInput": "User 1", - "startContinuation": true - } -}` diff --git a/pkg/tests/testdata/TestContextSubChat/step2.golden b/pkg/tests/testdata/TestContextSubChat/step2.golden deleted file mode 100644 index dfcb2b96..00000000 --- a/pkg/tests/testdata/TestContextSubChat/step2.golden +++ /dev/null @@ -1,48 +0,0 @@ -`{ - "done": false, - "content": "Assistant Response 3 - from main chat tool", - "toolID": "testdata/TestContextSubChat/test.gpt:", - "state": { - "continuation": { - "state": { - "input": "User 1", - "completion": { - "model": "gpt-4o", - "internalSystemPrompt": false, - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Assistant Response 2 - from context tool\nHello" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "User 1" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "text": "Assistant Response 3 - from main chat tool" - } - ], - "usage": {} - } - ], - "chat": true - } - }, - "result": "Assistant Response 3 - from main chat tool" - }, - "continuationToolID": "testdata/TestContextSubChat/test.gpt:" - } -}` diff --git a/pkg/tests/testdata/TestContextSubChat/step3.golden 
b/pkg/tests/testdata/TestContextSubChat/step3.golden deleted file mode 100644 index 0ccb188b..00000000 --- a/pkg/tests/testdata/TestContextSubChat/step3.golden +++ /dev/null @@ -1,188 +0,0 @@ -`{ - "done": false, - "content": "Assistant Response 3 - from main chat tool", - "toolID": "testdata/TestContextSubChat/test.gpt:", - "state": { - "continuation": { - "state": { - "input": "User 1", - "completion": { - "model": "gpt-4o", - "internalSystemPrompt": false, - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Assistant Response 2 - from context tool\nHello" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "User 1" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "text": "Assistant Response 3 - from main chat tool" - } - ], - "usage": {} - } - ], - "chat": true - } - }, - "result": "Assistant Response 3 - from main chat tool" - }, - "continuationToolID": "testdata/TestContextSubChat/test.gpt:", - "resumeInput": "User 3", - "inputContextContinuation": { - "continuation": { - "state": { - "completion": { - "model": "gpt-4o", - "tools": [ - { - "function": { - "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", - "name": "chatbot", - "parameters": { - "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the assistant. 
This may be an instruction or question.", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Call chatbot" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_3", - "function": { - "name": "chatbot", - "arguments": "Input to chatbot1 on resume" - } - } - } - ], - "usage": {} - } - ] - }, - "pending": { - "call_3": { - "index": 0, - "id": "call_3", - "function": { - "name": "chatbot", - "arguments": "Input to chatbot1 on resume" - } - } - } - }, - "calls": { - "call_3": { - "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", - "input": "Input to chatbot1 on resume" - } - } - }, - "subCalls": [ - { - "toolId": "testdata/TestContextSubChat/test.gpt:chatbot", - "callId": "call_3", - "state": { - "continuation": { - "state": { - "input": "Input to chatbot1 on resume", - "completion": { - "model": "gpt-4o", - "internalSystemPrompt": false, - "tools": [ - { - "function": { - "toolID": "sys.chat.finish", - "name": "chatFinish", - "description": "Concludes the conversation. 
This can not be used to ask a question.", - "parameters": { - "properties": { - "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" - } - }, - "type": "object" - } - } - } - ], - "messages": [ - { - "role": "system", - "content": [ - { - "text": "This is a chatbot" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "Input to chatbot1 on resume" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "text": "Assistant Response 4 - from chatbot1" - } - ], - "usage": {} - } - ], - "chat": true - } - }, - "result": "Assistant Response 4 - from chatbot1" - }, - "continuationToolID": "testdata/TestContextSubChat/test.gpt:chatbot" - } - } - ], - "subCallID": "call_3" - }, - "inputContextContinuationInput": "User 1", - "inputContextContinuationResumeInput": "User 3" - } -}` diff --git a/pkg/tests/testdata/TestContextSubChat/step4.golden b/pkg/tests/testdata/TestContextSubChat/step4.golden deleted file mode 100644 index 5e95d626..00000000 --- a/pkg/tests/testdata/TestContextSubChat/step4.golden +++ /dev/null @@ -1,66 +0,0 @@ -`{ - "done": false, - "content": "Assistant Response 6 - from main chat tool resume", - "toolID": "testdata/TestContextSubChat/test.gpt:", - "state": { - "continuation": { - "state": { - "input": "User 1", - "completion": { - "model": "gpt-4o", - "internalSystemPrompt": false, - "messages": [ - { - "role": "system", - "content": [ - { - "text": "Assistant Response 5 - from context tool resume\nHello" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "User 1" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "text": "Assistant Response 3 - from main chat tool" - } - ], - "usage": {} - }, - { - "role": "user", - "content": [ - { - "text": "User 3" - } - ], - "usage": {} - }, - { - "role": "assistant", - "content": [ - { - "text": "Assistant Response 6 - from main chat 
tool resume" - } - ], - "usage": {} - } - ], - "chat": true - } - }, - "result": "Assistant Response 6 - from main chat tool resume" - }, - "continuationToolID": "testdata/TestContextSubChat/test.gpt:" - } -}` From 93e77066e8dc357070728825eaecc5136852b67c Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 27 Aug 2024 10:44:57 -0700 Subject: [PATCH 120/270] bug: handle case where node_modules is deleted in local tool dev --- pkg/repos/runtimes/node/node.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/repos/runtimes/node/node.go b/pkg/repos/runtimes/node/node.go index aa57b059..4d73c13b 100644 --- a/pkg/repos/runtimes/node/node.go +++ b/pkg/repos/runtimes/node/node.go @@ -26,6 +26,7 @@ var releasesData []byte const ( downloadURL = "https://nodejs.org/dist/%s/" packageJSON = "package.json" + nodeModules = "node_modules" ) type Runtime struct { @@ -64,8 +65,15 @@ func (r *Runtime) supports(testCmd string, cmd []string) bool { func (r *Runtime) GetHash(tool types.Tool) (string, error) { if !tool.Source.IsGit() && tool.WorkingDir != "" { + var prefix string + // This hashes if the node_modules directory was deleted + if s, err := os.Stat(filepath.Join(tool.WorkingDir, nodeModules)); err == nil { + prefix = hash.Digest(tool.WorkingDir + s.ModTime().String())[:7] + } else if s, err := os.Stat(tool.WorkingDir); err == nil { + prefix = hash.Digest(tool.WorkingDir + s.ModTime().String())[:7] + } if s, err := os.Stat(filepath.Join(tool.WorkingDir, packageJSON)); err == nil { - return hash.Digest(tool.WorkingDir + s.ModTime().String())[:7], nil + return prefix + hash.Digest(tool.WorkingDir + s.ModTime().String())[:7], nil } } return "", nil From 7d469cec1ce9b1bda6e7437e6ffe3fdc1028a6c2 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 27 Aug 2024 11:03:21 -0700 Subject: [PATCH 121/270] bug: respect "share context" from referenced tools --- pkg/tests/runner2_test.go | 23 +++++++++ .../TestContextShareBug/call1-resp.golden | 9 
++++ .../testdata/TestContextShareBug/call1.golden | 25 ++++++++++ .../testdata/TestContextShareBug/step1.golden | 48 +++++++++++++++++++ pkg/types/tool.go | 9 ++++ 5 files changed, 114 insertions(+) create mode 100644 pkg/tests/testdata/TestContextShareBug/call1-resp.golden create mode 100644 pkg/tests/testdata/TestContextShareBug/call1.golden create mode 100644 pkg/tests/testdata/TestContextShareBug/step1.golden diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index 12ac4fa0..27d4c226 100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -32,3 +32,26 @@ echo This is the input: ${GPTSCRIPT_INPUT} resp, err = r.Chat(context.Background(), resp.State, prg, nil, "input 2") r.AssertStep(t, resp, err) } + +func TestContextShareBug(t *testing.T) { + r := tester.NewRunner(t) + prg, err := loader.ProgramFromSource(context.Background(), ` +chat: true +tools: sharecontext + +Say hi + +--- +name: sharecontext +share context: realcontext +--- +name: realcontext + +#!sys.echo + +Yo dawg`, "") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") + r.AssertStep(t, resp, err) +} diff --git a/pkg/tests/testdata/TestContextShareBug/call1-resp.golden b/pkg/tests/testdata/TestContextShareBug/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestContextShareBug/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestContextShareBug/call1.golden b/pkg/tests/testdata/TestContextShareBug/call1.golden new file mode 100644 index 00000000..0a46f0ca --- /dev/null +++ b/pkg/tests/testdata/TestContextShareBug/call1.golden @@ -0,0 +1,25 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nYo dawg\nSay hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": 
[ + { + "text": "input 1" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git a/pkg/tests/testdata/TestContextShareBug/step1.golden b/pkg/tests/testdata/TestContextShareBug/step1.golden new file mode 100644 index 00000000..cb17be6d --- /dev/null +++ b/pkg/tests/testdata/TestContextShareBug/step1.golden @@ -0,0 +1,48 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 1", + "toolID": "inline:", + "state": { + "continuation": { + "state": { + "input": "input 1", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nYo dawg\nSay hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "input 1" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + } + ], + "chat": true + } + }, + "result": "TEST RESULT CALL: 1" + }, + "continuationToolID": "inline:" + } +}` diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 57ce3fbf..0d2a5cc0 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -557,6 +557,15 @@ func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { result.Add(contextRef) } + exportOnlyTools, err := t.getCompletionToolRefs(prg, nil, ToolTypeDefault, ToolTypeContext) + if err != nil { + return nil, err + } + + for _, contextRef := range exportOnlyTools { + result.AddAll(prg.ToolSet[contextRef.ToolID].getExportedContext(prg)) + } + return result.List() } From e83fe6538b32e3e04d030c272ed01fce77ae98dd Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 27 Aug 2024 20:23:13 -0400 Subject: [PATCH 122/270] fix: complete the SDK server options on run Signed-off-by: Donnie Adams --- pkg/sdkserver/server.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 4ef28267..f72e7ae9 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -32,6 +32,8 @@ type Options struct { // Run 
will start the server and block until the server is shut down. func Run(ctx context.Context, opts Options) error { + opts = complete(opts) + listener, err := newListener(opts) if err != nil { return err From 5dfb195b0d8cfe555074ef2f9f96bf76f7c8364a Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 28 Aug 2024 00:13:50 -0700 Subject: [PATCH 123/270] bug: show error when config.json is malformed --- pkg/config/cliconfig.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/config/cliconfig.go b/pkg/config/cliconfig.go index 43649135..dd358d52 100644 --- a/pkg/config/cliconfig.go +++ b/pkg/config/cliconfig.go @@ -137,7 +137,7 @@ func ReadCLIConfig(gptscriptConfigFile string) (*CLIConfig, error) { location: gptscriptConfigFile, } if err := json.Unmarshal(data, result); err != nil { - return nil, err + return nil, fmt.Errorf("failed to unmarshal %s: %v", gptscriptConfigFile, err) } if result.CredentialsStore == "" { From 33741b12c1593aca5e1c423dbbf00b86a6f48996 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 28 Aug 2024 23:17:35 -0700 Subject: [PATCH 124/270] chore: refactor logic for tool sharing --- pkg/engine/engine.go | 6 +- pkg/runner/input.go | 3 +- pkg/runner/output.go | 3 +- pkg/runner/runner.go | 6 +- pkg/tests/testdata/TestAgentOnly/call2.golden | 8 +- pkg/tests/testdata/TestAgentOnly/step1.golden | 8 +- .../testdata/TestAgents/call3-resp.golden | 2 +- pkg/tests/testdata/TestAgents/call3.golden | 8 +- pkg/tests/testdata/TestAgents/step1.golden | 12 +- .../testdata/TestExport/call1-resp.golden | 2 +- pkg/tests/testdata/TestExport/call1.golden | 8 +- pkg/tests/testdata/TestExport/call3.golden | 12 +- .../testdata/TestExportContext/call1.golden | 2 +- .../testdata/TestToolRefAll/call1.golden | 18 +- pkg/types/completion.go | 20 +- pkg/types/set.go | 11 + pkg/types/tool.go | 395 +++++++----------- 17 files changed, 211 insertions(+), 313 deletions(-) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 
14b75e0a..d028d50b 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -204,7 +204,7 @@ func NewContext(ctx context.Context, prg *types.Program, input string) (Context, Input: input, } - agentGroup, err := callCtx.Tool.GetAgents(*prg) + agentGroup, err := callCtx.Tool.GetToolsByType(prg, types.ToolTypeAgent) if err != nil { return callCtx, err } @@ -225,7 +225,7 @@ func (c *Context) SubCallContext(ctx context.Context, input, toolID, callID stri callID = counter.Next() } - agentGroup, err := c.Tool.GetNextAgentGroup(*c.Program, c.AgentGroup, toolID) + agentGroup, err := c.Tool.GetNextAgentGroup(c.Program, c.AgentGroup, toolID) if err != nil { return Context{}, err } @@ -272,7 +272,7 @@ func populateMessageParams(ctx Context, completion *types.CompletionRequest, too } var err error - completion.Tools, err = tool.GetCompletionTools(*ctx.Program, ctx.AgentGroup...) + completion.Tools, err = tool.GetChatCompletionTools(*ctx.Program, ctx.AgentGroup...) if err != nil { return err } diff --git a/pkg/runner/input.go b/pkg/runner/input.go index 7d77330e..a211ec9d 100644 --- a/pkg/runner/input.go +++ b/pkg/runner/input.go @@ -5,10 +5,11 @@ import ( "fmt" "github.com/gptscript-ai/gptscript/pkg/engine" + "github.com/gptscript-ai/gptscript/pkg/types" ) func (r *Runner) handleInput(callCtx engine.Context, monitor Monitor, env []string, input string) (string, error) { - inputToolRefs, err := callCtx.Tool.GetInputFilterTools(*callCtx.Program) + inputToolRefs, err := callCtx.Tool.GetToolsByType(callCtx.Program, types.ToolTypeInput) if err != nil { return "", err } diff --git a/pkg/runner/output.go b/pkg/runner/output.go index d4cb4b9b..e5fe849d 100644 --- a/pkg/runner/output.go +++ b/pkg/runner/output.go @@ -6,10 +6,11 @@ import ( "fmt" "github.com/gptscript-ai/gptscript/pkg/engine" + "github.com/gptscript-ai/gptscript/pkg/types" ) func (r *Runner) handleOutput(callCtx engine.Context, monitor Monitor, env []string, state *State, retErr error) (*State, error) { - 
outputToolRefs, err := callCtx.Tool.GetOutputFilterTools(*callCtx.Program) + outputToolRefs, err := callCtx.Tool.GetToolsByType(callCtx.Program, types.ToolTypeOutput) if err != nil { return nil, err } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 3035a1d1..c843b6b5 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -330,7 +330,7 @@ func getToolRefInput(prg *types.Program, ref types.ToolReference, input string) } func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monitor, env []string, input string) (result []engine.InputContext, _ error) { - toolRefs, err := callCtx.Tool.GetContextTools(*callCtx.Program) + toolRefs, err := callCtx.Tool.GetToolsByType(callCtx.Program, types.ToolTypeContext) if err != nil { return nil, err } @@ -387,7 +387,7 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en return nil, err } - credTools, err := callCtx.Tool.GetCredentialTools(*callCtx.Program, callCtx.AgentGroup) + credTools, err := callCtx.Tool.GetToolsByType(callCtx.Program, types.ToolTypeCredential) if err != nil { return nil, err } @@ -503,7 +503,7 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s progress, progressClose := streamProgress(&callCtx, monitor) defer progressClose() - credTools, err := callCtx.Tool.GetCredentialTools(*callCtx.Program, callCtx.AgentGroup) + credTools, err := callCtx.Tool.GetToolsByType(callCtx.Program, types.ToolTypeCredential) if err != nil { return nil, err } diff --git a/pkg/tests/testdata/TestAgentOnly/call2.golden b/pkg/tests/testdata/TestAgentOnly/call2.golden index 82f95523..7f6b155b 100644 --- a/pkg/tests/testdata/TestAgentOnly/call2.golden +++ b/pkg/tests/testdata/TestAgentOnly/call2.golden @@ -4,8 +4,8 @@ "tools": [ { "function": { - "toolID": "testdata/TestAgentOnly/test.gpt:agent1", - "name": "agent1", + "toolID": "testdata/TestAgentOnly/test.gpt:agent3", + "name": "agent3", "parameters": { "properties": { 
"defaultPromptParameter": { @@ -19,8 +19,8 @@ }, { "function": { - "toolID": "testdata/TestAgentOnly/test.gpt:agent3", - "name": "agent3", + "toolID": "testdata/TestAgentOnly/test.gpt:agent1", + "name": "agent1", "parameters": { "properties": { "defaultPromptParameter": { diff --git a/pkg/tests/testdata/TestAgentOnly/step1.golden b/pkg/tests/testdata/TestAgentOnly/step1.golden index 662dbf04..2cda2025 100644 --- a/pkg/tests/testdata/TestAgentOnly/step1.golden +++ b/pkg/tests/testdata/TestAgentOnly/step1.golden @@ -96,8 +96,8 @@ "tools": [ { "function": { - "toolID": "testdata/TestAgentOnly/test.gpt:agent1", - "name": "agent1", + "toolID": "testdata/TestAgentOnly/test.gpt:agent3", + "name": "agent3", "parameters": { "properties": { "defaultPromptParameter": { @@ -111,8 +111,8 @@ }, { "function": { - "toolID": "testdata/TestAgentOnly/test.gpt:agent3", - "name": "agent3", + "toolID": "testdata/TestAgentOnly/test.gpt:agent1", + "name": "agent1", "parameters": { "properties": { "defaultPromptParameter": { diff --git a/pkg/tests/testdata/TestAgents/call3-resp.golden b/pkg/tests/testdata/TestAgents/call3-resp.golden index e2a65c99..7568fc69 100644 --- a/pkg/tests/testdata/TestAgents/call3-resp.golden +++ b/pkg/tests/testdata/TestAgents/call3-resp.golden @@ -3,7 +3,7 @@ "content": [ { "toolCall": { - "index": 1, + "index": 0, "id": "call_3", "function": { "name": "agent3" diff --git a/pkg/tests/testdata/TestAgents/call3.golden b/pkg/tests/testdata/TestAgents/call3.golden index f9b45a1b..5b1638e0 100644 --- a/pkg/tests/testdata/TestAgents/call3.golden +++ b/pkg/tests/testdata/TestAgents/call3.golden @@ -4,8 +4,8 @@ "tools": [ { "function": { - "toolID": "testdata/TestAgents/test.gpt:agent1", - "name": "agent1", + "toolID": "testdata/TestAgents/test.gpt:agent3", + "name": "agent3", "parameters": { "properties": { "defaultPromptParameter": { @@ -19,8 +19,8 @@ }, { "function": { - "toolID": "testdata/TestAgents/test.gpt:agent3", - "name": "agent3", + "toolID": 
"testdata/TestAgents/test.gpt:agent1", + "name": "agent1", "parameters": { "properties": { "defaultPromptParameter": { diff --git a/pkg/tests/testdata/TestAgents/step1.golden b/pkg/tests/testdata/TestAgents/step1.golden index 3047e695..72e01114 100644 --- a/pkg/tests/testdata/TestAgents/step1.golden +++ b/pkg/tests/testdata/TestAgents/step1.golden @@ -178,8 +178,8 @@ "tools": [ { "function": { - "toolID": "testdata/TestAgents/test.gpt:agent1", - "name": "agent1", + "toolID": "testdata/TestAgents/test.gpt:agent3", + "name": "agent3", "parameters": { "properties": { "defaultPromptParameter": { @@ -193,8 +193,8 @@ }, { "function": { - "toolID": "testdata/TestAgents/test.gpt:agent3", - "name": "agent3", + "toolID": "testdata/TestAgents/test.gpt:agent1", + "name": "agent1", "parameters": { "properties": { "defaultPromptParameter": { @@ -222,7 +222,7 @@ "content": [ { "toolCall": { - "index": 1, + "index": 0, "id": "call_3", "function": { "name": "agent3" @@ -237,7 +237,7 @@ }, "pending": { "call_3": { - "index": 1, + "index": 0, "id": "call_3", "function": { "name": "agent3" diff --git a/pkg/tests/testdata/TestExport/call1-resp.golden b/pkg/tests/testdata/TestExport/call1-resp.golden index 8462d188..7fe59586 100644 --- a/pkg/tests/testdata/TestExport/call1-resp.golden +++ b/pkg/tests/testdata/TestExport/call1-resp.golden @@ -3,7 +3,7 @@ "content": [ { "toolCall": { - "index": 2, + "index": 1, "id": "call_1", "function": { "name": "transient" diff --git a/pkg/tests/testdata/TestExport/call1.golden b/pkg/tests/testdata/TestExport/call1.golden index 9f8b650d..b700ee55 100644 --- a/pkg/tests/testdata/TestExport/call1.golden +++ b/pkg/tests/testdata/TestExport/call1.golden @@ -18,8 +18,8 @@ }, { "function": { - "toolID": "testdata/TestExport/parent.gpt:parent-local", - "name": "parentLocal", + "toolID": "testdata/TestExport/sub/child.gpt:transient", + "name": "transient", "parameters": { "properties": { "defaultPromptParameter": { @@ -33,8 +33,8 @@ }, { "function": { - 
"toolID": "testdata/TestExport/sub/child.gpt:transient", - "name": "transient", + "toolID": "testdata/TestExport/parent.gpt:parent-local", + "name": "parentLocal", "parameters": { "properties": { "defaultPromptParameter": { diff --git a/pkg/tests/testdata/TestExport/call3.golden b/pkg/tests/testdata/TestExport/call3.golden index ccf7e980..d2abca0c 100644 --- a/pkg/tests/testdata/TestExport/call3.golden +++ b/pkg/tests/testdata/TestExport/call3.golden @@ -18,8 +18,8 @@ }, { "function": { - "toolID": "testdata/TestExport/parent.gpt:parent-local", - "name": "parentLocal", + "toolID": "testdata/TestExport/sub/child.gpt:transient", + "name": "transient", "parameters": { "properties": { "defaultPromptParameter": { @@ -33,8 +33,8 @@ }, { "function": { - "toolID": "testdata/TestExport/sub/child.gpt:transient", - "name": "transient", + "toolID": "testdata/TestExport/parent.gpt:parent-local", + "name": "parentLocal", "parameters": { "properties": { "defaultPromptParameter": { @@ -62,7 +62,7 @@ "content": [ { "toolCall": { - "index": 2, + "index": 1, "id": "call_1", "function": { "name": "transient" @@ -80,7 +80,7 @@ } ], "toolCall": { - "index": 2, + "index": 1, "id": "call_1", "function": { "name": "transient" diff --git a/pkg/tests/testdata/TestExportContext/call1.golden b/pkg/tests/testdata/TestExportContext/call1.golden index bec15478..0ee8f9fe 100644 --- a/pkg/tests/testdata/TestExportContext/call1.golden +++ b/pkg/tests/testdata/TestExportContext/call1.golden @@ -38,7 +38,7 @@ "role": "system", "content": [ { - "text": "this is from external context\nthis is from context\nThis is from tool" + "text": "this is from context\nthis is from external context\nThis is from tool" } ], "usage": {} diff --git a/pkg/tests/testdata/TestToolRefAll/call1.golden b/pkg/tests/testdata/TestToolRefAll/call1.golden index ef36e3fb..9289affa 100644 --- a/pkg/tests/testdata/TestToolRefAll/call1.golden +++ b/pkg/tests/testdata/TestToolRefAll/call1.golden @@ -18,12 +18,12 @@ }, { "function": { 
- "toolID": "testdata/TestToolRefAll/test.gpt:none", - "name": "none", + "toolID": "testdata/TestToolRefAll/test.gpt:agentAssistant", + "name": "agentAssistant", "parameters": { "properties": { - "noneArg": { - "description": "stuff", + "defaultPromptParameter": { + "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } }, @@ -33,12 +33,12 @@ }, { "function": { - "toolID": "testdata/TestToolRefAll/test.gpt:agentAssistant", - "name": "agent", + "toolID": "testdata/TestToolRefAll/test.gpt:none", + "name": "none", "parameters": { "properties": { - "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", + "noneArg": { + "description": "stuff", "type": "string" } }, @@ -52,7 +52,7 @@ "role": "system", "content": [ { - "text": "\nShared context\n\nContext Body\nMain tool" + "text": "\nContext Body\n\nShared context\nMain tool" } ], "usage": {} diff --git a/pkg/types/completion.go b/pkg/types/completion.go index dd70ad50..5b3899c3 100644 --- a/pkg/types/completion.go +++ b/pkg/types/completion.go @@ -9,15 +9,15 @@ import ( ) type CompletionRequest struct { - Model string `json:"model,omitempty"` - InternalSystemPrompt *bool `json:"internalSystemPrompt,omitempty"` - Tools []CompletionTool `json:"tools,omitempty"` - Messages []CompletionMessage `json:"messages,omitempty"` - MaxTokens int `json:"maxTokens,omitempty"` - Chat bool `json:"chat,omitempty"` - Temperature *float32 `json:"temperature,omitempty"` - JSONResponse bool `json:"jsonResponse,omitempty"` - Cache *bool `json:"cache,omitempty"` + Model string `json:"model,omitempty"` + InternalSystemPrompt *bool `json:"internalSystemPrompt,omitempty"` + Tools []ChatCompletionTool `json:"tools,omitempty"` + Messages []CompletionMessage `json:"messages,omitempty"` + MaxTokens int `json:"maxTokens,omitempty"` + Chat bool `json:"chat,omitempty"` + Temperature *float32 `json:"temperature,omitempty"` + JSONResponse bool 
`json:"jsonResponse,omitempty"` + Cache *bool `json:"cache,omitempty"` } func (r *CompletionRequest) GetCache() bool { @@ -27,7 +27,7 @@ func (r *CompletionRequest) GetCache() bool { return *r.Cache } -type CompletionTool struct { +type ChatCompletionTool struct { Function CompletionFunctionDefinition `json:"function,omitempty"` } diff --git a/pkg/types/set.go b/pkg/types/set.go index 230e112b..65b73d22 100644 --- a/pkg/types/set.go +++ b/pkg/types/set.go @@ -19,6 +19,17 @@ func (t *toolRefSet) List() (result []ToolReference, err error) { return result, t.err } +func (t *toolRefSet) Contains(value ToolReference) bool { + key := toolRefKey{ + name: value.Named, + toolID: value.ToolID, + arg: value.Arg, + } + + _, ok := t.set[key] + return ok +} + func (t *toolRefSet) HasTool(toolID string) bool { for _, ref := range t.set { if ref.ToolID == toolID { diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 0d2a5cc0..d9d59837 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -33,11 +33,15 @@ const ( ToolTypeAgent = ToolType("agent") ToolTypeOutput = ToolType("output") ToolTypeInput = ToolType("input") - ToolTypeAssistant = ToolType("assistant") ToolTypeTool = ToolType("tool") ToolTypeCredential = ToolType("credential") - ToolTypeProvider = ToolType("provider") ToolTypeDefault = ToolType("") + + // The following types logically exist but have no real code reference. 
These are kept + // here just so that we have a comprehensive list + + ToolTypeAssistant = ToolType("assistant") + ToolTypeProvider = ToolType("provider") ) type ErrToolNotFound struct { @@ -140,6 +144,28 @@ type Parameters struct { Type ToolType `json:"type,omitempty"` } +func (p Parameters) allExports() []string { + return slices.Concat( + p.ExportContext, + p.Export, + p.ExportCredentials, + p.ExportInputFilters, + p.ExportOutputFilters, + ) +} + +func (p Parameters) allReferences() []string { + return slices.Concat( + p.GlobalTools, + p.Tools, + p.Context, + p.Agents, + p.Credentials, + p.InputFilters, + p.OutputFilters, + ) +} + func (p Parameters) ToolRefNames() []string { return slices.Concat( p.Tools, @@ -335,39 +361,6 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str return originalName, alias, args, nil } -func (t Tool) GetAgents(prg Program) (result []ToolReference, _ error) { - toolRefs, err := t.GetToolRefsFromNames(t.Agents) - if err != nil { - return nil, err - } - - genericToolRefs, err := t.getCompletionToolRefs(prg, nil, ToolTypeAgent) - if err != nil { - return nil, err - } - - toolRefs = append(toolRefs, genericToolRefs...) 
- - // Agent Tool refs must be named - for i, toolRef := range toolRefs { - if toolRef.Named != "" { - continue - } - tool := prg.ToolSet[toolRef.ToolID] - name := tool.Name - if name == "" { - name = toolRef.Reference - } - normed := ToolNormalizer(name) - if trimmed := strings.TrimSuffix(strings.TrimSuffix(normed, "Agent"), "Assistant"); trimmed != "" { - normed = trimmed - } - toolRefs[i].Named = normed - } - - return toolRefs, nil -} - func (t Tool) GetToolRefsFromNames(names []string) (result []ToolReference, _ error) { for _, toolName := range names { toolRefs, ok := t.ToolMapping[toolName] @@ -507,293 +500,185 @@ func (t ToolDef) String() string { return buf.String() } -func (t Tool) getExportedContext(prg Program) ([]ToolReference, error) { - result := &toolRefSet{} - - exportRefs, err := t.GetToolRefsFromNames(t.ExportContext) - if err != nil { - return nil, err - } - - for _, exportRef := range exportRefs { - result.Add(exportRef) - - tool := prg.ToolSet[exportRef.ToolID] - result.AddAll(tool.getExportedContext(prg)) - } - - return result.List() -} - -func (t Tool) getExportedTools(prg Program) ([]ToolReference, error) { - result := &toolRefSet{} - - exportRefs, err := t.GetToolRefsFromNames(t.Export) - if err != nil { - return nil, err - } - - for _, exportRef := range exportRefs { - result.Add(exportRef) - result.AddAll(prg.ToolSet[exportRef.ToolID].getExportedTools(prg)) - } - - return result.List() -} - -// GetContextTools returns all tools that are in the context of the tool including all the -// contexts that are exported by the context tools. This will recurse all exports. 
-func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { - result := &toolRefSet{} - result.AddAll(t.getDirectContextToolRefs(prg)) - - contextRefs, err := t.getCompletionToolRefs(prg, nil, ToolTypeContext) - if err != nil { - return nil, err - } - - for _, contextRef := range contextRefs { - result.AddAll(prg.ToolSet[contextRef.ToolID].getExportedContext(prg)) - result.Add(contextRef) - } - - exportOnlyTools, err := t.getCompletionToolRefs(prg, nil, ToolTypeDefault, ToolTypeContext) - if err != nil { - return nil, err - } +func (t Tool) GetNextAgentGroup(prg *Program, agentGroup []ToolReference, toolID string) (result []ToolReference, _ error) { + newAgentGroup := toolRefSet{} + newAgentGroup.AddAll(t.GetToolsByType(prg, ToolTypeAgent)) - for _, contextRef := range exportOnlyTools { - result.AddAll(prg.ToolSet[contextRef.ToolID].getExportedContext(prg)) + if newAgentGroup.HasTool(toolID) { + // Join new agent group + return newAgentGroup.List() } - return result.List() + return agentGroup, nil } -// GetContextTools returns all tools that are in the context of the tool including all the -// contexts that are exported by the context tools. This will recurse all exports. 
-func (t Tool) getDirectContextToolRefs(prg Program) ([]ToolReference, error) { - result := &toolRefSet{} - - contextRefs, err := t.GetToolRefsFromNames(t.Context) +func (t Tool) getAgents(prg *Program) (result []ToolReference, _ error) { + toolRefs, err := t.GetToolRefsFromNames(t.Agents) if err != nil { return nil, err } - for _, contextRef := range contextRefs { - result.AddAll(prg.ToolSet[contextRef.ToolID].getExportedContext(prg)) - result.Add(contextRef) + // Agent Tool refs must be named + for i, toolRef := range toolRefs { + if toolRef.Named != "" { + continue + } + tool := prg.ToolSet[toolRef.ToolID] + name := tool.Name + if name == "" { + name = toolRef.Reference + } + normed := ToolNormalizer(name) + if trimmed := strings.TrimSuffix(strings.TrimSuffix(normed, "Agent"), "Assistant"); trimmed != "" { + normed = trimmed + } + toolRefs[i].Named = normed } - return result.List() + return toolRefs, nil } -func (t Tool) GetOutputFilterTools(program Program) ([]ToolReference, error) { - result := &toolRefSet{} - - outputFilterRefs, err := t.GetToolRefsFromNames(t.OutputFilters) - if err != nil { - return nil, err - } - - for _, outputFilterRef := range outputFilterRefs { - result.Add(outputFilterRef) - } - - result.AddAll(t.getCompletionToolRefs(program, nil, ToolTypeOutput)) - - contextRefs, err := t.getDirectContextToolRefs(program) - if err != nil { - return nil, err +func (t Tool) GetToolsByType(prg *Program, toolType ToolType) ([]ToolReference, error) { + if toolType == ToolTypeAgent { + // Agents are special, they can only be sourced from direct references and not the generic 'tool:' or shared by references + return t.getAgents(prg) } - for _, contextRef := range contextRefs { - contextTool := program.ToolSet[contextRef.ToolID] - result.AddAll(contextTool.GetToolRefsFromNames(contextTool.ExportOutputFilters)) - } - - return result.List() -} - -func (t Tool) GetInputFilterTools(program Program) ([]ToolReference, error) { - result := &toolRefSet{} + toolSet 
:= &toolRefSet{} - inputFilterRefs, err := t.GetToolRefsFromNames(t.InputFilters) - if err != nil { - return nil, err - } + var ( + directRefs []string + toolsListFilterType = []ToolType{toolType} + ) - for _, inputFilterRef := range inputFilterRefs { - result.Add(inputFilterRef) + switch toolType { + case ToolTypeContext: + directRefs = t.Context + case ToolTypeOutput: + directRefs = t.OutputFilters + case ToolTypeInput: + directRefs = t.InputFilters + case ToolTypeTool: + toolsListFilterType = append(toolsListFilterType, ToolTypeDefault, ToolTypeAgent) + case ToolTypeCredential: + directRefs = t.Credentials + default: + return nil, fmt.Errorf("unknown tool type %v", toolType) } - result.AddAll(t.getCompletionToolRefs(program, nil, ToolTypeInput)) + toolSet.AddAll(t.GetToolRefsFromNames(directRefs)) - contextRefs, err := t.getDirectContextToolRefs(program) + toolRefs, err := t.GetToolRefsFromNames(t.Tools) if err != nil { return nil, err } - for _, contextRef := range contextRefs { - contextTool := program.ToolSet[contextRef.ToolID] - result.AddAll(contextTool.GetToolRefsFromNames(contextTool.ExportInputFilters)) - } - - return result.List() -} - -func (t Tool) GetNextAgentGroup(prg Program, agentGroup []ToolReference, toolID string) (result []ToolReference, _ error) { - newAgentGroup := toolRefSet{} - if err := t.addAgents(prg, &newAgentGroup); err != nil { - return nil, err - } - - if newAgentGroup.HasTool(toolID) { - // Join new agent group - return newAgentGroup.List() - } - - return agentGroup, nil -} - -func filterRefs(prg Program, refs []ToolReference, types ...ToolType) (result []ToolReference) { - for _, ref := range refs { - if slices.Contains(types, prg.ToolSet[ref.ToolID].Type) { - result = append(result, ref) + for _, toolRef := range toolRefs { + tool, ok := prg.ToolSet[toolRef.ToolID] + if !ok { + continue + } + if slices.Contains(toolsListFilterType, tool.Type) { + toolSet.Add(toolRef) } - } - return -} - -func (t Tool) GetCompletionTools(prg 
Program, agentGroup ...ToolReference) (result []CompletionTool, err error) { - toolSet := &toolRefSet{} - toolSet.AddAll(t.getCompletionToolRefs(prg, agentGroup, ToolTypeDefault, ToolTypeTool)) - - if err := t.addAgents(prg, toolSet); err != nil { - return nil, err } - refs, err := toolSet.List() + exportSources, err := t.getExportSources(prg) if err != nil { return nil, err } - return toolRefsToCompletionTools(refs, prg), nil -} - -func (t Tool) addAgents(prg Program, result *toolRefSet) error { - subToolRefs, err := t.GetAgents(prg) - if err != nil { - return err - } - - for _, subToolRef := range subToolRefs { - // don't add yourself - if subToolRef.ToolID != t.ID { - // Add the tool itself and no exports - result.Add(subToolRef) + for _, exportSource := range exportSources { + var ( + tool = prg.ToolSet[exportSource.ToolID] + exportRefs []string + ) + + switch toolType { + case ToolTypeContext: + exportRefs = tool.ExportContext + case ToolTypeOutput: + exportRefs = tool.ExportOutputFilters + case ToolTypeInput: + exportRefs = tool.ExportInputFilters + case ToolTypeTool: + exportRefs = tool.Export + case ToolTypeCredential: + exportRefs = tool.ExportCredentials + default: + return nil, fmt.Errorf("unknown tool type %v", toolType) } + toolSet.AddAll(tool.GetToolRefsFromNames(exportRefs)) } - return nil + return toolSet.List() } -func (t Tool) addReferencedTools(prg Program, result *toolRefSet) error { - subToolRefs, err := t.GetToolRefsFromNames(t.Parameters.Tools) +func (t Tool) addExportsRecursively(prg *Program, toolSet *toolRefSet) error { + toolRefs, err := t.GetToolRefsFromNames(t.allExports()) if err != nil { return err } - for _, subToolRef := range subToolRefs { - // Add the tool - result.Add(subToolRef) + for _, toolRef := range toolRefs { + if toolSet.Contains(toolRef) { + continue + } - // Get all tools exports - result.AddAll(prg.ToolSet[subToolRef.ToolID].getExportedTools(prg)) + toolSet.Add(toolRef) + if err := 
prg.ToolSet[toolRef.ToolID].addExportsRecursively(prg, toolSet); err != nil { + return err + } } return nil } -func (t Tool) addContextExportedTools(prg Program, result *toolRefSet) error { - contextTools, err := t.getDirectContextToolRefs(prg) +func (t Tool) getExportSources(prg *Program) ([]ToolReference, error) { + // We start first with all references from this tool. This gives us the + // initial set of export sources. + // Then all tools in the export sources in the set we look for exports of those tools recursively. + // So a share of a share of a share should be added. + + toolSet := toolRefSet{} + toolRefs, err := t.GetToolRefsFromNames(t.allReferences()) if err != nil { - return err + return nil, err } - for _, contextTool := range contextTools { - result.AddAll(prg.ToolSet[contextTool.ToolID].getExportedTools(prg)) + for _, toolRef := range toolRefs { + if err := prg.ToolSet[toolRef.ToolID].addExportsRecursively(prg, &toolSet); err != nil { + return nil, err + } + toolSet.Add(toolRef) } - return nil + return toolSet.List() } -func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference, types ...ToolType) ([]ToolReference, error) { - if len(types) == 0 { - types = []ToolType{ToolTypeDefault, ToolTypeTool} - } - - result := toolRefSet{} +func (t Tool) GetChatCompletionTools(prg Program, agentGroup ...ToolReference) (result []ChatCompletionTool, err error) { + toolSet := &toolRefSet{} + toolSet.AddAll(t.GetToolsByType(&prg, ToolTypeTool)) + toolSet.AddAll(t.GetToolsByType(&prg, ToolTypeAgent)) if t.Chat { for _, agent := range agentGroup { // don't add yourself if agent.ToolID != t.ID { - result.Add(agent) + toolSet.Add(agent) } } } - if err := t.addReferencedTools(prg, &result); err != nil { - return nil, err - } - - if err := t.addContextExportedTools(prg, &result); err != nil { - return nil, err - } - - refs, err := result.List() - return filterRefs(prg, refs, types...), err -} - -func (t Tool) GetCredentialTools(prg Program, agentGroup 
[]ToolReference) ([]ToolReference, error) { - result := toolRefSet{} - - result.AddAll(t.GetToolRefsFromNames(t.Credentials)) - - result.AddAll(t.getCompletionToolRefs(prg, nil, ToolTypeCredential)) - - toolRefs, err := result.List() - if err != nil { - return nil, err - } - for _, toolRef := range toolRefs { - referencedTool := prg.ToolSet[toolRef.ToolID] - result.AddAll(referencedTool.GetToolRefsFromNames(referencedTool.ExportCredentials)) - } - - toolRefs, err = t.getCompletionToolRefs(prg, agentGroup) - if err != nil { - return nil, err - } - for _, toolRef := range toolRefs { - referencedTool := prg.ToolSet[toolRef.ToolID] - result.AddAll(referencedTool.GetToolRefsFromNames(referencedTool.ExportCredentials)) - } - - contextToolRefs, err := t.GetContextTools(prg) + refs, err := toolSet.List() if err != nil { return nil, err } - for _, contextToolRef := range contextToolRefs { - contextTool := prg.ToolSet[contextToolRef.ToolID] - result.AddAll(contextTool.GetToolRefsFromNames(contextTool.ExportCredentials)) - } - - return result.List() + return toolRefsToCompletionTools(refs, prg), nil } -func toolRefsToCompletionTools(completionTools []ToolReference, prg Program) (result []CompletionTool) { +func toolRefsToCompletionTools(completionTools []ToolReference, prg Program) (result []ChatCompletionTool) { toolNames := map[string]struct{}{} for _, subToolRef := range completionTools { @@ -814,7 +699,7 @@ func toolRefsToCompletionTools(completionTools []ToolReference, prg Program) (re if subTool.Instructions == "" { log.Debugf("Skipping zero instruction tool %s (%s)", subToolName, subTool.ID) } else { - result = append(result, CompletionTool{ + result = append(result, ChatCompletionTool{ Function: CompletionFunctionDefinition{ ToolID: subTool.ID, Name: PickToolName(subToolName, toolNames), From 1ad818bb3b6edc01be3b280ace1ab50f0781b32e Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 30 Aug 2024 09:31:47 -0400 Subject: [PATCH 125/270] enhance: avoid context limit 
(#832) Signed-off-by: Grant Linville --- pkg/openai/client.go | 46 ++++++++++++++++++++++++++++++++++++++++++++ pkg/openai/count.go | 34 ++++++++++++++++++++++++-------- 2 files changed, 72 insertions(+), 8 deletions(-) diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 42a1a39e..61a7ec77 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -2,6 +2,7 @@ package openai import ( "context" + "errors" "io" "log/slog" "os" @@ -24,6 +25,7 @@ import ( const ( DefaultModel = openai.GPT4o BuiltinCredName = "sys.openai" + TooLongMessage = "Error: tool call output is too long" ) var ( @@ -317,6 +319,14 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques } if messageRequest.Chat { + // Check the last message. If it is from a tool call, and if it takes up more than 80% of the budget on its own, reject it. + lastMessage := msgs[len(msgs)-1] + if lastMessage.Role == string(types.CompletionMessageRoleTypeTool) && countMessage(lastMessage) > int(float64(getBudget(messageRequest.MaxTokens))*0.8) { + // We need to update it in the msgs slice for right now and in the messageRequest for future calls. + msgs[len(msgs)-1].Content = TooLongMessage + messageRequest.Messages[len(messageRequest.Messages)-1].Content = types.Text(TooLongMessage) + } + msgs = dropMessagesOverCount(messageRequest.MaxTokens, msgs) } @@ -383,6 +393,16 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques return nil, err } else if !ok { response, err = c.call(ctx, request, id, status) + + // If we got back a context length exceeded error, keep retrying and shrinking the message history until we pass. + var apiError *openai.APIError + if errors.As(err, &apiError) && apiError.Code == "context_length_exceeded" && messageRequest.Chat { + // Decrease maxTokens by 10% to make garbage collection more aggressive. + // The retry loop will further decrease maxTokens if needed. 
+ maxTokens := decreaseTenPercent(messageRequest.MaxTokens) + response, err = c.contextLimitRetryLoop(ctx, request, id, maxTokens, status) + } + if err != nil { return nil, err } @@ -421,6 +441,32 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques return &result, nil } +func (c *Client) contextLimitRetryLoop(ctx context.Context, request openai.ChatCompletionRequest, id string, maxTokens int, status chan<- types.CompletionStatus) ([]openai.ChatCompletionStreamResponse, error) { + var ( + response []openai.ChatCompletionStreamResponse + err error + ) + + for range 10 { // maximum 10 tries + // Try to drop older messages again, with a decreased max tokens. + request.Messages = dropMessagesOverCount(maxTokens, request.Messages) + response, err = c.call(ctx, request, id, status) + if err == nil { + return response, nil + } + + var apiError *openai.APIError + if errors.As(err, &apiError) && apiError.Code == "context_length_exceeded" { + // Decrease maxTokens and try again + maxTokens = decreaseTenPercent(maxTokens) + continue + } + return nil, err + } + + return nil, err +} + func appendMessage(msg types.CompletionMessage, response openai.ChatCompletionStreamResponse) types.CompletionMessage { msg.Usage.CompletionTokens = types.FirstSet(msg.Usage.CompletionTokens, response.Usage.CompletionTokens) msg.Usage.PromptTokens = types.FirstSet(msg.Usage.PromptTokens, response.Usage.PromptTokens) diff --git a/pkg/openai/count.go b/pkg/openai/count.go index 47c5c9bd..ffd902e5 100644 --- a/pkg/openai/count.go +++ b/pkg/openai/count.go @@ -1,20 +1,30 @@ package openai -import openai "github.com/gptscript-ai/chat-completion-client" +import ( + openai "github.com/gptscript-ai/chat-completion-client" +) + +const DefaultMaxTokens = 128_000 + +func decreaseTenPercent(maxTokens int) int { + maxTokens = getBudget(maxTokens) + return int(float64(maxTokens) * 0.9) +} + +func getBudget(maxTokens int) int { + if maxTokens == 0 { + return DefaultMaxTokens + } + 
return maxTokens +} func dropMessagesOverCount(maxTokens int, msgs []openai.ChatCompletionMessage) (result []openai.ChatCompletionMessage) { var ( lastSystem int withinBudget int - budget = maxTokens + budget = getBudget(maxTokens) ) - if maxTokens == 0 { - budget = 300_000 - } else { - budget *= 3 - } - for i, msg := range msgs { if msg.Role == openai.ChatMessageRoleSystem { budget -= countMessage(msg) @@ -33,6 +43,14 @@ func dropMessagesOverCount(maxTokens int, msgs []openai.ChatCompletionMessage) ( } } + // OpenAI gets upset if there is a tool message without a tool call preceding it. + // Check the oldest message within budget, and if it is a tool message, just drop it. + // We do this in a loop because it is possible for multiple tool messages to be in a row, + // due to parallel tool calls. + for withinBudget < len(msgs) && msgs[withinBudget].Role == openai.ChatMessageRoleTool { + withinBudget++ + } + if withinBudget == len(msgs)-1 { // We are going to drop all non system messages, which seems useless, so just return them // all and let it fail From 1d2b70cf492504c15cc1aab1cd5176f2f1c78888 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Tue, 3 Sep 2024 11:29:13 -0400 Subject: [PATCH 126/270] chore: sys.read: improve description (#834) Signed-off-by: Grant Linville --- pkg/builtin/builtin.go | 2 +- pkg/tests/testdata/TestToolsChange/call1.golden | 2 +- pkg/tests/testdata/TestToolsChange/step1.golden | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index f972d14c..f8f44ba9 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -59,7 +59,7 @@ var tools = map[string]types.Tool{ "sys.read": { ToolDef: types.ToolDef{ Parameters: types.Parameters{ - Description: "Reads the contents of a file", + Description: "Reads the contents of a file. 
Can only read plain text files, not binary files", Arguments: types.ObjectSchema( "filename", "The name of the file to read"), }, diff --git a/pkg/tests/testdata/TestToolsChange/call1.golden b/pkg/tests/testdata/TestToolsChange/call1.golden index 6c7c2d55..69ab3d03 100644 --- a/pkg/tests/testdata/TestToolsChange/call1.golden +++ b/pkg/tests/testdata/TestToolsChange/call1.golden @@ -22,7 +22,7 @@ "function": { "toolID": "sys.read", "name": "read", - "description": "Reads the contents of a file", + "description": "Reads the contents of a file. Can only read plain text files, not binary files", "parameters": { "properties": { "filename": { diff --git a/pkg/tests/testdata/TestToolsChange/step1.golden b/pkg/tests/testdata/TestToolsChange/step1.golden index 1aae05d1..e26862ae 100644 --- a/pkg/tests/testdata/TestToolsChange/step1.golden +++ b/pkg/tests/testdata/TestToolsChange/step1.golden @@ -30,7 +30,7 @@ "function": { "toolID": "sys.read", "name": "read", - "description": "Reads the contents of a file", + "description": "Reads the contents of a file. 
Can only read plain text files, not binary files", "parameters": { "properties": { "filename": { From eaaf0cdb72cae37f84ce600bb49ae1f49fc86be9 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Tue, 3 Sep 2024 12:28:49 -0400 Subject: [PATCH 127/270] fix: sys.read: never read files that contain a null byte (#837) Signed-off-by: Grant Linville --- pkg/builtin/builtin.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index f8f44ba9..c17f2c80 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -489,6 +489,12 @@ func SysRead(_ context.Context, _ []string, input string, _ chan<- string) (stri if len(data) == 0 { return fmt.Sprintf("The file %s has no contents", params.Filename), nil } + + // Assume the file is not text if it contains a null byte + if bytes.IndexByte(data, 0) != -1 { + return fmt.Sprintf("The file %s cannot be read because it is not a plaintext file", params.Filename), nil + } + return string(data), nil } From 6d92974e0c1c976a023a4b2e240c2e1e990c8212 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 5 Sep 2024 20:42:18 -0700 Subject: [PATCH 128/270] bug: "share tools:" should support all tool types, not just tool --- pkg/types/tool.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/pkg/types/tool.go b/pkg/types/tool.go index d9d59837..f10788b4 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -602,13 +602,27 @@ func (t Tool) GetToolsByType(prg *Program, toolType ToolType) ([]ToolReference, case ToolTypeInput: exportRefs = tool.ExportInputFilters case ToolTypeTool: - exportRefs = tool.Export case ToolTypeCredential: exportRefs = tool.ExportCredentials default: return nil, fmt.Errorf("unknown tool type %v", toolType) } toolSet.AddAll(tool.GetToolRefsFromNames(exportRefs)) + + toolRefs, err := tool.GetToolRefsFromNames(tool.Export) + if err != nil { + return nil, err + } + + for _, toolRef := range toolRefs { + tool, ok := 
prg.ToolSet[toolRef.ToolID] + if !ok { + continue + } + if slices.Contains(toolsListFilterType, tool.Type) { + toolSet.Add(toolRef) + } + } } return toolSet.List() From d97d564e243bafc2a495c21a363202a759c28330 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 5 Sep 2024 20:17:58 -0400 Subject: [PATCH 129/270] improve error messages Signed-off-by: Grant Linville --- pkg/engine/openapi.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/engine/openapi.go b/pkg/engine/openapi.go index 0bd5f599..a951bd37 100644 --- a/pkg/engine/openapi.go +++ b/pkg/engine/openapi.go @@ -66,7 +66,7 @@ func (e *Engine) runOpenAPIRevamp(tool types.Tool, input string) (*Return, error } else if !match { // Report to the LLM that the operation was not found return &Return{ - Result: ptr(fmt.Sprintf("operation %s not found", operation)), + Result: ptr(fmt.Sprintf("ERROR: operation %s not found", operation)), }, nil } } @@ -92,7 +92,7 @@ func (e *Engine) runOpenAPIRevamp(tool types.Tool, input string) (*Return, error if !found { // Report to the LLM that the operation was not found return &Return{ - Result: ptr(fmt.Sprintf("operation %s not found", operation)), + Result: ptr(fmt.Sprintf("ERROR: operation %s not found", operation)), }, nil } @@ -115,7 +115,7 @@ func (e *Engine) runOpenAPIRevamp(tool types.Tool, input string) (*Return, error } else if !match { // Report to the LLM that the operation was not found return &Return{ - Result: ptr(fmt.Sprintf("operation %s not found", operation)), + Result: ptr(fmt.Sprintf("ERROR: operation %s not found", operation)), }, nil } } @@ -140,7 +140,7 @@ func (e *Engine) runOpenAPIRevamp(tool types.Tool, input string) (*Return, error } else if !found { // Report to the LLM that the operation was not found return &Return{ - Result: ptr(fmt.Sprintf("operation %s not found", operation)), + Result: ptr(fmt.Sprintf("ERROR: operation %s not found", operation)), }, nil } From 8128bbc5bc2ee65503829022c0e2e9bc320f1715 Mon Sep 17 
00:00:00 2001 From: Darren Shepherd Date: Thu, 5 Sep 2024 20:49:13 -0700 Subject: [PATCH 130/270] bug: always prefer tool's given name over the referenced name --- pkg/tests/testdata/TestCase2/call1.golden | 2 +- pkg/types/tool.go | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/tests/testdata/TestCase2/call1.golden b/pkg/tests/testdata/TestCase2/call1.golden index 581e03f5..d9b446d0 100644 --- a/pkg/tests/testdata/TestCase2/call1.golden +++ b/pkg/tests/testdata/TestCase2/call1.golden @@ -4,7 +4,7 @@ { "function": { "toolID": "testdata/TestCase2/test.gpt:bob", - "name": "Bob", + "name": "bob", "description": "I'm Bob, a friendly guy.", "parameters": { "properties": { diff --git a/pkg/types/tool.go b/pkg/types/tool.go index f10788b4..0bd7bc02 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -698,7 +698,10 @@ func toolRefsToCompletionTools(completionTools []ToolReference, prg Program) (re for _, subToolRef := range completionTools { subTool := prg.ToolSet[subToolRef.ToolID] - subToolName := subToolRef.Reference + subToolName := subTool.Name + if subToolName == "" { + subToolName = subToolRef.Reference + } if subToolRef.Named != "" { subToolName = subToolRef.Named } From b8f6209f679751d83009a50d0a0137459765368f Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 6 Sep 2024 12:11:52 -0400 Subject: [PATCH 131/270] fix: allocate new storage for env vars on each tool call (#841) Signed-off-by: Grant Linville --- pkg/engine/cmd.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 960bcfe8..c7d21a2b 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -217,30 +217,34 @@ func appendInputAsEnv(env []string, input string) []string { dec := json.NewDecoder(bytes.NewReader([]byte(input))) dec.UseNumber() - env = appendEnv(env, "GPTSCRIPT_INPUT", input) + // If we don't create a new slice here, then parallel tool calls can end up getting messed up. 
+ newEnv := make([]string, len(env), cap(env)+1+len(data)) + copy(newEnv, env) + + newEnv = appendEnv(newEnv, "GPTSCRIPT_INPUT", input) if err := json.Unmarshal([]byte(input), &data); err != nil { // ignore invalid JSON - return env + return newEnv } for k, v := range data { switch val := v.(type) { case string: - env = appendEnv(env, k, val) + newEnv = appendEnv(newEnv, k, val) case json.Number: - env = appendEnv(env, k, string(val)) + newEnv = appendEnv(newEnv, k, string(val)) case bool: - env = appendEnv(env, k, fmt.Sprint(val)) + newEnv = appendEnv(newEnv, k, fmt.Sprint(val)) default: data, err := json.Marshal(val) if err == nil { - env = appendEnv(env, k, string(data)) + newEnv = appendEnv(newEnv, k, string(data)) } } } - return env + return newEnv } func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.Tool, input string, useShell bool) (*exec.Cmd, func(), error) { @@ -248,7 +252,7 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T useShell = false } - envvars := append(e.Env[:], extraEnv...) + envvars := append(e.Env, extraEnv...) envvars = appendInputAsEnv(envvars, input) if log.IsDebug() { envvars = append(envvars, "GPTSCRIPT_DEBUG=true") From c441cb4cf785ba51c0b61b862b118ed7dc46e64a Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 6 Sep 2024 15:18:56 -0400 Subject: [PATCH 132/270] fix: openapi revamp: return error to LLM if args are invalid JSON (#843) Signed-off-by: Grant Linville Co-authored-by: Tyler Slaton <54378333+tylerslaton@users.noreply.github.com> --- pkg/openapi/run.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/openapi/run.go b/pkg/openapi/run.go index fb3b746c..159ee6fe 100644 --- a/pkg/openapi/run.go +++ b/pkg/openapi/run.go @@ -38,7 +38,8 @@ func Run(operationID, defaultHost, args string, t *openapi3.T, envs []string) (s // Validate args against the schema. 
validationResult, err := gojsonschema.Validate(gojsonschema.NewStringLoader(schemaJSON), gojsonschema.NewStringLoader(args)) if err != nil { - return "", false, err + // We don't return an error here because we want the LLM to be able to maintain control and try again. + return fmt.Sprintf("ERROR: failed to validate arguments. Make sure your arguments are valid JSON. %v", err), false, nil } if !validationResult.Valid() { From dbf46d154265e31889ab9fd082ef252b738b69b9 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 9 Sep 2024 11:12:15 -0400 Subject: [PATCH 133/270] fix: openapi revamp: fix incorrect error message when JSON args are invalid (#844) Signed-off-by: Grant Linville --- pkg/openapi/run.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/openapi/run.go b/pkg/openapi/run.go index 159ee6fe..ac1ec660 100644 --- a/pkg/openapi/run.go +++ b/pkg/openapi/run.go @@ -39,7 +39,7 @@ func Run(operationID, defaultHost, args string, t *openapi3.T, envs []string) (s validationResult, err := gojsonschema.Validate(gojsonschema.NewStringLoader(schemaJSON), gojsonschema.NewStringLoader(args)) if err != nil { // We don't return an error here because we want the LLM to be able to maintain control and try again. - return fmt.Sprintf("ERROR: failed to validate arguments. Make sure your arguments are valid JSON. %v", err), false, nil + return fmt.Sprintf("ERROR: failed to validate arguments. Make sure your arguments are valid JSON. 
%v", err), true, nil } if !validationResult.Valid() { From c0f116cf9b455658861fabbbb45fefebc9ed4513 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 11 Sep 2024 16:32:16 -0400 Subject: [PATCH 134/270] chore: bubble up errors when downloading credential helper fails (#845) Signed-off-by: Donnie Adams --- pkg/repos/get.go | 5 ++++ pkg/repos/runtimes/golang/golang.go | 37 ++++++++++++++++------------- 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/pkg/repos/get.go b/pkg/repos/get.go index 8981d1fa..8346b8cf 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -146,6 +146,11 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co } tool := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "gptscript-credential-helpers", + }, + }, Source: types.ToolSource{ Repo: &types.Repo{ Root: runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_HELPERS_ROOT", "https://github.com/gptscript-ai/gptscript-credential-helpers.git"), diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index 2601f521..47e8461f 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -97,21 +97,26 @@ type tag struct { } func GetLatestTag(tool types.Tool) (string, error) { - r, ok := getLatestRelease(tool) + r, ok, err := getLatestRelease(tool) + if err != nil { + return "", err + } + if !ok { return "", fmt.Errorf("failed to get latest release for %s", tool.Name) } + return r.label, nil } -func getLatestRelease(tool types.Tool) (*release, bool) { +func getLatestRelease(tool types.Tool) (*release, bool, error) { if tool.Source.Repo == nil || !strings.HasPrefix(tool.Source.Repo.Root, "https://github.com/") { - return nil, false + return nil, false, nil } parts := strings.Split(strings.TrimPrefix(strings.TrimSuffix(tool.Source.Repo.Root, ".git"), "https://"), "/") if len(parts) != 3 { - return nil, false + return nil, false, fmt.Errorf("invalid GitHub URL: %s", 
tool.Source.Repo.Root) } client := http.Client{ @@ -124,17 +129,16 @@ func getLatestRelease(tool types.Tool) (*release, bool) { resp, err := client.Get(fmt.Sprintf("https://api.github.com/repos/%s/%s/tags", account, repo)) if err != nil { - // ignore error - return nil, false + return nil, false, fmt.Errorf("failed to get tags: %w", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, false + return nil, false, fmt.Errorf("unexpected status when getting tags: %s", resp.Status) } var tags []tag if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { - return nil, false + return nil, false, fmt.Errorf("failed to decode GitHub tags: %w", err) } for _, tag := range tags { if tag.Commit.Sha == tool.Source.Repo.Revision { @@ -142,23 +146,22 @@ func getLatestRelease(tool types.Tool) (*release, bool) { account: account, repo: repo, label: tag.Name, - }, true + }, true, nil } } resp, err = client.Get(fmt.Sprintf("https://github.com/%s/%s/releases/latest", account, repo)) if err != nil { - // ignore error - return nil, false + return nil, false, fmt.Errorf("failed to get latest release: %w", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusFound { - return nil, false + return nil, false, fmt.Errorf("unexpected status when getting latest release: %s", resp.Status) } target := resp.Header.Get("Location") if target == "" { - return nil, false + return nil, false, nil } parts = strings.Split(target, "/") @@ -168,7 +171,7 @@ func getLatestRelease(tool types.Tool) (*release, bool) { account: account, repo: repo, label: label, - }, true + }, true, nil } func get(ctx context.Context, url string) (*http.Response, error) { @@ -249,7 +252,8 @@ func (r *Runtime) Binary(ctx context.Context, tool types.Tool, _, toolSource str return false, nil, nil } - rel, ok := getLatestRelease(tool) + // ignore the error + rel, ok, _ := getLatestRelease(tool) if !ok { return false, nil, nil } @@ -286,7 +290,8 @@ func (r *Runtime) 
DownloadCredentialHelper(ctx context.Context, tool types.Tool, return nil } - rel, ok := getLatestRelease(tool) + // ignore the error + rel, ok, _ := getLatestRelease(tool) if !ok { return fmt.Errorf("failed to find %s release", r.ID()) } From 90e7868a95cb4501bfdf706a64d06245e3c55f5c Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 13 Sep 2024 12:57:17 -0400 Subject: [PATCH 135/270] feat: sdkserver: add credential routes (#846) Signed-off-by: Grant Linville --- pkg/cli/credential.go | 2 +- pkg/credentials/store.go | 9 +- pkg/gptscript/gptscript.go | 2 +- pkg/sdkserver/credentials.go | 176 +++++++++++++++++++++++++++++++++++ pkg/sdkserver/routes.go | 5 + pkg/sdkserver/types.go | 7 ++ 6 files changed, 197 insertions(+), 4 deletions(-) create mode 100644 pkg/sdkserver/credentials.go diff --git a/pkg/cli/credential.go b/pkg/cli/credential.go index cb000125..733590c4 100644 --- a/pkg/cli/credential.go +++ b/pkg/cli/credential.go @@ -45,7 +45,7 @@ func (c *Credential) Run(cmd *cobra.Command, _ []string) error { ctx := c.root.CredentialContext if c.AllContexts { - ctx = "*" + ctx = credentials.AllCredentialContexts } opts, err := c.root.NewGPTScriptOpts() diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index 3940184b..c8558f3a 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -11,6 +11,11 @@ import ( "github.com/gptscript-ai/gptscript/pkg/config" ) +const ( + DefaultCredentialContext = "default" + AllCredentialContexts = "*" +) + type CredentialBuilder interface { EnsureCredentialHelpers(ctx context.Context) error } @@ -105,7 +110,7 @@ func (s Store) List(ctx context.Context) ([]Credential, error) { if err != nil { return nil, err } - if s.credCtx == "*" || c.Context == s.credCtx { + if s.credCtx == AllCredentialContexts || c.Context == s.credCtx { creds = append(creds, c) } } @@ -139,7 +144,7 @@ func validateCredentialCtx(ctx string) error { return fmt.Errorf("credential context cannot be empty") } - if ctx == "*" { // this 
represents "all contexts" and is allowed + if ctx == AllCredentialContexts { return nil } diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 755fe632..abae80ac 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -75,7 +75,7 @@ func Complete(opts ...Options) Options { result.Env = os.Environ() } if result.CredentialContext == "" { - result.CredentialContext = "default" + result.CredentialContext = credentials.DefaultCredentialContext } return result diff --git a/pkg/sdkserver/credentials.go b/pkg/sdkserver/credentials.go new file mode 100644 index 00000000..adbaacdc --- /dev/null +++ b/pkg/sdkserver/credentials.go @@ -0,0 +1,176 @@ +package sdkserver + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/gptscript-ai/gptscript/pkg/config" + gcontext "github.com/gptscript-ai/gptscript/pkg/context" + "github.com/gptscript-ai/gptscript/pkg/credentials" + "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" +) + +func (s *server) initializeCredentialStore(ctx string) (credentials.CredentialStore, error) { + cfg, err := config.ReadCLIConfig(s.gptscriptOpts.OpenAI.ConfigFile) + if err != nil { + return nil, fmt.Errorf("failed to read CLI config: %w", err) + } + + // TODO - are we sure we want to always use runtimes.Default here? 
+ store, err := credentials.NewStore(cfg, runtimes.Default(s.gptscriptOpts.Cache.CacheDir), ctx, s.gptscriptOpts.Cache.CacheDir) + if err != nil { + return nil, fmt.Errorf("failed to initialize credential store: %w", err) + } + + return store, nil +} + +func (s *server) listCredentials(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + req := new(credentialsRequest) + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + if req.AllContexts { + req.Context = credentials.AllCredentialContexts + } else if req.Context == "" { + req.Context = credentials.DefaultCredentialContext + } + + store, err := s.initializeCredentialStore(req.Context) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, err) + return + } + + creds, err := store.List(r.Context()) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to list credentials: %w", err)) + return + } + + // Remove the environment variable values (which are secrets) and refresh tokens from the response. 
+ for i := range creds { + for k := range creds[i].Env { + creds[i].Env[k] = "" + } + creds[i].RefreshToken = "" + } + + writeResponse(logger, w, map[string]any{"stdout": creds}) +} + +func (s *server) createCredential(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + req := new(credentialsRequest) + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + cred := new(credentials.Credential) + if err := json.Unmarshal([]byte(req.Content), cred); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid credential: %w", err)) + return + } + + if cred.Context == "" { + cred.Context = credentials.DefaultCredentialContext + } + + store, err := s.initializeCredentialStore(cred.Context) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, err) + return + } + + if err := store.Add(r.Context(), *cred); err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to create credential: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": "Credential created successfully"}) +} + +func (s *server) revealCredential(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + req := new(credentialsRequest) + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + if req.Name == "" { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("missing credential name")) + return + } + + if req.AllContexts || req.Context == credentials.AllCredentialContexts { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("allContexts is not supported for credential retrieval; please specify the specific context that the credential is in")) + return + } else if req.Context == "" { + req.Context = 
credentials.DefaultCredentialContext + } + + store, err := s.initializeCredentialStore(req.Context) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, err) + return + } + + cred, ok, err := store.Get(r.Context(), req.Name) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to get credential: %w", err)) + return + } else if !ok { + writeError(logger, w, http.StatusNotFound, fmt.Errorf("credential not found")) + return + } + + writeResponse(logger, w, map[string]any{"stdout": cred}) +} + +func (s *server) deleteCredential(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + req := new(credentialsRequest) + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + } + + if req.Name == "" { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("missing credential name")) + return + } + + if req.AllContexts || req.Context == credentials.AllCredentialContexts { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("allContexts is not supported for credential deletion; please specify the specific context that the credential is in")) + return + } else if req.Context == "" { + req.Context = credentials.DefaultCredentialContext + } + + store, err := s.initializeCredentialStore(req.Context) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, err) + return + } + + // Check to see if a cred exists so we can return a 404 if it doesn't. 
+ if _, ok, err := store.Get(r.Context(), req.Name); err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to get credential: %w", err)) + return + } else if !ok { + writeError(logger, w, http.StatusNotFound, fmt.Errorf("credential not found")) + return + } + + if err := store.Remove(r.Context(), req.Name); err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to delete credential: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": "Credential deleted successfully"}) +} diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 6cb1e620..c180097e 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -58,6 +58,11 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /confirm/{id}", s.confirm) mux.HandleFunc("POST /prompt/{id}", s.prompt) mux.HandleFunc("POST /prompt-response/{id}", s.promptResponse) + + mux.HandleFunc("POST /credentials", s.listCredentials) + mux.HandleFunc("POST /credentials/create", s.createCredential) + mux.HandleFunc("POST /credentials/reveal", s.revealCredential) + mux.HandleFunc("POST /credentials/delete", s.deleteCredential) } // health just provides an endpoint for checking whether the server is running and accessible. 
diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 2889626b..7ed7da78 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -252,3 +252,10 @@ type prompt struct { Type runner.EventType `json:"type,omitempty"` Time time.Time `json:"time,omitempty"` } + +type credentialsRequest struct { + content `json:",inline"` + AllContexts bool `json:"allContexts"` + Context string `json:"context"` + Name string `json:"name"` +} From 45d444f810600d9584951ec3aba98da9b5e02db5 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 13 Sep 2024 17:30:45 -0400 Subject: [PATCH 136/270] fix: sdkserver: credentials: ensure credential helpers exist (#848) Signed-off-by: Grant Linville --- pkg/sdkserver/credentials.go | 22 ++++++++++++++-------- pkg/sdkserver/routes.go | 3 +++ pkg/sdkserver/server.go | 2 ++ 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/pkg/sdkserver/credentials.go b/pkg/sdkserver/credentials.go index adbaacdc..d3f86b1f 100644 --- a/pkg/sdkserver/credentials.go +++ b/pkg/sdkserver/credentials.go @@ -1,6 +1,7 @@ package sdkserver import ( + "context" "encoding/json" "fmt" "net/http" @@ -8,17 +9,22 @@ import ( "github.com/gptscript-ai/gptscript/pkg/config" gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/credentials" - "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" ) -func (s *server) initializeCredentialStore(ctx string) (credentials.CredentialStore, error) { +func (s *server) initializeCredentialStore(ctx context.Context, credCtx string) (credentials.CredentialStore, error) { cfg, err := config.ReadCLIConfig(s.gptscriptOpts.OpenAI.ConfigFile) if err != nil { return nil, fmt.Errorf("failed to read CLI config: %w", err) } - // TODO - are we sure we want to always use runtimes.Default here? 
- store, err := credentials.NewStore(cfg, runtimes.Default(s.gptscriptOpts.Cache.CacheDir), ctx, s.gptscriptOpts.Cache.CacheDir) + if err := s.runtimeManager.SetUpCredentialHelpers(ctx, cfg); err != nil { + return nil, fmt.Errorf("failed to set up credential helpers: %w", err) + } + if err := s.runtimeManager.EnsureCredentialHelpers(ctx); err != nil { + return nil, fmt.Errorf("failed to ensure credential helpers: %w", err) + } + + store, err := credentials.NewStore(cfg, s.runtimeManager, credCtx, s.gptscriptOpts.Cache.CacheDir) if err != nil { return nil, fmt.Errorf("failed to initialize credential store: %w", err) } @@ -40,7 +46,7 @@ func (s *server) listCredentials(w http.ResponseWriter, r *http.Request) { req.Context = credentials.DefaultCredentialContext } - store, err := s.initializeCredentialStore(req.Context) + store, err := s.initializeCredentialStore(r.Context(), req.Context) if err != nil { writeError(logger, w, http.StatusInternalServerError, err) return @@ -81,7 +87,7 @@ func (s *server) createCredential(w http.ResponseWriter, r *http.Request) { cred.Context = credentials.DefaultCredentialContext } - store, err := s.initializeCredentialStore(cred.Context) + store, err := s.initializeCredentialStore(r.Context(), cred.Context) if err != nil { writeError(logger, w, http.StatusInternalServerError, err) return @@ -115,7 +121,7 @@ func (s *server) revealCredential(w http.ResponseWriter, r *http.Request) { req.Context = credentials.DefaultCredentialContext } - store, err := s.initializeCredentialStore(req.Context) + store, err := s.initializeCredentialStore(r.Context(), req.Context) if err != nil { writeError(logger, w, http.StatusInternalServerError, err) return @@ -152,7 +158,7 @@ func (s *server) deleteCredential(w http.ResponseWriter, r *http.Request) { req.Context = credentials.DefaultCredentialContext } - store, err := s.initializeCredentialStore(req.Context) + store, err := s.initializeCredentialStore(r.Context(), req.Context) if err != nil { 
writeError(logger, w, http.StatusInternalServerError, err) return diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index c180097e..f82fa8a7 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -13,6 +13,7 @@ import ( "github.com/gptscript-ai/broadcaster" "github.com/gptscript-ai/gptscript/pkg/cache" gcontext "github.com/gptscript-ai/gptscript/pkg/context" + "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/gptscript" "github.com/gptscript-ai/gptscript/pkg/input" "github.com/gptscript-ai/gptscript/pkg/loader" @@ -30,6 +31,8 @@ type server struct { client *gptscript.GPTScript events *broadcaster.Broadcaster[event] + runtimeManager engine.RuntimeManager + lock sync.RWMutex waitingToConfirm map[string]chan runner.AuthorizerResponse waitingToPrompt map[string]chan map[string]string diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index f72e7ae9..0a68f0fa 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -17,6 +17,7 @@ import ( "github.com/gptscript-ai/broadcaster" "github.com/gptscript-ai/gptscript/pkg/gptscript" "github.com/gptscript-ai/gptscript/pkg/mvl" + "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/rs/cors" @@ -108,6 +109,7 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { token: token, client: g, events: events, + runtimeManager: runtimes.Default(opts.Options.Cache.CacheDir), // TODO - do we always want to use runtimes.Default here? 
waitingToConfirm: make(map[string]chan runner.AuthorizerResponse), waitingToPrompt: make(map[string]chan map[string]string), } From 780e07eaeb409a2b47b13d6b4d7de9c2e7d38e8f Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 18 Sep 2024 09:19:30 -0400 Subject: [PATCH 137/270] feat: add stacked credential contexts (#849) Signed-off-by: Grant Linville --- docs/docs/03-tools/04-credential-tools.md | 52 +++++++++ .../04-command-line-reference/gptscript.md | 2 +- .../gptscript_credential.md | 2 +- .../gptscript_credential_delete.md | 2 +- .../gptscript_credential_show.md | 2 +- .../gptscript_eval.md | 2 +- .../gptscript_fmt.md | 2 +- .../gptscript_getenv.md | 2 +- .../gptscript_parse.md | 2 +- integration/cred_test.go | 54 +++++++++ integration/scripts/cred_stacked.gpt | 36 ++++++ pkg/cli/credential.go | 22 ++-- pkg/cli/credential_delete.go | 8 +- pkg/cli/credential_show.go | 8 +- pkg/cli/gptscript.go | 4 +- pkg/credentials/store.go | 104 ++++++++++++++---- pkg/credentials/util.go | 7 ++ pkg/gptscript/gptscript.go | 10 +- pkg/sdkserver/credentials.go | 25 +++-- pkg/sdkserver/routes.go | 10 +- pkg/sdkserver/types.go | 8 +- 21 files changed, 283 insertions(+), 81 deletions(-) create mode 100644 integration/scripts/cred_stacked.gpt diff --git a/docs/docs/03-tools/04-credential-tools.md b/docs/docs/03-tools/04-credential-tools.md index 1911dc34..46a0e69e 100644 --- a/docs/docs/03-tools/04-credential-tools.md +++ b/docs/docs/03-tools/04-credential-tools.md @@ -222,3 +222,55 @@ import os print("myCred expires at " + os.getenv("GPTSCRIPT_CREDENTIAL_EXPIRATION", "")) ``` + +## Stacked Credential Contexts (Advanced) + +When setting the `--credential-context` argument in GPTScript, you can specify multiple contexts separated by commas. +We refer to this as "stacked credential contexts", or just stacked contexts for short. This allows you to specify an order +of priority for credential contexts. This is best explained by example. 
+ +### Example: stacked contexts when running a script that uses a credential + +Let's say you have two contexts, `one` and `two`, and you specify them like this: + +```bash +gptscript --credential-context one,two my-script.gpt +``` + +``` +Credential: my-credential-tool.gpt as myCred + + +``` + +When GPTScript runs, it will first look for a credential called `myCred` in the `one` context. +If it doesn't find it there, it will look for it in the `two` context. If it also doesn't find it there, +it will run the `my-credential-tool.gpt` tool to get the credential. It will then store the new credential into the `one` +context, since that has the highest priority. + +### Example: stacked contexts when listing credentials + +```bash +gptscript --credential-context one,two credentials +``` + +When you list credentials like this, GPTScript will print out the information for all credentials in contexts one and two, +with one exception. If there is a credential name that exists in both contexts, GPTScript will only print the information +for the credential in the context with the highest priority, which in this case is `one`. + +(To see all credentials in all contexts, you can still use the `--all-contexts` flag, and it will show all credentials, +regardless of whether the same name appears in another context.) + +### Example: stacked contexts when showing credentials + +```bash +gptscript --credential-context one,two credential show myCred +``` + +When you show a credential like this, GPTScript will first look for `myCred` in the `one` context. If it doesn't find it +there, it will look for it in the `two` context. If it doesn't find it in either context, it will print an error message. + +:::note +You cannot specify stacked contexts when doing `gptscript credential delete`. GPTScript will return an error if +more than one context is specified for this command. 
+::: diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md index b7de5e86..8a726c64 100644 --- a/docs/docs/04-command-line-reference/gptscript.md +++ b/docs/docs/04-command-line-reference/gptscript.md @@ -18,7 +18,7 @@ gptscript [flags] PROGRAM_FILE [INPUT...] --color Use color in output (default true) ($GPTSCRIPT_COLOR) --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-context strings Context name(s) in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) --debug Enable debug logging ($GPTSCRIPT_DEBUG) --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) diff --git a/docs/docs/04-command-line-reference/gptscript_credential.md b/docs/docs/04-command-line-reference/gptscript_credential.md index 435ba6e5..eb5781f4 100644 --- a/docs/docs/04-command-line-reference/gptscript_credential.md +++ b/docs/docs/04-command-line-reference/gptscript_credential.md @@ -20,7 +20,7 @@ gptscript credential [flags] ### Options inherited from parent commands ``` - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-context strings Context name(s) in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_credential_delete.md b/docs/docs/04-command-line-reference/gptscript_credential_delete.md index c2f78e88..c9cffdd3 100644 --- a/docs/docs/04-command-line-reference/gptscript_credential_delete.md +++ 
b/docs/docs/04-command-line-reference/gptscript_credential_delete.md @@ -18,7 +18,7 @@ gptscript credential delete [flags] ### Options inherited from parent commands ``` - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-context strings Context name(s) in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_credential_show.md b/docs/docs/04-command-line-reference/gptscript_credential_show.md index f5fb11af..f89df87a 100644 --- a/docs/docs/04-command-line-reference/gptscript_credential_show.md +++ b/docs/docs/04-command-line-reference/gptscript_credential_show.md @@ -18,7 +18,7 @@ gptscript credential show [flags] ### Options inherited from parent commands ``` - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-context strings Context name(s) in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_eval.md b/docs/docs/04-command-line-reference/gptscript_eval.md index ff9e6446..257cf609 100644 --- a/docs/docs/04-command-line-reference/gptscript_eval.md +++ b/docs/docs/04-command-line-reference/gptscript_eval.md @@ -30,7 +30,7 @@ gptscript eval [flags] --color Use color in output (default true) ($GPTSCRIPT_COLOR) --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-context strings Context name(s) in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) 
($GPTSCRIPT_CREDENTIAL_OVERRIDE) --debug Enable debug logging ($GPTSCRIPT_DEBUG) --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) diff --git a/docs/docs/04-command-line-reference/gptscript_fmt.md b/docs/docs/04-command-line-reference/gptscript_fmt.md index 7aceb957..1175a1f1 100644 --- a/docs/docs/04-command-line-reference/gptscript_fmt.md +++ b/docs/docs/04-command-line-reference/gptscript_fmt.md @@ -24,7 +24,7 @@ gptscript fmt [flags] --color Use color in output (default true) ($GPTSCRIPT_COLOR) --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-context strings Context name(s) in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) --debug Enable debug logging ($GPTSCRIPT_DEBUG) --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) diff --git a/docs/docs/04-command-line-reference/gptscript_getenv.md b/docs/docs/04-command-line-reference/gptscript_getenv.md index 80fea614..4a688439 100644 --- a/docs/docs/04-command-line-reference/gptscript_getenv.md +++ b/docs/docs/04-command-line-reference/gptscript_getenv.md @@ -23,7 +23,7 @@ gptscript getenv [flags] KEY [DEFAULT] --color Use color in output (default true) ($GPTSCRIPT_COLOR) --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-context strings Context name(s) in which to store credentials 
($GPTSCRIPT_CREDENTIAL_CONTEXT) --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) --debug Enable debug logging ($GPTSCRIPT_DEBUG) --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) diff --git a/docs/docs/04-command-line-reference/gptscript_parse.md b/docs/docs/04-command-line-reference/gptscript_parse.md index 3d84622b..66d2791c 100644 --- a/docs/docs/04-command-line-reference/gptscript_parse.md +++ b/docs/docs/04-command-line-reference/gptscript_parse.md @@ -24,7 +24,7 @@ gptscript parse [flags] --color Use color in output (default true) ($GPTSCRIPT_COLOR) --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-context strings Context name(s) in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) --debug Enable debug logging ($GPTSCRIPT_DEBUG) --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) diff --git a/integration/cred_test.go b/integration/cred_test.go index d77f096c..1ea73d35 100644 --- a/integration/cred_test.go +++ b/integration/cred_test.go @@ -45,3 +45,57 @@ func TestCredentialExpirationEnv(t *testing.T) { } } } + +// TestStackedCredentialContexts tests creating, using, listing, showing, and deleting credentials when there are multiple contexts. +func TestStackedCredentialContexts(t *testing.T) { + // First, test credential creation. We will create a credential called testcred in two different contexts called one and two. 
+ _, err := RunScript("scripts/cred_stacked.gpt", "--sub-tool", "testcred_one", "--credential-context", "one,two") + require.NoError(t, err) + + _, err = RunScript("scripts/cred_stacked.gpt", "--sub-tool", "testcred_two", "--credential-context", "two") + require.NoError(t, err) + + // Next, we try running the testcred_one tool. It should print the value of "testcred" in whichever context it finds the cred first. + out, err := RunScript("scripts/cred_stacked.gpt", "--sub-tool", "testcred_one", "--credential-context", "one,two") + require.NoError(t, err) + require.Contains(t, out, "one") + require.NotContains(t, out, "two") + + out, err = RunScript("scripts/cred_stacked.gpt", "--sub-tool", "testcred_one", "--credential-context", "two,one") + require.NoError(t, err) + require.Contains(t, out, "two") + require.NotContains(t, out, "one") + + // Next, list credentials and specify both contexts. We should get the credential from the first specified context. + out, err = GPTScriptExec("--credential-context", "one,two", "cred") + require.NoError(t, err) + require.Contains(t, out, "one") + require.NotContains(t, out, "two") + + out, err = GPTScriptExec("--credential-context", "two,one", "cred") + require.NoError(t, err) + require.Contains(t, out, "two") + require.NotContains(t, out, "one") + + // Next, try showing the credentials. + out, err = GPTScriptExec("--credential-context", "one,two", "cred", "show", "testcred") + require.NoError(t, err) + require.Contains(t, out, "one") + require.NotContains(t, out, "two") + + out, err = GPTScriptExec("--credential-context", "two,one", "cred", "show", "testcred") + require.NoError(t, err) + require.Contains(t, out, "two") + require.NotContains(t, out, "one") + + // Make sure we get an error if we try to delete a credential with multiple contexts specified. + _, err = GPTScriptExec("--credential-context", "one,two", "cred", "delete", "testcred") + require.Error(t, err) + + // Now actually delete the credentials. 
+ _, err = GPTScriptExec("--credential-context", "one", "cred", "delete", "testcred") + require.NoError(t, err) + + _, err = GPTScriptExec("--credential-context", "two", "cred", "delete", "testcred") + require.NoError(t, err) +} diff --git a/integration/scripts/cred_stacked.gpt b/integration/scripts/cred_stacked.gpt new file mode 100644 index 00000000..1072ca7b --- /dev/null +++ b/integration/scripts/cred_stacked.gpt @@ -0,0 +1,36 @@ +name: testcred_one +credential: cred_one as testcred + +#!python3 + +import os + +print(os.environ.get("VALUE")) + +--- +name: testcred_two +credential: cred_two as testcred + +#!python3 + +import os + +print(os.environ.get("VALUE")) + +--- +name: cred_one + +#!python3 + +import json + +print(json.dumps({"env": {"VALUE": "one"}})) + +--- +name: cred_two + +#!python3 + +import json + +print(json.dumps({"env": {"VALUE": "two"}})) diff --git a/pkg/cli/credential.go b/pkg/cli/credential.go index 733590c4..674160b9 100644 --- a/pkg/cli/credential.go +++ b/pkg/cli/credential.go @@ -9,11 +9,10 @@ import ( "time" cmd2 "github.com/gptscript-ai/cmd" - "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/credentials" + "github.com/gptscript-ai/gptscript/pkg/gptscript" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" - "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/spf13/cobra" ) @@ -43,27 +42,26 @@ func (c *Credential) Run(cmd *cobra.Command, _ []string) error { return fmt.Errorf("failed to read CLI config: %w", err) } - ctx := c.root.CredentialContext - if c.AllContexts { - ctx = credentials.AllCredentialContexts - } - opts, err := c.root.NewGPTScriptOpts() if err != nil { return err } - opts.Cache = cache.Complete(opts.Cache) - opts.Runner = runner.Complete(opts.Runner) + opts = gptscript.Complete(opts) if opts.Runner.RuntimeManager == nil { opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) } + ctxs := opts.CredentialContexts + if 
c.AllContexts { + ctxs = []string{credentials.AllCredentialContexts} + } + if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg); err != nil { return err } // Initialize the credential store and get all the credentials. - store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, ctx, opts.Cache.CacheDir) + store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, ctxs, opts.Cache.CacheDir) if err != nil { return fmt.Errorf("failed to get credentials store: %w", err) } @@ -77,7 +75,7 @@ func (c *Credential) Run(cmd *cobra.Command, _ []string) error { defer w.Flush() // Sort credentials and print column names, depending on the options. - if c.AllContexts { + if c.AllContexts || len(c.root.CredentialContext) > 1 { // Sort credentials by context sort.Slice(creds, func(i, j int) bool { if creds[i].Context == creds[j].Context { @@ -114,7 +112,7 @@ func (c *Credential) Run(cmd *cobra.Command, _ []string) error { } var fields []any - if c.AllContexts { + if c.AllContexts || len(c.root.CredentialContext) > 1 { fields = []any{cred.Context, cred.ToolName, expires} } else { fields = []any{cred.ToolName, expires} diff --git a/pkg/cli/credential_delete.go b/pkg/cli/credential_delete.go index 4e9919df..b17ae851 100644 --- a/pkg/cli/credential_delete.go +++ b/pkg/cli/credential_delete.go @@ -3,11 +3,10 @@ package cli import ( "fmt" - "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/credentials" + "github.com/gptscript-ai/gptscript/pkg/gptscript" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" - "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/spf13/cobra" ) @@ -34,8 +33,7 @@ func (c *Delete) Run(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to read CLI config: %w", err) } - opts.Cache = cache.Complete(opts.Cache) - opts.Runner = runner.Complete(opts.Runner) + opts = gptscript.Complete(opts) if 
opts.Runner.RuntimeManager == nil { opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) } @@ -44,7 +42,7 @@ func (c *Delete) Run(cmd *cobra.Command, args []string) error { return err } - store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, c.root.CredentialContext, opts.Cache.CacheDir) + store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, opts.CredentialContexts, opts.Cache.CacheDir) if err != nil { return fmt.Errorf("failed to get credentials store: %w", err) } diff --git a/pkg/cli/credential_show.go b/pkg/cli/credential_show.go index fac1b719..d8ea980b 100644 --- a/pkg/cli/credential_show.go +++ b/pkg/cli/credential_show.go @@ -5,11 +5,10 @@ import ( "os" "text/tabwriter" - "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/credentials" + "github.com/gptscript-ai/gptscript/pkg/gptscript" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" - "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/spf13/cobra" ) @@ -36,8 +35,7 @@ func (c *Show) Run(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to read CLI config: %w", err) } - opts.Cache = cache.Complete(opts.Cache) - opts.Runner = runner.Complete(opts.Runner) + opts = gptscript.Complete(opts) if opts.Runner.RuntimeManager == nil { opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) } @@ -46,7 +44,7 @@ func (c *Show) Run(cmd *cobra.Command, args []string) error { return err } - store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, c.root.CredentialContext, opts.Cache.CacheDir) + store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, opts.CredentialContexts, opts.Cache.CacheDir) if err != nil { return fmt.Errorf("failed to get credentials store: %w", err) } diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 2d7e90d9..66719adc 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -64,7 +64,7 @@ type 
GPTScript struct { Chdir string `usage:"Change current working directory" short:"C"` Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"` Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"` - CredentialContext string `usage:"Context name in which to store credentials" default:"default"` + CredentialContext []string `usage:"Context name(s) in which to store credentials"` CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"` ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"` ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"` @@ -142,7 +142,7 @@ func (r *GPTScript) NewGPTScriptOpts() (gptscript.Options, error) { }, Quiet: r.Quiet, Env: os.Environ(), - CredentialContext: r.CredentialContext, + CredentialContexts: r.CredentialContext, Workspace: r.Workspace, DisablePromptServer: r.UI, DefaultModelProvider: r.DefaultModelProvider, diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index c8558f3a..749aba3a 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -8,7 +8,10 @@ import ( "strings" "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + credentials2 "github.com/docker/docker-credential-helpers/credentials" "github.com/gptscript-ai/gptscript/pkg/config" + "golang.org/x/exp/maps" ) const ( @@ -28,18 +31,18 @@ type CredentialStore interface { } type Store struct { - credCtx string + credCtxs []string credBuilder CredentialBuilder credHelperDirs CredentialHelperDirs cfg *config.CLIConfig } -func NewStore(cfg *config.CLIConfig, credentialBuilder CredentialBuilder, credCtx, cacheDir string) (CredentialStore, error) { - if err := validateCredentialCtx(credCtx); err != nil { +func NewStore(cfg *config.CLIConfig, 
credentialBuilder CredentialBuilder, credCtxs []string, cacheDir string) (CredentialStore, error) { + if err := validateCredentialCtx(credCtxs); err != nil { return nil, err } return Store{ - credCtx: credCtx, + credCtxs: credCtxs, credBuilder: credentialBuilder, credHelperDirs: GetCredentialHelperDirs(cacheDir), cfg: cfg, @@ -47,22 +50,45 @@ func NewStore(cfg *config.CLIConfig, credentialBuilder CredentialBuilder, credCt } func (s Store) Get(ctx context.Context, toolName string) (*Credential, bool, error) { + if first(s.credCtxs) == AllCredentialContexts { + return nil, false, fmt.Errorf("cannot get a credential with context %q", AllCredentialContexts) + } + store, err := s.getStore(ctx) if err != nil { return nil, false, err } - auth, err := store.Get(toolNameWithCtx(toolName, s.credCtx)) - if err != nil { - return nil, false, err - } else if auth.Password == "" { + + var ( + authCfg types.AuthConfig + credCtx string + ) + for _, c := range s.credCtxs { + auth, err := store.Get(toolNameWithCtx(toolName, c)) + if err != nil { + if credentials2.IsErrCredentialsNotFound(err) { + continue + } + return nil, false, err + } else if auth.Password == "" { + continue + } + + authCfg = auth + credCtx = c + break + } + + if credCtx == "" { + // Didn't find the credential return nil, false, nil } - if auth.ServerAddress == "" { - auth.ServerAddress = toolNameWithCtx(toolName, s.credCtx) // Not sure why we have to do this, but we do. + if authCfg.ServerAddress == "" { + authCfg.ServerAddress = toolNameWithCtx(toolName, credCtx) // Not sure why we have to do this, but we do. 
} - cred, err := credentialFromDockerAuthConfig(auth) + cred, err := credentialFromDockerAuthConfig(authCfg) if err != nil { return nil, false, err } @@ -70,7 +96,12 @@ func (s Store) Get(ctx context.Context, toolName string) (*Credential, bool, err } func (s Store) Add(ctx context.Context, cred Credential) error { - cred.Context = s.credCtx + first := first(s.credCtxs) + if first == AllCredentialContexts { + return fmt.Errorf("cannot add a credential with context %q", AllCredentialContexts) + } + cred.Context = first + store, err := s.getStore(ctx) if err != nil { return err @@ -83,11 +114,17 @@ func (s Store) Add(ctx context.Context, cred Credential) error { } func (s Store) Remove(ctx context.Context, toolName string) error { + first := first(s.credCtxs) + if len(s.credCtxs) > 1 || first == AllCredentialContexts { + return fmt.Errorf("error: credential deletion is not supported when multiple credential contexts are provided") + } + store, err := s.getStore(ctx) if err != nil { return err } - return store.Erase(toolNameWithCtx(toolName, s.credCtx)) + + return store.Erase(toolNameWithCtx(toolName, first)) } func (s Store) List(ctx context.Context) ([]Credential, error) { @@ -100,7 +137,8 @@ func (s Store) List(ctx context.Context) ([]Credential, error) { return nil, err } - var creds []Credential + credsByContext := make(map[string][]Credential) + allCreds := make([]Credential, 0) for serverAddress, authCfg := range list { if authCfg.ServerAddress == "" { authCfg.ServerAddress = serverAddress // Not sure why we have to do this, but we do. 
@@ -110,12 +148,29 @@ func (s Store) List(ctx context.Context) ([]Credential, error) { if err != nil { return nil, err } - if s.credCtx == AllCredentialContexts || c.Context == s.credCtx { - creds = append(creds, c) + + allCreds = append(allCreds, c) + + if credsByContext[c.Context] == nil { + credsByContext[c.Context] = []Credential{c} + } else { + credsByContext[c.Context] = append(credsByContext[c.Context], c) + } + } + + if first(s.credCtxs) == AllCredentialContexts { + return allCreds, nil + } + + // Go through the contexts in reverse order so that higher priority contexts override lower ones. + credsByName := make(map[string]Credential) + for i := len(s.credCtxs) - 1; i >= 0; i-- { + for _, c := range credsByContext[s.credCtxs[i]] { + credsByName[c.ToolName] = c } } - return creds, nil + return maps.Values(credsByName), nil } func (s *Store) getStore(ctx context.Context) (credentials.Store, error) { @@ -139,19 +194,22 @@ func (s *Store) getStoreByHelper(ctx context.Context, helper string) (credential return NewHelper(s.cfg, helper) } -func validateCredentialCtx(ctx string) error { - if ctx == "" { - return fmt.Errorf("credential context cannot be empty") +func validateCredentialCtx(ctxs []string) error { + if len(ctxs) == 0 { + return fmt.Errorf("credential contexts must be provided") } - if ctx == AllCredentialContexts { + if len(ctxs) == 1 && ctxs[0] == AllCredentialContexts { return nil } // check alphanumeric r := regexp.MustCompile("^[a-zA-Z0-9]+$") - if !r.MatchString(ctx) { - return fmt.Errorf("credential context must be alphanumeric") + for _, c := range ctxs { + if !r.MatchString(c) { + return fmt.Errorf("credential contexts must be alphanumeric") + } } + return nil } diff --git a/pkg/credentials/util.go b/pkg/credentials/util.go index 70f31e97..39200369 100644 --- a/pkg/credentials/util.go +++ b/pkg/credentials/util.go @@ -15,3 +15,10 @@ func GetCredentialHelperDirs(cacheDir string) CredentialHelperDirs { BinDir: filepath.Join(cacheDir, "repos", 
"gptscript-credential-helpers", "bin"), } } + +func first(s []string) string { + if len(s) == 0 { + return "" + } + return s[0] +} diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index abae80ac..7a10eda2 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -45,7 +45,7 @@ type Options struct { Monitor monitor.Options Runner runner.Options DefaultModelProvider string - CredentialContext string + CredentialContexts []string Quiet *bool Workspace string DisablePromptServer bool @@ -60,7 +60,7 @@ func Complete(opts ...Options) Options { result.Runner = runner.Complete(result.Runner, opt.Runner) result.OpenAI = openai.Complete(result.OpenAI, opt.OpenAI) - result.CredentialContext = types.FirstSet(opt.CredentialContext, result.CredentialContext) + result.CredentialContexts = append(result.CredentialContexts, opt.CredentialContexts...) result.Quiet = types.FirstSet(opt.Quiet, result.Quiet) result.Workspace = types.FirstSet(opt.Workspace, result.Workspace) result.Env = append(result.Env, opt.Env...) 
@@ -74,8 +74,8 @@ func Complete(opts ...Options) Options { if len(result.Env) == 0 { result.Env = os.Environ() } - if result.CredentialContext == "" { - result.CredentialContext = credentials.DefaultCredentialContext + if len(result.CredentialContexts) == 0 { + result.CredentialContexts = []string{credentials.DefaultCredentialContext} } return result @@ -103,7 +103,7 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { return nil, err } - credStore, err := credentials.NewStore(cliCfg, opts.Runner.RuntimeManager, opts.CredentialContext, cacheClient.CacheDir()) + credStore, err := credentials.NewStore(cliCfg, opts.Runner.RuntimeManager, opts.CredentialContexts, cacheClient.CacheDir()) if err != nil { return nil, err } diff --git a/pkg/sdkserver/credentials.go b/pkg/sdkserver/credentials.go index d3f86b1f..b0246621 100644 --- a/pkg/sdkserver/credentials.go +++ b/pkg/sdkserver/credentials.go @@ -5,13 +5,14 @@ import ( "encoding/json" "fmt" "net/http" + "slices" "github.com/gptscript-ai/gptscript/pkg/config" gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/credentials" ) -func (s *server) initializeCredentialStore(ctx context.Context, credCtx string) (credentials.CredentialStore, error) { +func (s *server) initializeCredentialStore(ctx context.Context, credCtxs []string) (credentials.CredentialStore, error) { cfg, err := config.ReadCLIConfig(s.gptscriptOpts.OpenAI.ConfigFile) if err != nil { return nil, fmt.Errorf("failed to read CLI config: %w", err) @@ -24,7 +25,7 @@ func (s *server) initializeCredentialStore(ctx context.Context, credCtx string) return nil, fmt.Errorf("failed to ensure credential helpers: %w", err) } - store, err := credentials.NewStore(cfg, s.runtimeManager, credCtx, s.gptscriptOpts.Cache.CacheDir) + store, err := credentials.NewStore(cfg, s.runtimeManager, credCtxs, s.gptscriptOpts.Cache.CacheDir) if err != nil { return nil, fmt.Errorf("failed to initialize credential store: %w", err) } 
@@ -41,9 +42,9 @@ func (s *server) listCredentials(w http.ResponseWriter, r *http.Request) { } if req.AllContexts { - req.Context = credentials.AllCredentialContexts - } else if req.Context == "" { - req.Context = credentials.DefaultCredentialContext + req.Context = []string{credentials.AllCredentialContexts} + } else if len(req.Context) == 0 { + req.Context = []string{credentials.DefaultCredentialContext} } store, err := s.initializeCredentialStore(r.Context(), req.Context) @@ -87,7 +88,7 @@ func (s *server) createCredential(w http.ResponseWriter, r *http.Request) { cred.Context = credentials.DefaultCredentialContext } - store, err := s.initializeCredentialStore(r.Context(), cred.Context) + store, err := s.initializeCredentialStore(r.Context(), []string{cred.Context}) if err != nil { writeError(logger, w, http.StatusInternalServerError, err) return @@ -114,11 +115,11 @@ func (s *server) revealCredential(w http.ResponseWriter, r *http.Request) { return } - if req.AllContexts || req.Context == credentials.AllCredentialContexts { + if req.AllContexts || slices.Contains(req.Context, credentials.AllCredentialContexts) { writeError(logger, w, http.StatusBadRequest, fmt.Errorf("allContexts is not supported for credential retrieval; please specify the specific context that the credential is in")) return - } else if req.Context == "" { - req.Context = credentials.DefaultCredentialContext + } else if len(req.Context) == 0 { + req.Context = []string{credentials.DefaultCredentialContext} } store, err := s.initializeCredentialStore(r.Context(), req.Context) @@ -151,11 +152,11 @@ func (s *server) deleteCredential(w http.ResponseWriter, r *http.Request) { return } - if req.AllContexts || req.Context == credentials.AllCredentialContexts { + if req.AllContexts || slices.Contains(req.Context, credentials.AllCredentialContexts) { writeError(logger, w, http.StatusBadRequest, fmt.Errorf("allContexts is not supported for credential deletion; please specify the specific context that the 
credential is in")) return - } else if req.Context == "" { - req.Context = credentials.DefaultCredentialContext + } else if len(req.Context) == 0 { + req.Context = []string{credentials.DefaultCredentialContext} } store, err := s.initializeCredentialStore(r.Context(), req.Context) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index f82fa8a7..484a6fa1 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -184,11 +184,11 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { } opts := gptscript.Options{ - Cache: cache.Options(reqObject.cacheOptions), - OpenAI: openai.Options(reqObject.openAIOptions), - Env: reqObject.Env, - Workspace: reqObject.Workspace, - CredentialContext: reqObject.CredentialContext, + Cache: cache.Options(reqObject.cacheOptions), + OpenAI: openai.Options(reqObject.openAIOptions), + Env: reqObject.Env, + Workspace: reqObject.Workspace, + CredentialContexts: reqObject.CredentialContext, Runner: runner.Options{ // Set the monitor factory so that we can get events from the server. 
MonitorFactory: NewSessionFactory(s.events), diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 7ed7da78..65a0c049 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -58,7 +58,7 @@ type toolOrFileRequest struct { ChatState string `json:"chatState"` Workspace string `json:"workspace"` Env []string `json:"env"` - CredentialContext string `json:"credentialContext"` + CredentialContext []string `json:"credentialContext"` CredentialOverrides []string `json:"credentialOverrides"` Confirm bool `json:"confirm"` Location string `json:"location,omitempty"` @@ -255,7 +255,7 @@ type prompt struct { type credentialsRequest struct { content `json:",inline"` - AllContexts bool `json:"allContexts"` - Context string `json:"context"` - Name string `json:"name"` + AllContexts bool `json:"allContexts"` + Context []string `json:"context"` + Name string `json:"name"` } From f6dcb5fe08b7db4f9fe8ffe5834e19f74163bb44 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 18 Sep 2024 10:50:55 -0700 Subject: [PATCH 138/270] chore: don't capture stderr in tool output (#847) --- pkg/engine/cmd.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index c7d21a2b..317f0d6a 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -148,13 +148,8 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate ) cmd.Stdout = io.MultiWriter(stdout, stdoutAndErr, progressOut) - if toolCategory == NoCategory || toolCategory == ContextToolCategory { - cmd.Stderr = io.MultiWriter(stdoutAndErr, progressOut) - result = stdoutAndErr - } else { - cmd.Stderr = io.MultiWriter(stdoutAndErr, progressOut, os.Stderr) - result = stdout - } + cmd.Stderr = io.MultiWriter(stdoutAndErr, progressOut, os.Stderr) + result = stdout if err := cmd.Run(); err != nil { if toolCategory == NoCategory { From f30e865ba54cfb66c82f8ff7489bf58736026778 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 18 Sep 
2024 10:49:23 -0700 Subject: [PATCH 139/270] chore: support chattable code tools --- pkg/engine/cmd.go | 8 ++++++-- pkg/engine/engine.go | 44 ++++++++++++++++++++++++++++---------------- pkg/runner/runner.go | 17 ++++++++++++++--- 3 files changed, 48 insertions(+), 21 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 317f0d6a..33a67640 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -68,12 +68,14 @@ func compressEnv(envs []string) (result []string) { func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCategory ToolCategory) (cmdOut string, cmdErr error) { id := counter.Next() + var combinedOutput string defer func() { e.Progress <- types.CompletionStatus{ CompletionID: id, Response: map[string]any{ - "output": cmdOut, - "err": cmdErr, + "output": cmdOut, + "fullOutput": combinedOutput, + "err": cmdErr, }, } }() @@ -156,9 +158,11 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, stdoutAndErr), nil } log.Errorf("failed to run tool [%s] cmd %v: %v", tool.Parameters.Name, cmd.Args, err) + combinedOutput = stdoutAndErr.String() return "", fmt.Errorf("ERROR: %s: %w", result, err) } + combinedOutput = stdoutAndErr.String() return result.String(), IsChatFinishMessage(result.String()) } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index d028d50b..0665991c 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -281,6 +281,25 @@ func populateMessageParams(ctx Context, completion *types.CompletionRequest, too return nil } +func (e *Engine) runCommandTools(ctx Context, tool types.Tool, input string) (*Return, error) { + if tool.IsHTTP() { + return e.runHTTP(ctx.Ctx, ctx.Program, tool, input) + } else if tool.IsDaemon() { + return e.runDaemon(ctx.Ctx, ctx.Program, tool, input) + } else if tool.IsOpenAPI() { + return e.runOpenAPI(tool, input) + } else if tool.IsEcho() { + return e.runEcho(tool) + } + 
s, err := e.runCommand(ctx, tool, input, ctx.ToolCategory) + if err != nil { + return nil, err + } + return &Return{ + Result: &s, + }, nil +} + func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { tool := ctx.Tool @@ -291,22 +310,7 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { }() if tool.IsCommand() { - if tool.IsHTTP() { - return e.runHTTP(ctx.Ctx, ctx.Program, tool, input) - } else if tool.IsDaemon() { - return e.runDaemon(ctx.Ctx, ctx.Program, tool, input) - } else if tool.IsOpenAPI() { - return e.runOpenAPI(tool, input) - } else if tool.IsEcho() { - return e.runEcho(tool) - } - s, err := e.runCommand(ctx, tool, input, ctx.ToolCategory) - if err != nil { - return nil, err - } - return &Return{ - Result: &s, - }, nil + return e.runCommandTools(ctx, tool, input) } if ctx.ToolCategory == CredentialToolCategory { @@ -431,6 +435,14 @@ func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { } func (e *Engine) Continue(ctx Context, state *State, results ...CallResult) (*Return, error) { + if ctx.Tool.IsCommand() { + var input string + if len(results) == 1 { + input = results[0].User + } + return e.runCommandTools(ctx, ctx.Tool, input) + } + if state == nil { return nil, fmt.Errorf("invalid continue call, missing state") } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index c843b6b5..e6318c15 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -179,7 +179,6 @@ func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Progra } } else { state = state.WithResumeInput(&input) - state.ResumeInput = &input } state, err = r.resume(callCtx, monitor, env, state) @@ -683,7 +682,13 @@ func (r *Runner) subCall(ctx context.Context, parentContext engine.Context, moni }, nil } - return r.call(callCtx, monitor, env, input) + state, err := r.call(callCtx, monitor, env, input) + if finishErr := (*engine.ErrChatFinish)(nil); errors.As(err, &finishErr) && callCtx.Tool.Chat 
{ + return &State{ + Result: &finishErr.Message, + }, nil + } + return state, err } func (r *Runner) subCallResume(ctx context.Context, parentContext engine.Context, monitor Monitor, env []string, toolID, callID string, state *State, toolCategory engine.ToolCategory) (*State, error) { @@ -692,7 +697,13 @@ func (r *Runner) subCallResume(ctx context.Context, parentContext engine.Context return nil, err } - return r.resume(callCtx, monitor, env, state) + state, err = r.resume(callCtx, monitor, env, state) + if finishErr := (*engine.ErrChatFinish)(nil); errors.As(err, &finishErr) && callCtx.Tool.Chat { + return &State{ + Result: &finishErr.Message, + }, nil + } + return state, err } type SubCallResult struct { From a15945e9c34393018c19427778c12298bd6823d6 Mon Sep 17 00:00:00 2001 From: Atulpriya Sharma Date: Fri, 20 Sep 2024 01:56:45 +0530 Subject: [PATCH 140/270] Add Testkube GPT example (#850) --- examples/testkube.gpt | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 examples/testkube.gpt diff --git a/examples/testkube.gpt b/examples/testkube.gpt new file mode 100644 index 00000000..758d9b34 --- /dev/null +++ b/examples/testkube.gpt @@ -0,0 +1,38 @@ +Name: Testkube +Description: A tool to help you perform testing of your application on your Kubernetes clusters using Testkube. +Context: learn-testkube, learn-kubectl +Tools: sys.exec, sys.http.html2text?, sys.find, sys.read, sys.write, github.com/gptscript-ai/browse-web-page +chat:true + +You are an assistant for Testkube and help the user create, manage and execute test workflows. You can also perform kubernetes related tasks. + +Rules +1. Access the testkube workflow docs at https://docs.testkube.io/articles/test-workflows and remember the latest specification to create testworkflows. +2. Use testkube CLI to interact with Testkube. +3. Use kubectl CLI to interact with the Kubernetes cluster. +4. 
Based on the user's request, perform actions on the Kubernetes cluster and create, manage, delete test workflows. + + +--- + +Name: learn-testkube +Description: A tool to help you learn testkube cli +#!/bin/bash +testkube --help +testkube create --help +testkube create testworkflow --help +testkube run --help + +--- + +Name: learn-kubectl +Description: A tool to help you learn k8s and related commands +#!/bin/bash + +CMDS="kubectl helm" +echo 'The additional CLI commands are available locally, use the `exec` tool to invoke them:' +for i in $CMDS; do + if [ -e "$(command -v $i)" ]; then + echo ' ' $i + fi +done From 354541cb0fa6d8bc48949285527b791454b80666 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 19 Sep 2024 17:28:09 -0400 Subject: [PATCH 141/270] fix: stop always adding the default cred context (#854) Signed-off-by: Grant Linville --- pkg/gptscript/gptscript.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 7a10eda2..11afb7d6 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -60,7 +60,7 @@ func Complete(opts ...Options) Options { result.Runner = runner.Complete(result.Runner, opt.Runner) result.OpenAI = openai.Complete(result.OpenAI, opt.OpenAI) - result.CredentialContexts = append(result.CredentialContexts, opt.CredentialContexts...) + result.CredentialContexts = opt.CredentialContexts result.Quiet = types.FirstSet(opt.Quiet, result.Quiet) result.Workspace = types.FirstSet(opt.Workspace, result.Workspace) result.Env = append(result.Env, opt.Env...) 
From 8ecad90f7ca24b34a42811dd1b49caa046f102e1 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 19 Sep 2024 20:00:35 -0400 Subject: [PATCH 142/270] fix: sdkserver: rename credentialContext to credentialContexts (#855) Signed-off-by: Grant Linville --- pkg/sdkserver/routes.go | 2 +- pkg/sdkserver/types.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 484a6fa1..fc69a08c 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -188,7 +188,7 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { OpenAI: openai.Options(reqObject.openAIOptions), Env: reqObject.Env, Workspace: reqObject.Workspace, - CredentialContexts: reqObject.CredentialContext, + CredentialContexts: reqObject.CredentialContexts, Runner: runner.Options{ // Set the monitor factory so that we can get events from the server. MonitorFactory: NewSessionFactory(s.events), diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 65a0c049..42b2bb64 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -58,7 +58,7 @@ type toolOrFileRequest struct { ChatState string `json:"chatState"` Workspace string `json:"workspace"` Env []string `json:"env"` - CredentialContext []string `json:"credentialContext"` + CredentialContexts []string `json:"credentialContexts"` CredentialOverrides []string `json:"credentialOverrides"` Confirm bool `json:"confirm"` Location string `json:"location,omitempty"` From 32e544c993c71d74c95a3607622caa7a5233cb97 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 23 Sep 2024 11:57:36 -0700 Subject: [PATCH 143/270] bug: load vcs support in embedded server always (#858) * bug: load vcs support in embedded server always * chore: try always running exec for "sh -c" in engine --- main.go | 2 -- pkg/engine/cmd.go | 2 +- pkg/gptscript/gptscript.go | 3 +++ 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/main.go b/main.go index 
33ab4278..02923925 100644 --- a/main.go +++ b/main.go @@ -2,8 +2,6 @@ package main import ( "github.com/gptscript-ai/gptscript/pkg/cli" - // Load all VCS - _ "github.com/gptscript-ai/gptscript/pkg/loader/vcs" ) func main() { diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 33a67640..5b27a579 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -328,7 +328,7 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T } if useShell { - args = append([]string{"/bin/sh", "-c"}, strings.Join(args, " ")) + args = append([]string{"/bin/sh", "-c"}, "exec "+strings.Join(args, " ")) } else { args[0] = env.Lookup(envvars, args[0]) } diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 11afb7d6..679eb503 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -25,6 +25,9 @@ import ( "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/gptscript-ai/gptscript/pkg/types" + + // Load all VCS + _ "github.com/gptscript-ai/gptscript/pkg/loader/vcs" ) var log = mvl.Package() From 2eafb08d985cfc63c74304e99c51c9c139729151 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 23 Sep 2024 12:27:52 -0700 Subject: [PATCH 144/270] chore: bump tui (#859) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 83146eeb..42a1e76f 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb - github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 - github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e + github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240920232051-64eaa0ac8caf + github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 
github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 diff --git a/go.sum b/go.sum index 85a3f76e..c7cb9d5c 100644 --- a/go.sum +++ b/go.sum @@ -204,10 +204,10 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 h1:BTfJ6ls31Roq42lznlZnuPzRf0wrT8jT+tWcvq7wDXY= -github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e h1:OO/b8gGQi3jIpDoII+jf7fc4ssqOZdFcb9zB+QjsxRQ= -github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e/go.mod h1:KGtCo7cjH6qR6Wp6AyI1dL1R8bln8wVpdDEoopRUckY= +github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240920232051-64eaa0ac8caf h1:3uBPUYBuCIWgUxQPD3d3bHHr/0zgCsdzk628FJZCmno= +github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240920232051-64eaa0ac8caf/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= +github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 h1:vkgNZVWQgbE33VD3z9WKDwuu7B/eJVVMMPM62ixfCR8= +github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6/go.mod h1:frrl/B+ZH3VSs3Tqk2qxEIIWTONExX3tuUa4JsVnqx4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= From ec0c0198345bd28463e4d99e747da744b7476692 Mon Sep 17 00:00:00 
2001 From: Grant Linville Date: Mon, 23 Sep 2024 16:09:15 -0400 Subject: [PATCH 145/270] fix: modify credential refresh to support stacked contexts (#856) Signed-off-by: Grant Linville --- pkg/credentials/noop.go | 4 ++++ pkg/credentials/store.go | 21 +++++++++++++++++++ pkg/runner/runner.go | 44 +++++++++++++++++++++++++++++----------- 3 files changed, 57 insertions(+), 12 deletions(-) diff --git a/pkg/credentials/noop.go b/pkg/credentials/noop.go index 5f3cc5ad..3a13b907 100644 --- a/pkg/credentials/noop.go +++ b/pkg/credentials/noop.go @@ -12,6 +12,10 @@ func (s NoopStore) Add(context.Context, Credential) error { return nil } +func (s NoopStore) Refresh(context.Context, Credential) error { + return nil +} + func (s NoopStore) Remove(context.Context, string) error { return nil } diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index 749aba3a..2414e1e8 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -5,6 +5,7 @@ import ( "fmt" "path/filepath" "regexp" + "slices" "strings" "github.com/docker/cli/cli/config/credentials" @@ -26,6 +27,7 @@ type CredentialBuilder interface { type CredentialStore interface { Get(ctx context.Context, toolName string) (*Credential, bool, error) Add(ctx context.Context, cred Credential) error + Refresh(ctx context.Context, cred Credential) error Remove(ctx context.Context, toolName string) error List(ctx context.Context) ([]Credential, error) } @@ -95,6 +97,8 @@ func (s Store) Get(ctx context.Context, toolName string) (*Credential, bool, err return &cred, true, nil } +// Add adds a new credential to the credential store. +// Any context set on the credential object will be overwritten with the first context of the credential store. 
func (s Store) Add(ctx context.Context, cred Credential) error { first := first(s.credCtxs) if first == AllCredentialContexts { @@ -113,6 +117,23 @@ func (s Store) Add(ctx context.Context, cred Credential) error { return store.Store(auth) } +// Refresh updates an existing credential in the credential store. +func (s Store) Refresh(ctx context.Context, cred Credential) error { + if !slices.Contains(s.credCtxs, cred.Context) { + return fmt.Errorf("context %q not in list of valid contexts for this credential store", cred.Context) + } + + store, err := s.getStore(ctx) + if err != nil { + return err + } + auth, err := cred.toDockerAuthConfig() + if err != nil { + return err + } + return store.Store(auth) +} + func (s Store) Remove(ctx context.Context, toolName string) error { first := first(s.credCtxs) if len(s.credCtxs) > 1 || first == AllCredentialContexts { diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index e6318c15..7ac9fae0 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -854,8 +854,10 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } var ( - c *credentials.Credential - exists bool + c *credentials.Credential + resultCredential credentials.Credential + exists bool + refresh bool ) rm := runtimeWithLogger(callCtx, monitor, r.runtimeManager) @@ -886,6 +888,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env if !exists || c.IsExpired() { // If the existing credential is expired, we need to provide it to the cred tool through the environment. 
if exists && c.IsExpired() { + refresh = true credJSON, err := json.Marshal(c) if err != nil { return nil, fmt.Errorf("failed to marshal credential: %w", err) @@ -916,39 +919,56 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env continue } - if err := json.Unmarshal([]byte(*res.Result), &c); err != nil { + if err := json.Unmarshal([]byte(*res.Result), &resultCredential); err != nil { return nil, fmt.Errorf("failed to unmarshal credential tool %s response: %w", ref.Reference, err) } - c.ToolName = credName - c.Type = credentials.CredentialTypeTool + resultCredential.ToolName = credName + resultCredential.Type = credentials.CredentialTypeTool + + if refresh { + // If this is a credential refresh, we need to make sure we use the same context. + resultCredential.Context = c.Context + } else { + // If it is a new credential, let the credential store determine the context. + resultCredential.Context = "" + } isEmpty := true - for _, v := range c.Env { + for _, v := range resultCredential.Env { if v != "" { isEmpty = false break } } - if !c.Ephemeral { + if !resultCredential.Ephemeral { // Only store the credential if the tool is on GitHub or has an alias, and the credential is non-empty. 
if (isGitHubTool(toolName) && callCtx.Program.ToolSet[ref.ToolID].Source.Repo != nil) || credentialAlias != "" { if isEmpty { log.Warnf("Not saving empty credential for tool %s", toolName) - } else if err := r.credStore.Add(callCtx.Ctx, *c); err != nil { - return nil, fmt.Errorf("failed to add credential for tool %s: %w", toolName, err) + } else { + if refresh { + err = r.credStore.Refresh(callCtx.Ctx, resultCredential) + } else { + err = r.credStore.Add(callCtx.Ctx, resultCredential) + } + if err != nil { + return nil, fmt.Errorf("failed to save credential for tool %s: %w", toolName, err) + } } } else { log.Warnf("Not saving credential for tool %s - credentials will only be saved for tools from GitHub, or tools that use aliases.", toolName) } } + } else { + resultCredential = *c } - if c.ExpiresAt != nil && (nearestExpiration == nil || nearestExpiration.After(*c.ExpiresAt)) { - nearestExpiration = c.ExpiresAt + if resultCredential.ExpiresAt != nil && (nearestExpiration == nil || nearestExpiration.After(*resultCredential.ExpiresAt)) { + nearestExpiration = resultCredential.ExpiresAt } - for k, v := range c.Env { + for k, v := range resultCredential.Env { env = append(env, fmt.Sprintf("%s=%s", k, v)) } } From f922de17ed3b14b9872b2a9ceb6ce092ec1a1d64 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 25 Sep 2024 14:23:47 -0400 Subject: [PATCH 146/270] fix: pass Usage, ChatResponseCached, and ToolResults to SDKs (#860) Signed-off-by: Donnie Adams --- pkg/sdkserver/types.go | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 42b2bb64..a4332557 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -173,6 +173,9 @@ func (r *runInfo) process(e event) map[string]any { if e.Content != "" { call.Input = e.Content } + if e.ToolResults > 0 { + call.ToolResults = e.ToolResults + } case runner.EventTypeCallSubCalls: call.setSubCalls(e.ToolSubCalls) @@ -185,6 +188,8 @@ 
func (r *runInfo) process(e event) map[string]any { call.setOutput(e.Content) case runner.EventTypeChat: + call.Usage = e.Usage + call.ChatResponseCached = e.ChatResponseCached if e.ChatRequest != nil { call.LLMRequest = e.ChatRequest } @@ -210,14 +215,16 @@ func (r *runInfo) processStdout(cs runner.ChatResponse) { type call struct { engine.CallContext `json:",inline"` - Type runner.EventType `json:"type"` - Start time.Time `json:"start"` - End time.Time `json:"end"` - Input string `json:"input"` - Output []output `json:"output"` - Usage types.Usage `json:"usage"` - LLMRequest any `json:"llmRequest"` - LLMResponse any `json:"llmResponse"` + Type runner.EventType `json:"type"` + Start time.Time `json:"start"` + End time.Time `json:"end"` + Input string `json:"input"` + Output []output `json:"output"` + Usage types.Usage `json:"usage"` + ChatResponseCached bool `json:"chatResponseCached"` + ToolResults int `json:"toolResults"` + LLMRequest any `json:"llmRequest"` + LLMResponse any `json:"llmResponse"` } func (c *call) setSubCalls(subCalls map[string]engine.Call) { From dd4ba0dffc5b47eb0dcade3359b8bfa6a36444e1 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 25 Sep 2024 17:16:02 -0700 Subject: [PATCH 147/270] chore: support params on input filters besides just input --- pkg/runner/input.go | 8 ++-- pkg/tests/runner2_test.go | 24 ++++++++++ .../TestInputFilterMoreArgs/call1-resp.golden | 9 ++++ .../TestInputFilterMoreArgs/call1.golden | 25 ++++++++++ .../TestInputFilterMoreArgs/call2-resp.golden | 9 ++++ .../TestInputFilterMoreArgs/call2.golden | 25 ++++++++++ .../TestInputFilterMoreArgs/step1.golden | 48 +++++++++++++++++++ .../TestInputFilterMoreArgs/step2.golden | 48 +++++++++++++++++++ 8 files changed, 193 insertions(+), 3 deletions(-) create mode 100644 pkg/tests/testdata/TestInputFilterMoreArgs/call1-resp.golden create mode 100644 pkg/tests/testdata/TestInputFilterMoreArgs/call1.golden create mode 100644 
pkg/tests/testdata/TestInputFilterMoreArgs/call2-resp.golden create mode 100644 pkg/tests/testdata/TestInputFilterMoreArgs/call2.golden create mode 100644 pkg/tests/testdata/TestInputFilterMoreArgs/step1.golden create mode 100644 pkg/tests/testdata/TestInputFilterMoreArgs/step2.golden diff --git a/pkg/runner/input.go b/pkg/runner/input.go index a211ec9d..23228813 100644 --- a/pkg/runner/input.go +++ b/pkg/runner/input.go @@ -15,12 +15,14 @@ func (r *Runner) handleInput(callCtx engine.Context, monitor Monitor, env []stri } for _, inputToolRef := range inputToolRefs { - inputData, err := json.Marshal(map[string]any{ - "input": input, - }) + data := map[string]any{} + _ = json.Unmarshal([]byte(input), &data) + data["input"] = input + inputData, err := json.Marshal(data) if err != nil { return "", fmt.Errorf("failed to marshal input: %w", err) } + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, inputToolRef.ToolID, string(inputData), "", engine.InputToolCategory) if err != nil { return "", err diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index 27d4c226..93899c84 100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -55,3 +55,27 @@ Yo dawg`, "") resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") r.AssertStep(t, resp, err) } + +func TestInputFilterMoreArgs(t *testing.T) { + r := tester.NewRunner(t) + prg, err := loader.ProgramFromSource(context.Background(), ` +chat: true +inputfilters: stuff + +Say hi + +--- +name: stuff +params: foo: bar +params: input: baz + +#!/bin/bash +echo ${FOO}:${INPUT} +`, "") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, `{"foo":"123"}`) + r.AssertStep(t, resp, err) + resp, err = r.Chat(context.Background(), nil, prg, nil, `"foo":"123"}`) + r.AssertStep(t, resp, err) +} diff --git a/pkg/tests/testdata/TestInputFilterMoreArgs/call1-resp.golden b/pkg/tests/testdata/TestInputFilterMoreArgs/call1-resp.golden new file mode 100644 index 
00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestInputFilterMoreArgs/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestInputFilterMoreArgs/call1.golden b/pkg/tests/testdata/TestInputFilterMoreArgs/call1.golden new file mode 100644 index 00000000..30693444 --- /dev/null +++ b/pkg/tests/testdata/TestInputFilterMoreArgs/call1.golden @@ -0,0 +1,25 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Say hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "123:{\"foo\":\"123\"}\n" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git a/pkg/tests/testdata/TestInputFilterMoreArgs/call2-resp.golden b/pkg/tests/testdata/TestInputFilterMoreArgs/call2-resp.golden new file mode 100644 index 00000000..997ca1b9 --- /dev/null +++ b/pkg/tests/testdata/TestInputFilterMoreArgs/call2-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestInputFilterMoreArgs/call2.golden b/pkg/tests/testdata/TestInputFilterMoreArgs/call2.golden new file mode 100644 index 00000000..5a39730d --- /dev/null +++ b/pkg/tests/testdata/TestInputFilterMoreArgs/call2.golden @@ -0,0 +1,25 @@ +`{ + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Say hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": ":\"foo\":\"123\"}\n" + } + ], + "usage": {} + } + ], + "chat": true +}` diff --git a/pkg/tests/testdata/TestInputFilterMoreArgs/step1.golden b/pkg/tests/testdata/TestInputFilterMoreArgs/step1.golden new file mode 100644 index 00000000..a04d4508 --- /dev/null +++ b/pkg/tests/testdata/TestInputFilterMoreArgs/step1.golden @@ -0,0 +1,48 @@ +`{ + "done": 
false, + "content": "TEST RESULT CALL: 1", + "toolID": "inline:", + "state": { + "continuation": { + "state": { + "input": "123:{\"foo\":\"123\"}\n", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Say hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": "123:{\"foo\":\"123\"}\n" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} + } + ], + "chat": true + } + }, + "result": "TEST RESULT CALL: 1" + }, + "continuationToolID": "inline:" + } +}` diff --git a/pkg/tests/testdata/TestInputFilterMoreArgs/step2.golden b/pkg/tests/testdata/TestInputFilterMoreArgs/step2.golden new file mode 100644 index 00000000..aa41f1dd --- /dev/null +++ b/pkg/tests/testdata/TestInputFilterMoreArgs/step2.golden @@ -0,0 +1,48 @@ +`{ + "done": false, + "content": "TEST RESULT CALL: 2", + "toolID": "inline:", + "state": { + "continuation": { + "state": { + "input": ":\"foo\":\"123\"}\n", + "completion": { + "model": "gpt-4o", + "internalSystemPrompt": false, + "messages": [ + { + "role": "system", + "content": [ + { + "text": "Say hi" + } + ], + "usage": {} + }, + { + "role": "user", + "content": [ + { + "text": ":\"foo\":\"123\"}\n" + } + ], + "usage": {} + }, + { + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 2" + } + ], + "usage": {} + } + ], + "chat": true + } + }, + "result": "TEST RESULT CALL: 2" + }, + "continuationToolID": "inline:" + } +}` From fde592070a94e3a8a3c3eff30b74140ac73bfbc2 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 26 Sep 2024 14:58:26 -0400 Subject: [PATCH 148/270] feat: support sqlite credential helper (#857) Signed-off-by: Grant Linville --- pkg/config/cliconfig.go | 43 ++++++++++++++++++------- pkg/credentials/store.go | 4 +-- pkg/credentials/util.go | 33 ++++++++++++++++--- pkg/repos/get.go | 50 +++++++++++++++++------------ 
pkg/repos/runtimes/golang/golang.go | 3 +- 5 files changed, 93 insertions(+), 40 deletions(-) diff --git a/pkg/config/cliconfig.go b/pkg/config/cliconfig.go index dd358d52..7a82b58a 100644 --- a/pkg/config/cliconfig.go +++ b/pkg/config/cliconfig.go @@ -15,13 +15,32 @@ import ( "github.com/docker/cli/cli/config/types" ) +const ( + WincredCredHelper = "wincred" + OsxkeychainCredHelper = "osxkeychain" + SecretserviceCredHelper = "secretservice" + PassCredHelper = "pass" + FileCredHelper = "file" + SqliteCredHelper = "sqlite" + + GPTScriptHelperPrefix = "gptscript-credential-" +) + var ( - darwinHelpers = []string{"osxkeychain", "file"} - windowsHelpers = []string{"wincred", "file"} - linuxHelpers = []string{"secretservice", "pass", "file"} + darwinHelpers = []string{OsxkeychainCredHelper, FileCredHelper, SqliteCredHelper} + windowsHelpers = []string{WincredCredHelper, FileCredHelper} + linuxHelpers = []string{SecretserviceCredHelper, PassCredHelper, FileCredHelper, SqliteCredHelper} ) -const GPTScriptHelperPrefix = "gptscript-credential-" +func listAsString(helpers []string) string { + if len(helpers) == 0 { + return "" + } else if len(helpers) == 1 { + return helpers[0] + } + + return strings.Join(helpers[:len(helpers)-1], ", ") + " or " + helpers[len(helpers)-1] +} type AuthConfig types.AuthConfig @@ -150,13 +169,13 @@ func ReadCLIConfig(gptscriptConfigFile string) (*CLIConfig, error) { errMsg := fmt.Sprintf("invalid credential store '%s'", result.CredentialsStore) switch runtime.GOOS { case "darwin": - errMsg += " (use 'osxkeychain' or 'file')" + errMsg += fmt.Sprintf(" (use %s)", listAsString(darwinHelpers)) case "windows": - errMsg += " (use 'wincred' or 'file')" + errMsg += fmt.Sprintf(" (use %s)", listAsString(windowsHelpers)) case "linux": - errMsg += " (use 'secretservice', 'pass', or 'file')" + errMsg += fmt.Sprintf(" (use %s)", listAsString(linuxHelpers)) default: - errMsg += " (use 'file')" + errMsg += " (use file)" } errMsg += fmt.Sprintf("\nPlease edit 
your config file at %s to fix this.", result.location) @@ -169,11 +188,11 @@ func ReadCLIConfig(gptscriptConfigFile string) (*CLIConfig, error) { func (c *CLIConfig) setDefaultCredentialsStore() error { switch runtime.GOOS { case "darwin": - c.CredentialsStore = "osxkeychain" + c.CredentialsStore = OsxkeychainCredHelper case "windows": - c.CredentialsStore = "wincred" + c.CredentialsStore = WincredCredHelper default: - c.CredentialsStore = "file" + c.CredentialsStore = FileCredHelper } return c.Save() } @@ -187,7 +206,7 @@ func isValidCredentialHelper(helper string) bool { case "linux": return slices.Contains(linuxHelpers, helper) default: - return helper == "file" + return helper == FileCredHelper } } diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index 2414e1e8..9827b147 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -46,7 +46,7 @@ func NewStore(cfg *config.CLIConfig, credentialBuilder CredentialBuilder, credCt return Store{ credCtxs: credCtxs, credBuilder: credentialBuilder, - credHelperDirs: GetCredentialHelperDirs(cacheDir), + credHelperDirs: GetCredentialHelperDirs(cacheDir, cfg.CredentialsStore), cfg: cfg, }, nil } @@ -199,7 +199,7 @@ func (s *Store) getStore(ctx context.Context) (credentials.Store, error) { } func (s *Store) getStoreByHelper(ctx context.Context, helper string) (credentials.Store, error) { - if helper == "" || helper == config.GPTScriptHelperPrefix+"file" { + if helper == "" || helper == config.GPTScriptHelperPrefix+config.FileCredHelper { return credentials.NewFileStore(s.cfg), nil } diff --git a/pkg/credentials/util.go b/pkg/credentials/util.go index 39200369..72f9eab9 100644 --- a/pkg/credentials/util.go +++ b/pkg/credentials/util.go @@ -1,18 +1,43 @@ package credentials import ( + "fmt" "path/filepath" + + "github.com/gptscript-ai/gptscript/pkg/config" + runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" ) type CredentialHelperDirs struct { RevisionFile, LastCheckedFile, BinDir string } 
-func GetCredentialHelperDirs(cacheDir string) CredentialHelperDirs { +func RepoNameForCredentialStore(store string) string { + switch store { + case config.SqliteCredHelper: + return "gptscript-credential-sqlite" + default: + return "gptscript-credential-helpers" + } +} + +func GitURLForRepoName(repoName string) (string, error) { + switch repoName { + case "gptscript-credential-sqlite": + return runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_SQLITE_ROOT", "https://github.com/gptscript-ai/gptscript-credential-sqlite.git"), nil + case "gptscript-credential-helpers": + return runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_HELPERS_ROOT", "https://github.com/gptscript-ai/gptscript-credential-helpers.git"), nil + default: + return "", fmt.Errorf("unknown repo name: %s", repoName) + } +} + +func GetCredentialHelperDirs(cacheDir, store string) CredentialHelperDirs { + repoName := RepoNameForCredentialStore(store) return CredentialHelperDirs{ - RevisionFile: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "revision"), - LastCheckedFile: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "last-checked"), - BinDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "bin"), + RevisionFile: filepath.Join(cacheDir, "repos", repoName, "revision"), + LastCheckedFile: filepath.Join(cacheDir, "repos", repoName, "last-checked"), + BinDir: filepath.Join(cacheDir, "repos", repoName, "bin"), } } diff --git a/pkg/repos/get.go b/pkg/repos/get.go index 8346b8cf..a36c2fe0 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -16,7 +16,6 @@ import ( "github.com/BurntSushi/locker" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/credentials" - runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/repos/git" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes/golang" @@ -55,10 +54,10 @@ func (n noopRuntime) Setup(_ context.Context, _ 
types.Tool, _, _ string, _ []str } type Manager struct { + cacheDir string storageDir string gitDir string runtimeDir string - credHelperDirs credentials.CredentialHelperDirs runtimes []Runtime credHelperConfig *credHelperConfig } @@ -72,11 +71,11 @@ type credHelperConfig struct { func New(cacheDir string, runtimes ...Runtime) *Manager { root := filepath.Join(cacheDir, "repos") return &Manager{ - storageDir: root, - gitDir: filepath.Join(root, "git"), - runtimeDir: filepath.Join(root, "runtimes"), - credHelperDirs: credentials.GetCredentialHelperDirs(cacheDir), - runtimes: runtimes, + cacheDir: cacheDir, + storageDir: root, + gitDir: filepath.Join(root, "git"), + runtimeDir: filepath.Join(root, "runtimes"), + runtimes: runtimes, } } @@ -110,50 +109,59 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co distInfo, suffix string ) // The file helper is built-in and does not need to be downloaded. - if helperName == "file" { + if helperName == config.FileCredHelper { return nil } switch helperName { - case "wincred": + case config.WincredCredHelper: suffix = ".exe" default: distInfo = fmt.Sprintf("-%s-%s", runtime.GOOS, runtime.GOARCH) } - locker.Lock("gptscript-credential-helpers") - defer locker.Unlock("gptscript-credential-helpers") + repoName := credentials.RepoNameForCredentialStore(helperName) + + locker.Lock(repoName) + defer locker.Unlock(repoName) + + credHelperDirs := credentials.GetCredentialHelperDirs(m.cacheDir, helperName) // Load the last-checked file to make sure we haven't checked the repo in the last 24 hours. now := time.Now() - lastChecked, err := os.ReadFile(m.credHelperDirs.LastCheckedFile) + lastChecked, err := os.ReadFile(credHelperDirs.LastCheckedFile) if err == nil { if t, err := time.Parse(time.RFC3339, strings.TrimSpace(string(lastChecked))); err == nil && now.Sub(t) < 24*time.Hour { // Make sure the binary still exists, and if it does, return. 
- if _, err := os.Stat(filepath.Join(m.credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix)); err == nil { + if _, err := os.Stat(filepath.Join(credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix)); err == nil { log.Debugf("Credential helper %s up-to-date as of %v, checking for updates after %v", helperName, t, t.Add(24*time.Hour)) return nil } } } - if err := os.MkdirAll(filepath.Dir(m.credHelperDirs.LastCheckedFile), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(credHelperDirs.LastCheckedFile), 0755); err != nil { return err } // Update the last-checked file. - if err := os.WriteFile(m.credHelperDirs.LastCheckedFile, []byte(now.Format(time.RFC3339)), 0644); err != nil { + if err := os.WriteFile(credHelperDirs.LastCheckedFile, []byte(now.Format(time.RFC3339)), 0644); err != nil { + return err + } + + gitURL, err := credentials.GitURLForRepoName(repoName) + if err != nil { return err } tool := types.Tool{ ToolDef: types.ToolDef{ Parameters: types.Parameters{ - Name: "gptscript-credential-helpers", + Name: repoName, }, }, Source: types.ToolSource{ Repo: &types.Repo{ - Root: runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_HELPERS_ROOT", "https://github.com/gptscript-ai/gptscript-credential-helpers.git"), + Root: gitURL, }, }, } @@ -164,12 +172,12 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co var needsDownloaded bool // Check the last revision shasum and see if it is different from the current one. - lastRevision, err := os.ReadFile(m.credHelperDirs.RevisionFile) + lastRevision, err := os.ReadFile(credHelperDirs.RevisionFile) if (err == nil && strings.TrimSpace(string(lastRevision)) != tool.Source.Repo.Root+tag) || errors.Is(err, fs.ErrNotExist) { // Need to pull the latest version. needsDownloaded = true // Update the revision file to the new revision. 
- if err = os.WriteFile(m.credHelperDirs.RevisionFile, []byte(tool.Source.Repo.Root+tag), 0644); err != nil { + if err = os.WriteFile(credHelperDirs.RevisionFile, []byte(tool.Source.Repo.Root+tag), 0644); err != nil { return err } } else if err != nil { @@ -179,7 +187,7 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co if !needsDownloaded { // Check for the existence of the credential helper binary. // If it's there, we have no need to download it and can just return. - if _, err = os.Stat(filepath.Join(m.credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix)); err == nil { + if _, err = os.Stat(filepath.Join(credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix)); err == nil { return nil } } @@ -187,7 +195,7 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co // Find the Go runtime and use it to build the credential helper. for _, rt := range m.runtimes { if strings.HasPrefix(rt.ID(), "go") { - return rt.(*golang.Runtime).DownloadCredentialHelper(ctx, tool, helperName, distInfo, suffix, m.credHelperDirs.BinDir) + return rt.(*golang.Runtime).DownloadCredentialHelper(ctx, tool, helperName, distInfo, suffix, credHelperDirs.BinDir) } } diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index 47e8461f..f86fa88d 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -18,6 +18,7 @@ import ( "runtime" "strings" + "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/debugcmd" runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/hash" @@ -286,7 +287,7 @@ func (r *Runtime) Setup(ctx context.Context, _ types.Tool, dataRoot, toolSource } func (r *Runtime) DownloadCredentialHelper(ctx context.Context, tool types.Tool, helperName, distInfo, suffix string, binDir string) error { - if helperName == "file" { + if helperName == config.FileCredHelper 
{ return nil } From c61a7cae73c1b57c131be866f4c995776ba82d3c Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 27 Sep 2024 13:49:22 -0400 Subject: [PATCH 149/270] fix: send proper SSE for stderr message in SDK server (#862) Signed-off-by: Donnie Adams --- pkg/sdkserver/run.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/sdkserver/run.go b/pkg/sdkserver/run.go index 0d055614..b6b5a049 100644 --- a/pkg/sdkserver/run.go +++ b/pkg/sdkserver/run.go @@ -75,7 +75,9 @@ func processEventStreamOutput(ctx context.Context, logger mvl.Logger, w http.Res "stdout": out, }) case err := <-errChan: - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run file: %w", err)) + writeServerSentEvent(logger, w, map[string]any{ + "stderr": fmt.Sprintf("failed to run: %v", err), + }) } // Now that we have received all events, send the DONE event. From 9d9f8591a9416ecb0a2276c251e759cb54124119 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 27 Sep 2024 17:00:51 -0400 Subject: [PATCH 150/270] chore: bump go-gptscript and Go versions (#863) Signed-off-by: Donnie Adams --- go.mod | 4 ++-- go.sum | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 42a1e76f..6e22348f 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/gptscript-ai/gptscript -go 1.23.0 +go 1.23.1 require ( github.com/AlecAivazis/survey/v2 v2.3.7 @@ -17,7 +17,7 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb - github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240920232051-64eaa0ac8caf + github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927194651-15782507bdff github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 diff --git a/go.sum b/go.sum index 
c7cb9d5c..6e3f4a21 100644 --- a/go.sum +++ b/go.sum @@ -204,8 +204,8 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240920232051-64eaa0ac8caf h1:3uBPUYBuCIWgUxQPD3d3bHHr/0zgCsdzk628FJZCmno= -github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240920232051-64eaa0ac8caf/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= +github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927194651-15782507bdff h1:GnbVti8eAH8iecIo5cY5GoXhz/ZChdyA1c2SmukaoeA= +github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927194651-15782507bdff/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 h1:vkgNZVWQgbE33VD3z9WKDwuu7B/eJVVMMPM62ixfCR8= github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6/go.mod h1:frrl/B+ZH3VSs3Tqk2qxEIIWTONExX3tuUa4JsVnqx4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= From cc5e5ed5463d9e9d5fd09bea76755019cd4f75b0 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 27 Sep 2024 20:07:51 -0400 Subject: [PATCH 151/270] chore: bump go-gptscript to 326b7baf6fcb to pick up env var fixes (#864) Signed-off-by: Donnie Adams --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6e22348f..4a95a521 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3 github.com/gptscript-ai/cmd 
v0.0.0-20240802230653-326b7baf6fcb - github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927194651-15782507bdff + github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 diff --git a/go.sum b/go.sum index 6e3f4a21..80cbcea1 100644 --- a/go.sum +++ b/go.sum @@ -204,8 +204,8 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927194651-15782507bdff h1:GnbVti8eAH8iecIo5cY5GoXhz/ZChdyA1c2SmukaoeA= -github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927194651-15782507bdff/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= +github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e h1:WpNae0NBx+Ri8RB3SxF8DhadDKU7h+jfWPQterDpbJA= +github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 h1:vkgNZVWQgbE33VD3z9WKDwuu7B/eJVVMMPM62ixfCR8= github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6/go.mod h1:frrl/B+ZH3VSs3Tqk2qxEIIWTONExX3tuUa4JsVnqx4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= From 822efeb61d15dfc3854c527dc873643172cf338c Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 9 Oct 2024 11:25:20 -0700 Subject: [PATCH 152/270] chore: add multiline parsing and refactor share cred behavior --- integration/cred_test.go | 16 -- 
integration/scripts/cred_scopes.gpt | 160 ------------------ pkg/config/cliconfig.go | 1 - pkg/credentials/store.go | 2 +- pkg/parser/parser.go | 113 ++++++++++--- pkg/parser/parser_test.go | 83 +++++++++ pkg/tests/runner2_test.go | 32 ++++ .../testdata/TestShareCreds/step1.golden | 6 + pkg/types/completion.go | 3 +- pkg/types/tool.go | 91 ++++++++-- pkg/types/tool_test.go | 4 +- 11 files changed, 294 insertions(+), 217 deletions(-) delete mode 100644 integration/scripts/cred_scopes.gpt create mode 100644 pkg/tests/testdata/TestShareCreds/step1.golden diff --git a/integration/cred_test.go b/integration/cred_test.go index 1ea73d35..a32e50e9 100644 --- a/integration/cred_test.go +++ b/integration/cred_test.go @@ -14,22 +14,6 @@ func TestGPTScriptCredential(t *testing.T) { require.Contains(t, out, "CREDENTIAL") } -// TestCredentialScopes makes sure that environment variables set by credential tools and shared credential tools -// are only available to the correct tools. See scripts/credscopes.gpt for more details. -func TestCredentialScopes(t *testing.T) { - out, err := RunScript("scripts/cred_scopes.gpt", "--sub-tool", "oneOne") - require.NoError(t, err) - require.Contains(t, out, "good") - - out, err = RunScript("scripts/cred_scopes.gpt", "--sub-tool", "twoOne") - require.NoError(t, err) - require.Contains(t, out, "good") - - out, err = RunScript("scripts/cred_scopes.gpt", "--sub-tool", "twoTwo") - require.NoError(t, err) - require.Contains(t, out, "good") -} - // TestCredentialExpirationEnv tests a GPTScript with two credentials that expire at different times. // One expires after two hours, and the other expires after one hour. // This test makes sure that the GPTSCRIPT_CREDENTIAL_EXPIRATION environment variable is set to the nearer expiration time (1h). 
diff --git a/integration/scripts/cred_scopes.gpt b/integration/scripts/cred_scopes.gpt deleted file mode 100644 index dc8e24e7..00000000 --- a/integration/scripts/cred_scopes.gpt +++ /dev/null @@ -1,160 +0,0 @@ -# This script sets up a chain of tools in a tree structure. -# The root is oneOne, with children twoOne and twoTwo, with children threeOne, threeTwo, and threeThree, with only -# threeTwo shared between them. -# Each tool should only have access to any credentials it defines and any credentials exported/shared by its -# immediate children (but not grandchildren). -# This script checks to make sure that this is working properly. -name: oneOne -tools: twoOne, twoTwo -cred: getcred with oneOne as var and 11 as val - -#!python3 - -import os - -oneOne = os.getenv('oneOne') -twoOne = os.getenv('twoOne') -twoTwo = os.getenv('twoTwo') -threeOne = os.getenv('threeOne') -threeTwo = os.getenv('threeTwo') -threeThree = os.getenv('threeThree') - -if oneOne != '11': - print('error: oneOne is not 11') - exit(1) - -if twoOne != '21': - print('error: twoOne is not 21') - exit(1) - -if twoTwo != '22': - print('error: twoTwo is not 22') - exit(1) - -if threeOne is not None: - print('error: threeOne is not None') - exit(1) - -if threeTwo is not None: - print('error: threeTwo is not None') - exit(1) - -if threeThree is not None: - print('error: threeThree is not None') - exit(1) - -print('good') - ---- -name: twoOne -tools: threeOne, threeTwo -sharecred: getcred with twoOne as var and 21 as val - -#!python3 - -import os - -oneOne = os.getenv('oneOne') -twoOne = os.getenv('twoOne') -twoTwo = os.getenv('twoTwo') -threeOne = os.getenv('threeOne') -threeTwo = os.getenv('threeTwo') -threeThree = os.getenv('threeThree') - -if oneOne is not None: - print('error: oneOne is not None') - exit(1) - -if twoOne is not None: - print('error: twoOne is not None') - exit(1) - -if twoTwo is not None: - print('error: twoTwo is not None') - exit(1) - -if threeOne != '31': - print('error: threeOne 
is not 31') - exit(1) - -if threeTwo != '32': - print('error: threeTwo is not 32') - exit(1) - -if threeThree is not None: - print('error: threeThree is not None') - exit(1) - -print('good') - ---- -name: twoTwo -tools: threeTwo, threeThree -sharecred: getcred with twoTwo as var and 22 as val - -#!python3 - -import os - -oneOne = os.getenv('oneOne') -twoOne = os.getenv('twoOne') -twoTwo = os.getenv('twoTwo') -threeOne = os.getenv('threeOne') -threeTwo = os.getenv('threeTwo') -threeThree = os.getenv('threeThree') - -if oneOne is not None: - print('error: oneOne is not None') - exit(1) - -if twoOne is not None: - print('error: twoOne is not None') - exit(1) - -if twoTwo is not None: - print('error: twoTwo is not None') - exit(1) - -if threeOne is not None: - print('error: threeOne is not None') - exit(1) - -if threeTwo != '32': - print('error: threeTwo is not 32') - exit(1) - -if threeThree != '33': - print('error: threeThree is not 33') - exit(1) - -print('good') - ---- -name: threeOne -sharecred: getcred with threeOne as var and 31 as val - ---- -name: threeTwo -sharecred: getcred with threeTwo as var and 32 as val - ---- -name: threeThree -sharecred: getcred with threeThree as var and 33 as val - ---- -name: getcred - -#!python3 - -import os -import json - -var = os.getenv('VAR') -val = os.getenv('VAL') - -output = { - "env": { - var: val - } -} -print(json.dumps(output)) diff --git a/pkg/config/cliconfig.go b/pkg/config/cliconfig.go index 7a82b58a..d0ef00c8 100644 --- a/pkg/config/cliconfig.go +++ b/pkg/config/cliconfig.go @@ -73,7 +73,6 @@ func (a *AuthConfig) UnmarshalJSON(data []byte) error { type CLIConfig struct { Auths map[string]AuthConfig `json:"auths,omitempty"` CredentialsStore string `json:"credsStore,omitempty"` - GatewayURL string `json:"gatewayURL,omitempty"` Integrations map[string]string `json:"integrations,omitempty"` auths map[string]types.AuthConfig diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index 9827b147..1843cd8d 100644 
--- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -225,7 +225,7 @@ func validateCredentialCtx(ctxs []string) error { } // check alphanumeric - r := regexp.MustCompile("^[a-zA-Z0-9]+$") + r := regexp.MustCompile("^[-a-zA-Z0-9]+$") for _, c := range ctxs { if !r.MatchString(c) { return fmt.Errorf("credential contexts must be alphanumeric") diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index 956822dd..e6113d86 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -1,7 +1,6 @@ package parser import ( - "bufio" "fmt" "io" "maps" @@ -17,8 +16,10 @@ import ( var ( sepRegex = regexp.MustCompile(`^\s*---+\s*$`) + endHeaderRegex = regexp.MustCompile(`^\s*===+\s*$`) strictSepRegex = regexp.MustCompile(`^---\n$`) skipRegex = regexp.MustCompile(`^![-.:*\w]+\s*$`) + nameRegex = regexp.MustCompile(`^[a-z]+$`) ) func normalize(key string) string { @@ -74,7 +75,7 @@ func addArg(line string, tool *types.Tool) error { return nil } -func isParam(line string, tool *types.Tool) (_ bool, err error) { +func isParam(line string, tool *types.Tool, scan *simplescanner) (_ bool, err error) { key, value, ok := strings.Cut(line, ":") if !ok { return false, nil @@ -90,7 +91,7 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { case "globalmodel", "globalmodelname": tool.Parameters.GlobalModelName = value case "description": - tool.Parameters.Description = value + tool.Parameters.Description = scan.AddMultiline(value) case "internalprompt": v, err := toBool(value) if err != nil { @@ -104,27 +105,33 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { } tool.Parameters.Chat = v case "export", "exporttool", "exports", "exporttools", "sharetool", "sharetools", "sharedtool", "sharedtools": - tool.Parameters.Export = append(tool.Parameters.Export, csv(value)...) + tool.Parameters.Export = append(tool.Parameters.Export, csv(scan.AddMultiline(value))...) 
case "tool", "tools": - tool.Parameters.Tools = append(tool.Parameters.Tools, csv(value)...) + tool.Parameters.Tools = append(tool.Parameters.Tools, csv(scan.AddMultiline(value))...) case "inputfilter", "inputfilters": - tool.Parameters.InputFilters = append(tool.Parameters.InputFilters, csv(value)...) + tool.Parameters.InputFilters = append(tool.Parameters.InputFilters, csv(scan.AddMultiline(value))...) case "shareinputfilter", "shareinputfilters", "sharedinputfilter", "sharedinputfilters": - tool.Parameters.ExportInputFilters = append(tool.Parameters.ExportInputFilters, csv(value)...) + tool.Parameters.ExportInputFilters = append(tool.Parameters.ExportInputFilters, csv(scan.AddMultiline(value))...) case "outputfilter", "outputfilters": - tool.Parameters.OutputFilters = append(tool.Parameters.OutputFilters, csv(value)...) + tool.Parameters.OutputFilters = append(tool.Parameters.OutputFilters, csv(scan.AddMultiline(value))...) case "shareoutputfilter", "shareoutputfilters", "sharedoutputfilter", "sharedoutputfilters": - tool.Parameters.ExportOutputFilters = append(tool.Parameters.ExportOutputFilters, csv(value)...) + tool.Parameters.ExportOutputFilters = append(tool.Parameters.ExportOutputFilters, csv(scan.AddMultiline(value))...) case "agent", "agents": - tool.Parameters.Agents = append(tool.Parameters.Agents, csv(value)...) + tool.Parameters.Agents = append(tool.Parameters.Agents, csv(scan.AddMultiline(value))...) case "globaltool", "globaltools": - tool.Parameters.GlobalTools = append(tool.Parameters.GlobalTools, csv(value)...) + tool.Parameters.GlobalTools = append(tool.Parameters.GlobalTools, csv(scan.AddMultiline(value))...) case "exportcontext", "exportcontexts", "sharecontext", "sharecontexts", "sharedcontext", "sharedcontexts": - tool.Parameters.ExportContext = append(tool.Parameters.ExportContext, csv(value)...) + tool.Parameters.ExportContext = append(tool.Parameters.ExportContext, csv(scan.AddMultiline(value))...) 
case "context": - tool.Parameters.Context = append(tool.Parameters.Context, csv(value)...) + tool.Parameters.Context = append(tool.Parameters.Context, csv(scan.AddMultiline(value))...) + case "metadata": + mkey, mvalue, _ := strings.Cut(scan.AddMultiline(value), ":") + if tool.MetaData == nil { + tool.MetaData = map[string]string{} + } + tool.MetaData[strings.TrimSpace(mkey)] = strings.TrimSpace(mvalue) case "args", "arg", "param", "params", "parameters", "parameter": - if err := addArg(value, tool); err != nil { + if err := addArg(scan.AddMultiline(value), tool); err != nil { return false, err } case "maxtoken", "maxtokens": @@ -149,13 +156,13 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { return false, err } case "credentials", "creds", "credential", "cred": - tool.Parameters.Credentials = append(tool.Parameters.Credentials, value) + tool.Parameters.Credentials = append(tool.Parameters.Credentials, csv(scan.AddMultiline(value))...) case "sharecredentials", "sharecreds", "sharecredential", "sharecred", "sharedcredentials", "sharedcreds", "sharedcredential", "sharedcred": - tool.Parameters.ExportCredentials = append(tool.Parameters.ExportCredentials, value) + tool.Parameters.ExportCredentials = append(tool.Parameters.ExportCredentials, scan.AddMultiline(value)) case "type": tool.Type = types.ToolType(strings.ToLower(value)) default: - return false, nil + return nameRegex.MatchString(key), nil } return true, nil @@ -206,6 +213,7 @@ func (c *context) finish(tools *[]Node) { len(c.tool.ExportInputFilters) > 0 || len(c.tool.ExportOutputFilters) > 0 || len(c.tool.Agents) > 0 || + len(c.tool.ExportCredentials) > 0 || c.tool.Chat { *tools = append(*tools, Node{ ToolNode: &ToolNode{ @@ -391,7 +399,10 @@ func assignMetadata(nodes []Node) (result []Node) { for _, node := range nodes { if node.ToolNode != nil { - node.ToolNode.Tool.MetaData = metadata[node.ToolNode.Tool.Name] + if node.ToolNode.Tool.MetaData == nil { + node.ToolNode.Tool.MetaData = 
map[string]string{} + } + maps.Copy(node.ToolNode.Tool.MetaData, metadata[node.ToolNode.Tool.Name]) for wildcard := range metadata { if strings.Contains(wildcard, "*") { if m, err := path.Match(wildcard, node.ToolNode.Tool.Name); m && err == nil { @@ -433,15 +444,71 @@ func isGPTScriptHashBang(line string) bool { return false } -func parse(input io.Reader) ([]Node, error) { - scan := bufio.NewScanner(input) +type simplescanner struct { + lines []string +} + +func newSimpleScanner(data []byte) *simplescanner { + if len(data) == 0 { + return &simplescanner{} + } + lines := strings.Split(string(data), "\n") + return &simplescanner{ + lines: append([]string{""}, lines...), + } +} + +func dropCR(s string) string { + if len(s) > 0 && s[len(s)-1] == '\r' { + return s[:len(s)-1] + } + return s +} +func (s *simplescanner) AddMultiline(current string) string { + result := current + for { + if len(s.lines) < 2 || len(s.lines[1]) == 0 { + return result + } + if strings.HasPrefix(s.lines[1], " ") || strings.HasPrefix(s.lines[1], "\t") { + result += " " + dropCR(s.lines[1]) + s.lines = s.lines[1:] + } else { + return result + } + } +} + +func (s *simplescanner) Text() string { + if len(s.lines) == 0 { + return "" + } + return dropCR(s.lines[0]) +} + +func (s *simplescanner) Scan() bool { + if len(s.lines) == 0 { + return false + } + s.lines = s.lines[1:] + return true +} + +func parse(input io.Reader) ([]Node, error) { var ( tools []Node context context lineNo int ) + data, err := io.ReadAll(input) + if err != nil { + return nil, err + } + + scan := newSimpleScanner(data) + for scan.Scan() { lineNo++ if context.tool.Source.LineNo == 0 { @@ -488,11 +555,15 @@ func parse(input io.Reader) ([]Node, error) { } // Look for params - if isParam, err := isParam(line, &context.tool); err != nil { + if isParam, err := isParam(line, &context.tool, scan); err != nil { return nil, NewErrLine("", lineNo, err) } else if isParam { context.seenParam = true continue + } else if 
endHeaderRegex.MatchString(line) { + // force the end of the header and don't include the current line in the header + context.inBody = true + continue } } diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index f98b74e2..6eab45c9 100644 --- a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -1,6 +1,7 @@ package parser import ( + "reflect" "strings" "testing" @@ -244,6 +245,7 @@ share output filters: shared func TestParseMetaData(t *testing.T) { input := ` name: first +metadata: foo: bar body --- @@ -269,8 +271,89 @@ foo bar assert.Len(t, tools, 1) autogold.Expect(map[string]string{ + "foo": "bar", "package.json": "foo=base\nf", "requirements.txt": "asdf", "other": "foo bar", }).Equal(t, tools[0].MetaData) + + autogold.Expect(`Name: first +Meta Data: foo: bar +Meta Data: other: foo bar +Meta Data: requirements.txt: asdf + +body +--- +!metadata:first:package.json +foo=base +f +`).Equal(t, tools[0].String()) +} + +func TestFormatWithBadInstruction(t *testing.T) { + input := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "foo", + }, + Instructions: "foo: bar", + }, + } + autogold.Expect("Name: foo\n===\nfoo: bar\n").Equal(t, input.String()) + + tools, err := ParseTools(strings.NewReader(input.String())) + require.NoError(t, err) + if reflect.DeepEqual(input, tools[0]) { + t.Errorf("expected %v, got %v", input, tools[0]) + } +} + +func TestSingleTool(t *testing.T) { + input := ` +name: foo + +#!sys.echo +hi +` + + tools, err := ParseTools(strings.NewReader(input)) + require.NoError(t, err) + autogold.Expect(types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{Name: "foo"}, + Instructions: "#!sys.echo\nhi", + }, + Source: types.ToolSource{LineNo: 1}, + }).Equal(t, tools[0]) +} + +func TestMultiline(t *testing.T) { + input := ` +name: first +credential: foo + , bar, + baz +model: the model + +body +` + tools, err := ParseTools(strings.NewReader(input)) + require.NoError(t, err) + + assert.Len(t, 
tools, 1) + autogold.Expect(types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "first", + ModelName: "the model", + Credentials: []string{ + "foo", + "bar", + "baz", + }, + }, + Instructions: "body", + }, + Source: types.ToolSource{LineNo: 1}, + }).Equal(t, tools[0]) } diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index 93899c84..8dbd2ba7 100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -79,3 +79,35 @@ echo ${FOO}:${INPUT} resp, err = r.Chat(context.Background(), nil, prg, nil, `"foo":"123"}`) r.AssertStep(t, resp, err) } + +func TestShareCreds(t *testing.T) { + r := tester.NewRunner(t) + prg, err := loader.ProgramFromSource(context.Background(), ` +creds: foo + +#!/bin/bash +echo $CRED +echo $CRED2 + +--- +name: foo +share credentials: bar + +--- +name: bar +share credentials: baz + +#!/bin/bash +echo '{"env": {"CRED": "that worked"}}' + +--- +name: baz + +#!/bin/bash +echo '{"env": {"CRED2": "that also worked"}}' +`, "") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, "") + r.AssertStep(t, resp, err) +} diff --git a/pkg/tests/testdata/TestShareCreds/step1.golden b/pkg/tests/testdata/TestShareCreds/step1.golden new file mode 100644 index 00000000..9d584f92 --- /dev/null +++ b/pkg/tests/testdata/TestShareCreds/step1.golden @@ -0,0 +1,6 @@ +`{ + "done": true, + "content": "that worked\nthat also worked\n", + "toolID": "", + "state": null +}` diff --git a/pkg/types/completion.go b/pkg/types/completion.go index 5b3899c3..6a05effa 100644 --- a/pkg/types/completion.go +++ b/pkg/types/completion.go @@ -4,7 +4,6 @@ import ( "fmt" "strings" - "github.com/fatih/color" "github.com/getkin/kin-openapi/openapi3" ) @@ -112,7 +111,7 @@ func (c CompletionMessage) String() string { } buf.WriteString(content.Text) if content.ToolCall != nil { - buf.WriteString(fmt.Sprintf(" %s -> %s", color.GreenString(content.ToolCall.Function.Name), content.ToolCall.Function.Arguments)) 
+ buf.WriteString(fmt.Sprintf(" %s -> %s", content.ToolCall.Function.Name, content.ToolCall.Function.Arguments)) } } return buf.String() diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 0bd7bc02..cefbd311 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -476,9 +476,22 @@ func (t ToolDef) String() string { _, _ = fmt.Fprintf(buf, "Chat: true\n") } + keys := maps.Keys(t.MetaData) + sort.Strings(keys) + for _, key := range keys { + value := t.MetaData[key] + if !strings.Contains(value, "\n") { + _, _ = fmt.Fprintf(buf, "Meta Data: %s: %s\n", key, value) + } + } + // Instructions should be printed last if t.Instructions != "" && t.BuiltinFunc == nil { - _, _ = fmt.Fprintln(buf) + if strings.Contains(strings.Split(strings.TrimSpace(t.Instructions), "\n")[0], ":") { + _, _ = fmt.Fprintln(buf, "===") + } else { + _, _ = fmt.Fprintln(buf) + } _, _ = fmt.Fprintln(buf, t.Instructions) } @@ -486,14 +499,17 @@ func (t ToolDef) String() string { keys := maps.Keys(t.MetaData) sort.Strings(keys) for _, key := range keys { - buf.WriteString("---\n") - buf.WriteString("!metadata:") - buf.WriteString(t.Name) - buf.WriteString(":") - buf.WriteString(key) - buf.WriteString("\n") - buf.WriteString(t.MetaData[key]) - buf.WriteString("\n") + value := t.MetaData[key] + if strings.Contains(value, "\n") { + buf.WriteString("---\n") + buf.WriteString("!metadata:") + buf.WriteString(t.Name) + buf.WriteString(":") + buf.WriteString(key) + buf.WriteString("\n") + buf.WriteString(t.MetaData[key]) + buf.WriteString("\n") + } } } @@ -512,6 +528,56 @@ func (t Tool) GetNextAgentGroup(prg *Program, agentGroup []ToolReference, toolID return agentGroup, nil } +func (t Tool) getCredentials(prg *Program) (result []ToolReference, _ error) { + toolRefs, err := t.GetToolRefsFromNames(t.Credentials) + if err != nil { + return nil, err + } + + for _, toolRef := range toolRefs { + tool, ok := prg.ToolSet[toolRef.ToolID] + if !ok { + continue + } + + if !tool.IsNoop() { + result = 
append(result, toolRef) + } + + shared, err := tool.getSharedCredentials(prg) + if err != nil { + return nil, err + } + result = append(result, shared...) + } + + return result, nil +} + +func (t Tool) getSharedCredentials(prg *Program) (result []ToolReference, _ error) { + toolRefs, err := t.GetToolRefsFromNames(t.ExportCredentials) + if err != nil { + return nil, err + } + for _, toolRef := range toolRefs { + tool, ok := prg.ToolSet[toolRef.ToolID] + if !ok { + continue + } + + if !tool.IsNoop() { + result = append(result, toolRef) + } + + nested, err := tool.getSharedCredentials(prg) + if err != nil { + return nil, err + } + result = append(result, nested...) + } + return result, nil +} + func (t Tool) getAgents(prg *Program) (result []ToolReference, _ error) { toolRefs, err := t.GetToolRefsFromNames(t.Agents) if err != nil { @@ -542,6 +608,9 @@ func (t Tool) GetToolsByType(prg *Program, toolType ToolType) ([]ToolReference, if toolType == ToolTypeAgent { // Agents are special, they can only be sourced from direct references and not the generic 'tool:' or shared by references return t.getAgents(prg) + } else if toolType == ToolTypeCredential { + // Credentials are special too, you can only get shared credentials from directly referenced credentials + return t.getCredentials(prg) } toolSet := &toolRefSet{} @@ -560,8 +629,6 @@ func (t Tool) GetToolsByType(prg *Program, toolType ToolType) ([]ToolReference, directRefs = t.InputFilters case ToolTypeTool: toolsListFilterType = append(toolsListFilterType, ToolTypeDefault, ToolTypeAgent) - case ToolTypeCredential: - directRefs = t.Credentials default: return nil, fmt.Errorf("unknown tool type %v", toolType) } @@ -602,8 +669,6 @@ func (t Tool) GetToolsByType(prg *Program, toolType ToolType) ([]ToolReference, case ToolTypeInput: exportRefs = tool.ExportInputFilters case ToolTypeTool: - case ToolTypeCredential: - exportRefs = tool.ExportCredentials default: return nil, fmt.Errorf("unknown tool type %v", toolType) } diff 
--git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index e95c2248..89c36ac8 100644 --- a/pkg/types/tool_test.go +++ b/pkg/types/tool_test.go @@ -73,6 +73,7 @@ Credential: Credential2 Share Credential: ExportCredential1 Share Credential: ExportCredential2 Chat: true +Meta Data: requirements.txt: requests=5 This is a sample instruction --- @@ -81,9 +82,6 @@ This is a sample instruction // blah blah some ugly JSON } ---- -!metadata:Tool Sample:requirements.txt -requests=5 `).Equal(t, tool.String()) } From 8a0f8c65f81a5fee427d73f3898a6807d116a090 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 9 Oct 2024 16:57:07 -0700 Subject: [PATCH 153/270] bug: allow space in metadata name --- pkg/parser/parser.go | 2 +- pkg/parser/parser_test.go | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index e6113d86..c0beb8f2 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -18,7 +18,7 @@ var ( sepRegex = regexp.MustCompile(`^\s*---+\s*$`) endHeaderRegex = regexp.MustCompile(`^\s*===+\s*$`) strictSepRegex = regexp.MustCompile(`^---\n$`) - skipRegex = regexp.MustCompile(`^![-.:*\w]+\s*$`) + skipRegex = regexp.MustCompile(`^![ -.:*\w]+\s*$`) nameRegex = regexp.MustCompile(`^[a-z]+$`) ) diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index 6eab45c9..7e1282ca 100644 --- a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -242,6 +242,23 @@ share output filters: shared }}).Equal(t, out) } +func TestParseMetaDataSpace(t *testing.T) { + input := ` +name: a space +body +--- +!metadata:a space:other +foo bar +` + tools, err := ParseTools(strings.NewReader(input)) + require.NoError(t, err) + + assert.Len(t, tools, 1) + autogold.Expect(map[string]string{ + "other": "foo bar", + }).Equal(t, tools[0].MetaData) +} + func TestParseMetaData(t *testing.T) { input := ` name: first From df259f9e7fc43076d23b2ecb56d1f33c44c0a8f7 Mon Sep 17 00:00:00 2001 From: Darren 
Shepherd Date: Wed, 9 Oct 2024 21:46:50 -0700 Subject: [PATCH 154/270] chore: return toolMapping in chat progress message --- pkg/openai/client.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 61a7ec77..af518f93 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -356,6 +356,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques } } + toolMapping := map[string]string{} for _, tool := range messageRequest.Tools { var params any = tool.Function.Parameters if tool.Function.Parameters == nil || len(tool.Function.Parameters.Properties) == 0 { @@ -365,6 +366,10 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques } } + if tool.Function.ToolID != "" { + toolMapping[tool.Function.Name] = tool.Function.ToolID + } + request.Tools = append(request.Tools, openai.Tool{ Type: openai.ToolTypeFunction, Function: &openai.FunctionDefinition{ @@ -378,7 +383,10 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques id := counter.Next() status <- types.CompletionStatus{ CompletionID: id, - Request: request, + Request: map[string]any{ + "chatCompletion": request, + "toolMapping": toolMapping, + }, } var cacheResponse bool From 6ec5178d9d26d21114f992e1da1b794ee27e440e Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 10 Oct 2024 21:49:07 -0400 Subject: [PATCH 155/270] feat: add dataset operations to sdkserver (#869) Signed-off-by: Grant Linville --- pkg/sdkserver/datasets.go | 332 ++++++++++++++++++++++++++++++++++++++ pkg/sdkserver/routes.go | 6 + 2 files changed, 338 insertions(+) create mode 100644 pkg/sdkserver/datasets.go diff --git a/pkg/sdkserver/datasets.go b/pkg/sdkserver/datasets.go new file mode 100644 index 00000000..0085132c --- /dev/null +++ b/pkg/sdkserver/datasets.go @@ -0,0 +1,332 @@ +package sdkserver + +import ( + "encoding/json" + "fmt" + "net/http" + + gcontext 
"github.com/gptscript-ai/gptscript/pkg/context" + "github.com/gptscript-ai/gptscript/pkg/gptscript" + "github.com/gptscript-ai/gptscript/pkg/loader" +) + +type datasetRequest struct { + Input string `json:"input"` + Workspace string `json:"workspace"` + DatasetToolRepo string `json:"datasetToolRepo"` +} + +func (r datasetRequest) validate(requireInput bool) error { + if r.Workspace == "" { + return fmt.Errorf("workspace is required") + } else if requireInput && r.Input == "" { + return fmt.Errorf("input is required") + } + return nil +} + +func (r datasetRequest) opts(o gptscript.Options) gptscript.Options { + opts := gptscript.Options{ + Cache: o.Cache, + Monitor: o.Monitor, + Runner: o.Runner, + Workspace: r.Workspace, + } + return opts +} + +func (r datasetRequest) getToolRepo() string { + if r.DatasetToolRepo != "" { + return r.DatasetToolRepo + } + return "github.com/gptscript-ai/datasets" +} + +func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + + var req datasetRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) + return + } + + if err := req.validate(false); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + g, err := gptscript.New(r.Context(), req.opts(s.gptscriptOpts)) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), "List Datasets from "+req.getToolRepo(), "", loader.Options{ + Cache: g.Cache, + }) + + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run 
program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": result}) +} + +type createDatasetArgs struct { + Name string `json:"datasetName"` + Description string `json:"datasetDescription"` +} + +func (a createDatasetArgs) validate() error { + if a.Name == "" { + return fmt.Errorf("datasetName is required") + } + return nil +} + +func (s *server) createDataset(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + + var req datasetRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) + return + } + + if err := req.validate(true); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + g, err := gptscript.New(r.Context(), req.opts(s.gptscriptOpts)) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) + return + } + + var args createDatasetArgs + if err := json.Unmarshal([]byte(req.Input), &args); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to unmarshal input: %w", err)) + return + } + + if err := args.validate(); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + prg, err := loader.Program(r.Context(), "Create Dataset from "+req.getToolRepo(), "", loader.Options{ + Cache: g.Cache, + }) + + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": result}) +} + +type addDatasetElementArgs struct { + DatasetID string `json:"datasetID"` + ElementName string `json:"elementName"` + ElementDescription 
string `json:"elementDescription"` + ElementContent string `json:"elementContent"` +} + +func (a addDatasetElementArgs) validate() error { + if a.DatasetID == "" { + return fmt.Errorf("datasetID is required") + } + if a.ElementName == "" { + return fmt.Errorf("elementName is required") + } + if a.ElementContent == "" { + return fmt.Errorf("elementContent is required") + } + return nil +} + +func (s *server) addDatasetElement(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + + var req datasetRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) + return + } + + if err := req.validate(true); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + g, err := gptscript.New(r.Context(), req.opts(s.gptscriptOpts)) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) + return + } + + var args addDatasetElementArgs + if err := json.Unmarshal([]byte(req.Input), &args); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to unmarshal input: %w", err)) + return + } + + if err := args.validate(); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + prg, err := loader.Program(r.Context(), "Add Element from "+req.getToolRepo(), "", loader.Options{ + Cache: g.Cache, + }) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": result}) +} + +type listDatasetElementsArgs struct { + DatasetID string `json:"datasetID"` +} + +func (a 
listDatasetElementsArgs) validate() error { + if a.DatasetID == "" { + return fmt.Errorf("datasetID is required") + } + return nil +} + +func (s *server) listDatasetElements(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + + var req datasetRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) + return + } + + if err := req.validate(true); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + g, err := gptscript.New(r.Context(), req.opts(s.gptscriptOpts)) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) + return + } + + var args listDatasetElementsArgs + if err := json.Unmarshal([]byte(req.Input), &args); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to unmarshal input: %w", err)) + return + } + + if err := args.validate(); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + prg, err := loader.Program(r.Context(), "List Elements from "+req.getToolRepo(), "", loader.Options{ + Cache: g.Cache, + }) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": result}) +} + +type getDatasetElementArgs struct { + DatasetID string `json:"datasetID"` + Element string `json:"element"` +} + +func (a getDatasetElementArgs) validate() error { + if a.DatasetID == "" { + return fmt.Errorf("datasetID is required") + } + if a.Element == "" { + return fmt.Errorf("element is required") + } + return nil +} + +func (s *server) 
getDatasetElement(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + + var req datasetRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) + return + } + + if err := req.validate(true); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + g, err := gptscript.New(r.Context(), req.opts(s.gptscriptOpts)) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) + return + } + + var args getDatasetElementArgs + if err := json.Unmarshal([]byte(req.Input), &args); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to unmarshal input: %w", err)) + return + } + + if err := args.validate(); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + prg, err := loader.Program(r.Context(), "Get Element from "+req.getToolRepo(), "", loader.Options{ + Cache: g.Cache, + }) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": result}) +} diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index fc69a08c..8427a6a5 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -66,6 +66,12 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /credentials/create", s.createCredential) mux.HandleFunc("POST /credentials/reveal", s.revealCredential) mux.HandleFunc("POST /credentials/delete", s.deleteCredential) + + mux.HandleFunc("POST /datasets", s.listDatasets) + mux.HandleFunc("POST /datasets/create", 
s.createDataset) + mux.HandleFunc("POST /datasets/list-elements", s.listDatasetElements) + mux.HandleFunc("POST /datasets/get-element", s.getDatasetElement) + mux.HandleFunc("POST /datasets/add-element", s.addDatasetElement) } // health just provides an endpoint for checking whether the server is running and accessible. From d70f9193827e01becd201df383e380102c504b55 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 14 Oct 2024 10:10:36 -0700 Subject: [PATCH 156/270] chore: add ability to pass args to input/output filters --- pkg/runner/input.go | 7 +- pkg/runner/output.go | 40 +++++++- pkg/runner/runner.go | 16 ++-- pkg/tests/runner2_test.go | 91 +++++++++++++++++++ .../testdata/TestFilterArgs/step1.golden | 6 ++ 5 files changed, 149 insertions(+), 11 deletions(-) create mode 100644 pkg/tests/testdata/TestFilterArgs/step1.golden diff --git a/pkg/runner/input.go b/pkg/runner/input.go index 23228813..360e6274 100644 --- a/pkg/runner/input.go +++ b/pkg/runner/input.go @@ -18,12 +18,15 @@ func (r *Runner) handleInput(callCtx engine.Context, monitor Monitor, env []stri data := map[string]any{} _ = json.Unmarshal([]byte(input), &data) data["input"] = input - inputData, err := json.Marshal(data) + + inputArgs, err := argsForFilters(callCtx.Program, inputToolRef, &State{ + StartInput: &input, + }, data) if err != nil { return "", fmt.Errorf("failed to marshal input: %w", err) } - res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, inputToolRef.ToolID, string(inputData), "", engine.InputToolCategory) + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, inputToolRef.ToolID, inputArgs, "", engine.InputToolCategory) if err != nil { return "", err } diff --git a/pkg/runner/output.go b/pkg/runner/output.go index e5fe849d..8a6aefdb 100644 --- a/pkg/runner/output.go +++ b/pkg/runner/output.go @@ -4,12 +4,48 @@ import ( "encoding/json" "errors" "fmt" + "maps" + "strings" "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/types" 
) -func (r *Runner) handleOutput(callCtx engine.Context, monitor Monitor, env []string, state *State, retErr error) (*State, error) { +func argsForFilters(prg *types.Program, tool types.ToolReference, startState *State, filterDefinedInput map[string]any) (string, error) { + startInput := "" + if startState.ResumeInput != nil { + startInput = *startState.ResumeInput + } else if startState.StartInput != nil { + startInput = *startState.StartInput + } + + parsedArgs, err := getToolRefInput(prg, tool, startInput) + if err != nil { + return "", err + } + + argData := map[string]any{} + if strings.HasPrefix(parsedArgs, "{") { + if err := json.Unmarshal([]byte(parsedArgs), &argData); err != nil { + return "", fmt.Errorf("failed to unmarshal parsedArgs for filter: %w", err) + } + } else if _, hasInput := filterDefinedInput["input"]; parsedArgs != "" && !hasInput { + argData["input"] = parsedArgs + } + + resultData := map[string]any{} + maps.Copy(resultData, filterDefinedInput) + maps.Copy(resultData, argData) + + result, err := json.Marshal(resultData) + if err != nil { + return "", fmt.Errorf("failed to marshal resultData for filter: %w", err) + } + + return string(result), nil +} + +func (r *Runner) handleOutput(callCtx engine.Context, monitor Monitor, env []string, startState, state *State, retErr error) (*State, error) { outputToolRefs, err := callCtx.Tool.GetToolsByType(callCtx.Program, types.ToolTypeOutput) if err != nil { return nil, err @@ -40,7 +76,7 @@ func (r *Runner) handleOutput(callCtx engine.Context, monitor Monitor, env []str } for _, outputToolRef := range outputToolRefs { - inputData, err := json.Marshal(map[string]any{ + inputData, err := argsForFilters(callCtx.Program, outputToolRef, startState, map[string]any{ "output": output, "continuation": continuation, "chat": callCtx.Tool.Chat, diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 7ac9fae0..18bc1bc4 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -269,6 +269,9 @@ func 
getToolRefInput(prg *types.Program, ref types.ToolReference, input string) outputMap := map[string]interface{}{} _ = json.Unmarshal([]byte(input), &inputMap) + for k, v := range inputMap { + inputMap[strings.ToLower(k)] = v + } fields := strings.Fields(ref.Arg) @@ -291,7 +294,7 @@ func getToolRefInput(prg *types.Program, ref types.ToolReference, input string) key := strings.TrimPrefix(field, "$") key = strings.TrimPrefix(key, "{") key = strings.TrimSuffix(key, "}") - val = inputMap[key] + val = inputMap[strings.ToLower(key)] } else { val = field } @@ -425,6 +428,7 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en msg = "Tool call request has been denied" } return &State{ + StartInput: &input, Continuation: &engine.Return{ Result: &msg, }, @@ -438,6 +442,7 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en } return &State{ + StartInput: &input, Continuation: ret, }, nil } @@ -447,6 +452,8 @@ type State struct { ContinuationToolID string `json:"continuationToolID,omitempty"` Result *string `json:"result,omitempty"` + StartInput *string `json:"startInput,omitempty"` + ResumeInput *string `json:"resumeInput,omitempty"` SubCalls []SubCallResult `json:"subCalls,omitempty"` SubCallID string `json:"subCallID,omitempty"` @@ -485,14 +492,9 @@ func (s State) ContinuationContent() (string, error) { return "", fmt.Errorf("illegal state: no result message found in chat response") } -type Needed struct { - Content string `json:"content,omitempty"` - Input string `json:"input,omitempty"` -} - func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, state *State) (retState *State, retErr error) { defer func() { - retState, retErr = r.handleOutput(callCtx, monitor, env, retState, retErr) + retState, retErr = r.handleOutput(callCtx, monitor, env, state, retState, retErr) }() if state.Continuation == nil { diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index 8dbd2ba7..165f86c8 
100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -2,10 +2,12 @@ package tests import ( "context" + "encoding/json" "testing" "github.com/gptscript-ai/gptscript/pkg/loader" "github.com/gptscript-ai/gptscript/pkg/tests/tester" + "github.com/hexops/autogold/v2" "github.com/stretchr/testify/require" ) @@ -111,3 +113,92 @@ echo '{"env": {"CRED2": "that also worked"}}' resp, err := r.Chat(context.Background(), nil, prg, nil, "") r.AssertStep(t, resp, err) } + +func TestFilterArgs(t *testing.T) { + r := tester.NewRunner(t) + prg, err := loader.ProgramFromSource(context.Background(), ` +inputfilters: input with ${Foo} +inputfilters: input with foo +inputfilters: input with * +outputfilters: output with * +outputfilters: output with foo +outputfilters: output with ${Foo} +params: Foo: a description + +#!/bin/bash +echo ${FOO} + +--- +name: input +params: notfoo: a description + +#!/bin/bash +echo "${GPTSCRIPT_INPUT}" + +--- +name: output +params: notfoo: a description + +#!/bin/bash +echo "${GPTSCRIPT_INPUT}" +`, "") + require.NoError(t, err) + + resp, err := r.Chat(context.Background(), nil, prg, nil, `{"foo":"baz", "start": true}`) + r.AssertStep(t, resp, err) + + data := map[string]any{} + err = json.Unmarshal([]byte(resp.Content), &data) + require.NoError(t, err) + + autogold.Expect(map[string]interface{}{ + "chat": false, + "continuation": false, + "notfoo": "baz", + "output": `{"chat":false,"continuation":false,"notfoo":"foo","output":"{\"chat\":false,\"continuation\":false,\"foo\":\"baz\",\"input\":\"{\\\"foo\\\":\\\"baz\\\",\\\"input\\\":\\\"{\\\\\\\"foo\\\\\\\":\\\\\\\"baz\\\\\\\", \\\\\\\"start\\\\\\\": true}\\\",\\\"notfoo\\\":\\\"baz\\\",\\\"start\\\":true}\\n\",\"notfoo\":\"foo\",\"output\":\"baz\\n\",\"start\":true}\n"} +`, + }).Equal(t, data) + + val := data["output"].(string) + data = map[string]any{} + err = json.Unmarshal([]byte(val), &data) + require.NoError(t, err) + autogold.Expect(map[string]interface{}{ + "chat": false, + 
"continuation": false, + "notfoo": "foo", + "output": `{"chat":false,"continuation":false,"foo":"baz","input":"{\"foo\":\"baz\",\"input\":\"{\\\"foo\\\":\\\"baz\\\", \\\"start\\\": true}\",\"notfoo\":\"baz\",\"start\":true}\n","notfoo":"foo","output":"baz\n","start":true} +`, + }).Equal(t, data) + + val = data["output"].(string) + data = map[string]any{} + err = json.Unmarshal([]byte(val), &data) + require.NoError(t, err) + autogold.Expect(map[string]interface{}{ + "chat": false, + "continuation": false, + "foo": "baz", "input": `{"foo":"baz","input":"{\"foo\":\"baz\", \"start\": true}","notfoo":"baz","start":true} +`, + "notfoo": "foo", + "output": "baz\n", + "start": true, + }).Equal(t, data) + + val = data["input"].(string) + data = map[string]any{} + err = json.Unmarshal([]byte(val), &data) + require.NoError(t, err) + autogold.Expect(map[string]interface{}{ + "foo": "baz", + "input": `{"foo":"baz", "start": true}`, + "notfoo": "baz", + "start": true, + }).Equal(t, data) + + val = data["input"].(string) + data = map[string]any{} + err = json.Unmarshal([]byte(val), &data) + require.NoError(t, err) + autogold.Expect(map[string]interface{}{"foo": "baz", "start": true}).Equal(t, data) +} diff --git a/pkg/tests/testdata/TestFilterArgs/step1.golden b/pkg/tests/testdata/TestFilterArgs/step1.golden new file mode 100644 index 00000000..a6e6599b --- /dev/null +++ b/pkg/tests/testdata/TestFilterArgs/step1.golden @@ -0,0 +1,6 @@ +`{ + "done": true, + "content": 
"{\"chat\":false,\"continuation\":false,\"notfoo\":\"baz\",\"output\":\"{\\\"chat\\\":false,\\\"continuation\\\":false,\\\"notfoo\\\":\\\"foo\\\",\\\"output\\\":\\\"{\\\\\\\"chat\\\\\\\":false,\\\\\\\"continuation\\\\\\\":false,\\\\\\\"foo\\\\\\\":\\\\\\\"baz\\\\\\\",\\\\\\\"input\\\\\\\":\\\\\\\"{\\\\\\\\\\\\\\\"foo\\\\\\\\\\\\\\\":\\\\\\\\\\\\\\\"baz\\\\\\\\\\\\\\\",\\\\\\\\\\\\\\\"input\\\\\\\\\\\\\\\":\\\\\\\\\\\\\\\"{\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\"foo\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\":\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\"baz\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\", \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\"start\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\": true}\\\\\\\\\\\\\\\",\\\\\\\\\\\\\\\"notfoo\\\\\\\\\\\\\\\":\\\\\\\\\\\\\\\"baz\\\\\\\\\\\\\\\",\\\\\\\\\\\\\\\"start\\\\\\\\\\\\\\\":true}\\\\\\\\n\\\\\\\",\\\\\\\"notfoo\\\\\\\":\\\\\\\"foo\\\\\\\",\\\\\\\"output\\\\\\\":\\\\\\\"baz\\\\\\\\n\\\\\\\",\\\\\\\"start\\\\\\\":true}\\\\n\\\"}\\n\"}\n", + "toolID": "", + "state": null +}` From 9e3893ce3ae84a3d6b38a2c8e1f8ce86ee67aa04 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Fri, 23 Aug 2024 12:16:33 -0400 Subject: [PATCH 157/270] test: refine smoke judge comparison rules and output - Focus comparison on matching event types to reduce false negatives - Drop "ignore callProgress" rule (we're eliding them from the event stream before sending them to the judge now) Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/tests/judge/judge.go | 2 ++ pkg/tests/smoke/smoke_test.go | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/tests/judge/judge.go b/pkg/tests/judge/judge.go index eae12c2e..6a6c345c 100644 --- a/pkg/tests/judge/judge.go +++ b/pkg/tests/judge/judge.go @@ -40,6 +40,8 @@ After making a determination, respond with a JSON object that conforms to the fo ] } +If you determine actual and expected are not equivalent, include a diff of the parts of actual and expected that are not equivalent in the 
reasoning field of your response. + Your responses are concise and include only the json object described above. ` diff --git a/pkg/tests/smoke/smoke_test.go b/pkg/tests/smoke/smoke_test.go index a6d0ab2c..a721f0de 100644 --- a/pkg/tests/smoke/smoke_test.go +++ b/pkg/tests/smoke/smoke_test.go @@ -82,8 +82,8 @@ func TestSmoke(t *testing.T) { expectedEvents, actualEvents, ` -- disregard differences in timestamps, generated IDs, natural language verbiage, and event order -- omit callProgress events from the comparison +- disregard differences in event order, timestamps, generated IDs, and natural language verbiage, grammar, and punctuation +- compare events with matching event types - the overall stream of events and set of tools called should roughly match - arguments passed in tool calls should be roughly the same - the final callFinish event should be semantically similar From eefe82939bd3dcd4fa77664bb1750da7fa979c3f Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Tue, 6 Aug 2024 12:21:29 -0400 Subject: [PATCH 158/270] test: add gpt-4o-mini to smoke test github workflow - Add gpt-4o-mini to smoke test github workflow - Add gpt-4o-mini smoke test golden files - Remove outdated gpt-4o model and 4-turbo smoke test config and golden files - Add golden files for gpt-4o-2024-08-06 - Regenerate golden files for existing models to drop callProgress events (we weren't comparing these anyway) Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .github/workflows/smoke.yaml | 16 +- pkg/tests/judge/judge.go | 4 +- pkg/tests/smoke/smoke_test.go | 5 + .../claude-3-5-sonnet-20240620-expected.json | 448 +-- .../Bob/gpt-4-turbo-2024-04-09-expected.json | 2348 --------------- .../Bob/gpt-4o-2024-05-13-expected.json | 2643 ----------------- .../Bob/gpt-4o-2024-08-06-expected.json | 616 ++++ .../Bob/gpt-4o-mini-2024-07-18-expected.json | 538 ++++ .../Bob/mistral-large-2402-expected.json | 2295 +------------- 
.../claude-3-5-sonnet-20240620-expected.json | 323 +- .../gpt-4-turbo-2024-04-09-expected.json | 1549 ---------- .../gpt-4o-2024-05-13-expected.json | 1808 ----------- .../gpt-4o-2024-08-06-expected.json | 596 ++++ .../gpt-4o-mini-2024-07-18-expected.json | 528 ++++ .../mistral-large-2402-expected.json | 1229 +------- 15 files changed, 2609 insertions(+), 12337 deletions(-) delete mode 100644 pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json delete mode 100644 pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json create mode 100644 pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json create mode 100644 pkg/tests/smoke/testdata/Bob/gpt-4o-mini-2024-07-18-expected.json delete mode 100644 pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json delete mode 100644 pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json create mode 100644 pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json create mode 100644 pkg/tests/smoke/testdata/BobAsShell/gpt-4o-mini-2024-07-18-expected.json diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index 59ff43a2..9f736949 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -59,7 +59,7 @@ jobs: echo "run_smoke_tests=false" >> $GITHUB_OUTPUT - gpt-4o-2024-05-13: + gpt-4o-2024-08-06: needs: check-label if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} runs-on: ubuntu-22.04 @@ -81,14 +81,14 @@ jobs: go-version: "1.21" - env: OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} - GPTSCRIPT_DEFAULT_MODEL: gpt-4o-2024-05-13 - name: Run smoke test for gpt-4o-2024-05-13 + GPTSCRIPT_DEFAULT_MODEL: gpt-4o-2024-08-06 + name: Run smoke test for gpt-4o-2024-08-06 run: | - echo "Running smoke test for model gpt-4o-2024-05-13" + echo "Running smoke test for model gpt-4o-2024-08-06" export PATH="$(pwd)/bin:${PATH}" make smoke - gpt-4-turbo-2024-04-09: + gpt-4o-mini-2024-07-18: needs: check-label if: ${{ 
needs.check-label.outputs.run_smoke_tests == 'true' }} runs-on: ubuntu-22.04 @@ -110,10 +110,10 @@ jobs: go-version: "1.21" - env: OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} - GPTSCRIPT_DEFAULT_MODEL: gpt-4-turbo-2024-04-09 - name: Run smoke test for gpt-4-turbo-2024-04-09 + GPTSCRIPT_DEFAULT_MODEL: gpt-4o-mini-2024-07-18 + name: Run smoke test for gpt-4o-mini-2024-07-18 run: | - echo "Running smoke test for model gpt-4-turbo-2024-04-09" + echo "Running smoke test for model gpt-4o-mini-2024-07-18" export PATH="$(pwd)/bin:${PATH}" make smoke diff --git a/pkg/tests/judge/judge.go b/pkg/tests/judge/judge.go index 6a6c345c..f6581dcc 100644 --- a/pkg/tests/judge/judge.go +++ b/pkg/tests/judge/judge.go @@ -86,10 +86,10 @@ func New[T any](client *openai.Client) (*Judge[T], error) { } func (j *Judge[T]) Equal(ctx context.Context, expected, actual T, criteria string) (equal bool, reasoning string, err error) { - comparisonJSON, err := json.MarshalIndent(&comparison[T]{ + comparisonJSON, err := json.Marshal(&comparison[T]{ Expected: expected, Actual: actual, - }, "", " ") + }) if err != nil { return false, "", fmt.Errorf("failed to marshal judge testcase JSON: %w", err) } diff --git a/pkg/tests/smoke/smoke_test.go b/pkg/tests/smoke/smoke_test.go index a721f0de..b7e50b37 100644 --- a/pkg/tests/smoke/smoke_test.go +++ b/pkg/tests/smoke/smoke_test.go @@ -175,6 +175,11 @@ func getActualEvents(t *testing.T, eventsFile string) []event { var e event require.NoError(t, json.Unmarshal([]byte(line), &e)) + + if e.Type == runner.EventTypeCallProgress { + continue + } + events = append(events, e) } diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json index 63673b21..1b6342c4 100644 --- a/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json +++ b/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json @@ -1,13 +1,13 @@ [ { - "time": 
"2024-07-02T19:39:41.734737-04:00", + "time": "2024-10-14T12:30:37.766793-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-07-02T19:39:41.735252-04:00", + "time": "2024-10-14T12:30:37.767629-04:00", "callContext": { - "id": "1719963582", + "id": "1728923438", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -41,14 +41,14 @@ "usage": {} }, { - "time": "2024-07-02T19:39:42.643066-04:00", + "time": "2024-10-14T12:30:38.791552-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-07-02T19:39:42.643356-04:00", + "time": "2024-10-14T12:30:38.791851-04:00", "callContext": { - "id": "1719963583", + "id": "1728923439", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -59,30 +59,30 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", "toolMapping": { "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": 
"https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" } ] }, "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider" + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" }, "currentAgent": {}, "inputContext": null, @@ -93,9 +93,9 @@ "usage": {} }, { - "time": "2024-07-02T19:39:43.666188-04:00", + "time": "2024-10-14T12:30:39.80734-04:00", "callContext": { - "id": "1719963583", + "id": "1728923439", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -106,30 +106,30 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic 
API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", "toolMapping": { "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" } ] }, "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider" + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": 
"https://github.com/gptscript-ai/claude3-anthropic-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" }, "currentAgent": {}, "inputContext": null, @@ -138,17 +138,17 @@ }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:10887" + "content": "http://127.0.0.1:10940" }, { - "time": "2024-07-02T19:39:43.666385-04:00", + "time": "2024-10-14T12:30:39.80752-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-07-02T19:39:43.666475-04:00", + "time": "2024-10-14T12:30:39.807592-04:00", "callContext": { - "id": "1719963582", + "id": "1728923438", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -179,117 +179,17 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1719963584", + "chatCompletionId": "1728923440", "usage": {}, "chatRequest": { - "model": "claude-3-5-sonnet-20240620", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
- } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-07-02T19:39:43.66707-04:00", + "time": "2024-10-14T12:30:41.840024-04:00", "callContext": { - "id": "1719963582", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719963584", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-07-02T19:39:45.331397-04:00", - "callContext": { - "id": "1719963582", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719963584", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" - }, - { - "time": "2024-07-02T19:39:45.332721-04:00", - "callContext": { - "id": "1719963582", + "id": "1728923438", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -320,7 +220,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1719963584", + "chatCompletionId": "1728923440", "usage": {}, "chatResponse": { "role": "assistant", @@ -328,7 +228,7 @@ { "toolCall": { "index": 0, - "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -340,9 +240,9 @@ } }, { - "time": "2024-07-02T19:39:45.332824-04:00", + "time": "2024-10-14T12:30:41.840092-04:00", "callContext": { - "id": "1719963582", + "id": "1728923438", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -373,7 +273,7 @@ "inputContext": null }, "toolSubCalls": { - "toolu_01XLGhfvziYUf8rFoQEbvw4P": { + 
"toolu_01B2uNGCcfcK9K5oGmBeix8b": { "toolID": "testdata/Bob/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -382,9 +282,9 @@ "usage": {} }, { - "time": "2024-07-02T19:39:45.332869-04:00", + "time": "2024-10-14T12:30:41.840134-04:00", "callContext": { - "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -399,7 +299,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -414,16 +314,16 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1719963582" + "parentID": "1728923438" }, "type": "callStart", "usage": {}, "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-07-02T19:39:45.487113-04:00", + "time": "2024-10-14T12:30:42.553374-04:00", "callContext": { - "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -438,7 +338,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -453,70 +353,20 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1719963582" + "parentID": "1728923438" }, "type": "callChat", - "chatCompletionId": "1719963585", + "chatCompletionId": "1728923441", "usage": 
{}, "chatRequest": { - "model": "claude-3-5-sonnet-20240620", - "messages": [ - { - "role": "system", - "content": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" - }, - { - "role": "user", - "content": "{\"question\": \"how are you doing\"}" - } - ], - "temperature": 0 + "model": "", + "messages": null } }, { - "time": "2024-07-02T19:39:45.487696-04:00", - "callContext": { - "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719963582" - }, - "type": "callProgress", - "chatCompletionId": "1719963585", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-07-02T19:39:46.547634-04:00", + "time": "2024-10-14T12:30:43.320476-04:00", "callContext": { - "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -531,7 +381,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -546,50 +396,10 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1719963582" - }, - "type": "callProgress", - "chatCompletionId": "1719963585", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-07-02T19:39:46.549112-04:00", - "callContext": { - "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719963582" + "parentID": "1728923438" }, "type": "callChat", - "chatCompletionId": 
"1719963585", + "chatCompletionId": "1728923441", "usage": {}, "chatResponse": { "role": "assistant", @@ -602,9 +412,9 @@ } }, { - "time": "2024-07-02T19:39:46.549192-04:00", + "time": "2024-10-14T12:30:43.320527-04:00", "callContext": { - "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", + "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -619,7 +429,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -634,16 +444,16 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1719963582" + "parentID": "1728923438" }, "type": "callFinish", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-07-02T19:39:46.549253-04:00", + "time": "2024-10-14T12:30:43.320565-04:00", "callContext": { - "id": "1719963582", + "id": "1728923438", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -678,9 +488,9 @@ "usage": {} }, { - "time": "2024-07-02T19:39:46.710406-04:00", + "time": "2024-10-14T12:30:43.676243-04:00", "callContext": { - "id": "1719963582", + "id": "1728923438", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -711,137 +521,17 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1719963586", + "chatCompletionId": "1728923442", "usage": {}, "chatRequest": { - "model": "claude-3-5-sonnet-20240620", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." - }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "toolu_01XLGhfvziYUf8rFoQEbvw4P", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", - "name": "bob", - "tool_call_id": "toolu_01XLGhfvziYUf8rFoQEbvw4P" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-07-02T19:39:46.710675-04:00", - "callContext": { - "id": "1719963582", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how 
are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719963586", - "usage": {}, - "content": "Waiting for model response..." - }, - { - "time": "2024-07-02T19:39:48.861353-04:00", - "callContext": { - "id": "1719963582", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719963586", - "usage": {}, - "content": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." 
- }, - { - "time": "2024-07-02T19:39:48.862116-04:00", + "time": "2024-10-14T12:30:45.165846-04:00", "callContext": { - "id": "1719963582", + "id": "1728923438", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -872,22 +562,22 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1719963586", + "chatCompletionId": "1728923442", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." + "text": "Bob's reply was: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" } ], "usage": {} } }, { - "time": "2024-07-02T19:39:48.862154-04:00", + "time": "2024-10-14T12:30:45.165883-04:00", "callContext": { - "id": "1719963582", + "id": "1728923438", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -919,10 +609,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." 
+ "content": "Bob's reply was: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-07-02T19:39:48.862164-04:00", + "time": "2024-10-14T12:30:45.165904-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json deleted file mode 100644 index 01745e39..00000000 --- a/pkg/tests/smoke/testdata/Bob/gpt-4-turbo-2024-04-09-expected.json +++ /dev/null @@ -1,2348 +0,0 @@ -[ - { - "time": "2024-06-20T17:08:06.902669-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-06-20T17:08:06.902927-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-06-20T17:08:07.292073-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": 
"callChat", - "chatCompletionId": "1718917688", - "usage": {}, - "chatRequest": { - "model": "gpt-4-turbo-2024-04-09", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-20T17:08:07.292172-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-20T17:08:28.052253-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"" - }, - { - "time": "2024-06-20T17:08:28.05243-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"" - }, - { - "time": "2024-06-20T17:08:28.1369-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - 
"localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" - }, - { - "time": "2024-06-20T17:08:28.137013-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" - }, - { - "time": "2024-06-20T17:08:28.244585-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" - }, - { - "time": 
"2024-06-20T17:08:28.244731-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" - }, - { - "time": "2024-06-20T17:08:28.32623-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" - }, - { - "time": "2024-06-20T17:08:28.326358-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": 
"testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:28.326393-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:28.32645-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": 
"\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:28.326527-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917688", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:28.327843-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917688", - "usage": { - "promptTokens": 142, - "completionTokens": 17, - "totalTokens": 159 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - } - ], - "usage": { - "promptTokens": 142, - "completionTokens": 17, - 
"totalTokens": 159 - } - } - }, - { - "time": "2024-06-20T17:08:28.328046-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "toolSubCalls": { - "call_vsmL6EoDecm0oVmUnHIvNkaL": { - "toolID": "testdata/Bob/test.gpt:bob", - "input": "{\"question\":\"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": "2024-06-20T17:08:28.328123-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callStart", - "usage": {}, - "content": "{\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:28.53993-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm 
Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callChat", - "chatCompletionId": "1718917689", - "usage": {}, - "chatRequest": { - "model": "gpt-4-turbo-2024-04-09", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" - }, - { - "role": "user", - "content": "{\"question\":\"how are you doing\"}" - } - ], - "temperature": 0 - } - }, - { - "time": "2024-06-20T17:08:28.540154-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for 
asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Waiting for model response..." - }, - { - "time": "2024-06-20T17:08:29.188341-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks" - }, - { - "time": "2024-06-20T17:08:29.188493-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great 
fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks" - }, - { - "time": "2024-06-20T17:08:29.244545-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking" - }, - { - "time": "2024-06-20T17:08:29.244765-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": 
"testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking" - }, - { - "time": "2024-06-20T17:08:29.643951-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are" - }, - { - "time": "2024-06-20T17:08:29.644128-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - 
"localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are" - }, - { - "time": "2024-06-20T17:08:29.676951-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing," - }, - { - "time": "2024-06-20T17:08:29.677047-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - 
"": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing," - }, - { - "time": "2024-06-20T17:08:29.677123-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I" - }, - { - "time": "2024-06-20T17:08:29.677156-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": 
"testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm" - }, - { - "time": "2024-06-20T17:08:29.677184-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing" - }, - { - "time": "2024-06-20T17:08:29.677251-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { 
- "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great" - }, - { - "time": "2024-06-20T17:08:29.677288-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great" - }, - { - "time": "2024-06-20T17:08:29.727848-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": 
"testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" - }, - { - "time": "2024-06-20T17:08:29.727983-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" - }, - { - "time": "2024-06-20T17:08:29.782554-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking 
\"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T17:08:29.782738-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T17:08:29.782806-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T17:08:29.782839-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T17:08:29.782868-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callProgress", - "chatCompletionId": "1718917689", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T17:08:29.782988-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callChat", - "chatCompletionId": "1718917689", - "usage": { - "promptTokens": 124, - "completionTokens": 17, - "totalTokens": 141 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": { - "promptTokens": 124, - "completionTokens": 17, - "totalTokens": 141 - } - } - }, - { - "time": "2024-06-20T17:08:29.783047-04:00", - "callContext": { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917687" - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T17:08:29.783089-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-06-20T17:08:29.966093-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917690", - "usage": {}, - "chatRequest": { - "model": "gpt-4-turbo-2024-04-09", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
- }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "call_vsmL6EoDecm0oVmUnHIvNkaL", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!", - "name": "bob", - "tool_call_id": "call_vsmL6EoDecm0oVmUnHIvNkaL" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-20T17:08:29.966449-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-20T17:08:30.668641-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm" - }, - { - "time": "2024-06-20T17:08:30.668802-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm" - }, - { - "time": "2024-06-20T17:08:30.668957-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": 
"testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm" - }, - { - "time": "2024-06-20T17:08:30.669089-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm doing" - }, - { - "time": "2024-06-20T17:08:30.669299-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm doing great fellow" - }, - { - "time": "2024-06-20T17:08:30.669392-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - 
"instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm doing great fellow" - }, - { - "time": "2024-06-20T17:08:30.716062-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm doing great fellow friendly AI" - }, - { - "time": "2024-06-20T17:08:30.7162-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - 
"type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm doing great fellow friendly AI" - }, - { - "time": "2024-06-20T17:08:30.743098-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T17:08:30.743401-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T17:08:30.74648-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T17:08:30.746568-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917690", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T17:08:30.746778-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917690", - "usage": { - "promptTokens": 183, - "completionTokens": 10, - "totalTokens": 193 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "I'm doing great fellow friendly AI tool!" - } - ], - "usage": { - "promptTokens": 183, - "completionTokens": 10, - "totalTokens": 193 - } - } - }, - { - "time": "2024-06-20T17:08:30.746856-04:00", - "callContext": { - "id": "1718917687", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T17:08:30.746896-04:00", - "type": "runFinish", - "usage": {} - } -] diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json deleted file mode 100644 index 4d22287d..00000000 --- a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-05-13-expected.json +++ /dev/null @@ -1,2643 +0,0 @@ -[ - { - "time": "2024-06-20T16:58:11.3174-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-06-20T16:58:11.317644-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-06-20T16:58:11.638778-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917093", - "usage": {}, - "chatRequest": { - "model": "gpt-4o-2024-05-13", - "messages": [ - { - "role": "system", - "content": "\nYou are task 
oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-20T16:58:11.639016-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-20T16:58:12.564724-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"" - }, - { - "time": "2024-06-20T16:58:12.564911-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question" - }, - { - "time": "2024-06-20T16:58:12.564948-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": 
{ - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question" - }, - { - "time": "2024-06-20T16:58:12.56497-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" - }, - { - "time": "2024-06-20T16:58:12.564995-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how" - }, - { - "time": "2024-06-20T16:58:12.565045-04:00", - 
"callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you" - }, - { - "time": "2024-06-20T16:58:12.565071-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you" - }, - { - "time": "2024-06-20T16:58:12.565112-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": 
"testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T16:58:12.565137-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T16:58:12.56516-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - 
"time": "2024-06-20T16:58:12.565176-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917093", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T16:58:12.565397-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917093", - "usage": { - "promptTokens": 138, - "completionTokens": 17, - "totalTokens": 155 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - } - ], - "usage": { - "promptTokens": 138, - "completionTokens": 17, - "totalTokens": 155 - } - } - }, - { - "time": "2024-06-20T16:58:12.565644-04:00", - "callContext": { - 
"id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "toolSubCalls": { - "call_rTx93wiIASDA8uk8XHwjVmCC": { - "toolID": "testdata/Bob/test.gpt:bob", - "input": "{\"question\":\"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": "2024-06-20T16:58:12.565728-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callStart", - "usage": {}, - "content": "{\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T16:58:12.72779-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - 
"properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callChat", - "chatCompletionId": "1718917094", - "usage": {}, - "chatRequest": { - "model": "gpt-4o-2024-05-13", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" - }, - { - "role": "user", - "content": "{\"question\":\"how are you doing\"}" - } - ], - "temperature": 0 - } - }, - { - "time": "2024-06-20T16:58:12.728069-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - 
"localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Waiting for model response..." - }, - { - "time": "2024-06-20T16:58:13.077264-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {} - }, - { - "time": "2024-06-20T16:58:13.077534-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": 
"testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks" - }, - { - "time": "2024-06-20T16:58:13.134723-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking" - }, - { - "time": "2024-06-20T16:58:13.134893-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": 
"testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking" - }, - { - "time": "2024-06-20T16:58:13.230591-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how" - }, - { - "time": "2024-06-20T16:58:13.230667-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - 
"workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how" - }, - { - "time": "2024-06-20T16:58:13.246344-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you" - }, - { - "time": "2024-06-20T16:58:13.246468-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - 
"inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing" - }, - { - "time": "2024-06-20T16:58:13.246531-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing" - }, - { - "time": "2024-06-20T16:58:13.246592-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": 
null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm" - }, - { - "time": "2024-06-20T16:58:13.246645-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing" - }, - { - "time": "2024-06-20T16:58:13.246736-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - 
"inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great" - }, - { - "time": "2024-06-20T16:58:13.246796-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great" - }, - { - "time": "2024-06-20T16:58:13.30169-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": 
"testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly" - }, - { - "time": "2024-06-20T16:58:13.301837-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly" - }, - { - "time": "2024-06-20T16:58:13.31565-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": 
"testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" - }, - { - "time": "2024-06-20T16:58:13.315798-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T16:58:13.315842-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T16:58:13.315879-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T16:58:13.315951-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callProgress", - "chatCompletionId": "1718917094", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T16:58:13.316055-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callChat", - "chatCompletionId": "1718917094", - "usage": { - "promptTokens": 122, - "completionTokens": 17, - "totalTokens": 139 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": { - "promptTokens": 122, - "completionTokens": 17, - "totalTokens": 139 - } - } - }, - { - "time": "2024-06-20T16:58:13.316115-04:00", - "callContext": { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917092" - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T16:58:13.316171-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-06-20T16:58:13.533625-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917095", - "usage": {}, - "chatRequest": { - "model": "gpt-4o-2024-05-13", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
- }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "call_rTx93wiIASDA8uk8XHwjVmCC", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", - "name": "bob", - "tool_call_id": "call_rTx93wiIASDA8uk8XHwjVmCC" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-20T16:58:13.53384-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-20T16:58:13.856349-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks" - }, - { - "time": "2024-06-20T16:58:13.856437-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks" - }, - { - "time": "2024-06-20T16:58:13.874317-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": 
"testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking" - }, - { - "time": "2024-06-20T16:58:13.874428-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking" - }, - { - "time": "2024-06-20T16:58:14.060243-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are" - }, - { - "time": "2024-06-20T16:58:14.060366-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - 
], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are" - }, - { - "time": "2024-06-20T16:58:14.060418-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are" - }, - { - "time": "2024-06-20T16:58:14.060435-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - 
"type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you" - }, - { - "time": "2024-06-20T16:58:14.060456-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing" - }, - { - "time": "2024-06-20T16:58:14.060521-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing" - }, - { - "time": "2024-06-20T16:58:14.060555-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": 
"testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing" - }, - { - "time": "2024-06-20T16:58:14.060577-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing" - }, - { - "time": "2024-06-20T16:58:14.06061-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": 
"1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" - }, - { - "time": "2024-06-20T16:58:14.060626-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" - }, - { - "time": "2024-06-20T16:58:14.060686-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" - }, - { - "time": "2024-06-20T16:58:14.06071-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply 
exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" - }, - { - "time": "2024-06-20T16:58:14.060727-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" - }, - { - "time": "2024-06-20T16:58:14.060743-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - 
"inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T16:58:14.060788-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T16:58:14.060806-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917095", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T16:58:14.061001-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917095", - "usage": { - "promptTokens": 179, - "completionTokens": 18, - "totalTokens": 197 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" - } - ], - "usage": { - "promptTokens": 179, - "completionTokens": 18, - "totalTokens": 197 - } - } - }, - { - "time": "2024-06-20T16:58:14.061031-04:00", - "callContext": { - "id": "1718917092", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T16:58:14.061047-04:00", - "type": "runFinish", - "usage": {} - } -] diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json new file mode 100644 index 00000000..ebce34be --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json @@ -0,0 +1,616 @@ +[ + { + "time": "2024-08-23T11:30:01.577023-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-08-23T11:30:01.577297-04:00", + "callContext": { + "id": "1724427002", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-08-23T11:30:01.577466-04:00", + "callContext": { + "id": "1724427002", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1724427003", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-08-06", + "messages": [ + { + 
"role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-08-23T11:30:02.103572-04:00", + "callContext": { + "id": "1724427002", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1724427003", + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + } + ], + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + } + } + }, + { + "time": "2024-08-23T11:30:02.103835-04:00", + 
"callContext": { + "id": "1724427002", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolSubCalls": { + "call_k5gUMCNtxH9gKglmCEkjfWAR": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\":\"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-08-23T11:30:02.103996-04:00", + "callContext": { + "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1724427002" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"how are you doing\"}" + }, + { + "time": "2024-08-23T11:30:02.104177-04:00", + "callContext": { + "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": 
"gpt-4o-2024-08-06", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1724427002" + }, + "type": "callChat", + "chatCompletionId": "1724427004", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-08-06", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"" + }, + { + "role": "user", + "content": "{\"question\":\"how are you doing\"}" + } + ], + "temperature": 0 + } + }, + { + "time": "2024-08-23T11:30:02.567871-04:00", + "callContext": { + "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm 
doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1724427002" + }, + "type": "callChat", + "chatCompletionId": "1724427004", + "usage": { + "promptTokens": 122, + "completionTokens": 14, + "totalTokens": 136 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!" + } + ], + "usage": { + "promptTokens": 122, + "completionTokens": 14, + "totalTokens": 136 + } + } + }, + { + "time": "2024-08-23T11:30:02.568006-04:00", + "callContext": { + "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1724427002" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-08-23T11:30:02.568167-04:00", + "callContext": { + "id": "1724427002", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-08-23T11:30:02.568274-04:00", + "callContext": { + "id": "1724427002", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1724427005", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-08-06", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" 
and repeat his reply exactly." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!", + "name": "bob", + "tool_call_id": "call_k5gUMCNtxH9gKglmCEkjfWAR" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-08-23T11:30:03.052817-04:00", + "callContext": { + "id": "1724427002", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1724427005", + "usage": { + "promptTokens": 176, + "completionTokens": 18, + "totalTokens": 194 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": { + "promptTokens": 176, + "completionTokens": 18, + "totalTokens": 194 + } + } + }, + { + "time": "2024-08-23T11:30:03.05287-04:00", + "callContext": { + "id": "1724427002", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-08-23T11:30:03.052914-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4o-mini-2024-07-18-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4o-mini-2024-07-18-expected.json new file mode 100644 index 00000000..3c1aebc9 --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/gpt-4o-mini-2024-07-18-expected.json @@ -0,0 +1,538 @@ +[ + { + "time": "2024-10-14T11:31:46.97662-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-10-14T11:31:46.977148-04:00", + "callContext": { + "id": "1728919907", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": 
"testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-10-14T11:31:46.977209-04:00", + "callContext": { + "id": "1728919907", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728919908", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T11:31:49.170338-04:00", + "callContext": { + "id": "1728919907", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728919908", + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + } + ], + "usage": { + 
"promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + } + } + }, + { + "time": "2024-10-14T11:31:49.170563-04:00", + "callContext": { + "id": "1728919907", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolSubCalls": { + "call_qBi5ZvQ2pFwXdENJXmuCb6Oy": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\":\"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-10-14T11:31:49.171155-04:00", + "callContext": { + "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728919907" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"how are you doing\"}" + }, + { + "time": "2024-10-14T11:31:49.171395-04:00", + "callContext": 
{ + "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728919907" + }, + "type": "callChat", + "chatCompletionId": "1728919909", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T11:31:50.446571-04:00", + "callContext": { + "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728919907" + }, + "type": "callChat", + "chatCompletionId": "1728919909", + "usage": { + "promptTokens": 122, + "completionTokens": 17, + "totalTokens": 139 + }, + "chatResponse": 
{ + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + } + ], + "usage": { + "promptTokens": 122, + "completionTokens": 17, + "totalTokens": 139 + } + } + }, + { + "time": "2024-10-14T11:31:50.446692-04:00", + "callContext": { + "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728919907" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-10-14T11:31:50.446773-04:00", + "callContext": { + "id": "1728919907", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-10-14T11:31:50.446939-04:00", + "callContext": { + "id": "1728919907", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728919910", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T11:31:52.118055-04:00", + "callContext": { + "id": "1728919907", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": 
"testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728919910", + "usage": { + "promptTokens": 179, + "completionTokens": 18, + "totalTokens": 197 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + } + ], + "usage": { + "promptTokens": 179, + "completionTokens": 18, + "totalTokens": 197 + } + } + }, + { + "time": "2024-10-14T11:31:52.118196-04:00", + "callContext": { + "id": "1728919907", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-10-14T11:31:52.118256-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json index 8730226a..c2971d57 100644 --- a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json +++ b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json @@ -1,13 +1,13 @@ [ { - "time": "2024-07-03T10:53:01.406601-04:00", + "time": "2024-10-14T12:20:24.700667-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-07-03T10:53:01.406888-04:00", + "time": "2024-10-14T12:20:24.701071-04:00", "callContext": { - "id": "1720018382", + "id": "1728922825", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -41,14 +41,14 @@ "usage": {} }, { - "time": "2024-07-03T10:53:02.106674-04:00", + "time": "2024-10-14T12:20:25.518655-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-07-03T10:53:02.107085-04:00", + "time": "2024-10-14T12:20:25.518946-04:00", "callContext": { - "id": "1720018383", + "id": "1728922826", "tool": { "name": "Mistral La Plateforme Provider", "description": "Model provider for Mistral models running on La Plateforme", @@ -59,30 +59,30 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider", "toolMapping": { 
"github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" } ] }, "localTools": { - "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider" + "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "cbf1aeb6db495b9b6223984651d29ac511d2748d" + "Revision": "aa4353e7d1de7e90e1078bfbc88526266e587a64" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64" }, "currentAgent": {}, 
"inputContext": null, @@ -93,9 +93,9 @@ "usage": {} }, { - "time": "2024-07-03T10:53:03.125117-04:00", + "time": "2024-10-14T12:20:26.534361-04:00", "callContext": { - "id": "1720018383", + "id": "1728922826", "tool": { "name": "Mistral La Plateforme Provider", "description": "Model provider for Mistral models running on La Plateforme", @@ -106,30 +106,30 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider", "toolMapping": { "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" } ] }, "localTools": { - "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider" + "mistral la plateforme 
provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "cbf1aeb6db495b9b6223984651d29ac511d2748d" + "Revision": "aa4353e7d1de7e90e1078bfbc88526266e587a64" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64" }, "currentAgent": {}, "inputContext": null, @@ -138,17 +138,17 @@ }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:10244" + "content": "http://127.0.0.1:11149" }, { - "time": "2024-07-03T10:53:03.125375-04:00", + "time": "2024-10-14T12:20:26.534546-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-07-03T10:53:03.12547-04:00", + "time": "2024-10-14T12:20:26.534598-04:00", "callContext": { - "id": "1720018382", + "id": "1728922825", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -179,154 +179,17 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1720018384", + "chatCompletionId": "1728922827", "usage": {}, "chatRequest": { - "model": "mistral-large-2402", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
- } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-07-03T10:53:03.126002-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018384", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-07-03T10:53:03.438634-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018384", - "usage": {} - }, - { - "time": "2024-07-03T10:53:03.917633-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018384", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" - }, - { - "time": "2024-07-03T10:53:03.9181-04:00", + "time": "2024-10-14T12:20:27.793767-04:00", "callContext": { - "id": "1720018382", + "id": "1728922825", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -357,7 
+220,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1720018384", + "chatCompletionId": "1728922827", "usage": { "promptTokens": 188, "completionTokens": 23, @@ -369,7 +232,7 @@ { "toolCall": { "index": 0, - "id": "IePX3uH5y", + "id": "jSMVlVVyb", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -385,9 +248,9 @@ } }, { - "time": "2024-07-03T10:53:03.918447-04:00", + "time": "2024-10-14T12:20:27.793996-04:00", "callContext": { - "id": "1720018382", + "id": "1728922825", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -418,7 +281,7 @@ "inputContext": null }, "toolSubCalls": { - "IePX3uH5y": { + "jSMVlVVyb": { "toolID": "testdata/Bob/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -427,9 +290,9 @@ "usage": {} }, { - "time": "2024-07-03T10:53:03.918585-04:00", + "time": "2024-10-14T12:20:27.794146-04:00", "callContext": { - "id": "IePX3uH5y", + "id": "jSMVlVVyb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -444,7 +307,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -459,16 +322,16 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1720018382" + "parentID": "1728922825" }, "type": "callStart", "usage": {}, "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-07-03T10:53:04.089188-04:00", + "time": "2024-10-14T12:20:28.306793-04:00", "callContext": { - "id": "IePX3uH5y", + "id": "jSMVlVVyb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -483,7 +346,7 @@ }, 
"type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -498,70 +361,20 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1720018382" + "parentID": "1728922825" }, "type": "callChat", - "chatCompletionId": "1720018385", + "chatCompletionId": "1728922828", "usage": {}, "chatRequest": { - "model": "mistral-large-2402", - "messages": [ - { - "role": "system", - "content": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"" - }, - { - "role": "user", - "content": "{\"question\": \"how are you doing\"}" - } - ], - "temperature": 0 + "model": "", + "messages": null } }, { - "time": "2024-07-03T10:53:04.089548-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": 
"1720018385", - "usage": {}, - "content": "Waiting for model response..." - }, - { - "time": "2024-07-03T10:53:04.287287-04:00", + "time": "2024-10-14T12:20:29.060571-04:00", "callContext": { - "id": "IePX3uH5y", + "id": "jSMVlVVyb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -576,7 +389,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -591,56 +404,33 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1720018382" + "parentID": "1728922825" }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {} - }, - { - "time": "2024-07-03T10:53:04.287688-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" + "type": "callChat", + "chatCompletionId": "1728922828", + "usage": { + "promptTokens": 145, + "completionTokens": 19, + "totalTokens": 164 
}, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks" + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + } + ], + "usage": { + "promptTokens": 145, + "completionTokens": 19, + "totalTokens": 164 + } + } }, { - "time": "2024-07-03T10:53:04.302879-04:00", + "time": "2024-10-14T12:20:29.060766-04:00", "callContext": { - "id": "IePX3uH5y", + "id": "jSMVlVVyb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -655,7 +445,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -670,1941 +460,94 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1720018382" + "parentID": "1728922825" }, - "type": "callProgress", - "chatCompletionId": "1720018385", + "type": "callFinish", "usage": {}, - "content": "Thanks for" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-07-03T10:53:04.323886-04:00", + "time": "2024-10-14T12:20:29.060906-04:00", "callContext": { - "id": "IePX3uH5y", + "id": "1728922825", "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" } - }, - "type": "object" + ] }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", "bob": "testdata/Bob/test.gpt:bob" }, "source": { "location": "testdata/Bob/test.gpt", - "lineNo": 6 + "lineNo": 1 }, "workingDir": "testdata/Bob" }, "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" + "inputContext": null }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking" + "toolResults": 1, + "type": "callContinue", + "usage": {} }, { - "time": "2024-07-03T10:53:04.345227-04:00", + "time": "2024-10-14T12:20:29.203429-04:00", "callContext": { - "id": "IePX3uH5y", + "id": "1728922825", "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": 
"testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" } - }, - "type": "object" + ] }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", "bob": "testdata/Bob/test.gpt:bob" }, "source": { "location": "testdata/Bob/test.gpt", - "lineNo": 6 + "lineNo": 1 }, "workingDir": "testdata/Bob" }, "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" + "inputContext": null }, - "type": "callProgress", - "chatCompletionId": "1720018385", + "type": "callChat", + "chatCompletionId": "1728922829", "usage": {}, - "content": "Thanks for asking \"" - }, - { - "time": "2024-07-03T10:53:04.364182-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how" - }, - { - "time": "2024-07-03T10:53:04.387098-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - 
"description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are" - }, - { - "time": "2024-07-03T10:53:04.405872-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you" - }, - { - "time": 
"2024-07-03T10:53:04.427749-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing" - }, - { - "time": "2024-07-03T10:53:04.448563-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - 
"chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\"," - }, - { - "time": "2024-07-03T10:53:04.468026-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I" - }, - { - "time": "2024-07-03T10:53:04.490606-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - 
"currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'" - }, - { - "time": "2024-07-03T10:53:04.511171-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm" - }, - { - "time": "2024-07-03T10:53:04.531235-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": 
"testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing" - }, - { - "time": "2024-07-03T10:53:04.553855-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great" - }, - { - "time": "2024-07-03T10:53:04.574503-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", 
I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow" - }, - { - "time": "2024-07-03T10:53:04.598055-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly" - }, - { - "time": "2024-07-03T10:53:04.613412-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question 
to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI" - }, - { - "time": "2024-07-03T10:53:04.635897-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool" - }, - { - "time": "2024-07-03T10:53:04.656116-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": 
"mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-07-03T10:53:04.680962-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callProgress", - "chatCompletionId": "1720018385", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow 
friendly AI tool!" - }, - { - "time": "2024-07-03T10:53:04.681447-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callChat", - "chatCompletionId": "1720018385", - "usage": { - "promptTokens": 143, - "completionTokens": 19, - "totalTokens": 162 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": { - "promptTokens": 143, - "completionTokens": 19, - "totalTokens": 162 - } - } - }, - { - "time": "2024-07-03T10:53:04.681598-04:00", - "callContext": { - "id": "IePX3uH5y", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${question}\", I'm doing great fellow friendly AI tool!\"", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018382" - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-07-03T10:53:04.681725-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-07-03T10:53:04.842182-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1720018386", - "usage": {}, - "chatRequest": { - "model": "mistral-large-2402", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
- }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "IePX3uH5y", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", - "name": "bob", - "tool_call_id": "IePX3uH5y" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-07-03T10:53:04.842685-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-07-03T10:53:05.384131-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {} - }, - { - "time": "2024-07-03T10:53:05.384657-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob" - }, - { - "time": "2024-07-03T10:53:05.404612-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": 
"testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said" - }, - { - "time": "2024-07-03T10:53:05.430068-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said," - }, - { - "time": "2024-07-03T10:53:05.452982-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - 
"currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"" - }, - { - "time": "2024-07-03T10:53:05.501525-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for" - }, - { - "time": "2024-07-03T10:53:05.50179-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for" - }, - { - "time": "2024-07-03T10:53:05.532152-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from 
github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking" - }, - { - "time": "2024-07-03T10:53:05.553295-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking '" - }, - { - "time": "2024-07-03T10:53:05.576012-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": 
"testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how" - }, - { - "time": "2024-07-03T10:53:05.602885-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are" - }, - { - "time": "2024-07-03T10:53:05.624751-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": 
null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you" - }, - { - "time": "2024-07-03T10:53:05.649543-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing" - }, - { - "time": "2024-07-03T10:53:05.674199-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing'," - }, - { - "time": "2024-07-03T10:53:05.697398-04:00", - "callContext": { - "id": "1720018382", - "tool": { - 
"modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I" - }, - { - "time": "2024-07-03T10:53:05.725234-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'" - }, - { - "time": "2024-07-03T10:53:05.747687-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": 
"testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm" - }, - { - "time": "2024-07-03T10:53:05.773761-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing" - }, - { - "time": "2024-07-03T10:53:05.798352-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": 
{ - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great" - }, - { - "time": "2024-07-03T10:53:05.823105-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow" - }, - { - "time": "2024-07-03T10:53:05.846373-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - 
"usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly" - }, - { - "time": "2024-07-03T10:53:05.871647-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI" - }, - { - "time": "2024-07-03T10:53:05.895603-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool" - }, - { - "time": "2024-07-03T10:53:05.919754-04:00", - 
"callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" - }, - { - "time": "2024-07-03T10:53:05.956244-04:00", - "callContext": { - "id": "1720018382", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018386", - "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + "chatRequest": { + "model": "", + "messages": null + } }, { - "time": "2024-07-03T10:53:05.956647-04:00", + "time": "2024-10-14T12:20:30.272631-04:00", "callContext": { - "id": "1720018382", + "id": "1728922825", "tool": { 
"modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -2635,7 +578,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1720018386", + "chatCompletionId": "1728922829", "usage": { "promptTokens": 246, "completionTokens": 23, @@ -2656,9 +599,9 @@ } }, { - "time": "2024-07-03T10:53:05.956695-04:00", + "time": "2024-10-14T12:20:30.27277-04:00", "callContext": { - "id": "1720018382", + "id": "1728922825", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -2693,7 +636,7 @@ "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-07-03T10:53:05.956743-04:00", + "time": "2024-10-14T12:20:30.27283-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json index 4f3dd9d7..3a2838ab 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json @@ -1,13 +1,13 @@ [ { - "time": "2024-07-02T19:39:48.881112-04:00", + "time": "2024-08-23T12:02:17.549859-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-07-02T19:39:48.881306-04:00", + "time": "2024-08-23T12:02:17.55023-04:00", "callContext": { - "id": "1719963589", + "id": "1724428938", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -41,14 +41,14 @@ "usage": {} }, { - "time": "2024-07-02T19:39:49.304999-04:00", + "time": "2024-08-23T12:02:18.283201-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-07-02T19:39:49.305331-04:00", + "time": "2024-08-23T12:02:18.28339-04:00", "callContext": { - "id": "1719963590", + "id": "1724428939", 
"tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -59,30 +59,30 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", "toolMapping": { "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/37e5f870e195b896438c0bc35867403a42f82e89/tool.gpt:token" } ] }, "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider" + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" }, "source": { - "location": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" }, "currentAgent": {}, "inputContext": null, @@ -93,9 +93,9 @@ "usage": {} }, { - "time": "2024-07-02T19:39:50.324048-04:00", + "time": "2024-08-23T12:02:19.295369-04:00", "callContext": { - "id": "1719963590", + "id": "1724428939", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -106,30 +106,30 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", "toolMapping": { "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field 
and \"ANTHROPIC_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/37e5f870e195b896438c0bc35867403a42f82e89/tool.gpt:token" } ] }, "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt:Anthropic Claude3 Model Provider" + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/6b5a88075b1e4501e845f4dab5be16ea8739aa4c" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" }, "currentAgent": {}, "inputContext": null, @@ -138,17 +138,17 @@ }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:10585" + "content": "http://127.0.0.1:10739" }, { - "time": "2024-07-02T19:39:50.324287-04:00", + 
"time": "2024-08-23T12:02:19.295542-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-07-02T19:39:50.324366-04:00", + "time": "2024-08-23T12:02:19.295604-04:00", "callContext": { - "id": "1719963589", + "id": "1724428938", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -179,7 +179,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1719963591", + "chatCompletionId": "1724428940", "usage": {}, "chatRequest": { "model": "claude-3-5-sonnet-20240620", @@ -211,85 +211,9 @@ } }, { - "time": "2024-07-02T19:39:50.324921-04:00", + "time": "2024-08-23T12:02:21.136785-04:00", "callContext": { - "id": "1719963589", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719963591", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-07-02T19:39:51.941234-04:00", - "callContext": { - "id": "1719963589", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719963591", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" - }, - { - "time": "2024-07-02T19:39:51.941444-04:00", - "callContext": { - "id": "1719963589", + "id": "1724428938", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -320,7 +244,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1719963591", + "chatCompletionId": "1724428940", "usage": {}, "chatResponse": { "role": "assistant", @@ -328,7 +252,7 @@ { "toolCall": { "index": 0, - "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -340,9 +264,9 @@ } }, { - "time": "2024-07-02T19:39:51.941523-04:00", + "time": "2024-08-23T12:02:21.136848-04:00", "callContext": { - "id": "1719963589", + "id": "1724428938", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -373,7 +297,7 @@ "inputContext": null }, "toolSubCalls": { - 
"toolu_01SHRzoGuUEQtwBp2ktaqu7P": { + "toolu_01XzHFJpwHD8hzowvAqGgfSz": { "toolID": "testdata/BobAsShell/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -382,9 +306,9 @@ "usage": {} }, { - "time": "2024-07-02T19:39:51.941567-04:00", + "time": "2024-08-23T12:02:21.136877-04:00", "callContext": { - "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -399,7 +323,7 @@ }, "type": "object" }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", "id": "testdata/BobAsShell/test.gpt:bob", "localTools": { "": "testdata/BobAsShell/test.gpt:", @@ -414,7 +338,7 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1719963589", + "parentID": "1724428938", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", @@ -422,9 +346,9 @@ "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-07-02T19:39:51.942241-04:00", + "time": "2024-08-23T12:02:21.137223-04:00", "callContext": { - "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -439,7 +363,7 @@ }, "type": "object" }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", "id": "testdata/BobAsShell/test.gpt:bob", "localTools": { "": "testdata/BobAsShell/test.gpt:", @@ -454,11 +378,11 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1719963589", + "parentID": "1724428938", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", 
- "chatCompletionId": "1719963592", + "chatCompletionId": "1724428941", "usage": {}, "chatRequest": { "model": "", @@ -466,9 +390,9 @@ } }, { - "time": "2024-07-02T19:39:51.945596-04:00", + "time": "2024-08-23T12:02:21.142624-04:00", "callContext": { - "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -483,7 +407,7 @@ }, "type": "object" }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", "id": "testdata/BobAsShell/test.gpt:bob", "localTools": { "": "testdata/BobAsShell/test.gpt:", @@ -498,61 +422,20 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1719963589", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callProgress", - "chatCompletionId": "1719963592", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - }, - { - "time": "2024-07-02T19:39:51.945769-04:00", - "callContext": { - "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": 
{}, - "inputContext": null, - "toolName": "bob", - "parentID": "1719963589", + "parentID": "1724428938", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1719963592", + "chatCompletionId": "1724428941", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-07-02T19:39:51.945833-04:00", + "time": "2024-08-23T12:02:21.142691-04:00", "callContext": { - "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -567,7 +450,7 @@ }, "type": "object" }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", "id": "testdata/BobAsShell/test.gpt:bob", "localTools": { "": "testdata/BobAsShell/test.gpt:", @@ -582,7 +465,7 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1719963589", + "parentID": "1724428938", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", @@ -590,9 +473,9 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-07-02T19:39:51.945871-04:00", + "time": "2024-08-23T12:02:21.142723-04:00", "callContext": { - "id": "1719963589", + "id": "1724428938", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -627,9 +510,9 @@ "usage": {} }, { - "time": "2024-07-02T19:39:52.112712-04:00", + "time": "2024-08-23T12:02:21.371211-04:00", "callContext": { - "id": "1719963589", + "id": "1724428938", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -660,7 +543,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1719963593", + 
"chatCompletionId": "1724428942", "usage": {}, "chatRequest": { "model": "claude-3-5-sonnet-20240620", @@ -674,7 +557,7 @@ "content": "", "tool_calls": [ { - "id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P", + "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", "type": "function", "function": { "name": "bob", @@ -687,7 +570,7 @@ "role": "tool", "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", "name": "bob", - "tool_call_id": "toolu_01SHRzoGuUEQtwBp2ktaqu7P" + "tool_call_id": "toolu_01XzHFJpwHD8hzowvAqGgfSz" } ], "temperature": 0, @@ -712,85 +595,9 @@ } }, { - "time": "2024-07-02T19:39:52.11309-04:00", - "callContext": { - "id": "1719963589", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719963593", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-07-02T19:39:53.567604-04:00", - "callContext": { - "id": "1719963589", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1719963593", - "usage": {}, - "content": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." - }, - { - "time": "2024-07-02T19:39:53.568556-04:00", + "time": "2024-08-23T12:02:23.102371-04:00", "callContext": { - "id": "1719963589", + "id": "1724428938", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -821,22 +628,22 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1719963593", + "chatCompletionId": "1724428942", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." 
+ "text": "Bob's reply was: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" } ], "usage": {} } }, { - "time": "2024-07-02T19:39:53.568602-04:00", + "time": "2024-08-23T12:02:23.102422-04:00", "callContext": { - "id": "1719963589", + "id": "1724428938", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, @@ -868,10 +675,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob's reply to the question \"how are you doing\" is:\n\n\"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"\n\nI have repeated his reply exactly as requested." + "content": "Bob's reply was: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-07-02T19:39:53.568622-04:00", + "time": "2024-08-23T12:02:23.102441-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json deleted file mode 100644 index 5b2409f6..00000000 --- a/pkg/tests/smoke/testdata/BobAsShell/gpt-4-turbo-2024-04-09-expected.json +++ /dev/null @@ -1,1549 +0,0 @@ -[ - { - "time": "2024-06-20T17:08:30.778302-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-06-20T17:08:30.778582-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": 
"testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-06-20T17:08:30.981266-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917712", - "usage": {}, - "chatRequest": { - "model": "gpt-4-turbo-2024-04-09", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
- } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-20T17:08:30.981391-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-20T17:08:32.232987-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"" - }, - { - "time": "2024-06-20T17:08:32.233265-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"" - }, - { - "time": "2024-06-20T17:08:32.344744-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - 
"bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" - }, - { - "time": "2024-06-20T17:08:32.344882-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" - }, - { - "time": "2024-06-20T17:08:32.361676-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, 
- "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" - }, - { - "time": "2024-06-20T17:08:32.361793-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" - }, - { - "time": "2024-06-20T17:08:32.440498-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" - }, - { - "time": "2024-06-20T17:08:32.440743-04:00", - "callContext": { - "id": "1718917711", - "tool": { - 
"modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:32.440798-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:32.440836-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": 
"testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:32.440873-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917712", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:32.441115-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - 
"inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917712", - "usage": { - "promptTokens": 142, - "completionTokens": 17, - "totalTokens": 159 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - } - ], - "usage": { - "promptTokens": 142, - "completionTokens": 17, - "totalTokens": 159 - } - } - }, - { - "time": "2024-06-20T17:08:32.441462-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "toolSubCalls": { - "call_slFgd2P2lMxXQoyrPbm2YsrQ": { - "toolID": "testdata/BobAsShell/test.gpt:bob", - "input": "{\"question\":\"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": "2024-06-20T17:08:32.441542-04:00", - "callContext": { - "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": 
"testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917711", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callStart", - "usage": {}, - "content": "{\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T17:08:32.442736-04:00", - "callContext": { - "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917711", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1718917713", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-06-20T17:08:32.448288-04:00", - "callContext": { - "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great 
fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917711", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callProgress", - "chatCompletionId": "1718917713", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - }, - { - "time": "2024-06-20T17:08:32.448728-04:00", - "callContext": { - "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917711", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1718917713", - "usage": {}, - "chatResponse": { - "usage": {} - } - }, - { - "time": "2024-06-20T17:08:32.448906-04:00", - "callContext": { - "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The 
question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917711", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - }, - { - "time": "2024-06-20T17:08:32.448977-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-06-20T17:08:32.624086-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - 
"bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917714", - "usage": {}, - "chatRequest": { - "model": "gpt-4-turbo-2024-04-09", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." - }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "call_slFgd2P2lMxXQoyrPbm2YsrQ", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", - "name": "bob", - "tool_call_id": "call_slFgd2P2lMxXQoyrPbm2YsrQ" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-20T17:08:32.624367-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - 
}, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "Waiting for model response..." - }, - { - "time": "2024-06-20T17:08:33.020025-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I" - }, - { - "time": "2024-06-20T17:08:33.020187-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I" - }, - { - "time": 
"2024-06-20T17:08:33.09047-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing" - }, - { - "time": "2024-06-20T17:08:33.090722-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing" - }, - { - "time": "2024-06-20T17:08:33.150983-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" 
- } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing great fellow" - }, - { - "time": "2024-06-20T17:08:33.151128-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing great fellow" - }, - { - "time": "2024-06-20T17:08:33.26424-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing great 
fellow friendly AI" - }, - { - "time": "2024-06-20T17:08:33.264352-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool" - }, - { - "time": "2024-06-20T17:08:33.264393-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool" - }, - { - "time": "2024-06-20T17:08:33.264427-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - 
"toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T17:08:33.264492-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T17:08:33.264575-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917714", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T17:08:33.264897-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917714", - "usage": { - "promptTokens": 183, - "completionTokens": 10, - "totalTokens": 193 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": { - "promptTokens": 183, - "completionTokens": 10, - "totalTokens": 193 - } - } - }, - { - "time": "2024-06-20T17:08:33.264985-04:00", - "callContext": { - "id": "1718917711", - "tool": { - "modelName": "gpt-4-turbo-2024-04-09", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T17:08:33.265021-04:00", - "type": "runFinish", - "usage": {} - } -] diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json deleted file mode 100644 index f61edd7d..00000000 --- a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-05-13-expected.json +++ /dev/null @@ -1,1808 +0,0 @@ -[ - { - "time": "2024-06-20T16:58:14.093283-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-06-20T16:58:14.093568-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 
- }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-06-20T16:58:14.31069-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917096", - "usage": {}, - "chatRequest": { - "model": "gpt-4o-2024-05-13", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
- } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-20T16:58:14.311071-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-06-20T16:58:14.807492-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"" - }, - { - "time": "2024-06-20T16:58:14.807779-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"" - }, - { - "time": "2024-06-20T16:58:14.832551-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - 
"reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" - }, - { - "time": "2024-06-20T16:58:14.832684-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"" - }, - { - "time": "2024-06-20T16:58:14.865368-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - 
}, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" - }, - { - "time": "2024-06-20T16:58:14.865484-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are" - }, - { - "time": "2024-06-20T16:58:14.899511-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" - }, - { - "time": "2024-06-20T16:58:14.899668-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - 
"internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing" - }, - { - "time": "2024-06-20T16:58:14.900883-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T16:58:14.900938-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - 
"localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T16:58:14.900969-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917096", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T16:58:14.901222-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callChat", - 
"chatCompletionId": "1718917096", - "usage": { - "promptTokens": 138, - "completionTokens": 17, - "totalTokens": 155 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - } - ], - "usage": { - "promptTokens": 138, - "completionTokens": 17, - "totalTokens": 155 - } - } - }, - { - "time": "2024-06-20T16:58:14.901521-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "toolSubCalls": { - "call_PGLxooO6eBPt3eSEBCMkuWYN": { - "toolID": "testdata/BobAsShell/test.gpt:bob", - "input": "{\"question\":\"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": "2024-06-20T16:58:14.901599-04:00", - "callContext": { - "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": 
"testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917095", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callStart", - "usage": {}, - "content": "{\"question\":\"how are you doing\"}" - }, - { - "time": "2024-06-20T16:58:14.90268-04:00", - "callContext": { - "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917095", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1718917097", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-06-20T16:58:14.908538-04:00", - "callContext": { - "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": 
"testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917095", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callProgress", - "chatCompletionId": "1718917097", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - }, - { - "time": "2024-06-20T16:58:14.908991-04:00", - "callContext": { - "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917095", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1718917097", - "usage": {}, - "chatResponse": { - "usage": {} - } - }, - { - "time": "2024-06-20T16:58:14.909126-04:00", - "callContext": { - "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - 
"type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null, - "toolName": "bob", - "parentID": "1718917095", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - }, - { - "time": "2024-06-20T16:58:14.909293-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-06-20T16:58:15.10962-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - 
"location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917098", - "usage": {}, - "chatRequest": { - "model": "gpt-4o-2024-05-13", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." - }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "call_PGLxooO6eBPt3eSEBCMkuWYN", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", - "name": "bob", - "tool_call_id": "call_PGLxooO6eBPt3eSEBCMkuWYN" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] - } - }, - { - "time": "2024-06-20T16:58:15.110087-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": 
"testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Waiting for model response..." - }, - { - "time": "2024-06-20T16:58:15.629792-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks" - }, - { - "time": "2024-06-20T16:58:15.629968-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks" - }, - { - "time": "2024-06-20T16:58:15.749206-04:00", - "callContext": { - "id": "1718917095", - "tool": { - 
"modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for" - }, - { - "time": "2024-06-20T16:58:15.749324-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking" - }, - { - "time": "2024-06-20T16:58:15.780062-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": 
"testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are" - }, - { - "time": "2024-06-20T16:58:15.780171-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you" - }, - { - "time": "2024-06-20T16:58:15.780293-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing" - }, - { - "time": "2024-06-20T16:58:15.780335-04:00", - 
"callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing" - }, - { - "time": "2024-06-20T16:58:15.780406-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm" - }, - { - "time": "2024-06-20T16:58:15.780445-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": 
"testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm" - }, - { - "time": "2024-06-20T16:58:15.780477-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing" - }, - { - "time": "2024-06-20T16:58:15.780511-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - 
"chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great" - }, - { - "time": "2024-06-20T16:58:15.816742-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" - }, - { - "time": "2024-06-20T16:58:15.816889-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly" - }, - { - "time": "2024-06-20T16:58:15.859699-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": 
[ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool" - }, - { - "time": "2024-06-20T16:58:15.859764-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T16:58:15.859784-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T16:58:15.859841-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-06-20T16:58:15.85986-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1718917098", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T16:58:15.860819-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1718917098", - "usage": { - "promptTokens": 178, - "completionTokens": 17, - "totalTokens": 195 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": { - "promptTokens": 178, - "completionTokens": 17, - "totalTokens": 195 - } - } - }, - { - "time": "2024-06-20T16:58:15.860872-04:00", - "callContext": { - "id": "1718917095", - "tool": { - "modelName": "gpt-4o-2024-05-13", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-06-20T16:58:15.860919-04:00", - "type": "runFinish", - "usage": {} - } -] diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json new file mode 100644 index 00000000..23fe9c9d --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json @@ -0,0 +1,596 @@ +[ + { + "time": "2024-08-23T10:47:06.912223-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-08-23T10:47:06.912642-04:00", + "callContext": { + "id": "1724424427", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": 
"testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-08-23T10:47:06.912689-04:00", + "callContext": { + "id": "1724424427", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1724424428", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-08-06", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
+ } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-08-23T10:47:07.441483-04:00", + "callContext": { + "id": "1724424427", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1724424428", + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + } + ], + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + } + } + }, + { + "time": "2024-08-23T10:47:07.441781-04:00", + "callContext": { + "id": "1724424427", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": 
"testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolSubCalls": { + "call_NjCLsJjY7PegUkXOqx5DA0o0": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\":\"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-08-23T10:47:07.441904-04:00", + "callContext": { + "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1724424427", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"how are you doing\"}" + }, + { + "time": "2024-08-23T10:47:07.443055-04:00", + "callContext": { + "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": 
"testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1724424427", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1724424429", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-08-23T10:47:07.453557-04:00", + "callContext": { + "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1724424427", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1724424429", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-08-23T10:47:07.453768-04:00", + "callContext": { + "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + 
"type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1724424427", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-08-23T10:47:07.453868-04:00", + "callContext": { + "id": "1724424427", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-08-23T10:47:07.453942-04:00", + "callContext": { + "id": "1724424427", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": 
"testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1724424430", + "usage": {}, + "chatRequest": { + "model": "gpt-4o-2024-08-06", + "messages": [ + { + "role": "system", + "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "type": "function", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + ] + }, + { + "role": "tool", + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", + "name": "bob", + "tool_call_id": "call_NjCLsJjY7PegUkXOqx5DA0o0" + } + ], + "temperature": 0, + "tools": [ + { + "type": "function", + "function": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "parameters": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + } + } + } + ] + } + }, + { + "time": "2024-08-23T10:47:07.950943-04:00", + "callContext": { + "id": "1724424427", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } 
+ ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1724424430", + "usage": { + "promptTokens": 178, + "completionTokens": 17, + "totalTokens": 195 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" + } + ], + "usage": { + "promptTokens": 178, + "completionTokens": 17, + "totalTokens": 195 + } + } + }, + { + "time": "2024-08-23T10:47:07.951019-04:00", + "callContext": { + "id": "1724424427", + "tool": { + "modelName": "gpt-4o-2024-08-06", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-08-23T10:47:07.951093-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-mini-2024-07-18-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-mini-2024-07-18-expected.json new file mode 100644 index 00000000..6354054d --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-mini-2024-07-18-expected.json @@ -0,0 +1,528 @@ +[ + { + "time": "2024-10-14T11:31:52.152013-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-10-14T11:31:52.152428-04:00", + "callContext": { + "id": "1728919913", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-10-14T11:31:52.152473-04:00", + "callContext": { + "id": "1728919913", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", 
+ "chatCompletionId": "1728919914", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T11:31:54.107362-04:00", + "callContext": { + "id": "1728919913", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728919914", + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "function": { + "name": "bob", + "arguments": "{\"question\":\"how are you doing\"}" + } + } + } + ], + "usage": { + "promptTokens": 138, + "completionTokens": 17, + "totalTokens": 155 + } + } + }, + { + "time": "2024-10-14T11:31:54.107585-04:00", + "callContext": { + "id": "1728919913", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + 
"inputContext": null + }, + "toolSubCalls": { + "call_AmrlGivMXtyAzbP85T7lwFN9": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\":\"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-10-14T11:31:54.107715-04:00", + "callContext": { + "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728919913", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\":\"how are you doing\"}" + }, + { + "time": "2024-10-14T11:31:54.108876-04:00", + "callContext": { + "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": 
"testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728919913", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1728919915", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T11:31:54.121327-04:00", + "callContext": { + "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728919913", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1728919915", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-10-14T11:31:54.121533-04:00", + "callContext": { + "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": 
"testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 7 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728919913", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-10-14T11:31:54.121596-04:00", + "callContext": { + "id": "1728919913", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-10-14T11:31:54.121802-04:00", + "callContext": { + "id": "1728919913", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": 
"testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728919916", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T11:31:55.746879-04:00", + "callContext": { + "id": "1728919913", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728919916", + "usage": { + "promptTokens": 178, + "completionTokens": 17, + "totalTokens": 195 + }, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": { + "promptTokens": 178, + "completionTokens": 17, + "totalTokens": 195 + } + } + }, + { + "time": "2024-10-14T11:31:55.746965-04:00", + "callContext": { + "id": "1728919913", + "tool": { + "modelName": "gpt-4o-mini-2024-07-18", + "internalPrompt": null, + "tools": [ + "bob" + ], + "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-10-14T11:31:55.747227-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json index 43df5fa7..6dc41a08 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json @@ -1,13 +1,13 @@ [ { - "time": "2024-07-03T10:53:05.993864-04:00", + "time": "2024-08-23T12:02:07.951538-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-07-03T10:53:05.99419-04:00", + "time": "2024-08-23T12:02:07.951742-04:00", "callContext": { - "id": "1720018386", + "id": "1724428928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -41,14 +41,14 @@ "usage": {} }, { - "time": "2024-07-03T10:53:06.366435-04:00", + "time": "2024-08-23T12:02:08.65216-04:00", "type": "runStart", "usage": {} }, { - "time": 
"2024-07-03T10:53:06.366952-04:00", + "time": "2024-08-23T12:02:08.65229-04:00", "callContext": { - "id": "1720018387", + "id": "1724428929", "tool": { "name": "Mistral La Plateforme Provider", "description": "Model provider for Mistral models running on La Plateforme", @@ -59,30 +59,30 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider", "toolMapping": { "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/37e5f870e195b896438c0bc35867403a42f82e89/tool.gpt:token" } ] }, "localTools": { - "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider" + "mistral la plateforme provider": 
"https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "cbf1aeb6db495b9b6223984651d29ac511d2748d" + "Revision": "aa4353e7d1de7e90e1078bfbc88526266e587a64" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64" }, "currentAgent": {}, "inputContext": null, @@ -93,9 +93,9 @@ "usage": {} }, { - "time": "2024-07-03T10:53:07.383203-04:00", + "time": "2024-08-23T12:02:09.659962-04:00", "callContext": { - "id": "1720018387", + "id": "1724428929", "tool": { "name": "Mistral La Plateforme Provider", "description": "Model provider for Mistral models running on La Plateforme", @@ -106,30 +106,30 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider", + "id": 
"https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider", "toolMapping": { "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/37e5f870e195b896438c0bc35867403a42f82e89/tool.gpt:token" } ] }, "localTools": { - "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt:Mistral La Plateforme Provider" + "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "cbf1aeb6db495b9b6223984651d29ac511d2748d" + "Revision": "aa4353e7d1de7e90e1078bfbc88526266e587a64" } }, - "workingDir": 
"https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/cbf1aeb6db495b9b6223984651d29ac511d2748d" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64" }, "currentAgent": {}, "inputContext": null, @@ -138,17 +138,17 @@ }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:10798" + "content": "http://127.0.0.1:10532" }, { - "time": "2024-07-03T10:53:07.383553-04:00", + "time": "2024-08-23T12:02:09.66007-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-07-03T10:53:07.383621-04:00", + "time": "2024-08-23T12:02:09.660117-04:00", "callContext": { - "id": "1720018386", + "id": "1724428928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -179,7 +179,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1720018388", + "chatCompletionId": "1724428930", "usage": {}, "chatRequest": { "model": "mistral-large-2402", @@ -211,122 +211,9 @@ } }, { - "time": "2024-07-03T10:53:07.384109-04:00", + "time": "2024-08-23T12:02:11.140706-04:00", "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018388", - "usage": {}, - "content": "Waiting for model 
response..." - }, - { - "time": "2024-07-03T10:53:07.670442-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018388", - "usage": {} - }, - { - "time": "2024-07-03T10:53:08.283965-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018388", - "usage": {}, - "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" - }, - { - "time": "2024-07-03T10:53:08.284275-04:00", - "callContext": { - "id": "1720018386", + "id": "1724428928", "tool": { "modelName": "mistral-large-2402 from 
github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -357,7 +244,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1720018388", + "chatCompletionId": "1724428930", "usage": { "promptTokens": 188, "completionTokens": 23, @@ -369,7 +256,7 @@ { "toolCall": { "index": 0, - "id": "K9FVJPqm6", + "id": "r1wQzUugN", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -385,9 +272,9 @@ } }, { - "time": "2024-07-03T10:53:08.285881-04:00", + "time": "2024-08-23T12:02:11.140968-04:00", "callContext": { - "id": "1720018386", + "id": "1724428928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -418,7 +305,7 @@ "inputContext": null }, "toolSubCalls": { - "K9FVJPqm6": { + "r1wQzUugN": { "toolID": "testdata/BobAsShell/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -427,9 +314,9 @@ "usage": {} }, { - "time": "2024-07-03T10:53:08.286454-04:00", + "time": "2024-08-23T12:02:11.141094-04:00", "callContext": { - "id": "K9FVJPqm6", + "id": "r1wQzUugN", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -444,7 +331,7 @@ }, "type": "object" }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", "id": "testdata/BobAsShell/test.gpt:bob", "localTools": { "": "testdata/BobAsShell/test.gpt:", @@ -459,7 +346,7 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1720018386", + "parentID": "1724428928", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", @@ -467,9 +354,9 @@ "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-07-03T10:53:08.287602-04:00", + "time": "2024-08-23T12:02:11.141978-04:00", "callContext": { - "id": 
"K9FVJPqm6", + "id": "r1wQzUugN", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -484,7 +371,7 @@ }, "type": "object" }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", "id": "testdata/BobAsShell/test.gpt:bob", "localTools": { "": "testdata/BobAsShell/test.gpt:", @@ -499,11 +386,11 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1720018386", + "parentID": "1724428928", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1720018389", + "chatCompletionId": "1724428931", "usage": {}, "chatRequest": { "model": "", @@ -511,50 +398,9 @@ } }, { - "time": "2024-07-03T10:53:08.295318-04:00", - "callContext": { - "id": "K9FVJPqm6", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1720018386", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callProgress", - "chatCompletionId": "1720018389", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - 
}, - { - "time": "2024-07-03T10:53:08.295753-04:00", + "time": "2024-08-23T12:02:11.153328-04:00", "callContext": { - "id": "K9FVJPqm6", + "id": "r1wQzUugN", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -569,7 +415,7 @@ }, "type": "object" }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", "id": "testdata/BobAsShell/test.gpt:bob", "localTools": { "": "testdata/BobAsShell/test.gpt:", @@ -584,20 +430,20 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1720018386", + "parentID": "1724428928", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1720018389", + "chatCompletionId": "1724428931", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-07-03T10:53:08.29586-04:00", + "time": "2024-08-23T12:02:11.153471-04:00", "callContext": { - "id": "K9FVJPqm6", + "id": "r1wQzUugN", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -612,7 +458,7 @@ }, "type": "object" }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${question}, I'm doing great fellow friendly AI tool!\"", + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", "id": "testdata/BobAsShell/test.gpt:bob", "localTools": { "": "testdata/BobAsShell/test.gpt:", @@ -627,7 +473,7 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1720018386", + "parentID": "1724428928", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", @@ -635,9 +481,9 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-07-03T10:53:08.295955-04:00", + "time": "2024-08-23T12:02:11.153544-04:00", "callContext": { - "id": 
"1720018386", + "id": "1724428928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -672,9 +518,9 @@ "usage": {} }, { - "time": "2024-07-03T10:53:08.467884-04:00", + "time": "2024-08-23T12:02:11.41447-04:00", "callContext": { - "id": "1720018386", + "id": "1724428928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -705,7 +551,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1720018390", + "chatCompletionId": "1724428932", "usage": {}, "chatRequest": { "model": "mistral-large-2402", @@ -719,7 +565,7 @@ "content": "", "tool_calls": [ { - "id": "K9FVJPqm6", + "id": "r1wQzUugN", "type": "function", "function": { "name": "bob", @@ -732,7 +578,7 @@ "role": "tool", "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", "name": "bob", - "tool_call_id": "K9FVJPqm6" + "tool_call_id": "r1wQzUugN" } ], "temperature": 0, @@ -757,958 +603,9 @@ } }, { - "time": "2024-07-03T10:53:08.468218-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Waiting for model response..." 
- }, - { - "time": "2024-07-03T10:53:08.755588-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {} - }, - { - "time": "2024-07-03T10:53:08.756052-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob" - }, - { - "time": "2024-07-03T10:53:08.792872-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob 
\"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said" - }, - { - "time": "2024-07-03T10:53:08.843651-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said," - }, - { - "time": "2024-07-03T10:53:08.890513-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": 
"testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"" - }, - { - "time": "2024-07-03T10:53:08.940592-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks" - }, - { - "time": "2024-07-03T10:53:08.979075-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - 
"inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for" - }, - { - "time": "2024-07-03T10:53:09.026193-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking" - }, - { - "time": "2024-07-03T10:53:09.082478-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how" - }, - { - "time": "2024-07-03T10:53:09.120629-04:00", - 
"callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are" - }, - { - "time": "2024-07-03T10:53:09.169561-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you" - }, - { - "time": "2024-07-03T10:53:09.216601-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - 
"bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing" - }, - { - "time": "2024-07-03T10:53:09.261113-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing," - }, - { - "time": "2024-07-03T10:53:09.306429-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - 
"reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I" - }, - { - "time": "2024-07-03T10:53:09.354951-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'" - }, - { - "time": "2024-07-03T10:53:09.401888-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": 
"testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'm" - }, - { - "time": "2024-07-03T10:53:09.448467-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'm doing" - }, - { - "time": "2024-07-03T10:53:09.493586-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - 
}, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great" - }, - { - "time": "2024-07-03T10:53:09.537745-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow" - }, - { - "time": "2024-07-03T10:53:09.585266-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - 
"content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly" - }, - { - "time": "2024-07-03T10:53:09.63097-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI" - }, - { - "time": "2024-07-03T10:53:09.678117-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool" - }, 
- { - "time": "2024-07-03T10:53:09.726398-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" - }, - { - "time": "2024-07-03T10:53:09.773449-04:00", - "callContext": { - "id": "1720018386", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callProgress", - "chatCompletionId": "1720018390", - "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" - }, - { - "time": "2024-07-03T10:53:09.774342-04:00", + "time": 
"2024-08-23T12:02:12.424283-04:00", "callContext": { - "id": "1720018386", + "id": "1724428928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -1739,7 +636,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1720018390", + "chatCompletionId": "1724428932", "usage": { "promptTokens": 247, "completionTokens": 22, @@ -1760,9 +657,9 @@ } }, { - "time": "2024-07-03T10:53:09.774431-04:00", + "time": "2024-08-23T12:02:12.42432-04:00", "callContext": { - "id": "1720018386", + "id": "1724428928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, @@ -1797,7 +694,7 @@ "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" }, { - "time": "2024-07-03T10:53:09.774496-04:00", + "time": "2024-08-23T12:02:12.424439-04:00", "type": "runFinish", "usage": {} } From 2bd60f8bfd0bff7c62f62b2c5b4d5a9de102bd99 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Mon, 14 Oct 2024 15:02:03 -0400 Subject: [PATCH 159/270] test: regenerate gpt-4o smoke test golden files `gpt-4o-2024-08-06` smoke tests are failing because of outdated golden files. Update the golden files to get smoke tests passing again. 
Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .../Bob/gpt-4o-2024-08-06-expected.json | 166 +++++------------- .../gpt-4o-2024-08-06-expected.json | 152 +++++----------- 2 files changed, 86 insertions(+), 232 deletions(-) diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json index ebce34be..3e89e10b 100644 --- a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json +++ b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json @@ -1,13 +1,13 @@ [ { - "time": "2024-08-23T11:30:01.577023-04:00", + "time": "2024-10-14T15:00:24.05439-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-08-23T11:30:01.577297-04:00", + "time": "2024-10-14T15:00:24.054825-04:00", "callContext": { - "id": "1724427002", + "id": "1728932425", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -41,9 +41,9 @@ "usage": {} }, { - "time": "2024-08-23T11:30:01.577466-04:00", + "time": "2024-10-14T15:00:24.054884-04:00", "callContext": { - "id": "1724427002", + "id": "1728932425", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -74,41 +74,17 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724427003", + "chatCompletionId": "1728932426", "usage": {}, "chatRequest": { - "model": "gpt-4o-2024-08-06", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
- } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-08-23T11:30:02.103572-04:00", + "time": "2024-10-14T15:00:25.474693-04:00", "callContext": { - "id": "1724427002", + "id": "1728932425", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -139,7 +115,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724427003", + "chatCompletionId": "1728932426", "usage": { "promptTokens": 138, "completionTokens": 17, @@ -151,7 +127,7 @@ { "toolCall": { "index": 0, - "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "id": "call_eVgG6VS6jTHyYl4nPzwWqYnT", "function": { "name": "bob", "arguments": "{\"question\":\"how are you doing\"}" @@ -167,9 +143,9 @@ } }, { - "time": "2024-08-23T11:30:02.103835-04:00", + "time": "2024-10-14T15:00:25.475061-04:00", "callContext": { - "id": "1724427002", + "id": "1728932425", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -200,7 +176,7 @@ "inputContext": null }, "toolSubCalls": { - "call_k5gUMCNtxH9gKglmCEkjfWAR": { + "call_eVgG6VS6jTHyYl4nPzwWqYnT": { "toolID": "testdata/Bob/test.gpt:bob", "input": "{\"question\":\"how are you doing\"}" } @@ -209,9 +185,9 @@ "usage": {} }, { - "time": "2024-08-23T11:30:02.103996-04:00", + "time": "2024-10-14T15:00:25.475224-04:00", "callContext": { - "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "id": "call_eVgG6VS6jTHyYl4nPzwWqYnT", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -241,16 +217,16 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724427002" + "parentID": "1728932425" }, "type": "callStart", "usage": {}, "content": "{\"question\":\"how are you doing\"}" }, { - "time": 
"2024-08-23T11:30:02.104177-04:00", + "time": "2024-10-14T15:00:25.475415-04:00", "callContext": { - "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "id": "call_eVgG6VS6jTHyYl4nPzwWqYnT", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -280,30 +256,20 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724427002" + "parentID": "1728932425" }, "type": "callChat", - "chatCompletionId": "1724427004", + "chatCompletionId": "1728932427", "usage": {}, "chatRequest": { - "model": "gpt-4o-2024-08-06", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nWhen asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"" - }, - { - "role": "user", - "content": "{\"question\":\"how are you doing\"}" - } - ], - "temperature": 0 + "model": "", + "messages": null } }, { - "time": "2024-08-23T11:30:02.567871-04:00", + "time": "2024-10-14T15:00:26.285181-04:00", "callContext": { - "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "id": "call_eVgG6VS6jTHyYl4nPzwWqYnT", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -333,10 +299,10 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724427002" + "parentID": "1728932425" }, "type": "callChat", - "chatCompletionId": "1724427004", + "chatCompletionId": "1728932427", "usage": { "promptTokens": 122, "completionTokens": 14, @@ -357,9 +323,9 @@ } }, { - "time": "2024-08-23T11:30:02.568006-04:00", + "time": "2024-10-14T15:00:26.285293-04:00", "callContext": { - "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", + "id": 
"call_eVgG6VS6jTHyYl4nPzwWqYnT", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -389,16 +355,16 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724427002" + "parentID": "1728932425" }, "type": "callFinish", "usage": {}, "content": "Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-08-23T11:30:02.568167-04:00", + "time": "2024-10-14T15:00:26.285444-04:00", "callContext": { - "id": "1724427002", + "id": "1728932425", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -433,9 +399,9 @@ "usage": {} }, { - "time": "2024-08-23T11:30:02.568274-04:00", + "time": "2024-10-14T15:00:26.285687-04:00", "callContext": { - "id": "1724427002", + "id": "1728932425", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -466,61 +432,17 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724427005", + "chatCompletionId": "1728932428", "usage": {}, "chatRequest": { - "model": "gpt-4o-2024-08-06", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
- }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "call_k5gUMCNtxH9gKglmCEkjfWAR", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!", - "name": "bob", - "tool_call_id": "call_k5gUMCNtxH9gKglmCEkjfWAR" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-08-23T11:30:03.052817-04:00", + "time": "2024-10-14T15:00:27.147422-04:00", "callContext": { - "id": "1724427002", + "id": "1728932425", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -551,7 +473,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724427005", + "chatCompletionId": "1728932428", "usage": { "promptTokens": 176, "completionTokens": 18, @@ -572,9 +494,9 @@ } }, { - "time": "2024-08-23T11:30:03.05287-04:00", + "time": "2024-10-14T15:00:27.147479-04:00", "callContext": { - "id": "1724427002", + "id": "1728932425", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -609,7 +531,7 @@ "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-08-23T11:30:03.052914-04:00", + "time": "2024-10-14T15:00:27.147523-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json index 23fe9c9d..694e021c 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json @@ -1,13 +1,13 @@ [ { - "time": "2024-08-23T10:47:06.912223-04:00", + "time": "2024-10-14T15:00:27.184787-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-08-23T10:47:06.912642-04:00", + "time": "2024-10-14T15:00:27.185109-04:00", "callContext": { - "id": "1724424427", + "id": "1728932428", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -41,9 +41,9 @@ "usage": {} }, { - "time": "2024-08-23T10:47:06.912689-04:00", + "time": "2024-10-14T15:00:27.185153-04:00", "callContext": { - "id": "1724424427", + "id": "1728932428", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -74,41 +74,17 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724424428", + "chatCompletionId": "1728932429", "usage": {}, "chatRequest": { - "model": "gpt-4o-2024-08-06", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
- } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-08-23T10:47:07.441483-04:00", + "time": "2024-10-14T15:00:28.310827-04:00", "callContext": { - "id": "1724424427", + "id": "1728932428", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -139,7 +115,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724424428", + "chatCompletionId": "1728932429", "usage": { "promptTokens": 138, "completionTokens": 17, @@ -151,7 +127,7 @@ { "toolCall": { "index": 0, - "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", "function": { "name": "bob", "arguments": "{\"question\":\"how are you doing\"}" @@ -167,9 +143,9 @@ } }, { - "time": "2024-08-23T10:47:07.441781-04:00", + "time": "2024-10-14T15:00:28.311081-04:00", "callContext": { - "id": "1724424427", + "id": "1728932428", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -200,7 +176,7 @@ "inputContext": null }, "toolSubCalls": { - "call_NjCLsJjY7PegUkXOqx5DA0o0": { + "call_c8HV8M6DRtYLd8MEHgaHAWtZ": { "toolID": "testdata/BobAsShell/test.gpt:bob", "input": "{\"question\":\"how are you doing\"}" } @@ -209,9 +185,9 @@ "usage": {} }, { - "time": "2024-08-23T10:47:07.441904-04:00", + "time": "2024-10-14T15:00:28.311186-04:00", "callContext": { - "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -241,7 +217,7 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724424427", + "parentID": "1728932428", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", @@ -249,9 +225,9 @@ 
"content": "{\"question\":\"how are you doing\"}" }, { - "time": "2024-08-23T10:47:07.443055-04:00", + "time": "2024-10-14T15:00:28.312343-04:00", "callContext": { - "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -281,11 +257,11 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724424427", + "parentID": "1728932428", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1724424429", + "chatCompletionId": "1728932430", "usage": {}, "chatRequest": { "model": "", @@ -293,9 +269,9 @@ } }, { - "time": "2024-08-23T10:47:07.453557-04:00", + "time": "2024-10-14T15:00:28.328987-04:00", "callContext": { - "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -325,20 +301,20 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724424427", + "parentID": "1728932428", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1724424429", + "chatCompletionId": "1728932430", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-08-23T10:47:07.453768-04:00", + "time": "2024-10-14T15:00:28.329156-04:00", "callContext": { - "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", + "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -368,7 +344,7 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724424427", + "parentID": "1728932428", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", @@ -376,9 +352,9 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-08-23T10:47:07.453868-04:00", + "time": "2024-10-14T15:00:28.32924-04:00", "callContext": { - 
"id": "1724424427", + "id": "1728932428", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -413,9 +389,9 @@ "usage": {} }, { - "time": "2024-08-23T10:47:07.453942-04:00", + "time": "2024-10-14T15:00:28.329393-04:00", "callContext": { - "id": "1724424427", + "id": "1728932428", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -446,61 +422,17 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724424430", + "chatCompletionId": "1728932431", "usage": {}, "chatRequest": { - "model": "gpt-4o-2024-08-06", - "messages": [ - { - "role": "system", - "content": "\nYou are task oriented system.\nYou receive input from a user, process the input from the given instructions, and then output the result.\nYour objective is to provide consistent and correct results.\nYou do not need to explain the steps taken, only provide the result to the given instructions.\nYou are referred to as a tool.\nYou don't move to the next step until you have a result.\n\nAsk Bob \"how are you doing\" and repeat his reply exactly." 
- }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "call_NjCLsJjY7PegUkXOqx5DA0o0", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\":\"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", - "name": "bob", - "tool_call_id": "call_NjCLsJjY7PegUkXOqx5DA0o0" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-08-23T10:47:07.950943-04:00", + "time": "2024-10-14T15:00:29.437703-04:00", "callContext": { - "id": "1724424427", + "id": "1728932428", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -531,7 +463,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724424430", + "chatCompletionId": "1728932431", "usage": { "promptTokens": 178, "completionTokens": 17, @@ -552,9 +484,9 @@ } }, { - "time": "2024-08-23T10:47:07.951019-04:00", + "time": "2024-10-14T15:00:29.437754-04:00", "callContext": { - "id": "1724424427", + "id": "1728932428", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, @@ -589,7 +521,7 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-08-23T10:47:07.951093-04:00", + "time": "2024-10-14T15:00:29.437838-04:00", "type": "runFinish", "usage": {} } From 1292452875261b37f2b673b9e949daa417e9116e Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Mon, 14 Oct 2024 17:44:07 -0400 Subject: [PATCH 160/270] test: tweak smoke test tool bodies to standardize response text Tweak the tool bodies for smoke test GPTScripts to reduce ambiguity in the response. This prevents models -- like gpt-4o -- from doing things like failing to interpolate strings consistently between runs. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/tests/smoke/testdata/Bob/test.gpt | 4 ++-- pkg/tests/smoke/testdata/BobAsShell/test.gpt | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/tests/smoke/testdata/Bob/test.gpt b/pkg/tests/smoke/testdata/Bob/test.gpt index 20f533e2..535790df 100644 --- a/pkg/tests/smoke/testdata/Bob/test.gpt +++ b/pkg/tests/smoke/testdata/Bob/test.gpt @@ -1,10 +1,10 @@ tools: bob -Ask Bob "how are you doing" and repeat his reply exactly. +Ask Bob "how are you doing" and repeat the response text exactly as given without saying anything else. --- name: bob description: I'm Bob, a friendly guy. args: question: The question to ask Bob. -When asked how I am doing, respond with exactly "Thanks for asking "${QUESTION}", I'm doing great fellow friendly AI tool!" +When asked how I am doing, respond with the following exactly: "Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!" with ${QUESTION} replaced with the question text as given. diff --git a/pkg/tests/smoke/testdata/BobAsShell/test.gpt b/pkg/tests/smoke/testdata/BobAsShell/test.gpt index a0edb9c4..6fdc514b 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/test.gpt +++ b/pkg/tests/smoke/testdata/BobAsShell/test.gpt @@ -1,7 +1,6 @@ - tools: bob -Ask Bob "how are you doing" and repeat his reply exactly. 
+Ask Bob "how are you doing" and repeat the response text exactly as given without saying anything else. --- name: bob From b7d31f2c97bdddf027fd99b28dd93f7f9e86171a Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Mon, 14 Oct 2024 17:50:36 -0400 Subject: [PATCH 161/270] test: regenerate golden files for smoke tests Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .../claude-3-5-sonnet-20240620-expected.json | 122 +++++----- .../Bob/gpt-4o-2024-08-06-expected.json | 140 +++++------ .../Bob/gpt-4o-mini-2024-07-18-expected.json | 140 +++++------ .../Bob/mistral-large-2402-expected.json | 154 ++++++------- .../claude-3-5-sonnet-20240620-expected.json | 198 ++++++---------- .../gpt-4o-2024-08-06-expected.json | 116 +++++----- .../gpt-4o-mini-2024-07-18-expected.json | 116 +++++----- .../mistral-large-2402-expected.json | 218 ++++++------------ 8 files changed, 534 insertions(+), 670 deletions(-) diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json index 1b6342c4..52d975c5 100644 --- a/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json +++ b/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-10-14T12:30:37.766793-04:00", + "time": "2024-10-14T18:59:12.228692-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-10-14T12:30:37.767629-04:00", + "time": "2024-10-14T18:59:12.229038-04:00", "callContext": { - "id": "1728923438", + "id": "1728946753", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": 
"testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -41,14 +41,14 @@ "usage": {} }, { - "time": "2024-10-14T12:30:38.791552-04:00", + "time": "2024-10-14T18:59:13.520962-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-10-14T12:30:38.791851-04:00", + "time": "2024-10-14T18:59:13.521331-04:00", "callContext": { - "id": "1728923439", + "id": "1728946754", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -93,9 +93,9 @@ "usage": {} }, { - "time": "2024-10-14T12:30:39.80734-04:00", + "time": "2024-10-14T18:59:14.541348-04:00", "callContext": { - "id": "1728923439", + "id": "1728946754", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -138,24 +138,24 @@ }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:10940" + "content": "http://127.0.0.1:10258" }, { - "time": "2024-10-14T12:30:39.80752-04:00", + "time": "2024-10-14T18:59:14.541518-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-10-14T12:30:39.807592-04:00", + "time": "2024-10-14T18:59:14.541566-04:00", "callContext": { - "id": "1728923438", + "id": "1728946753", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -179,7 +179,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728923440", + "chatCompletionId": "1728946755", "usage": {}, "chatRequest": { "model": "", @@ -187,16 +187,16 @@ } }, { - "time": "2024-10-14T12:30:41.840024-04:00", + "time": "2024-10-14T18:59:17.304351-04:00", "callContext": { - "id": "1728923438", + "id": 
"1728946753", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -220,7 +220,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728923440", + "chatCompletionId": "1728946755", "usage": {}, "chatResponse": { "role": "assistant", @@ -228,7 +228,7 @@ { "toolCall": { "index": 0, - "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -240,16 +240,16 @@ } }, { - "time": "2024-10-14T12:30:41.840092-04:00", + "time": "2024-10-14T18:59:17.304441-04:00", "callContext": { - "id": "1728923438", + "id": "1728946753", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -273,7 +273,7 @@ "inputContext": null }, "toolSubCalls": { - "toolu_01B2uNGCcfcK9K5oGmBeix8b": { + "toolu_01KtYnAwnQ2cyRieDu98Jopb": { "toolID": "testdata/Bob/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -282,9 +282,9 @@ "usage": {} }, { - "time": "2024-10-14T12:30:41.840134-04:00", + "time": "2024-10-14T18:59:17.304485-04:00", "callContext": { - "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -299,7 +299,7 @@ }, "type": 
"object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -314,16 +314,16 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728923438" + "parentID": "1728946753" }, "type": "callStart", "usage": {}, "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-10-14T12:30:42.553374-04:00", + "time": "2024-10-14T18:59:17.394841-04:00", "callContext": { - "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -338,7 +338,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -353,10 +353,10 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728923438" + "parentID": "1728946753" }, "type": "callChat", - "chatCompletionId": "1728923441", + "chatCompletionId": "1728946756", "usage": {}, "chatRequest": { "model": "", @@ -364,9 +364,9 @@ } }, { - "time": "2024-10-14T12:30:43.320476-04:00", + "time": "2024-10-14T18:59:18.202926-04:00", "callContext": { - "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -381,7 +381,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -396,25 +396,25 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728923438" + "parentID": "1728946753" }, "type": "callChat", - "chatCompletionId": "1728923441", + "chatCompletionId": "1728946756", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
} ], "usage": {} } }, { - "time": "2024-10-14T12:30:43.320527-04:00", + "time": "2024-10-14T18:59:18.202988-04:00", "callContext": { - "id": "toolu_01B2uNGCcfcK9K5oGmBeix8b", + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -429,7 +429,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -444,23 +444,23 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728923438" + "parentID": "1728946753" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-10-14T12:30:43.320565-04:00", + "time": "2024-10-14T18:59:18.203022-04:00", "callContext": { - "id": "1728923438", + "id": "1728946753", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -488,16 +488,16 @@ "usage": {} }, { - "time": "2024-10-14T12:30:43.676243-04:00", + "time": "2024-10-14T18:59:18.295164-04:00", "callContext": { - "id": "1728923438", + "id": "1728946753", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -521,7 +521,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728923442", + "chatCompletionId": "1728946757", "usage": {}, "chatRequest": { "model": "", @@ -529,16 +529,16 @@ } }, { - "time": "2024-10-14T12:30:45.165846-04:00", + "time": "2024-10-14T18:59:19.737028-04:00", "callContext": { - "id": "1728923438", + "id": "1728946753", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -562,29 +562,29 @@ 
"inputContext": null }, "type": "callChat", - "chatCompletionId": "1728923442", + "chatCompletionId": "1728946757", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob's reply was: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" } ], "usage": {} } }, { - "time": "2024-10-14T12:30:45.165883-04:00", + "time": "2024-10-14T18:59:19.737045-04:00", "callContext": { - "id": "1728923438", + "id": "1728946753", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -609,10 +609,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob's reply was: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-10-14T12:30:45.165904-04:00", + "time": "2024-10-14T18:59:19.737061-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json index 3e89e10b..67d9742b 100644 --- a/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json +++ b/pkg/tests/smoke/testdata/Bob/gpt-4o-2024-08-06-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-10-14T15:00:24.05439-04:00", + "time": "2024-10-14T18:59:07.751937-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-10-14T15:00:24.054825-04:00", + "time": "2024-10-14T18:59:07.752324-04:00", "callContext": { - "id": "1728932425", + "id": "1728946748", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -41,16 +41,16 @@ "usage": {} }, { - "time": "2024-10-14T15:00:24.054884-04:00", + "time": "2024-10-14T18:59:07.75237-04:00", "callContext": { - "id": "1728932425", + "id": "1728946748", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -74,7 +74,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728932426", + "chatCompletionId": "1728946749", "usage": {}, "chatRequest": { "model": "", @@ -82,16 +82,16 @@ } }, { - "time": "2024-10-14T15:00:25.474693-04:00", + "time": "2024-10-14T18:59:08.602251-04:00", "callContext": { - "id": "1728932425", + "id": "1728946748", "tool": 
{ "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -115,11 +115,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728932426", + "chatCompletionId": "1728946749", "usage": { - "promptTokens": 138, + "promptTokens": 145, "completionTokens": 17, - "totalTokens": 155 + "totalTokens": 162 }, "chatResponse": { "role": "assistant", @@ -127,7 +127,7 @@ { "toolCall": { "index": 0, - "id": "call_eVgG6VS6jTHyYl4nPzwWqYnT", + "id": "call_1nuYJNsE6SIQrXe4wyoMb8sh", "function": { "name": "bob", "arguments": "{\"question\":\"how are you doing\"}" @@ -136,23 +136,23 @@ } ], "usage": { - "promptTokens": 138, + "promptTokens": 145, "completionTokens": 17, - "totalTokens": 155 + "totalTokens": 162 } } }, { - "time": "2024-10-14T15:00:25.475061-04:00", + "time": "2024-10-14T18:59:08.602522-04:00", "callContext": { - "id": "1728932425", + "id": "1728946748", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -176,7 +176,7 @@ "inputContext": null }, "toolSubCalls": { - "call_eVgG6VS6jTHyYl4nPzwWqYnT": { + "call_1nuYJNsE6SIQrXe4wyoMb8sh": { "toolID": "testdata/Bob/test.gpt:bob", "input": "{\"question\":\"how are you doing\"}" } @@ -185,9 +185,9 @@ "usage": {} }, { - "time": "2024-10-14T15:00:25.475224-04:00", + "time": "2024-10-14T18:59:08.602683-04:00", "callContext": { - "id": "call_eVgG6VS6jTHyYl4nPzwWqYnT", + "id": "call_1nuYJNsE6SIQrXe4wyoMb8sh", "tool": { "name": 
"bob", "description": "I'm Bob, a friendly guy.", @@ -202,7 +202,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -217,16 +217,16 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728932425" + "parentID": "1728946748" }, "type": "callStart", "usage": {}, "content": "{\"question\":\"how are you doing\"}" }, { - "time": "2024-10-14T15:00:25.475415-04:00", + "time": "2024-10-14T18:59:08.602885-04:00", "callContext": { - "id": "call_eVgG6VS6jTHyYl4nPzwWqYnT", + "id": "call_1nuYJNsE6SIQrXe4wyoMb8sh", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -241,7 +241,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -256,10 +256,10 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728932425" + "parentID": "1728946748" }, "type": "callChat", - "chatCompletionId": "1728932427", + "chatCompletionId": "1728946750", "usage": {}, "chatRequest": { "model": "", @@ -267,9 +267,9 @@ } }, { - "time": "2024-10-14T15:00:26.285181-04:00", + "time": "2024-10-14T18:59:09.291815-04:00", "callContext": { - "id": "call_eVgG6VS6jTHyYl4nPzwWqYnT", + "id": "call_1nuYJNsE6SIQrXe4wyoMb8sh", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -284,7 +284,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -299,33 +299,33 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728932425" + "parentID": "1728946748" }, "type": "callChat", - "chatCompletionId": "1728932427", + "chatCompletionId": "1728946750", "usage": { - "promptTokens": 122, - "completionTokens": 14, - "totalTokens": 136 + "promptTokens": 137, + "completionTokens": 16, + "totalTokens": 153 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
} ], "usage": { - "promptTokens": 122, - "completionTokens": 14, - "totalTokens": 136 + "promptTokens": 137, + "completionTokens": 16, + "totalTokens": 153 } } }, { - "time": "2024-10-14T15:00:26.285293-04:00", + "time": "2024-10-14T18:59:09.291883-04:00", "callContext": { - "id": "call_eVgG6VS6jTHyYl4nPzwWqYnT", + "id": "call_1nuYJNsE6SIQrXe4wyoMb8sh", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -340,7 +340,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -355,23 +355,23 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728932425" + "parentID": "1728946748" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-10-14T15:00:26.285444-04:00", + "time": "2024-10-14T18:59:09.291934-04:00", "callContext": { - "id": "1728932425", + "id": "1728946748", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -399,16 +399,16 @@ "usage": {} }, { - "time": "2024-10-14T15:00:26.285687-04:00", + "time": "2024-10-14T18:59:09.292559-04:00", "callContext": { - "id": "1728932425", + "id": "1728946748", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -432,7 +432,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728932428", + "chatCompletionId": "1728946751", "usage": {}, "chatRequest": { "model": "", @@ -440,16 +440,16 @@ } }, { - "time": "2024-10-14T15:00:27.147422-04:00", + "time": "2024-10-14T18:59:10.065468-04:00", "callContext": { - "id": "1728932425", + "id": "1728946748", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -473,37 +473,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728932428", + "chatCompletionId": "1728946751", "usage": { - "promptTokens": 176, - "completionTokens": 18, - "totalTokens": 194 + 
"promptTokens": 185, + "completionTokens": 17, + "totalTokens": 202 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" } ], "usage": { - "promptTokens": 176, - "completionTokens": 18, - "totalTokens": 194 + "promptTokens": 185, + "completionTokens": 17, + "totalTokens": 202 } } }, { - "time": "2024-10-14T15:00:27.147479-04:00", + "time": "2024-10-14T18:59:10.065547-04:00", "callContext": { - "id": "1728932425", + "id": "1728946748", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -528,10 +528,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-10-14T15:00:27.147523-04:00", + "time": "2024-10-14T18:59:10.065614-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/Bob/gpt-4o-mini-2024-07-18-expected.json b/pkg/tests/smoke/testdata/Bob/gpt-4o-mini-2024-07-18-expected.json index 3c1aebc9..e01dec10 100644 --- a/pkg/tests/smoke/testdata/Bob/gpt-4o-mini-2024-07-18-expected.json +++ b/pkg/tests/smoke/testdata/Bob/gpt-4o-mini-2024-07-18-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-10-14T11:31:46.97662-04:00", + "time": "2024-10-14T18:59:01.651525-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-10-14T11:31:46.977148-04:00", + "time": "2024-10-14T18:59:01.651887-04:00", "callContext": { - "id": "1728919907", + "id": "1728946742", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -41,16 +41,16 @@ "usage": {} }, { - "time": "2024-10-14T11:31:46.977209-04:00", + "time": "2024-10-14T18:59:01.651929-04:00", "callContext": { - "id": "1728919907", + "id": "1728946742", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -74,7 +74,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728919908", + "chatCompletionId": "1728946743", "usage": {}, "chatRequest": { "model": "", @@ -82,16 +82,16 @@ } }, { - "time": "2024-10-14T11:31:49.170338-04:00", + "time": "2024-10-14T18:59:05.893238-04:00", "callContext": { - "id": 
"1728919907", + "id": "1728946742", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -115,11 +115,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728919908", + "chatCompletionId": "1728946743", "usage": { - "promptTokens": 138, + "promptTokens": 145, "completionTokens": 17, - "totalTokens": 155 + "totalTokens": 162 }, "chatResponse": { "role": "assistant", @@ -127,7 +127,7 @@ { "toolCall": { "index": 0, - "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "id": "call_mcJFw1oe8YYFRPD1ZvFR4uZb", "function": { "name": "bob", "arguments": "{\"question\":\"how are you doing\"}" @@ -136,23 +136,23 @@ } ], "usage": { - "promptTokens": 138, + "promptTokens": 145, "completionTokens": 17, - "totalTokens": 155 + "totalTokens": 162 } } }, { - "time": "2024-10-14T11:31:49.170563-04:00", + "time": "2024-10-14T18:59:05.893515-04:00", "callContext": { - "id": "1728919907", + "id": "1728946742", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -176,7 +176,7 @@ "inputContext": null }, "toolSubCalls": { - "call_qBi5ZvQ2pFwXdENJXmuCb6Oy": { + "call_mcJFw1oe8YYFRPD1ZvFR4uZb": { "toolID": "testdata/Bob/test.gpt:bob", "input": "{\"question\":\"how are you doing\"}" } @@ -185,9 +185,9 @@ "usage": {} }, { - "time": "2024-10-14T11:31:49.171155-04:00", + "time": "2024-10-14T18:59:05.893776-04:00", "callContext": { - "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "id": 
"call_mcJFw1oe8YYFRPD1ZvFR4uZb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -202,7 +202,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -217,16 +217,16 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728919907" + "parentID": "1728946742" }, "type": "callStart", "usage": {}, "content": "{\"question\":\"how are you doing\"}" }, { - "time": "2024-10-14T11:31:49.171395-04:00", + "time": "2024-10-14T18:59:05.894101-04:00", "callContext": { - "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "id": "call_mcJFw1oe8YYFRPD1ZvFR4uZb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -241,7 +241,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -256,10 +256,10 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728919907" + "parentID": "1728946742" }, "type": "callChat", - "chatCompletionId": "1728919909", + "chatCompletionId": "1728946744", "usage": {}, "chatRequest": { "model": "", @@ -267,9 +267,9 @@ } }, { - "time": "2024-10-14T11:31:50.446571-04:00", + "time": "2024-10-14T18:59:08.315365-04:00", "callContext": { - "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "id": "call_mcJFw1oe8YYFRPD1ZvFR4uZb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -284,7 +284,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -299,33 +299,33 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728919907" + "parentID": "1728946742" }, "type": "callChat", - "chatCompletionId": "1728919909", + "chatCompletionId": "1728946744", "usage": { - "promptTokens": 122, - "completionTokens": 17, - "totalTokens": 139 + "promptTokens": 137, + "completionTokens": 16, + "totalTokens": 153 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
} ], "usage": { - "promptTokens": 122, - "completionTokens": 17, - "totalTokens": 139 + "promptTokens": 137, + "completionTokens": 16, + "totalTokens": 153 } } }, { - "time": "2024-10-14T11:31:50.446692-04:00", + "time": "2024-10-14T18:59:08.315556-04:00", "callContext": { - "id": "call_qBi5ZvQ2pFwXdENJXmuCb6Oy", + "id": "call_mcJFw1oe8YYFRPD1ZvFR4uZb", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -340,7 +340,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -355,23 +355,23 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728919907" + "parentID": "1728946742" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-10-14T11:31:50.446773-04:00", + "time": "2024-10-14T18:59:08.315661-04:00", "callContext": { - "id": "1728919907", + "id": "1728946742", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -399,16 +399,16 @@ "usage": {} }, { - "time": "2024-10-14T11:31:50.446939-04:00", + "time": "2024-10-14T18:59:08.315834-04:00", "callContext": { - "id": "1728919907", + "id": "1728946742", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -432,7 +432,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728919910", + "chatCompletionId": "1728946745", "usage": {}, "chatRequest": { "model": "", @@ -440,16 +440,16 @@ } }, { - "time": "2024-10-14T11:31:52.118055-04:00", + "time": "2024-10-14T18:59:09.27109-04:00", "callContext": { - "id": "1728919907", + "id": "1728946742", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -473,37 +473,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728919910", + "chatCompletionId": "1728946745", "usage": { - "promptTokens": 179, - "completionTokens": 18, - 
"totalTokens": 197 + "promptTokens": 185, + "completionTokens": 17, + "totalTokens": 202 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" } ], "usage": { - "promptTokens": 179, - "completionTokens": 18, - "totalTokens": 197 + "promptTokens": 185, + "completionTokens": 17, + "totalTokens": 202 } } }, { - "time": "2024-10-14T11:31:52.118196-04:00", + "time": "2024-10-14T18:59:09.271259-04:00", "callContext": { - "id": "1728919907", + "id": "1728946742", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -528,10 +528,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-10-14T11:31:52.118256-04:00", + "time": "2024-10-14T18:59:09.271406-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json index c2971d57..ca392c03 100644 --- a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json +++ b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-10-14T12:20:24.700667-04:00", + "time": "2024-10-14T18:59:18.199427-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-10-14T12:20:24.701071-04:00", + "time": "2024-10-14T18:59:18.19975-04:00", "callContext": { - "id": "1728922825", + "id": "1728946759", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -41,14 +41,14 @@ "usage": {} }, { - "time": "2024-10-14T12:20:25.518655-04:00", + "time": "2024-10-14T18:59:19.063682-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-10-14T12:20:25.518946-04:00", + "time": "2024-10-14T18:59:19.063951-04:00", "callContext": { - "id": "1728922826", + "id": "1728946760", "tool": { "name": "Mistral La Plateforme Provider", "description": "Model provider for Mistral models running on La Plateforme", @@ -93,9 +93,9 @@ "usage": {} }, { - "time": "2024-10-14T12:20:26.534361-04:00", + "time": "2024-10-14T18:59:20.078127-04:00", "callContext": { - "id": "1728922826", + "id": "1728946760", "tool": { "name": "Mistral La Plateforme Provider", "description": "Model provider for Mistral models running on La Plateforme", @@ -138,24 +138,24 @@ }, "type": "callFinish", "usage": {}, - "content": 
"http://127.0.0.1:11149" + "content": "http://127.0.0.1:10912" }, { - "time": "2024-10-14T12:20:26.534546-04:00", + "time": "2024-10-14T18:59:20.078235-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-10-14T12:20:26.534598-04:00", + "time": "2024-10-14T18:59:20.078285-04:00", "callContext": { - "id": "1728922825", + "id": "1728946759", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -179,7 +179,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728922827", + "chatCompletionId": "1728946761", "usage": {}, "chatRequest": { "model": "", @@ -187,16 +187,16 @@ } }, { - "time": "2024-10-14T12:20:27.793767-04:00", + "time": "2024-10-14T18:59:21.857633-04:00", "callContext": { - "id": "1728922825", + "id": "1728946759", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -220,11 +220,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728922827", + "chatCompletionId": "1728946761", "usage": { - "promptTokens": 188, + "promptTokens": 195, "completionTokens": 23, - "totalTokens": 211 + "totalTokens": 218 }, "chatResponse": { "role": "assistant", @@ -232,7 +232,7 @@ { "toolCall": { "index": 0, - "id": "jSMVlVVyb", + "id": "pIj9ljPqt", "function": { "name": "bob", "arguments": "{\"question\": \"how 
are you doing\"}" @@ -241,23 +241,23 @@ } ], "usage": { - "promptTokens": 188, + "promptTokens": 195, "completionTokens": 23, - "totalTokens": 211 + "totalTokens": 218 } } }, { - "time": "2024-10-14T12:20:27.793996-04:00", + "time": "2024-10-14T18:59:21.858005-04:00", "callContext": { - "id": "1728922825", + "id": "1728946759", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -281,7 +281,7 @@ "inputContext": null }, "toolSubCalls": { - "jSMVlVVyb": { + "pIj9ljPqt": { "toolID": "testdata/Bob/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -290,9 +290,9 @@ "usage": {} }, { - "time": "2024-10-14T12:20:27.794146-04:00", + "time": "2024-10-14T18:59:21.858212-04:00", "callContext": { - "id": "jSMVlVVyb", + "id": "pIj9ljPqt", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -307,7 +307,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -322,16 +322,16 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728922825" + "parentID": "1728946759" }, "type": "callStart", "usage": {}, "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-10-14T12:20:28.306793-04:00", + "time": "2024-10-14T18:59:22.381191-04:00", "callContext": { - "id": "jSMVlVVyb", + "id": "pIj9ljPqt", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -346,7 +346,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -361,10 +361,10 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728922825" + "parentID": "1728946759" }, "type": "callChat", - "chatCompletionId": "1728922828", + "chatCompletionId": "1728946762", "usage": {}, "chatRequest": { "model": "", @@ -372,9 +372,9 @@ } }, { - "time": "2024-10-14T12:20:29.060571-04:00", + "time": "2024-10-14T18:59:23.160275-04:00", "callContext": { - "id": "jSMVlVVyb", + "id": "pIj9ljPqt", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -389,7 +389,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -404,33 +404,33 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728922825" + "parentID": "1728946759" }, "type": "callChat", - "chatCompletionId": "1728922828", + "chatCompletionId": "1728946762", "usage": { - "promptTokens": 145, - "completionTokens": 19, - "totalTokens": 164 + "promptTokens": 163, + "completionTokens": 18, + "totalTokens": 181 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" } ], "usage": { - "promptTokens": 145, - "completionTokens": 19, - "totalTokens": 164 + "promptTokens": 163, + "completionTokens": 18, + "totalTokens": 181 } } }, { - "time": "2024-10-14T12:20:29.060766-04:00", + "time": "2024-10-14T18:59:23.160433-04:00", "callContext": { - "id": "jSMVlVVyb", + "id": "pIj9ljPqt", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -445,7 +445,7 @@ }, "type": "object" }, - "instructions": "When asked how I am doing, respond with exactly \"Thanks for asking \"${QUESTION}\", I'm doing great fellow friendly AI tool!\"", + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", "id": "testdata/Bob/test.gpt:bob", "localTools": { "": "testdata/Bob/test.gpt:", @@ -460,23 +460,23 @@ "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728922825" + "parentID": "1728946759" }, "type": "callFinish", "usage": {}, - "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
+ "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" }, { - "time": "2024-10-14T12:20:29.060906-04:00", + "time": "2024-10-14T18:59:23.160522-04:00", "callContext": { - "id": "1728922825", + "id": "1728946759", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -504,16 +504,16 @@ "usage": {} }, { - "time": "2024-10-14T12:20:29.203429-04:00", + "time": "2024-10-14T18:59:23.531261-04:00", "callContext": { - "id": "1728922825", + "id": "1728946759", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -537,7 +537,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728922829", + "chatCompletionId": "1728946763", "usage": {}, "chatRequest": { "model": "", @@ -545,16 +545,16 @@ } }, { - "time": "2024-10-14T12:20:30.272631-04:00", + "time": "2024-10-14T18:59:24.303745-04:00", "callContext": { - "id": "1728922825", + "id": "1728946759", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": 
"testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -578,37 +578,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728922829", + "chatCompletionId": "1728946763", "usage": { - "promptTokens": 246, - "completionTokens": 23, - "totalTokens": 269 + "promptTokens": 252, + "completionTokens": 18, + "totalTokens": 270 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" } ], "usage": { - "promptTokens": 246, - "completionTokens": 23, - "totalTokens": 269 + "promptTokens": 252, + "completionTokens": 18, + "totalTokens": 270 } } }, { - "time": "2024-10-14T12:20:30.27277-04:00", + "time": "2024-10-14T18:59:24.303903-04:00", "callContext": { - "id": "1728922825", + "id": "1728946759", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/Bob/test.gpt:", "toolMapping": { "bob": [ @@ -633,10 +633,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said, \"Thanks for asking 'how are you doing', I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-10-14T12:20:30.27283-04:00", + "time": "2024-10-14T18:59:24.303961-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json index 3a2838ab..22fe9514 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-08-23T12:02:17.549859-04:00", + "time": "2024-10-14T17:38:39.518668-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-08-23T12:02:17.55023-04:00", + "time": "2024-10-14T17:38:39.519079-04:00", "callContext": { - "id": "1724428938", + "id": "1728941920", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -41,14 +41,14 @@ "usage": {} }, { - "time": "2024-08-23T12:02:18.283201-04:00", + "time": "2024-10-14T17:38:40.155982-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-08-23T12:02:18.28339-04:00", + "time": "2024-10-14T17:38:40.156405-04:00", "callContext": { - "id": "1724428939", + "id": "1728941921", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -64,7 +64,7 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as 
github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/37e5f870e195b896438c0bc35867403a42f82e89/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" } ] }, @@ -93,9 +93,9 @@ "usage": {} }, { - "time": "2024-08-23T12:02:19.295369-04:00", + "time": "2024-10-14T17:38:41.173004-04:00", "callContext": { - "id": "1724428939", + "id": "1728941921", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", @@ -111,7 +111,7 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/37e5f870e195b896438c0bc35867403a42f82e89/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" } ] }, @@ -138,24 +138,24 @@ }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:10739" + "content": "http://127.0.0.1:10787" }, { - "time": "2024-08-23T12:02:19.295542-04:00", + "time": "2024-10-14T17:38:41.173175-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-08-23T12:02:19.295604-04:00", + "time": "2024-10-14T17:38:41.173247-04:00", "callContext": { - "id": "1724428938", + "id": "1728941920", "tool": { "modelName": "claude-3-5-sonnet-20240620 from 
github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -179,48 +179,24 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724428940", + "chatCompletionId": "1728941922", "usage": {}, "chatRequest": { - "model": "claude-3-5-sonnet-20240620", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-08-23T12:02:21.136785-04:00", + "time": "2024-10-14T17:38:43.937061-04:00", "callContext": { - "id": "1724428938", + "id": "1728941920", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -244,7 +220,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724428940", + "chatCompletionId": "1728941922", "usage": {}, "chatResponse": { "role": "assistant", @@ -252,7 +228,7 @@ { "toolCall": { "index": 0, - "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", "function": { "name": "bob", "arguments": "{\"question\": 
\"how are you doing\"}" @@ -264,16 +240,16 @@ } }, { - "time": "2024-08-23T12:02:21.136848-04:00", + "time": "2024-10-14T17:38:43.937155-04:00", "callContext": { - "id": "1724428938", + "id": "1728941920", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -297,7 +273,7 @@ "inputContext": null }, "toolSubCalls": { - "toolu_01XzHFJpwHD8hzowvAqGgfSz": { + "toolu_01PQYSGxbwRLw8XuUUkgKvbe": { "toolID": "testdata/BobAsShell/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -306,9 +282,9 @@ "usage": {} }, { - "time": "2024-08-23T12:02:21.136877-04:00", + "time": "2024-10-14T17:38:43.937193-04:00", "callContext": { - "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -331,14 +307,14 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724428938", + "parentID": "1728941920", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", @@ -346,9 +322,9 @@ "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-08-23T12:02:21.137223-04:00", + "time": "2024-10-14T17:38:43.938264-04:00", "callContext": { - "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -371,18 +347,18 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": 
"testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724428938", + "parentID": "1728941920", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1724428941", + "chatCompletionId": "1728941923", "usage": {}, "chatRequest": { "model": "", @@ -390,9 +366,9 @@ } }, { - "time": "2024-08-23T12:02:21.142624-04:00", + "time": "2024-10-14T17:38:43.943625-04:00", "callContext": { - "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -415,27 +391,27 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724428938", + "parentID": "1728941920", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1724428941", + "chatCompletionId": "1728941923", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-08-23T12:02:21.142691-04:00", + "time": "2024-10-14T17:38:43.943703-04:00", "callContext": { - "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -458,14 +434,14 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724428938", + "parentID": "1728941920", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", @@ -473,16 +449,16 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-08-23T12:02:21.142723-04:00", + "time": "2024-10-14T17:38:43.943766-04:00", "callContext": { - "id": "1724428938", + "id": "1728941920", 
"tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -510,16 +486,16 @@ "usage": {} }, { - "time": "2024-08-23T12:02:21.371211-04:00", + "time": "2024-10-14T17:38:44.494388-04:00", "callContext": { - "id": "1724428938", + "id": "1728941920", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -543,68 +519,24 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724428942", + "chatCompletionId": "1728941924", "usage": {}, "chatRequest": { - "model": "claude-3-5-sonnet-20240620", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
- }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "toolu_01XzHFJpwHD8hzowvAqGgfSz", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", - "name": "bob", - "tool_call_id": "toolu_01XzHFJpwHD8hzowvAqGgfSz" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-08-23T12:02:23.102371-04:00", + "time": "2024-10-14T17:38:45.659797-04:00", "callContext": { - "id": "1724428938", + "id": "1728941920", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -628,29 +560,29 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724428942", + "chatCompletionId": "1728941924", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob's reply was: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
} ], "usage": {} } }, { - "time": "2024-08-23T12:02:23.102422-04:00", + "time": "2024-10-14T17:38:45.659891-04:00", "callContext": { - "id": "1724428938", + "id": "1728941920", "tool": { "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -675,10 +607,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob's reply was: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-08-23T12:02:23.102441-04:00", + "time": "2024-10-14T17:38:45.659921-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json index 694e021c..91b6a636 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-2024-08-06-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-10-14T15:00:27.184787-04:00", + "time": "2024-10-14T17:38:25.173529-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-10-14T15:00:27.185109-04:00", + "time": "2024-10-14T17:38:25.174285-04:00", "callContext": { - "id": "1728932428", + "id": "1728941906", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ 
@@ -41,16 +41,16 @@ "usage": {} }, { - "time": "2024-10-14T15:00:27.185153-04:00", + "time": "2024-10-14T17:38:25.174351-04:00", "callContext": { - "id": "1728932428", + "id": "1728941906", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -74,7 +74,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728932429", + "chatCompletionId": "1728941907", "usage": {}, "chatRequest": { "model": "", @@ -82,16 +82,16 @@ } }, { - "time": "2024-10-14T15:00:28.310827-04:00", + "time": "2024-10-14T17:38:26.165696-04:00", "callContext": { - "id": "1728932428", + "id": "1728941906", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -115,11 +115,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728932429", + "chatCompletionId": "1728941907", "usage": { - "promptTokens": 138, + "promptTokens": 145, "completionTokens": 17, - "totalTokens": 155 + "totalTokens": 162 }, "chatResponse": { "role": "assistant", @@ -127,7 +127,7 @@ { "toolCall": { "index": 0, - "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", + "id": "call_95p8Knb4mdiEgxt5iXxnxOKC", "function": { "name": "bob", "arguments": "{\"question\":\"how are you doing\"}" @@ -136,23 +136,23 @@ } ], "usage": { - "promptTokens": 138, + "promptTokens": 145, "completionTokens": 17, - "totalTokens": 155 + "totalTokens": 162 } } }, { - "time": "2024-10-14T15:00:28.311081-04:00", + "time": 
"2024-10-14T17:38:26.165858-04:00", "callContext": { - "id": "1728932428", + "id": "1728941906", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -176,7 +176,7 @@ "inputContext": null }, "toolSubCalls": { - "call_c8HV8M6DRtYLd8MEHgaHAWtZ": { + "call_95p8Knb4mdiEgxt5iXxnxOKC": { "toolID": "testdata/BobAsShell/test.gpt:bob", "input": "{\"question\":\"how are you doing\"}" } @@ -185,9 +185,9 @@ "usage": {} }, { - "time": "2024-10-14T15:00:28.311186-04:00", + "time": "2024-10-14T17:38:26.165964-04:00", "callContext": { - "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", + "id": "call_95p8Knb4mdiEgxt5iXxnxOKC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -210,14 +210,14 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728932428", + "parentID": "1728941906", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", @@ -225,9 +225,9 @@ "content": "{\"question\":\"how are you doing\"}" }, { - "time": "2024-10-14T15:00:28.312343-04:00", + "time": "2024-10-14T17:38:26.167235-04:00", "callContext": { - "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", + "id": "call_95p8Knb4mdiEgxt5iXxnxOKC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -250,18 +250,18 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728932428", + "parentID": "1728941906", "displayText": "Running bob from 
testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1728932430", + "chatCompletionId": "1728941908", "usage": {}, "chatRequest": { "model": "", @@ -269,9 +269,9 @@ } }, { - "time": "2024-10-14T15:00:28.328987-04:00", + "time": "2024-10-14T17:38:26.178131-04:00", "callContext": { - "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", + "id": "call_95p8Knb4mdiEgxt5iXxnxOKC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -294,27 +294,27 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728932428", + "parentID": "1728941906", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1728932430", + "chatCompletionId": "1728941908", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-10-14T15:00:28.329156-04:00", + "time": "2024-10-14T17:38:26.178199-04:00", "callContext": { - "id": "call_c8HV8M6DRtYLd8MEHgaHAWtZ", + "id": "call_95p8Knb4mdiEgxt5iXxnxOKC", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -337,14 +337,14 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728932428", + "parentID": "1728941906", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", @@ -352,16 +352,16 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-10-14T15:00:28.32924-04:00", + "time": "2024-10-14T17:38:26.178356-04:00", "callContext": { - "id": "1728932428", + "id": "1728941906", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + 
"instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -389,16 +389,16 @@ "usage": {} }, { - "time": "2024-10-14T15:00:28.329393-04:00", + "time": "2024-10-14T17:38:26.178539-04:00", "callContext": { - "id": "1728932428", + "id": "1728941906", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -422,7 +422,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728932431", + "chatCompletionId": "1728941909", "usage": {}, "chatRequest": { "model": "", @@ -430,16 +430,16 @@ } }, { - "time": "2024-10-14T15:00:29.437703-04:00", + "time": "2024-10-14T17:38:27.001877-04:00", "callContext": { - "id": "1728932428", + "id": "1728941906", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -463,11 +463,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728932431", + "chatCompletionId": "1728941909", "usage": { - "promptTokens": 178, + "promptTokens": 185, "completionTokens": 17, - "totalTokens": 195 + "totalTokens": 202 }, "chatResponse": { "role": "assistant", @@ -477,23 +477,23 @@ } ], "usage": { - "promptTokens": 178, + "promptTokens": 185, "completionTokens": 17, - "totalTokens": 195 + "totalTokens": 202 } } }, { - "time": "2024-10-14T15:00:29.437754-04:00", + "time": 
"2024-10-14T17:38:27.001903-04:00", "callContext": { - "id": "1728932428", + "id": "1728941906", "tool": { "modelName": "gpt-4o-2024-08-06", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -521,7 +521,7 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-10-14T15:00:29.437838-04:00", + "time": "2024-10-14T17:38:27.001932-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-mini-2024-07-18-expected.json b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-mini-2024-07-18-expected.json index 6354054d..bdfee87d 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-mini-2024-07-18-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/gpt-4o-mini-2024-07-18-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-10-14T11:31:52.152013-04:00", + "time": "2024-10-14T17:37:54.379122-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-10-14T11:31:52.152428-04:00", + "time": "2024-10-14T17:37:54.379631-04:00", "callContext": { - "id": "1728919913", + "id": "1728941875", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -41,16 +41,16 @@ "usage": {} }, { - "time": "2024-10-14T11:31:52.152473-04:00", + "time": "2024-10-14T17:37:54.379682-04:00", "callContext": { - "id": "1728919913", + "id": "1728941875", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ 
"bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -74,7 +74,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728919914", + "chatCompletionId": "1728941876", "usage": {}, "chatRequest": { "model": "", @@ -82,16 +82,16 @@ } }, { - "time": "2024-10-14T11:31:54.107362-04:00", + "time": "2024-10-14T17:37:55.230509-04:00", "callContext": { - "id": "1728919913", + "id": "1728941875", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -115,11 +115,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728919914", + "chatCompletionId": "1728941876", "usage": { - "promptTokens": 138, + "promptTokens": 145, "completionTokens": 17, - "totalTokens": 155 + "totalTokens": 162 }, "chatResponse": { "role": "assistant", @@ -127,7 +127,7 @@ { "toolCall": { "index": 0, - "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "id": "call_FInUoOxKSR90EOxzIHXivvSX", "function": { "name": "bob", "arguments": "{\"question\":\"how are you doing\"}" @@ -136,23 +136,23 @@ } ], "usage": { - "promptTokens": 138, + "promptTokens": 145, "completionTokens": 17, - "totalTokens": 155 + "totalTokens": 162 } } }, { - "time": "2024-10-14T11:31:54.107585-04:00", + "time": "2024-10-14T17:37:55.23069-04:00", "callContext": { - "id": "1728919913", + "id": "1728941875", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply 
exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -176,7 +176,7 @@ "inputContext": null }, "toolSubCalls": { - "call_AmrlGivMXtyAzbP85T7lwFN9": { + "call_FInUoOxKSR90EOxzIHXivvSX": { "toolID": "testdata/BobAsShell/test.gpt:bob", "input": "{\"question\":\"how are you doing\"}" } @@ -185,9 +185,9 @@ "usage": {} }, { - "time": "2024-10-14T11:31:54.107715-04:00", + "time": "2024-10-14T17:37:55.230816-04:00", "callContext": { - "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "id": "call_FInUoOxKSR90EOxzIHXivvSX", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -210,14 +210,14 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728919913", + "parentID": "1728941875", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", @@ -225,9 +225,9 @@ "content": "{\"question\":\"how are you doing\"}" }, { - "time": "2024-10-14T11:31:54.108876-04:00", + "time": "2024-10-14T17:37:55.231913-04:00", "callContext": { - "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "id": "call_FInUoOxKSR90EOxzIHXivvSX", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -250,18 +250,18 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728919913", + "parentID": "1728941875", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1728919915", + "chatCompletionId": "1728941877", "usage": {}, "chatRequest": { "model": "", @@ -269,9 +269,9 @@ } }, { - "time": "2024-10-14T11:31:54.121327-04:00", + "time": 
"2024-10-14T17:37:55.245261-04:00", "callContext": { - "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "id": "call_FInUoOxKSR90EOxzIHXivvSX", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -294,27 +294,27 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728919913", + "parentID": "1728941875", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1728919915", + "chatCompletionId": "1728941877", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-10-14T11:31:54.121533-04:00", + "time": "2024-10-14T17:37:55.245348-04:00", "callContext": { - "id": "call_AmrlGivMXtyAzbP85T7lwFN9", + "id": "call_FInUoOxKSR90EOxzIHXivvSX", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -337,14 +337,14 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1728919913", + "parentID": "1728941875", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", @@ -352,16 +352,16 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-10-14T11:31:54.121596-04:00", + "time": "2024-10-14T17:37:55.245528-04:00", "callContext": { - "id": "1728919913", + "id": "1728941875", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -389,16 +389,16 @@ "usage": {} }, { - "time": 
"2024-10-14T11:31:54.121802-04:00", + "time": "2024-10-14T17:37:55.245751-04:00", "callContext": { - "id": "1728919913", + "id": "1728941875", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -422,7 +422,7 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728919916", + "chatCompletionId": "1728941878", "usage": {}, "chatRequest": { "model": "", @@ -430,16 +430,16 @@ } }, { - "time": "2024-10-14T11:31:55.746879-04:00", + "time": "2024-10-14T17:37:56.345692-04:00", "callContext": { - "id": "1728919913", + "id": "1728941875", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -463,11 +463,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1728919916", + "chatCompletionId": "1728941878", "usage": { - "promptTokens": 178, + "promptTokens": 185, "completionTokens": 17, - "totalTokens": 195 + "totalTokens": 202 }, "chatResponse": { "role": "assistant", @@ -477,23 +477,23 @@ } ], "usage": { - "promptTokens": 178, + "promptTokens": 185, "completionTokens": 17, - "totalTokens": 195 + "totalTokens": 202 } } }, { - "time": "2024-10-14T11:31:55.746965-04:00", + "time": "2024-10-14T17:37:56.345742-04:00", "callContext": { - "id": "1728919913", + "id": "1728941875", "tool": { "modelName": "gpt-4o-mini-2024-07-18", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and 
repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -521,7 +521,7 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-10-14T11:31:55.747227-04:00", + "time": "2024-10-14T17:37:56.345803-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json index 6dc41a08..4506754b 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json @@ -1,20 +1,20 @@ [ { - "time": "2024-08-23T12:02:07.951538-04:00", + "time": "2024-10-14T17:38:47.018065-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-08-23T12:02:07.951742-04:00", + "time": "2024-10-14T17:38:47.018394-04:00", "callContext": { - "id": "1724428928", + "id": "1728941928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -41,14 +41,14 @@ "usage": {} }, { - "time": "2024-08-23T12:02:08.65216-04:00", + "time": "2024-10-14T17:38:47.47198-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-08-23T12:02:08.65229-04:00", + "time": "2024-10-14T17:38:47.472449-04:00", "callContext": { - "id": "1724428929", + "id": "1728941929", "tool": { "name": "Mistral La Plateforme Provider", "description": "Model provider for Mistral models running on La Plateforme", @@ -64,7 +64,7 @@ 
"github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/37e5f870e195b896438c0bc35867403a42f82e89/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" } ] }, @@ -93,9 +93,9 @@ "usage": {} }, { - "time": "2024-08-23T12:02:09.659962-04:00", + "time": "2024-10-14T17:38:50.566081-04:00", "callContext": { - "id": "1724428929", + "id": "1728941929", "tool": { "name": "Mistral La Plateforme Provider", "description": "Model provider for Mistral models running on La Plateforme", @@ -111,7 +111,7 @@ "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ { "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/37e5f870e195b896438c0bc35867403a42f82e89/tool.gpt:token" + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" } ] }, @@ -138,24 +138,24 @@ }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:10532" + "content": "http://127.0.0.1:11133" }, { - "time": "2024-08-23T12:02:09.66007-04:00", + "time": 
"2024-10-14T17:38:50.56681-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-08-23T12:02:09.660117-04:00", + "time": "2024-10-14T17:38:50.567218-04:00", "callContext": { - "id": "1724428928", + "id": "1728941928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -179,48 +179,24 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724428930", + "chatCompletionId": "1728941930", "usage": {}, "chatRequest": { - "model": "mistral-large-2402", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-08-23T12:02:11.140706-04:00", + "time": "2024-10-14T17:38:51.51096-04:00", "callContext": { - "id": "1724428928", + "id": "1728941928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -244,11 +220,11 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724428930", + 
"chatCompletionId": "1728941930", "usage": { - "promptTokens": 188, + "promptTokens": 195, "completionTokens": 23, - "totalTokens": 211 + "totalTokens": 218 }, "chatResponse": { "role": "assistant", @@ -256,7 +232,7 @@ { "toolCall": { "index": 0, - "id": "r1wQzUugN", + "id": "KLMoUpwIL", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -265,23 +241,23 @@ } ], "usage": { - "promptTokens": 188, + "promptTokens": 195, "completionTokens": 23, - "totalTokens": 211 + "totalTokens": 218 } } }, { - "time": "2024-08-23T12:02:11.140968-04:00", + "time": "2024-10-14T17:38:51.511569-04:00", "callContext": { - "id": "1724428928", + "id": "1728941928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -305,7 +281,7 @@ "inputContext": null }, "toolSubCalls": { - "r1wQzUugN": { + "KLMoUpwIL": { "toolID": "testdata/BobAsShell/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -314,9 +290,9 @@ "usage": {} }, { - "time": "2024-08-23T12:02:11.141094-04:00", + "time": "2024-10-14T17:38:51.511777-04:00", "callContext": { - "id": "r1wQzUugN", + "id": "KLMoUpwIL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -339,14 +315,14 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724428928", + "parentID": "1728941928", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", @@ -354,9 +330,9 @@ "content": "{\"question\": \"how are you doing\"}" }, { - "time": 
"2024-08-23T12:02:11.141978-04:00", + "time": "2024-10-14T17:38:51.513152-04:00", "callContext": { - "id": "r1wQzUugN", + "id": "KLMoUpwIL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -379,18 +355,18 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724428928", + "parentID": "1728941928", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1724428931", + "chatCompletionId": "1728941931", "usage": {}, "chatRequest": { "model": "", @@ -398,9 +374,9 @@ } }, { - "time": "2024-08-23T12:02:11.153328-04:00", + "time": "2024-10-14T17:38:51.528154-04:00", "callContext": { - "id": "r1wQzUugN", + "id": "KLMoUpwIL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -423,27 +399,27 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724428928", + "parentID": "1728941928", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1724428931", + "chatCompletionId": "1728941931", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-08-23T12:02:11.153471-04:00", + "time": "2024-10-14T17:38:51.528298-04:00", "callContext": { - "id": "r1wQzUugN", + "id": "KLMoUpwIL", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", @@ -466,14 +442,14 @@ }, "source": { "location": "testdata/BobAsShell/test.gpt", - "lineNo": 7 + "lineNo": 6 }, "workingDir": "testdata/BobAsShell" }, "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1724428928", + "parentID": "1728941928", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", @@ -481,16 +457,16 @@ 
"content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-08-23T12:02:11.153544-04:00", + "time": "2024-10-14T17:38:51.528421-04:00", "callContext": { - "id": "1724428928", + "id": "1728941928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -518,16 +494,16 @@ "usage": {} }, { - "time": "2024-08-23T12:02:11.41447-04:00", + "time": "2024-10-14T17:38:51.894619-04:00", "callContext": { - "id": "1724428928", + "id": "1728941928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -551,68 +527,24 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724428932", + "chatCompletionId": "1728941932", "usage": {}, "chatRequest": { - "model": "mistral-large-2402", - "messages": [ - { - "role": "system", - "content": "Ask Bob \"how are you doing\" and repeat his reply exactly." 
- }, - { - "role": "assistant", - "content": "", - "tool_calls": [ - { - "id": "r1wQzUugN", - "type": "function", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - ] - }, - { - "role": "tool", - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", - "name": "bob", - "tool_call_id": "r1wQzUugN" - } - ], - "temperature": 0, - "tools": [ - { - "type": "function", - "function": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "parameters": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - } - } - } - ] + "model": "", + "messages": null } }, { - "time": "2024-08-23T12:02:12.424283-04:00", + "time": "2024-10-14T17:38:52.586731-04:00", "callContext": { - "id": "1724428928", + "id": "1728941928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -636,37 +568,37 @@ "inputContext": null }, "type": "callChat", - "chatCompletionId": "1724428932", + "chatCompletionId": "1728941932", "usage": { - "promptTokens": 247, - "completionTokens": 22, - "totalTokens": 269 + "promptTokens": 254, + "completionTokens": 18, + "totalTokens": 272 }, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
} ], "usage": { - "promptTokens": 247, - "completionTokens": 22, - "totalTokens": 269 + "promptTokens": 254, + "completionTokens": 18, + "totalTokens": 272 } } }, { - "time": "2024-08-23T12:02:12.42432-04:00", + "time": "2024-10-14T17:38:52.587128-04:00", "callContext": { - "id": "1724428928", + "id": "1728941928", "tool": { "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", "internalPrompt": null, "tools": [ "bob" ], - "instructions": "Ask Bob \"how are you doing\" and repeat his reply exactly.", + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", "id": "testdata/BobAsShell/test.gpt:", "toolMapping": { "bob": [ @@ -691,10 +623,10 @@ }, "type": "callFinish", "usage": {}, - "content": "Bob said, \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-08-23T12:02:12.424439-04:00", + "time": "2024-10-14T17:38:52.587221-04:00", "type": "runFinish", "usage": {} } From 09271257ff56354309113d435fb80f02f3692739 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 15 Oct 2024 16:29:50 -0400 Subject: [PATCH 162/270] feat: add workspace API to SDK (#872) This change also changes the error behavior when running tools that are simply wrapped commands. Previously, all such tools would not return an error, rather an error message in hopes that the LLM would retry. However, if the tool is just a command (i.e. has no parent), then it should return an error so that the caller doesn't have to guess whether an error occurred. 
Signed-off-by: Donnie Adams --- pkg/engine/cmd.go | 5 +- pkg/sdkserver/datasets.go | 10 +- pkg/sdkserver/routes.go | 9 + pkg/sdkserver/workspaces.go | 328 ++++++++++++++++++++++++++++++++++++ 4 files changed, 345 insertions(+), 7 deletions(-) create mode 100644 pkg/sdkserver/workspaces.go diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 5b27a579..1dcdaff0 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -154,12 +154,13 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate result = stdout if err := cmd.Run(); err != nil { - if toolCategory == NoCategory { + if toolCategory == NoCategory && ctx.Parent != nil { + // If this is a sub-call, then don't return the error; return the error as a message so that the LLM can retry. return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, stdoutAndErr), nil } log.Errorf("failed to run tool [%s] cmd %v: %v", tool.Parameters.Name, cmd.Args, err) combinedOutput = stdoutAndErr.String() - return "", fmt.Errorf("ERROR: %s: %w", result, err) + return "", fmt.Errorf("ERROR: %s: %w", stdoutAndErr, err) } combinedOutput = stdoutAndErr.String() diff --git a/pkg/sdkserver/datasets.go b/pkg/sdkserver/datasets.go index 0085132c..a65566a4 100644 --- a/pkg/sdkserver/datasets.go +++ b/pkg/sdkserver/datasets.go @@ -62,7 +62,7 @@ func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), "List Datasets from "+req.getToolRepo(), "", loader.Options{ + prg, err := loader.Program(r.Context(), req.getToolRepo(), "List Datasets", loader.Options{ Cache: g.Cache, }) @@ -123,7 +123,7 @@ func (s *server) createDataset(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), "Create Dataset from "+req.getToolRepo(), "", loader.Options{ + prg, err := loader.Program(r.Context(), req.getToolRepo(), "Create Dataset", loader.Options{ Cache: g.Cache, }) @@ -192,7 +192,7 @@ func (s *server) 
addDatasetElement(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), "Add Element from "+req.getToolRepo(), "", loader.Options{ + prg, err := loader.Program(r.Context(), req.getToolRepo(), "Add Element", loader.Options{ Cache: g.Cache, }) if err != nil { @@ -251,7 +251,7 @@ func (s *server) listDatasetElements(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), "List Elements from "+req.getToolRepo(), "", loader.Options{ + prg, err := loader.Program(r.Context(), req.getToolRepo(), "List Elements", loader.Options{ Cache: g.Cache, }) if err != nil { @@ -314,7 +314,7 @@ func (s *server) getDatasetElement(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), "Get Element from "+req.getToolRepo(), "", loader.Options{ + prg, err := loader.Program(r.Context(), req.getToolRepo(), "Get Element", loader.Options{ Cache: g.Cache, }) if err != nil { diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 8427a6a5..894823b3 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -72,6 +72,15 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /datasets/list-elements", s.listDatasetElements) mux.HandleFunc("POST /datasets/get-element", s.getDatasetElement) mux.HandleFunc("POST /datasets/add-element", s.addDatasetElement) + + mux.HandleFunc("POST /workspaces/create", s.createWorkspace) + mux.HandleFunc("POST /workspaces/delete", s.deleteWorkspace) + mux.HandleFunc("POST /workspaces/list", s.listWorkspaceContents) + mux.HandleFunc("POST /workspaces/mkdir", s.mkDirInWorkspace) + mux.HandleFunc("POST /workspaces/rmdir", s.rmDirInWorkspace) + mux.HandleFunc("POST /workspaces/write-file", s.writeFileInWorkspace) + mux.HandleFunc("POST /workspaces/delete-file", s.removeFileInWorkspace) + mux.HandleFunc("POST /workspaces/read-file", s.readFileInWorkspace) } // health just provides an endpoint for checking whether the 
server is running and accessible. diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go new file mode 100644 index 00000000..c69a6ae6 --- /dev/null +++ b/pkg/sdkserver/workspaces.go @@ -0,0 +1,328 @@ +package sdkserver + +import ( + "encoding/json" + "fmt" + "net/http" + + gcontext "github.com/gptscript-ai/gptscript/pkg/context" + "github.com/gptscript-ai/gptscript/pkg/loader" +) + +type workspaceCommonRequest struct { + ID string `json:"id"` + WorkspaceToolRepo string `json:"workspaceToolRepo"` +} + +func (w workspaceCommonRequest) getToolRepo() string { + if w.WorkspaceToolRepo != "" { + return w.WorkspaceToolRepo + } + return "github.com/gptscript-ai/workspace-provider" +} + +type createWorkspaceRequest struct { + workspaceCommonRequest `json:",inline"` + ProviderType string `json:"providerType"` +} + +func (s *server) createWorkspace(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject createWorkspaceRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Create Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + if reqObject.ProviderType == "" { + reqObject.ProviderType = "directory" + } + + out, err := s.client.Run( + r.Context(), + prg, + s.gptscriptOpts.Env, + fmt.Sprintf( + `{"provider": "%s"}`, + reqObject.ProviderType, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} + +type deleteWorkspaceRequest struct { + workspaceCommonRequest `json:",inline"` +} + +func (s *server) deleteWorkspace(w http.ResponseWriter, r 
*http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject deleteWorkspaceRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Delete Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.gptscriptOpts.Env, + fmt.Sprintf( + `{"workspace_id": "%s"}`, + reqObject.ID, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} + +type listWorkspaceContentsRequest struct { + workspaceCommonRequest `json:",inline"` + ID string `json:"id"` + SubDir string `json:"subDir"` + NonRecursive bool `json:"nonRecursive"` + ExcludeHidden bool `json:"excludeHidden"` + JSON bool `json:"json"` +} + +func (s *server) listWorkspaceContents(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject listWorkspaceContentsRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "List Workspace Contents", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.gptscriptOpts.Env, + fmt.Sprintf( + `{"workspace_id": "%s", "ls_sub_dir": "%s", "ls_non_recursive": %t, "ls_exclude_hidden": %t, "ls_json": %t}`, + reqObject.ID, reqObject.SubDir, 
reqObject.NonRecursive, reqObject.ExcludeHidden, reqObject.JSON, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} + +type mkDirRequest struct { + workspaceCommonRequest `json:",inline"` + DirectoryName string `json:"directoryName"` + IgnoreExists bool `json:"ignoreExists"` + CreateDirs bool `json:"createDirs"` +} + +func (s *server) mkDirInWorkspace(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject mkDirRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Create Directory In Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.gptscriptOpts.Env, + fmt.Sprintf( + `{"workspace_id": "%s", "directory_name": "%s", "mk_dir_ignore_exists": %t, "mk_dir_create_dirs": %t}`, + reqObject.ID, reqObject.DirectoryName, reqObject.IgnoreExists, reqObject.CreateDirs, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} + +type rmDirRequest struct { + workspaceCommonRequest `json:",inline"` + DirectoryName string `json:"directoryName"` + IgnoreNotFound bool `json:"ignoreNotFound"` + MustBeEmpty bool `json:"mustBeEmpty"` +} + +func (s *server) rmDirInWorkspace(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject rmDirRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + 
writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Remove Directory In Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.gptscriptOpts.Env, + fmt.Sprintf( + `{"workspace_id": "%s", "directory_name": "%s", "ignore_not_found": %t, "rm_dir_must_be_empty": %t}`, + reqObject.ID, reqObject.DirectoryName, reqObject.IgnoreNotFound, reqObject.MustBeEmpty, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} + +type writeFileInWorkspaceRequest struct { + workspaceCommonRequest `json:",inline"` + FilePath string `json:"filePath"` + Contents string `json:"contents"` + Base64EncodedInput bool `json:"base64EncodedInput"` + MustNotExist bool `json:"mustNotExist"` + CreateDirs bool `json:"createDirs"` + WithoutCreate bool `json:"withoutCreate"` +} + +func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject writeFileInWorkspaceRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Write File In Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.gptscriptOpts.Env, + fmt.Sprintf( + `{"workspace_id": "%s", "file_path": "%s", "file_contents": "%s", 
"write_file_must_not_exist": %t, "write_file_create_dirs": %t, "write_file_without_create": %t, "write_file_base64_encoded_input": %t}`, + reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.MustNotExist, reqObject.CreateDirs, reqObject.WithoutCreate, reqObject.Base64EncodedInput, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} + +type rmFileInWorkspaceRequest struct { + workspaceCommonRequest `json:",inline"` + FilePath string `json:"filePath"` + IgnoreNotFound bool `json:"ignoreNotFound"` +} + +func (s *server) removeFileInWorkspace(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject rmFileInWorkspaceRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Remove File In Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.gptscriptOpts.Env, + fmt.Sprintf( + `{"workspace_id": "%s", "file_path": "%s", "ignore_not_found": %t}`, + reqObject.ID, reqObject.FilePath, reqObject.IgnoreNotFound, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} + +type readFileInWorkspaceRequest struct { + workspaceCommonRequest `json:",inline"` + FilePath string `json:"filePath"` + Base64EncodeOutput bool `json:"base64EncodeOutput"` +} + +func (s *server) readFileInWorkspace(w http.ResponseWriter, r *http.Request) { + logger := 
gcontext.GetLogger(r.Context()) + var reqObject readFileInWorkspaceRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Read File In Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.gptscriptOpts.Env, + fmt.Sprintf( + `{"workspace_id": "%s", "file_path": "%s", "read_file_base64_encode_output": %t}`, + reqObject.ID, reqObject.FilePath, reqObject.Base64EncodeOutput, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} From 419ccbbdfc1882b5d40f111393e9ec3a322cd19b Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 16 Oct 2024 11:03:13 -0400 Subject: [PATCH 163/270] fix: add ability to create workspaces from other workspaces (#877) Signed-off-by: Donnie Adams --- pkg/engine/cmd.go | 2 +- pkg/sdkserver/workspaces.go | 17 ++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 1dcdaff0..9ef6e834 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -223,7 +223,7 @@ func appendInputAsEnv(env []string, input string) []string { newEnv = appendEnv(newEnv, "GPTSCRIPT_INPUT", input) - if err := json.Unmarshal([]byte(input), &data); err != nil { + if err := dec.Decode(&data); err != nil { // ignore invalid JSON return newEnv } diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index c69a6ae6..ef73b347 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -4,26 +4,29 @@ import ( "encoding/json" "fmt" "net/http" + "strings" 
gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/loader" ) type workspaceCommonRequest struct { - ID string `json:"id"` - WorkspaceToolRepo string `json:"workspaceToolRepo"` + ID string `json:"id"` + WorkspaceTool string `json:"workspaceTool"` } func (w workspaceCommonRequest) getToolRepo() string { - if w.WorkspaceToolRepo != "" { - return w.WorkspaceToolRepo + if w.WorkspaceTool != "" { + return w.WorkspaceTool } return "github.com/gptscript-ai/workspace-provider" } type createWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` - ProviderType string `json:"providerType"` + ProviderType string `json:"providerType"` + DirectoryDataHome string `json:"directoryDataHome"` + FromWorkspaceIDs []string `json:"fromWorkspaceIDs"` } func (s *server) createWorkspace(w http.ResponseWriter, r *http.Request) { @@ -49,8 +52,8 @@ func (s *server) createWorkspace(w http.ResponseWriter, r *http.Request) { prg, s.gptscriptOpts.Env, fmt.Sprintf( - `{"provider": "%s"}`, - reqObject.ProviderType, + `{"provider": "%s", "data_home": "%s", "workspace_ids": "%s"}`, + reqObject.ProviderType, reqObject.DirectoryDataHome, strings.Join(reqObject.FromWorkspaceIDs, ","), ), ) if err != nil { From c3a3279b0c2ca5d22c8aebb4b35761de2797af6b Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 17 Oct 2024 15:19:44 -0400 Subject: [PATCH 164/270] feat: simplify the workspace API and add support for s3 (#878) Signed-off-by: Donnie Adams --- pkg/sdkserver/routes.go | 3 +- pkg/sdkserver/workspaces.go | 94 ++++++++++--------------------------- 2 files changed, 25 insertions(+), 72 deletions(-) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 894823b3..4d4ceb2e 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -76,8 +76,7 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /workspaces/create", s.createWorkspace) mux.HandleFunc("POST /workspaces/delete", s.deleteWorkspace) 
mux.HandleFunc("POST /workspaces/list", s.listWorkspaceContents) - mux.HandleFunc("POST /workspaces/mkdir", s.mkDirInWorkspace) - mux.HandleFunc("POST /workspaces/rmdir", s.rmDirInWorkspace) + mux.HandleFunc("POST /workspaces/remove-all-with-prefix", s.removeAllWithPrefixInWorkspace) mux.HandleFunc("POST /workspaces/write-file", s.writeFileInWorkspace) mux.HandleFunc("POST /workspaces/delete-file", s.removeFileInWorkspace) mux.HandleFunc("POST /workspaces/read-file", s.readFileInWorkspace) diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index ef73b347..87bc4583 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -11,8 +11,9 @@ import ( ) type workspaceCommonRequest struct { - ID string `json:"id"` - WorkspaceTool string `json:"workspaceTool"` + ID string `json:"id"` + WorkspaceTool string `json:"workspaceTool"` + Env []string `json:"env"` } func (w workspaceCommonRequest) getToolRepo() string { @@ -50,7 +51,7 @@ func (s *server) createWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - s.gptscriptOpts.Env, + reqObject.Env, fmt.Sprintf( `{"provider": "%s", "data_home": "%s", "workspace_ids": "%s"}`, reqObject.ProviderType, reqObject.DirectoryDataHome, strings.Join(reqObject.FromWorkspaceIDs, ","), @@ -85,7 +86,7 @@ func (s *server) deleteWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - s.gptscriptOpts.Env, + reqObject.Env, fmt.Sprintf( `{"workspace_id": "%s"}`, reqObject.ID, @@ -102,10 +103,7 @@ func (s *server) deleteWorkspace(w http.ResponseWriter, r *http.Request) { type listWorkspaceContentsRequest struct { workspaceCommonRequest `json:",inline"` ID string `json:"id"` - SubDir string `json:"subDir"` - NonRecursive bool `json:"nonRecursive"` - ExcludeHidden bool `json:"excludeHidden"` - JSON bool `json:"json"` + Prefix string `json:"prefix"` } func (s *server) listWorkspaceContents(w http.ResponseWriter, r *http.Request) 
{ @@ -125,10 +123,10 @@ func (s *server) listWorkspaceContents(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - s.gptscriptOpts.Env, + reqObject.Env, fmt.Sprintf( - `{"workspace_id": "%s", "ls_sub_dir": "%s", "ls_non_recursive": %t, "ls_exclude_hidden": %t, "ls_json": %t}`, - reqObject.ID, reqObject.SubDir, reqObject.NonRecursive, reqObject.ExcludeHidden, reqObject.JSON, + `{"workspace_id": "%s", "ls_prefix": "%s"}`, + reqObject.ID, reqObject.Prefix, ), ) if err != nil { @@ -139,22 +137,20 @@ func (s *server) listWorkspaceContents(w http.ResponseWriter, r *http.Request) { writeResponse(logger, w, map[string]any{"stdout": out}) } -type mkDirRequest struct { +type removeAllWithPrefixInWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` - DirectoryName string `json:"directoryName"` - IgnoreExists bool `json:"ignoreExists"` - CreateDirs bool `json:"createDirs"` + Prefix string `json:"prefix"` } -func (s *server) mkDirInWorkspace(w http.ResponseWriter, r *http.Request) { +func (s *server) removeAllWithPrefixInWorkspace(w http.ResponseWriter, r *http.Request) { logger := gcontext.GetLogger(r.Context()) - var reqObject mkDirRequest + var reqObject removeAllWithPrefixInWorkspaceRequest if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } - prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Create Directory In Workspace", loader.Options{Cache: s.client.Cache}) + prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Remove All With Prefix In Workspace", loader.Options{Cache: s.client.Cache}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return @@ -163,48 +159,10 @@ func (s *server) mkDirInWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - s.gptscriptOpts.Env, + 
reqObject.Env, fmt.Sprintf( - `{"workspace_id": "%s", "directory_name": "%s", "mk_dir_ignore_exists": %t, "mk_dir_create_dirs": %t}`, - reqObject.ID, reqObject.DirectoryName, reqObject.IgnoreExists, reqObject.CreateDirs, - ), - ) - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) - return - } - - writeResponse(logger, w, map[string]any{"stdout": out}) -} - -type rmDirRequest struct { - workspaceCommonRequest `json:",inline"` - DirectoryName string `json:"directoryName"` - IgnoreNotFound bool `json:"ignoreNotFound"` - MustBeEmpty bool `json:"mustBeEmpty"` -} - -func (s *server) rmDirInWorkspace(w http.ResponseWriter, r *http.Request) { - logger := gcontext.GetLogger(r.Context()) - var reqObject rmDirRequest - if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { - writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) - return - } - - prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Remove Directory In Workspace", loader.Options{Cache: s.client.Cache}) - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) - return - } - - out, err := s.client.Run( - r.Context(), - prg, - s.gptscriptOpts.Env, - fmt.Sprintf( - `{"workspace_id": "%s", "directory_name": "%s", "ignore_not_found": %t, "rm_dir_must_be_empty": %t}`, - reqObject.ID, reqObject.DirectoryName, reqObject.IgnoreNotFound, reqObject.MustBeEmpty, + `{"workspace_id": "%s", "prefix": "%s"}`, + reqObject.ID, reqObject.Prefix, ), ) if err != nil { @@ -220,9 +178,6 @@ type writeFileInWorkspaceRequest struct { FilePath string `json:"filePath"` Contents string `json:"contents"` Base64EncodedInput bool `json:"base64EncodedInput"` - MustNotExist bool `json:"mustNotExist"` - CreateDirs bool `json:"createDirs"` - WithoutCreate bool `json:"withoutCreate"` } func (s *server) writeFileInWorkspace(w http.ResponseWriter, r 
*http.Request) { @@ -242,10 +197,10 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - s.gptscriptOpts.Env, + reqObject.Env, fmt.Sprintf( - `{"workspace_id": "%s", "file_path": "%s", "file_contents": "%s", "write_file_must_not_exist": %t, "write_file_create_dirs": %t, "write_file_without_create": %t, "write_file_base64_encoded_input": %t}`, - reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.MustNotExist, reqObject.CreateDirs, reqObject.WithoutCreate, reqObject.Base64EncodedInput, + `{"workspace_id": "%s", "file_path": "%s", "file_contents": "%s", "write_file_base64_encoded_input": %t}`, + reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.Base64EncodedInput, ), ) if err != nil { @@ -259,7 +214,6 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { type rmFileInWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` FilePath string `json:"filePath"` - IgnoreNotFound bool `json:"ignoreNotFound"` } func (s *server) removeFileInWorkspace(w http.ResponseWriter, r *http.Request) { @@ -279,10 +233,10 @@ func (s *server) removeFileInWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - s.gptscriptOpts.Env, + reqObject.Env, fmt.Sprintf( - `{"workspace_id": "%s", "file_path": "%s", "ignore_not_found": %t}`, - reqObject.ID, reqObject.FilePath, reqObject.IgnoreNotFound, + `{"workspace_id": "%s", "file_path": "%s"}`, + reqObject.ID, reqObject.FilePath, ), ) if err != nil { @@ -316,7 +270,7 @@ func (s *server) readFileInWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - s.gptscriptOpts.Env, + reqObject.Env, fmt.Sprintf( `{"workspace_id": "%s", "file_path": "%s", "read_file_base64_encode_output": %t}`, reqObject.ID, reqObject.FilePath, reqObject.Base64EncodeOutput, From f15e7fb8d415f56736fbe29cfacc4b355dc2d001 Mon Sep 17 00:00:00 2001 From: Donnie 
Adams Date: Fri, 18 Oct 2024 09:34:33 -0400 Subject: [PATCH 165/270] fix: return error if build tool fails A change was made to return an error if running a code-based tool fails. This should also happen if building the tool fails. Signed-off-by: Donnie Adams --- pkg/engine/cmd.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 9ef6e834..a4f6d3ed 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -124,10 +124,10 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate } cmd, stop, err := e.newCommand(ctx.Ctx, extraEnv, tool, input, true) if err != nil { - if toolCategory == NoCategory { + if toolCategory == NoCategory && ctx.Parent != nil { return fmt.Sprintf("ERROR: got (%v) while parsing command", err), nil } - return "", err + return "", fmt.Errorf("got (%v) while parsing command", err) } defer stop() From d6ae14d4e97f882cbd8a93e9c754ea717bde3741 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 18 Oct 2024 10:30:33 -0400 Subject: [PATCH 166/270] chore: remove directory data home workspace option Signed-off-by: Donnie Adams --- pkg/sdkserver/workspaces.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index 87bc4583..0cbf94b2 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -26,7 +26,6 @@ func (w workspaceCommonRequest) getToolRepo() string { type createWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` ProviderType string `json:"providerType"` - DirectoryDataHome string `json:"directoryDataHome"` FromWorkspaceIDs []string `json:"fromWorkspaceIDs"` } @@ -53,8 +52,8 @@ func (s *server) createWorkspace(w http.ResponseWriter, r *http.Request) { prg, reqObject.Env, fmt.Sprintf( - `{"provider": "%s", "data_home": "%s", "workspace_ids": "%s"}`, - reqObject.ProviderType, reqObject.DirectoryDataHome, strings.Join(reqObject.FromWorkspaceIDs, 
","), + `{"provider": "%s", "workspace_ids": "%s"}`, + reqObject.ProviderType, strings.Join(reqObject.FromWorkspaceIDs, ","), ), ) if err != nil { From 36f97084ed456979813d897a539275f6fb32830d Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 18 Oct 2024 12:09:27 -0700 Subject: [PATCH 167/270] bug: lock when setting up runtimes --- pkg/repos/runtimes/busybox/busybox.go | 5 +++++ pkg/repos/runtimes/golang/golang.go | 6 ++++++ pkg/repos/runtimes/node/node.go | 6 ++++++ pkg/repos/runtimes/python/python.go | 6 ++++++ 4 files changed, 23 insertions(+) diff --git a/pkg/repos/runtimes/busybox/busybox.go b/pkg/repos/runtimes/busybox/busybox.go index e4604b06..5c77ee2b 100644 --- a/pkg/repos/runtimes/busybox/busybox.go +++ b/pkg/repos/runtimes/busybox/busybox.go @@ -14,6 +14,7 @@ import ( "path/filepath" "runtime" "strings" + "sync" runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/hash" @@ -27,6 +28,7 @@ var releasesData []byte const downloadURL = "https://github.com/gptscript-ai/busybox-w32/releases/download/%s" type Runtime struct { + runtimeSetupLock sync.Mutex } func (r *Runtime) ID() string { @@ -75,6 +77,9 @@ func (r *Runtime) getReleaseAndDigest() (string, string, error) { } func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { + r.runtimeSetupLock.Lock() + defer r.runtimeSetupLock.Unlock() + url, sha, err := r.getReleaseAndDigest() if err != nil { return "", err diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index f86fa88d..23c12f1a 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -17,6 +17,7 @@ import ( "path/filepath" "runtime" "strings" + "sync" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/debugcmd" @@ -34,6 +35,8 @@ const downloadURL = "https://go.dev/dl/" type Runtime struct { // version something like "1.22.1" Version string + + runtimeSetupLock sync.Mutex } 
func (r *Runtime) ID() string { @@ -355,6 +358,9 @@ func (r *Runtime) binDir(rel string) string { } func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { + r.runtimeSetupLock.Lock() + defer r.runtimeSetupLock.Unlock() + url, sha, err := r.getReleaseAndDigest() if err != nil { return "", err diff --git a/pkg/repos/runtimes/node/node.go b/pkg/repos/runtimes/node/node.go index 4d73c13b..53b77ca2 100644 --- a/pkg/repos/runtimes/node/node.go +++ b/pkg/repos/runtimes/node/node.go @@ -12,6 +12,7 @@ import ( "path/filepath" "runtime" "strings" + "sync" "github.com/gptscript-ai/gptscript/pkg/debugcmd" runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" @@ -34,6 +35,8 @@ type Runtime struct { Version string // If true this is the version that will be used for python or python3 Default bool + + runtimeSetupLock sync.Mutex } func (r *Runtime) ID() string { @@ -175,6 +178,9 @@ func (r *Runtime) binDir(rel string) (string, error) { } func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { + r.runtimeSetupLock.Lock() + defer r.runtimeSetupLock.Unlock() + url, sha, err := r.getReleaseAndDigest() if err != nil { return "", err diff --git a/pkg/repos/runtimes/python/python.go b/pkg/repos/runtimes/python/python.go index ee4bf571..4aebe0cf 100644 --- a/pkg/repos/runtimes/python/python.go +++ b/pkg/repos/runtimes/python/python.go @@ -12,6 +12,7 @@ import ( "os" "path/filepath" "runtime" + "sync" "github.com/gptscript-ai/gptscript/pkg/debugcmd" runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" @@ -42,6 +43,8 @@ type Runtime struct { Version string // If true this is the version that will be used for python or python3 Default bool + + runtimeSetupLock sync.Mutex } func (r *Runtime) ID() string { @@ -234,6 +237,9 @@ func (r *Runtime) setupUV(ctx context.Context, tmp string) error { } func (r *Runtime) getRuntime(ctx context.Context, cwd string) (string, error) { + r.runtimeSetupLock.Lock() + defer r.runtimeSetupLock.Unlock() + url, 
sha, err := r.getReleaseAndDigest() if err != nil { return "", err From 376315dc9a26c3b038c2f0797b371e97b945a136 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 18 Oct 2024 16:04:13 -0400 Subject: [PATCH 168/270] chore: move the workspace-provider tool to a daemon This also adds support in the daemon implementation for dynamic paths, something that is needed for the workspace-provider daemon to work. Signed-off-by: Donnie Adams --- pkg/engine/http.go | 11 ++++++++++- pkg/sdkserver/workspaces.go | 13 ++++++------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/pkg/engine/http.go b/pkg/engine/http.go index a81f1bb3..b46a040a 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -18,6 +18,11 @@ const DaemonURLSuffix = ".daemon.gptscript.local" func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Tool, input string) (cmdRet *Return, cmdErr error) { envMap := map[string]string{} + for _, env := range appendInputAsEnv(nil, input) { + k, v, _ := strings.Cut(env, "=") + envMap[k] = v + } + for _, env := range e.Env { k, v, _ := strings.Cut(env, "=") envMap[k] = v @@ -25,7 +30,7 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too toolURL := strings.Split(tool.Instructions, "\n")[0][2:] toolURL = os.Expand(toolURL, func(s string) string { - return envMap[s] + return url.PathEscape(envMap[s]) }) parsed, err := url.Parse(toolURL) @@ -61,6 +66,10 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too }, nil } + if body, ok := envMap["BODY"]; ok { + input = body + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, toolURL, strings.NewReader(input)) if err != nil { return nil, err diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index 0cbf94b2..dd2df692 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -1,6 +1,7 @@ package sdkserver import ( + "encoding/base64" "encoding/json" "fmt" "net/http" @@ -175,8 
+176,7 @@ func (s *server) removeAllWithPrefixInWorkspace(w http.ResponseWriter, r *http.R type writeFileInWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` FilePath string `json:"filePath"` - Contents string `json:"contents"` - Base64EncodedInput bool `json:"base64EncodedInput"` + Contents []byte `json:"contents"` } func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { @@ -198,8 +198,8 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { prg, reqObject.Env, fmt.Sprintf( - `{"workspace_id": "%s", "file_path": "%s", "file_contents": "%s", "write_file_base64_encoded_input": %t}`, - reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.Base64EncodedInput, + `{"workspace_id": "%s", "file_path": "%s", "body": "%s"}`, + reqObject.ID, reqObject.FilePath, base64.StdEncoding.EncodeToString(reqObject.Contents), ), ) if err != nil { @@ -249,7 +249,6 @@ func (s *server) removeFileInWorkspace(w http.ResponseWriter, r *http.Request) { type readFileInWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` FilePath string `json:"filePath"` - Base64EncodeOutput bool `json:"base64EncodeOutput"` } func (s *server) readFileInWorkspace(w http.ResponseWriter, r *http.Request) { @@ -271,8 +270,8 @@ func (s *server) readFileInWorkspace(w http.ResponseWriter, r *http.Request) { prg, reqObject.Env, fmt.Sprintf( - `{"workspace_id": "%s", "file_path": "%s", "read_file_base64_encode_output": %t}`, - reqObject.ID, reqObject.FilePath, reqObject.Base64EncodeOutput, + `{"workspace_id": "%s", "file_path": "%s"}`, + reqObject.ID, reqObject.FilePath, ), ) if err != nil { From 366104477158d46fb6f975ff386df5210df91a87 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Sun, 20 Oct 2024 10:48:43 -0400 Subject: [PATCH 169/270] feat: make the SDK server workspace tool a global option Signed-off-by: Donnie Adams --- pkg/cli/sdk_server.go | 2 ++ pkg/sdkserver/routes.go | 1 + pkg/sdkserver/server.go | 7 +++++++ 
pkg/sdkserver/workspaces.go | 29 +++++++++++++++-------------- 4 files changed, 25 insertions(+), 14 deletions(-) diff --git a/pkg/cli/sdk_server.go b/pkg/cli/sdk_server.go index c9cf480f..5ce65305 100644 --- a/pkg/cli/sdk_server.go +++ b/pkg/cli/sdk_server.go @@ -11,6 +11,7 @@ import ( type SDKServer struct { *GPTScript + WorkspaceTool string `usage:"Tool to use for workspace"` } func (c *SDKServer) Customize(cmd *cobra.Command) { @@ -37,5 +38,6 @@ func (c *SDKServer) Run(cmd *cobra.Command, _ []string) error { Options: opts, ListenAddress: c.ListenAddress, Debug: c.Debug, + WorkspaceTool: c.WorkspaceTool, }) } diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 4d4ceb2e..8afdb8a4 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -28,6 +28,7 @@ import ( type server struct { gptscriptOpts gptscript.Options address, token string + workspaceTool string client *gptscript.GPTScript events *broadcaster.Broadcaster[event] diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 0a68f0fa..1368fd54 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -27,6 +27,7 @@ type Options struct { gptscript.Options ListenAddress string + WorkspaceTool string Debug bool DisableServerErrorLogging bool } @@ -107,6 +108,7 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { gptscriptOpts: opts.Options, address: listener.Addr().String(), token: token, + workspaceTool: opts.WorkspaceTool, client: g, events: events, runtimeManager: runtimes.Default(opts.Options.Cache.CacheDir), // TODO - do we always want to use runtimes.Default here? 
@@ -157,6 +159,7 @@ func complete(opts ...Options) Options { for _, opt := range opts { result.Options = gptscript.Complete(result.Options, opt.Options) result.ListenAddress = types.FirstSet(opt.ListenAddress, result.ListenAddress) + result.WorkspaceTool = types.FirstSet(opt.WorkspaceTool, result.WorkspaceTool) result.Debug = types.FirstSet(opt.Debug, result.Debug) result.DisableServerErrorLogging = types.FirstSet(opt.DisableServerErrorLogging, result.DisableServerErrorLogging) } @@ -165,5 +168,9 @@ func complete(opts ...Options) Options { result.ListenAddress = "127.0.0.1:0" } + if result.WorkspaceTool == "" { + result.WorkspaceTool = "github.com/gptscript-ai/workspace-provider" + } + return result } diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index dd2df692..e3989117 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -11,19 +11,20 @@ import ( "github.com/gptscript-ai/gptscript/pkg/loader" ) +func (s *server) getWorkspaceTool(req workspaceCommonRequest) string { + if req.WorkspaceTool != "" { + return req.WorkspaceTool + } + + return s.workspaceTool +} + type workspaceCommonRequest struct { ID string `json:"id"` WorkspaceTool string `json:"workspaceTool"` Env []string `json:"env"` } -func (w workspaceCommonRequest) getToolRepo() string { - if w.WorkspaceTool != "" { - return w.WorkspaceTool - } - return "github.com/gptscript-ai/workspace-provider" -} - type createWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` ProviderType string `json:"providerType"` @@ -38,7 +39,7 @@ func (s *server) createWorkspace(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Create Workspace", loader.Options{Cache: s.client.Cache}) + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Create Workspace", loader.Options{Cache: s.client.Cache}) if err != nil { writeError(logger, w, 
http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return @@ -77,7 +78,7 @@ func (s *server) deleteWorkspace(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Delete Workspace", loader.Options{Cache: s.client.Cache}) + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Delete Workspace", loader.Options{Cache: s.client.Cache}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return @@ -114,7 +115,7 @@ func (s *server) listWorkspaceContents(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "List Workspace Contents", loader.Options{Cache: s.client.Cache}) + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "List Workspace Contents", loader.Options{Cache: s.client.Cache}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return @@ -150,7 +151,7 @@ func (s *server) removeAllWithPrefixInWorkspace(w http.ResponseWriter, r *http.R return } - prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Remove All With Prefix In Workspace", loader.Options{Cache: s.client.Cache}) + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Remove All With Prefix In Workspace", loader.Options{Cache: s.client.Cache}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return @@ -187,7 +188,7 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Write File In Workspace", loader.Options{Cache: s.client.Cache}) + prg, err := loader.Program(r.Context(), 
s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Write File In Workspace", loader.Options{Cache: s.client.Cache}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return @@ -223,7 +224,7 @@ func (s *server) removeFileInWorkspace(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Remove File In Workspace", loader.Options{Cache: s.client.Cache}) + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Remove File In Workspace", loader.Options{Cache: s.client.Cache}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return @@ -259,7 +260,7 @@ func (s *server) readFileInWorkspace(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), reqObject.getToolRepo(), "Read File In Workspace", loader.Options{Cache: s.client.Cache}) + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Read File In Workspace", loader.Options{Cache: s.client.Cache}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return From 87863a7ef6405b088400fc014658907ae1073fdf Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 21 Oct 2024 08:21:09 -0700 Subject: [PATCH 170/270] chore: handle workspace ids from sdk natively --- pkg/engine/http.go | 6 ++++++ pkg/gptscript/gptscript.go | 19 ++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/pkg/engine/http.go b/pkg/engine/http.go index b46a040a..304f89c4 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -75,6 +75,12 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too return nil, err } + for _, env := range e.Env { + if strings.HasPrefix(env, "GPTSCRIPT_") { + req.Header.Add("X-GPTScript-Env", 
env) + } + } + req.Header.Set("X-GPTScript-Tool-Name", tool.Parameters.Name) if err := json.Unmarshal([]byte(input), &map[string]any{}); err == nil { diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 679eb503..454debf6 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -168,6 +168,20 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { } func (g *GPTScript) getEnv(env []string) ([]string, error) { + var ( + id string + ) + + scheme, rest, isScheme := strings.Cut(g.WorkspacePath, "://") + if isScheme && scheme == "directory" { + id = g.WorkspacePath + g.WorkspacePath = rest + } else if isScheme { + id = g.WorkspacePath + g.WorkspacePath = "" + g.DeleteWorkspaceOnClose = true + } + if g.WorkspacePath == "" { var err error g.WorkspacePath, err = os.MkdirTemp("", "gptscript-workspace-*") @@ -184,9 +198,12 @@ func (g *GPTScript) getEnv(env []string) ([]string, error) { if err := os.MkdirAll(g.WorkspacePath, 0700); err != nil { return nil, err } + if id == "" { + id = hash.ID(g.WorkspacePath) + } return slices.Concat(g.ExtraEnv, env, []string{ fmt.Sprintf("GPTSCRIPT_WORKSPACE_DIR=%s", g.WorkspacePath), - fmt.Sprintf("GPTSCRIPT_WORKSPACE_ID=%s", hash.ID(g.WorkspacePath)), + fmt.Sprintf("GPTSCRIPT_WORKSPACE_ID=%s", id), }), nil } From e7e923134d63e472b338bd59ed94b42a923b5e16 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 21 Oct 2024 16:34:39 -0700 Subject: [PATCH 171/270] bug: log http response bodies in failed daemon calls --- pkg/engine/http.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/engine/http.go b/pkg/engine/http.go index 304f89c4..0742b236 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -96,8 +96,8 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too defer resp.Body.Close() if resp.StatusCode > 299 { - _, _ = io.ReadAll(resp.Body) - return nil, fmt.Errorf("error in request to [%s] [%d]: %s", toolURL, 
resp.StatusCode, resp.Status) + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("error in request to [%s] [%d]: %s: %s", toolURL, resp.StatusCode, resp.Status, body) } content, err := io.ReadAll(resp.Body) From af2e82f277fdff9e5ff9d1d4415078a1f12fce71 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 23 Oct 2024 15:16:53 -0400 Subject: [PATCH 172/270] fix: remove base64 encoding from workspace API Signed-off-by: Donnie Adams --- pkg/sdkserver/workspaces.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index e3989117..4356a583 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -1,7 +1,6 @@ package sdkserver import ( - "encoding/base64" "encoding/json" "fmt" "net/http" @@ -177,7 +176,7 @@ func (s *server) removeAllWithPrefixInWorkspace(w http.ResponseWriter, r *http.R type writeFileInWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` FilePath string `json:"filePath"` - Contents []byte `json:"contents"` + Contents string `json:"contents"` } func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { @@ -200,7 +199,7 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { reqObject.Env, fmt.Sprintf( `{"workspace_id": "%s", "file_path": "%s", "body": "%s"}`, - reqObject.ID, reqObject.FilePath, base64.StdEncoding.EncodeToString(reqObject.Contents), + reqObject.ID, reqObject.FilePath, reqObject.Contents, ), ) if err != nil { From dd84ea22db090139142a7d0f40ea35bc0f01a93b Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 23 Oct 2024 15:26:03 -0400 Subject: [PATCH 173/270] chore: add Add Elements tool to dataset sdk (#885) Signed-off-by: Grant Linville --- pkg/sdkserver/datasets.go | 100 +++++++++++++++++++++++++++++++++----- pkg/sdkserver/routes.go | 1 + 2 files changed, 89 insertions(+), 12 deletions(-) diff --git a/pkg/sdkserver/datasets.go b/pkg/sdkserver/datasets.go index 
a65566a4..bfef595a 100644 --- a/pkg/sdkserver/datasets.go +++ b/pkg/sdkserver/datasets.go @@ -11,16 +11,19 @@ import ( ) type datasetRequest struct { - Input string `json:"input"` - Workspace string `json:"workspace"` - DatasetToolRepo string `json:"datasetToolRepo"` + Input string `json:"input"` + WorkspaceID string `json:"workspaceID"` + DatasetToolRepo string `json:"datasetToolRepo"` + Env []string `json:"env"` } func (r datasetRequest) validate(requireInput bool) error { - if r.Workspace == "" { - return fmt.Errorf("workspace is required") + if r.WorkspaceID == "" { + return fmt.Errorf("workspaceID is required") } else if requireInput && r.Input == "" { return fmt.Errorf("input is required") + } else if len(r.Env) == 0 { + return fmt.Errorf("env is required") } return nil } @@ -30,7 +33,7 @@ func (r datasetRequest) opts(o gptscript.Options) gptscript.Options { Cache: o.Cache, Monitor: o.Monitor, Runner: o.Runner, - Workspace: r.Workspace, + Workspace: r.WorkspaceID, } return opts } @@ -39,7 +42,7 @@ func (r datasetRequest) getToolRepo() string { if r.DatasetToolRepo != "" { return r.DatasetToolRepo } - return "github.com/gptscript-ai/datasets" + return "github.com/otto8-ai/datasets" } func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { @@ -71,7 +74,7 @@ func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + result, err := g.Run(r.Context(), prg, req.Env, req.Input) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -132,7 +135,7 @@ func (s *server) createDataset(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + result, err := g.Run(r.Context(), prg, req.Env, req.Input) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ 
-200,7 +203,80 @@ func (s *server) addDatasetElement(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + result, err := g.Run(r.Context(), prg, req.Env, req.Input) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": result}) +} + +type addDatasetElementsArgs struct { + DatasetID string `json:"datasetID"` + Elements []struct { + Name string `json:"name"` + Description string `json:"description"` + Contents string `json:"contents"` + } +} + +func (a addDatasetElementsArgs) validate() error { + if a.DatasetID == "" { + return fmt.Errorf("datasetID is required") + } + if len(a.Elements) == 0 { + return fmt.Errorf("elements is required") + } + return nil +} + +func (s *server) addDatasetElements(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + + var req datasetRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) + return + } + + if err := req.validate(true); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + g, err := gptscript.New(r.Context(), req.opts(s.gptscriptOpts)) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) + return + } + + var args addDatasetElementsArgs + if err := json.Unmarshal([]byte(req.Input), &args); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to unmarshal input: %w", err)) + return + } + + if err := args.validate(); err != nil { + writeError(logger, w, http.StatusBadRequest, err) + return + } + + prg, err := loader.Program(r.Context(), req.getToolRepo(), "Add Elements", loader.Options{ + Cache: g.Cache, + }) + if err != nil { + writeError(logger, 
w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + elementsJSON, err := json.Marshal(args.Elements) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to marshal elements: %w", err)) + return + } + + result, err := g.Run(r.Context(), prg, req.Env, fmt.Sprintf(`{"datasetID":%q, "elements":%q}`, args.DatasetID, string(elementsJSON))) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -259,7 +335,7 @@ func (s *server) listDatasetElements(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + result, err := g.Run(r.Context(), prg, req.Env, req.Input) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -322,7 +398,7 @@ func (s *server) getDatasetElement(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, s.gptscriptOpts.Env, req.Input) + result, err := g.Run(r.Context(), prg, req.Env, req.Input) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 8afdb8a4..713f74fe 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -73,6 +73,7 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /datasets/list-elements", s.listDatasetElements) mux.HandleFunc("POST /datasets/get-element", s.getDatasetElement) mux.HandleFunc("POST /datasets/add-element", s.addDatasetElement) + mux.HandleFunc("POST /datasets/add-elements", s.addDatasetElements) mux.HandleFunc("POST /workspaces/create", s.createWorkspace) mux.HandleFunc("POST /workspaces/delete", s.deleteWorkspace) From 519883a07af7ad3ba5faec2408da9e476061c864 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 24 
Oct 2024 23:00:37 -0700 Subject: [PATCH 174/270] chore: do not setup runtime for special system dir --- pkg/repos/get.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/repos/get.go b/pkg/repos/get.go index a36c2fe0..cbc88be3 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -58,6 +58,7 @@ type Manager struct { storageDir string gitDir string runtimeDir string + systemDir string runtimes []Runtime credHelperConfig *credHelperConfig } @@ -75,6 +76,7 @@ func New(cacheDir string, runtimes ...Runtime) *Manager { storageDir: root, gitDir: filepath.Join(root, "git"), runtimeDir: filepath.Join(root, "runtimes"), + systemDir: filepath.Join(root, "system"), runtimes: runtimes, } } @@ -271,6 +273,10 @@ func (m *Manager) setup(ctx context.Context, runtime Runtime, tool types.Tool, e } func (m *Manager) GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) { + if strings.HasPrefix(tool.WorkingDir, m.systemDir) { + return tool.WorkingDir, env, nil + } + var isLocal bool if tool.Source.Repo == nil { isLocal = true From 2d1632ceaecb798eab4d46eb8f333c5208aa01af Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 25 Oct 2024 15:48:26 -0700 Subject: [PATCH 175/270] feat: add system-tools-dir for system managed tools --- .../04-command-line-reference/gptscript.md | 1 + .../gptscript_eval.md | 1 + .../gptscript_fmt.md | 1 + .../gptscript_getenv.md | 1 + .../gptscript_parse.md | 1 + pkg/cli/credential.go | 2 +- pkg/cli/credential_delete.go | 2 +- pkg/cli/credential_show.go | 2 +- pkg/cli/gptscript.go | 2 ++ pkg/gptscript/gptscript.go | 4 +++- pkg/repos/get.go | 23 ++++++++++++++----- pkg/repos/get_test.go | 2 +- pkg/repos/runtimes/default.go | 4 ++-- pkg/sdkserver/server.go | 2 +- pkg/tests/tester/runner.go | 2 +- 15 files changed, 35 insertions(+), 15 deletions(-) diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md index 8a726c64..4ca35228 100644 --- 
a/docs/docs/04-command-line-reference/gptscript.md +++ b/docs/docs/04-command-line-reference/gptscript.md @@ -43,6 +43,7 @@ gptscript [flags] PROGRAM_FILE [INPUT...] -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) --save-chat-state-file string A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE) --sub-tool string Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL) + --system-tools-dir string Directory that contains system managed tool for which GPTScript will not manage the runtime ($GPTSCRIPT_SYSTEM_TOOLS_DIR) --ui Launch the UI ($GPTSCRIPT_UI) --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` diff --git a/docs/docs/04-command-line-reference/gptscript_eval.md b/docs/docs/04-command-line-reference/gptscript_eval.md index 257cf609..ddbecc9f 100644 --- a/docs/docs/04-command-line-reference/gptscript_eval.md +++ b/docs/docs/04-command-line-reference/gptscript_eval.md @@ -46,6 +46,7 @@ gptscript eval [flags] --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --system-tools-dir string Directory that contains system managed tool for which GPTScript will not manage the runtime ($GPTSCRIPT_SYSTEM_TOOLS_DIR) --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` diff --git a/docs/docs/04-command-line-reference/gptscript_fmt.md b/docs/docs/04-command-line-reference/gptscript_fmt.md index 1175a1f1..2b042623 100644 --- a/docs/docs/04-command-line-reference/gptscript_fmt.md +++ b/docs/docs/04-command-line-reference/gptscript_fmt.md @@ -40,6 +40,7 @@ gptscript fmt [flags] --openai-org-id 
string OpenAI organization ID ($OPENAI_ORG_ID) -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --system-tools-dir string Directory that contains system managed tool for which GPTScript will not manage the runtime ($GPTSCRIPT_SYSTEM_TOOLS_DIR) --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` diff --git a/docs/docs/04-command-line-reference/gptscript_getenv.md b/docs/docs/04-command-line-reference/gptscript_getenv.md index 4a688439..7e677c5c 100644 --- a/docs/docs/04-command-line-reference/gptscript_getenv.md +++ b/docs/docs/04-command-line-reference/gptscript_getenv.md @@ -39,6 +39,7 @@ gptscript getenv [flags] KEY [DEFAULT] --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --system-tools-dir string Directory that contains system managed tool for which GPTScript will not manage the runtime ($GPTSCRIPT_SYSTEM_TOOLS_DIR) --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` diff --git a/docs/docs/04-command-line-reference/gptscript_parse.md b/docs/docs/04-command-line-reference/gptscript_parse.md index 66d2791c..567b0c05 100644 --- a/docs/docs/04-command-line-reference/gptscript_parse.md +++ b/docs/docs/04-command-line-reference/gptscript_parse.md @@ -40,6 +40,7 @@ gptscript parse [flags] --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --system-tools-dir string Directory that contains 
system managed tool for which GPTScript will not manage the runtime ($GPTSCRIPT_SYSTEM_TOOLS_DIR) --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` diff --git a/pkg/cli/credential.go b/pkg/cli/credential.go index 674160b9..a46c483b 100644 --- a/pkg/cli/credential.go +++ b/pkg/cli/credential.go @@ -48,7 +48,7 @@ func (c *Credential) Run(cmd *cobra.Command, _ []string) error { } opts = gptscript.Complete(opts) if opts.Runner.RuntimeManager == nil { - opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) + opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir, opts.SystemToolsDir) } ctxs := opts.CredentialContexts diff --git a/pkg/cli/credential_delete.go b/pkg/cli/credential_delete.go index b17ae851..81392f36 100644 --- a/pkg/cli/credential_delete.go +++ b/pkg/cli/credential_delete.go @@ -35,7 +35,7 @@ func (c *Delete) Run(cmd *cobra.Command, args []string) error { opts = gptscript.Complete(opts) if opts.Runner.RuntimeManager == nil { - opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) + opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir, opts.SystemToolsDir) } if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg); err != nil { diff --git a/pkg/cli/credential_show.go b/pkg/cli/credential_show.go index d8ea980b..ab2e9cd1 100644 --- a/pkg/cli/credential_show.go +++ b/pkg/cli/credential_show.go @@ -37,7 +37,7 @@ func (c *Show) Run(cmd *cobra.Command, args []string) error { opts = gptscript.Complete(opts) if opts.Runner.RuntimeManager == nil { - opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) + opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir, opts.SystemToolsDir) } if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg); err != nil { diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 66719adc..d0481ec8 100644 --- a/pkg/cli/gptscript.go +++ 
b/pkg/cli/gptscript.go @@ -47,6 +47,7 @@ type GPTScript struct { CacheOptions OpenAIOptions DisplayOptions + SystemToolsDir string `usage:"Directory that contains system managed tool for which GPTScript will not manage the runtime"` Color *bool `usage:"Use color in output (default true)" default:"true"` Confirm bool `usage:"Prompt before running potentially dangerous commands"` Debug bool `usage:"Enable debug logging"` @@ -146,6 +147,7 @@ func (r *GPTScript) NewGPTScriptOpts() (gptscript.Options, error) { Workspace: r.Workspace, DisablePromptServer: r.UI, DefaultModelProvider: r.DefaultModelProvider, + SystemToolsDir: r.SystemToolsDir, } if r.Confirm { diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 454debf6..3771b124 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -52,6 +52,7 @@ type Options struct { Quiet *bool Workspace string DisablePromptServer bool + SystemToolsDir string Env []string } @@ -63,6 +64,7 @@ func Complete(opts ...Options) Options { result.Runner = runner.Complete(result.Runner, opt.Runner) result.OpenAI = openai.Complete(result.OpenAI, opt.OpenAI) + result.SystemToolsDir = types.FirstSet(opt.SystemToolsDir, result.SystemToolsDir) result.CredentialContexts = opt.CredentialContexts result.Quiet = types.FirstSet(opt.Quiet, result.Quiet) result.Workspace = types.FirstSet(opt.Workspace, result.Workspace) @@ -99,7 +101,7 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { } if opts.Runner.RuntimeManager == nil { - opts.Runner.RuntimeManager = runtimes.Default(cacheClient.CacheDir()) + opts.Runner.RuntimeManager = runtimes.Default(cacheClient.CacheDir(), opts.SystemToolsDir) } if err := opts.Runner.RuntimeManager.SetUpCredentialHelpers(context.Background(), cliCfg); err != nil { diff --git a/pkg/repos/get.go b/pkg/repos/get.go index cbc88be3..0a50ce15 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -8,6 +8,7 @@ import ( "io/fs" "os" "path/filepath" + "regexp" "runtime" 
"strings" "sync" @@ -58,7 +59,7 @@ type Manager struct { storageDir string gitDir string runtimeDir string - systemDir string + systemDirs []string runtimes []Runtime credHelperConfig *credHelperConfig } @@ -69,14 +70,22 @@ type credHelperConfig struct { cliCfg *config.CLIConfig } -func New(cacheDir string, runtimes ...Runtime) *Manager { - root := filepath.Join(cacheDir, "repos") +func New(cacheDir, systemDir string, runtimes ...Runtime) *Manager { + var ( + systemDirs []string + root = filepath.Join(cacheDir, "repos") + ) + + if strings.TrimSpace(systemDir) != "" { + systemDirs = regexp.MustCompile("[;:,]").Split(strings.TrimSpace(systemDir), -1) + } + return &Manager{ cacheDir: cacheDir, storageDir: root, gitDir: filepath.Join(root, "git"), runtimeDir: filepath.Join(root, "runtimes"), - systemDir: filepath.Join(root, "system"), + systemDirs: systemDirs, runtimes: runtimes, } } @@ -273,8 +282,10 @@ func (m *Manager) setup(ctx context.Context, runtime Runtime, tool types.Tool, e } func (m *Manager) GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) { - if strings.HasPrefix(tool.WorkingDir, m.systemDir) { - return tool.WorkingDir, env, nil + for _, systemDir := range m.systemDirs { + if strings.HasPrefix(tool.WorkingDir, systemDir) { + return tool.WorkingDir, env, nil + } } var isLocal bool diff --git a/pkg/repos/get_test.go b/pkg/repos/get_test.go index d59e5513..3a656dc0 100644 --- a/pkg/repos/get_test.go +++ b/pkg/repos/get_test.go @@ -19,7 +19,7 @@ var ( ) func TestManager_GetContext(t *testing.T) { - m := New(testCacheHome, &python.Runtime{ + m := New(testCacheHome, "", &python.Runtime{ Version: "3.11", }) cwd, env, err := m.GetContext(context.Background(), types.Tool{ diff --git a/pkg/repos/runtimes/default.go b/pkg/repos/runtimes/default.go index a93fb735..ea237cc4 100644 --- a/pkg/repos/runtimes/default.go +++ b/pkg/repos/runtimes/default.go @@ -30,6 +30,6 @@ var Runtimes = []repos.Runtime{ }, } -func 
Default(cacheDir string) engine.RuntimeManager { - return repos.New(cacheDir, Runtimes...) +func Default(cacheDir, systemDir string) engine.RuntimeManager { + return repos.New(cacheDir, systemDir, Runtimes...) } diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 1368fd54..f0c61940 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -111,7 +111,7 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { workspaceTool: opts.WorkspaceTool, client: g, events: events, - runtimeManager: runtimes.Default(opts.Options.Cache.CacheDir), // TODO - do we always want to use runtimes.Default here? + runtimeManager: runtimes.Default(opts.Options.Cache.CacheDir, opts.SystemToolsDir), waitingToConfirm: make(map[string]chan runner.AuthorizerResponse), waitingToPrompt: make(map[string]chan map[string]string), } diff --git a/pkg/tests/tester/runner.go b/pkg/tests/tester/runner.go index b460ce18..fa7f7683 100644 --- a/pkg/tests/tester/runner.go +++ b/pkg/tests/tester/runner.go @@ -196,7 +196,7 @@ func NewRunner(t *testing.T) *Runner { cacheDir, err := xdg.CacheFile("gptscript-test-cache/runtime") require.NoError(t, err) - rm := runtimes.Default(cacheDir) + rm := runtimes.Default(cacheDir, "") run, err := runner.New(c, credentials.NoopStore{}, runner.Options{ Sequential: true, From 9d30b268e3465118f16a3c0d7076e4a7a2dea405 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 28 Oct 2024 07:12:24 -0400 Subject: [PATCH 176/270] feat: add file stat to workspace API Signed-off-by: Donnie Adams --- pkg/sdkserver/routes.go | 1 + pkg/sdkserver/workspaces.go | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 713f74fe..e9b1cca8 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -82,6 +82,7 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /workspaces/write-file", s.writeFileInWorkspace) 
mux.HandleFunc("POST /workspaces/delete-file", s.removeFileInWorkspace) mux.HandleFunc("POST /workspaces/read-file", s.readFileInWorkspace) + mux.HandleFunc("POST /workspaces/stat-file", s.statFileInWorkspace) } // health just provides an endpoint for checking whether the server is running and accessible. diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index 4356a583..ed6602ea 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -281,3 +281,39 @@ func (s *server) readFileInWorkspace(w http.ResponseWriter, r *http.Request) { writeResponse(logger, w, map[string]any{"stdout": out}) } + +type statFileInWorkspaceRequest struct { + workspaceCommonRequest `json:",inline"` + FilePath string `json:"filePath"` +} + +func (s *server) statFileInWorkspace(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject statFileInWorkspaceRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Stat File In Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + reqObject.Env, + fmt.Sprintf( + `{"workspace_id": "%s", "file_path": "%s"}`, + reqObject.ID, reqObject.FilePath, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} From 8180c2629303599584b9754da9bd7e19bcb0a0d1 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 29 Oct 2024 12:26:13 -0700 Subject: [PATCH 177/270] bug: fix workspace sdk use outside of otto --- 
pkg/gptscript/gptscript.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 3771b124..df4b0792 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -15,7 +15,6 @@ import ( context2 "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/engine" - "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/llm" "github.com/gptscript-ai/gptscript/pkg/monitor" "github.com/gptscript-ai/gptscript/pkg/mvl" @@ -201,7 +200,7 @@ func (g *GPTScript) getEnv(env []string) ([]string, error) { return nil, err } if id == "" { - id = hash.ID(g.WorkspacePath) + id = "directory://" + g.WorkspacePath } return slices.Concat(g.ExtraEnv, env, []string{ fmt.Sprintf("GPTSCRIPT_WORKSPACE_DIR=%s", g.WorkspacePath), From 886f94462ad56f860047bec87a44e86a8db1960e Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 1 Nov 2024 15:44:29 -0400 Subject: [PATCH 178/270] chore: sdkserver: use the Get Element SDK tool (#890) Signed-off-by: Grant Linville --- pkg/sdkserver/datasets.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/sdkserver/datasets.go b/pkg/sdkserver/datasets.go index bfef595a..1a547953 100644 --- a/pkg/sdkserver/datasets.go +++ b/pkg/sdkserver/datasets.go @@ -390,7 +390,7 @@ func (s *server) getDatasetElement(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), req.getToolRepo(), "Get Element", loader.Options{ + prg, err := loader.Program(r.Context(), req.getToolRepo(), "Get Element SDK", loader.Options{ Cache: g.Cache, }) if err != nil { From 50489f2b1b5f29b28dfe0af8cd18f5539a690da6 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 1 Nov 2024 23:17:29 -0700 Subject: [PATCH 179/270] chore: increase memory efficiency --- pkg/cache/cache.go | 1 + pkg/engine/http.go | 2 +- pkg/openai/client.go | 96 
++++++++++++++++++++--------------------- pkg/types/completion.go | 1 - 4 files changed, 48 insertions(+), 52 deletions(-) diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 031bd166..90e8ee10 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -92,6 +92,7 @@ func (c *Client) CacheDir() string { func (c *Client) cacheKey(key any) (string, error) { hash := sha256.New() + hash.Write([]byte("v2")) if err := json.NewEncoder(hash).Encode(key); err != nil { return "", err } diff --git a/pkg/engine/http.go b/pkg/engine/http.go index 0742b236..87348ce9 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -76,7 +76,7 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too } for _, env := range e.Env { - if strings.HasPrefix(env, "GPTSCRIPT_") { + if strings.HasPrefix(env, "GPTSCRIPT_WORKSPACE_") { req.Header.Add("X-GPTScript-Env", env) } } diff --git a/pkg/openai/client.go b/pkg/openai/client.go index af518f93..be5c6253 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -9,6 +9,7 @@ import ( "slices" "sort" "strings" + "time" openai "github.com/gptscript-ai/chat-completion-client" "github.com/gptscript-ai/gptscript/pkg/cache" @@ -212,15 +213,15 @@ func (c *Client) seed(request openai.ChatCompletionRequest) int { return hash.Seed(newRequest) } -func (c *Client) fromCache(ctx context.Context, messageRequest types.CompletionRequest, request openai.ChatCompletionRequest) (result []openai.ChatCompletionStreamResponse, _ bool, _ error) { +func (c *Client) fromCache(ctx context.Context, messageRequest types.CompletionRequest, request openai.ChatCompletionRequest) (result types.CompletionMessage, _ bool, _ error) { if !messageRequest.GetCache() { - return nil, false, nil + return types.CompletionMessage{}, false, nil } found, err := c.cache.Get(ctx, c.cacheKey(request), &result) if err != nil { - return nil, false, err + return types.CompletionMessage{}, false, err } else if !found { - return nil, false, nil + return 
types.CompletionMessage{}, false, nil } return result, true, nil } @@ -396,11 +397,11 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques IncludeUsage: true, } } - response, ok, err := c.fromCache(ctx, messageRequest, request) + result, ok, err := c.fromCache(ctx, messageRequest, request) if err != nil { return nil, err } else if !ok { - response, err = c.call(ctx, request, id, status) + result, err = c.call(ctx, request, id, status) // If we got back a context length exceeded error, keep retrying and shrinking the message history until we pass. var apiError *openai.APIError @@ -408,9 +409,8 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques // Decrease maxTokens by 10% to make garbage collection more aggressive. // The retry loop will further decrease maxTokens if needed. maxTokens := decreaseTenPercent(messageRequest.MaxTokens) - response, err = c.contextLimitRetryLoop(ctx, request, id, maxTokens, status) + result, err = c.contextLimitRetryLoop(ctx, request, id, maxTokens, status) } - if err != nil { return nil, err } @@ -418,11 +418,6 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques cacheResponse = true } - result := types.CompletionMessage{} - for _, response := range response { - result = appendMessage(result, response) - } - for i, content := range result.Content { if content.ToolCall != nil && content.ToolCall.ID == "" { content.ToolCall.ID = "call_" + hash.ID(content.ToolCall.Function.Name, content.ToolCall.Function.Arguments)[:8] @@ -440,7 +435,6 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques status <- types.CompletionStatus{ CompletionID: id, - Chunks: response, Response: result, Usage: result.Usage, Cached: cacheResponse, @@ -449,9 +443,9 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques return &result, nil } -func (c *Client) contextLimitRetryLoop(ctx context.Context, request 
openai.ChatCompletionRequest, id string, maxTokens int, status chan<- types.CompletionStatus) ([]openai.ChatCompletionStreamResponse, error) { +func (c *Client) contextLimitRetryLoop(ctx context.Context, request openai.ChatCompletionRequest, id string, maxTokens int, status chan<- types.CompletionStatus) (types.CompletionMessage, error) { var ( - response []openai.ChatCompletionStreamResponse + response types.CompletionMessage err error ) @@ -469,10 +463,10 @@ func (c *Client) contextLimitRetryLoop(ctx context.Context, request openai.ChatC maxTokens = decreaseTenPercent(maxTokens) continue } - return nil, err + return types.CompletionMessage{}, err } - return nil, err + return types.CompletionMessage{}, err } func appendMessage(msg types.CompletionMessage, response openai.ChatCompletionStreamResponse) types.CompletionMessage { @@ -548,7 +542,7 @@ func override(left, right string) string { return left } -func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, transactionID string, partial chan<- types.CompletionStatus) (responses []openai.ChatCompletionStreamResponse, _ error) { +func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, transactionID string, partial chan<- types.CompletionStatus) (types.CompletionMessage, error) { streamResponse := os.Getenv("GPTSCRIPT_INTERNAL_OPENAI_STREAMING") != "false" partial <- types.CompletionStatus{ @@ -565,56 +559,58 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, request.StreamOptions = nil resp, err := c.c.CreateChatCompletion(ctx, request) if err != nil { - return nil, err + return types.CompletionMessage{}, err } - return []openai.ChatCompletionStreamResponse{ - { - ID: resp.ID, - Object: resp.Object, - Created: resp.Created, - Model: resp.Model, - Usage: resp.Usage, - Choices: []openai.ChatCompletionStreamChoice{ - { - Index: resp.Choices[0].Index, - Delta: openai.ChatCompletionStreamChoiceDelta{ - Content: 
resp.Choices[0].Message.Content, - Role: resp.Choices[0].Message.Role, - FunctionCall: resp.Choices[0].Message.FunctionCall, - ToolCalls: resp.Choices[0].Message.ToolCalls, - }, - FinishReason: resp.Choices[0].FinishReason, + return appendMessage(types.CompletionMessage{}, openai.ChatCompletionStreamResponse{ + ID: resp.ID, + Object: resp.Object, + Created: resp.Created, + Model: resp.Model, + Usage: resp.Usage, + Choices: []openai.ChatCompletionStreamChoice{ + { + Index: resp.Choices[0].Index, + Delta: openai.ChatCompletionStreamChoiceDelta{ + Content: resp.Choices[0].Message.Content, + Role: resp.Choices[0].Message.Role, + FunctionCall: resp.Choices[0].Message.FunctionCall, + ToolCalls: resp.Choices[0].Message.ToolCalls, }, + FinishReason: resp.Choices[0].FinishReason, }, }, - }, nil + }), nil } stream, err := c.c.CreateChatCompletionStream(ctx, request) if err != nil { - return nil, err + return types.CompletionMessage{}, err } defer stream.Close() - var partialMessage types.CompletionMessage + var ( + partialMessage types.CompletionMessage + start = time.Now() + last []string + ) for { response, err := stream.Recv() if err == io.EOF { - return responses, c.cache.Store(ctx, c.cacheKey(request), responses) + return partialMessage, c.cache.Store(ctx, c.cacheKey(request), partialMessage) } else if err != nil { - return nil, err - } - if len(response.Choices) > 0 { - slog.Debug("stream", "content", response.Choices[0].Delta.Content) + return types.CompletionMessage{}, err } + partialMessage = appendMessage(partialMessage, response) if partial != nil { - partialMessage = appendMessage(partialMessage, response) - partial <- types.CompletionStatus{ - CompletionID: transactionID, - PartialResponse: &partialMessage, + if time.Since(start) > 500*time.Millisecond { + last = last[:0] + partial <- types.CompletionStatus{ + CompletionID: transactionID, + PartialResponse: &partialMessage, + } + start = time.Now() } } - responses = append(responses, response) } } diff --git 
a/pkg/types/completion.go b/pkg/types/completion.go index 6a05effa..2362071f 100644 --- a/pkg/types/completion.go +++ b/pkg/types/completion.go @@ -82,7 +82,6 @@ type CompletionStatus struct { Response any Usage Usage Cached bool - Chunks any PartialResponse *CompletionMessage } From bda5f60310b83d7886c05ec7e3b3a960ce0e1db9 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 4 Nov 2024 07:35:04 -0500 Subject: [PATCH 180/270] feat: add ability to pass request-specific env vars to chat completion This will allow authentication per-request in model providers. Signed-off-by: Donnie Adams --- go.mod | 2 +- go.sum | 4 ++-- pkg/context/context.go | 11 ----------- pkg/engine/engine.go | 5 ++--- pkg/llm/proxy.go | 4 ++-- pkg/llm/registry.go | 16 ++++++++-------- pkg/openai/client.go | 39 ++++++++++++++++++++++++++------------ pkg/remote/remote.go | 19 +++++++++---------- pkg/runner/output.go | 2 +- pkg/tests/judge/judge.go | 2 +- pkg/tests/tester/runner.go | 2 +- 11 files changed, 54 insertions(+), 52 deletions(-) diff --git a/go.mod b/go.mod index 4a95a521..5fa1a5c8 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 - github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3 + github.com/gptscript-ai/chat-completion-client v0.0.0-20241104122544-5fe75f07c131 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 diff --git a/go.sum b/go.sum index 80cbcea1..3661a6c6 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gptscript-ai/broadcaster 
v0.0.0-20240625175512-c43682019b86 h1:m9yLtIEd0z1ia8qFjq3u0Ozb6QKwidyL856JLJp6nbA= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= -github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3 h1:EQiFTZv+BnOWJX2B9XdF09fL2Zj7h19n1l23TpWCafc= -github.com/gptscript-ai/chat-completion-client v0.0.0-20240813051153-a440ada7e3c3/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= +github.com/gptscript-ai/chat-completion-client v0.0.0-20241104122544-5fe75f07c131 h1:y2FcmT4X8U606gUS0teX5+JWX9K/NclsLEhHiyrd+EU= +github.com/gptscript-ai/chat-completion-client v0.0.0-20241104122544-5fe75f07c131/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e h1:WpNae0NBx+Ri8RB3SxF8DhadDKU7h+jfWPQterDpbJA= diff --git a/pkg/context/context.go b/pkg/context/context.go index 0169d0e0..31474f6c 100644 --- a/pkg/context/context.go +++ b/pkg/context/context.go @@ -46,14 +46,3 @@ func GetLogger(ctx context.Context) mvl.Logger { return l } - -type envKey struct{} - -func WithEnv(ctx context.Context, env []string) context.Context { - return context.WithValue(ctx, envKey{}, env) -} - -func GetEnv(ctx context.Context) []string { - l, _ := ctx.Value(envKey{}).([]string) - return l -} diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 0665991c..44ed50bb 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -8,14 +8,13 @@ import ( "sync" "github.com/gptscript-ai/gptscript/pkg/config" - gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/counter" "github.com/gptscript-ai/gptscript/pkg/types" 
"github.com/gptscript-ai/gptscript/pkg/version" ) type Model interface { - Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) + Call(ctx context.Context, messageRequest types.CompletionRequest, env []string, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) ProxyInfo() (string, string, error) } @@ -389,7 +388,7 @@ func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { } }() - resp, err := e.Model.Call(gcontext.WithEnv(ctx, e.Env), state.Completion, progress) + resp, err := e.Model.Call(ctx, state.Completion, e.Env, progress) if err != nil { return nil, err } diff --git a/pkg/llm/proxy.go b/pkg/llm/proxy.go index 7c3091b3..aa8802be 100644 --- a/pkg/llm/proxy.go +++ b/pkg/llm/proxy.go @@ -54,7 +54,7 @@ func (r *Registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { var ( model string - data = map[string]any{} + data map[string]any ) if json.Unmarshal(inBytes, &data) == nil { @@ -65,7 +65,7 @@ func (r *Registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { model = builtin.GetDefaultModel() } - c, err := r.getClient(req.Context(), model) + c, err := r.getClient(req.Context(), model, nil) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/pkg/llm/registry.go b/pkg/llm/registry.go index 8129c788..09fe1dce 100644 --- a/pkg/llm/registry.go +++ b/pkg/llm/registry.go @@ -15,7 +15,7 @@ import ( ) type Client interface { - Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) + Call(ctx context.Context, messageRequest types.CompletionRequest, env []string, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) ListModels(ctx context.Context, providers ...string) (result []string, _ error) Supports(ctx context.Context, modelName string) (bool, error) } @@ -78,7 +78,7 @@ func (r 
*Registry) fastPath(modelName string) Client { return r.clients[0] } -func (r *Registry) getClient(ctx context.Context, modelName string) (Client, error) { +func (r *Registry) getClient(ctx context.Context, modelName string, env []string) (Client, error) { if c := r.fastPath(modelName); c != nil { return c, nil } @@ -101,7 +101,7 @@ func (r *Registry) getClient(ctx context.Context, modelName string) (Client, err if len(errs) > 0 && oaiClient != nil { // Prompt the user to enter their OpenAI API key and try again. - if err := oaiClient.RetrieveAPIKey(ctx); err != nil { + if err := oaiClient.RetrieveAPIKey(ctx, env); err != nil { return nil, err } ok, err := oaiClient.Supports(ctx, modelName) @@ -119,13 +119,13 @@ func (r *Registry) getClient(ctx context.Context, modelName string) (Client, err return nil, errors.Join(errs...) } -func (r *Registry) Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { +func (r *Registry) Call(ctx context.Context, messageRequest types.CompletionRequest, env []string, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { if messageRequest.Model == "" { return nil, fmt.Errorf("model is required") } if c := r.fastPath(messageRequest.Model); c != nil { - return c.Call(ctx, messageRequest, status) + return c.Call(ctx, messageRequest, env, status) } var errs []error @@ -140,20 +140,20 @@ func (r *Registry) Call(ctx context.Context, messageRequest types.CompletionRequ errs = append(errs, err) } else if ok { - return client.Call(ctx, messageRequest, status) + return client.Call(ctx, messageRequest, env, status) } } if len(errs) > 0 && oaiClient != nil { // Prompt the user to enter their OpenAI API key and try again. 
- if err := oaiClient.RetrieveAPIKey(ctx); err != nil { + if err := oaiClient.RetrieveAPIKey(ctx, env); err != nil { return nil, err } ok, err := oaiClient.Supports(ctx, messageRequest.Model) if err != nil { return nil, err } else if ok { - return oaiClient.Call(ctx, messageRequest, status) + return oaiClient.Call(ctx, messageRequest, env, status) } } diff --git a/pkg/openai/client.go b/pkg/openai/client.go index be5c6253..6178c997 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -13,7 +13,6 @@ import ( openai "github.com/gptscript-ai/chat-completion-client" "github.com/gptscript-ai/gptscript/pkg/cache" - gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/counter" "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/hash" @@ -303,9 +302,9 @@ func toMessages(request types.CompletionRequest, compat bool) (result []openai.C return } -func (c *Client) Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { +func (c *Client) Call(ctx context.Context, messageRequest types.CompletionRequest, env []string, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { if err := c.ValidAuth(); err != nil { - if err := c.RetrieveAPIKey(ctx); err != nil { + if err := c.RetrieveAPIKey(ctx, env); err != nil { return nil, err } } @@ -401,7 +400,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques if err != nil { return nil, err } else if !ok { - result, err = c.call(ctx, request, id, status) + result, err = c.call(ctx, request, id, env, status) // If we got back a context length exceeded error, keep retrying and shrinking the message history until we pass. var apiError *openai.APIError @@ -409,7 +408,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques // Decrease maxTokens by 10% to make garbage collection more aggressive. 
// The retry loop will further decrease maxTokens if needed. maxTokens := decreaseTenPercent(messageRequest.MaxTokens) - result, err = c.contextLimitRetryLoop(ctx, request, id, maxTokens, status) + result, err = c.contextLimitRetryLoop(ctx, request, id, env, maxTokens, status) } if err != nil { return nil, err @@ -443,7 +442,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques return &result, nil } -func (c *Client) contextLimitRetryLoop(ctx context.Context, request openai.ChatCompletionRequest, id string, maxTokens int, status chan<- types.CompletionStatus) (types.CompletionMessage, error) { +func (c *Client) contextLimitRetryLoop(ctx context.Context, request openai.ChatCompletionRequest, id string, env []string, maxTokens int, status chan<- types.CompletionStatus) (types.CompletionMessage, error) { var ( response types.CompletionMessage err error @@ -452,7 +451,7 @@ func (c *Client) contextLimitRetryLoop(ctx context.Context, request openai.ChatC for range 10 { // maximum 10 tries // Try to drop older messages again, with a decreased max tokens. 
request.Messages = dropMessagesOverCount(maxTokens, request.Messages) - response, err = c.call(ctx, request, id, status) + response, err = c.call(ctx, request, id, env, status) if err == nil { return response, nil } @@ -542,7 +541,7 @@ func override(left, right string) string { return left } -func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, transactionID string, partial chan<- types.CompletionStatus) (types.CompletionMessage, error) { +func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, transactionID string, env []string, partial chan<- types.CompletionStatus) (types.CompletionMessage, error) { streamResponse := os.Getenv("GPTSCRIPT_INTERNAL_OPENAI_STREAMING") != "false" partial <- types.CompletionStatus{ @@ -553,11 +552,27 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, }, } + var ( + headers map[string]string + modelProviderEnv []string + ) + for _, e := range env { + if strings.HasPrefix(e, "GPTSCRIPT_MODEL_PROVIDER_") { + modelProviderEnv = append(modelProviderEnv, e) + } + } + + if len(modelProviderEnv) > 0 { + headers = map[string]string{ + "X-GPTScript-Env": strings.Join(modelProviderEnv, ","), + } + } + slog.Debug("calling openai", "message", request.Messages) if !streamResponse { request.StreamOptions = nil - resp, err := c.c.CreateChatCompletion(ctx, request) + resp, err := c.c.CreateChatCompletion(ctx, request, headers) if err != nil { return types.CompletionMessage{}, err } @@ -582,7 +597,7 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, }), nil } - stream, err := c.c.CreateChatCompletionStream(ctx, request) + stream, err := c.c.CreateChatCompletionStream(ctx, request, headers) if err != nil { return types.CompletionMessage{}, err } @@ -614,8 +629,8 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, } } -func (c *Client) RetrieveAPIKey(ctx context.Context) error { - k, err := 
prompt.GetModelProviderCredential(ctx, c.credStore, BuiltinCredName, "OPENAI_API_KEY", "Please provide your OpenAI API key:", gcontext.GetEnv(ctx)) +func (c *Client) RetrieveAPIKey(ctx context.Context, env []string) error { + k, err := prompt.GetModelProviderCredential(ctx, c.credStore, BuiltinCredName, "OPENAI_API_KEY", "Please provide your OpenAI API key:", env) if err != nil { return err } diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index fa1d40c2..5542372b 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -10,7 +10,6 @@ import ( "sync" "github.com/gptscript-ai/gptscript/pkg/cache" - gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/engine" env2 "github.com/gptscript-ai/gptscript/pkg/env" @@ -42,13 +41,13 @@ func New(r *runner.Runner, envs []string, cache *cache.Client, credStore credent } } -func (c *Client) Call(ctx context.Context, messageRequest types.CompletionRequest, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { +func (c *Client) Call(ctx context.Context, messageRequest types.CompletionRequest, env []string, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { _, provider := c.parseModel(messageRequest.Model) if provider == "" { return nil, fmt.Errorf("failed to find remote model %s", messageRequest.Model) } - client, err := c.load(ctx, provider) + client, err := c.load(ctx, provider, env...) 
if err != nil { return nil, err } @@ -60,7 +59,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques modelName = toolName } messageRequest.Model = modelName - return client.Call(ctx, messageRequest, status) + return client.Call(ctx, messageRequest, env, status) } func (c *Client) ListModels(ctx context.Context, providers ...string) (result []string, _ error) { @@ -111,7 +110,7 @@ func isHTTPURL(toolName string) bool { strings.HasPrefix(toolName, "https://") } -func (c *Client) clientFromURL(ctx context.Context, apiURL string) (*openai.Client, error) { +func (c *Client) clientFromURL(ctx context.Context, apiURL string, envs []string) (*openai.Client, error) { parsed, err := url.Parse(apiURL) if err != nil { return nil, err @@ -121,7 +120,7 @@ func (c *Client) clientFromURL(ctx context.Context, apiURL string) (*openai.Clie if key == "" && !isLocalhost(apiURL) { var err error - key, err = c.retrieveAPIKey(ctx, env, apiURL) + key, err = c.retrieveAPIKey(ctx, env, apiURL, envs) if err != nil { return nil, err } @@ -134,7 +133,7 @@ func (c *Client) clientFromURL(ctx context.Context, apiURL string) (*openai.Clie }) } -func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, error) { +func (c *Client) load(ctx context.Context, toolName string, env ...string) (*openai.Client, error) { c.clientsLock.Lock() defer c.clientsLock.Unlock() @@ -144,7 +143,7 @@ func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, err } if isHTTPURL(toolName) { - remoteClient, err := c.clientFromURL(ctx, toolName) + remoteClient, err := c.clientFromURL(ctx, toolName, env) if err != nil { return nil, err } @@ -183,8 +182,8 @@ func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, err return oClient, nil } -func (c *Client) retrieveAPIKey(ctx context.Context, env, url string) (string, error) { - return prompt.GetModelProviderCredential(ctx, c.credStore, url, env, fmt.Sprintf("Please provide your API key 
for %s", url), append(gcontext.GetEnv(ctx), c.envs...)) +func (c *Client) retrieveAPIKey(ctx context.Context, env, url string, envs []string) (string, error) { + return prompt.GetModelProviderCredential(ctx, c.credStore, url, env, fmt.Sprintf("Please provide your API key for %s", url), append(envs, c.envs...)) } func isLocalhost(url string) bool { diff --git a/pkg/runner/output.go b/pkg/runner/output.go index 8a6aefdb..5f1d2818 100644 --- a/pkg/runner/output.go +++ b/pkg/runner/output.go @@ -84,7 +84,7 @@ func (r *Runner) handleOutput(callCtx engine.Context, monitor Monitor, env []str if err != nil { return nil, fmt.Errorf("marshaling input for output filter: %w", err) } - res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, outputToolRef.ToolID, string(inputData), "", engine.OutputToolCategory) + res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, outputToolRef.ToolID, inputData, "", engine.OutputToolCategory) if err != nil { return nil, err } diff --git a/pkg/tests/judge/judge.go b/pkg/tests/judge/judge.go index f6581dcc..26464386 100644 --- a/pkg/tests/judge/judge.go +++ b/pkg/tests/judge/judge.go @@ -112,7 +112,7 @@ func (j *Judge[T]) Equal(ctx context.Context, expected, actual T, criteria strin }, }, } - response, err := j.client.CreateChatCompletion(ctx, request) + response, err := j.client.CreateChatCompletion(ctx, request, nil) if err != nil { return false, "", fmt.Errorf("failed to create chat completion request: %w", err) } diff --git a/pkg/tests/tester/runner.go b/pkg/tests/tester/runner.go index fa7f7683..1f59ea03 100644 --- a/pkg/tests/tester/runner.go +++ b/pkg/tests/tester/runner.go @@ -35,7 +35,7 @@ func (c *Client) ProxyInfo() (string, string, error) { return "test-auth", "test-url", nil } -func (c *Client) Call(_ context.Context, messageRequest types.CompletionRequest, _ chan<- types.CompletionStatus) (resp *types.CompletionMessage, respErr error) { +func (c *Client) Call(_ context.Context, messageRequest types.CompletionRequest, _ 
[]string, _ chan<- types.CompletionStatus) (resp *types.CompletionMessage, respErr error) { msgData, err := json.MarshalIndent(messageRequest, "", " ") require.NoError(c.t, err) From 4ce687f29ae8804b9b18b0684e0b9b9e51145b62 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 4 Nov 2024 10:49:55 -0500 Subject: [PATCH 181/270] chore: add postgres credential helper (#891) Signed-off-by: Grant Linville --- pkg/config/cliconfig.go | 5 +++-- pkg/credentials/util.go | 8 ++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/config/cliconfig.go b/pkg/config/cliconfig.go index d0ef00c8..215e2f39 100644 --- a/pkg/config/cliconfig.go +++ b/pkg/config/cliconfig.go @@ -22,14 +22,15 @@ const ( PassCredHelper = "pass" FileCredHelper = "file" SqliteCredHelper = "sqlite" + PostgresCredHelper = "postgres" GPTScriptHelperPrefix = "gptscript-credential-" ) var ( - darwinHelpers = []string{OsxkeychainCredHelper, FileCredHelper, SqliteCredHelper} + darwinHelpers = []string{OsxkeychainCredHelper, FileCredHelper, SqliteCredHelper, PostgresCredHelper} windowsHelpers = []string{WincredCredHelper, FileCredHelper} - linuxHelpers = []string{SecretserviceCredHelper, PassCredHelper, FileCredHelper, SqliteCredHelper} + linuxHelpers = []string{SecretserviceCredHelper, PassCredHelper, FileCredHelper, SqliteCredHelper, PostgresCredHelper} ) func listAsString(helpers []string) string { diff --git a/pkg/credentials/util.go b/pkg/credentials/util.go index 72f9eab9..2a4ad96b 100644 --- a/pkg/credentials/util.go +++ b/pkg/credentials/util.go @@ -14,8 +14,8 @@ type CredentialHelperDirs struct { func RepoNameForCredentialStore(store string) string { switch store { - case config.SqliteCredHelper: - return "gptscript-credential-sqlite" + case config.SqliteCredHelper, config.PostgresCredHelper: + return "gptscript-credential-database" default: return "gptscript-credential-helpers" } @@ -23,8 +23,8 @@ func RepoNameForCredentialStore(store string) string { func 
GitURLForRepoName(repoName string) (string, error) { switch repoName { - case "gptscript-credential-sqlite": - return runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_SQLITE_ROOT", "https://github.com/gptscript-ai/gptscript-credential-sqlite.git"), nil + case "gptscript-credential-database": + return runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_SQLITE_ROOT", "https://github.com/gptscript-ai/gptscript-credential-database.git"), nil case "gptscript-credential-helpers": return runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_HELPERS_ROOT", "https://github.com/gptscript-ai/gptscript-credential-helpers.git"), nil default: From 9aeb1cd77754ec77cfe43fe0ddfbfb5939e20eb1 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 4 Nov 2024 22:43:14 -0700 Subject: [PATCH 182/270] chore: make cred stores be tools --- pkg/cli/credential.go | 27 +--- pkg/cli/credential_delete.go | 19 +-- pkg/cli/credential_show.go | 19 +-- pkg/config/cliconfig.go | 72 +++------- pkg/credentials/factory.go | 77 ++++++++++ pkg/credentials/noop.go | 4 +- pkg/credentials/runnerprogram.go | 29 ++++ pkg/credentials/store.go | 82 ++++------- pkg/credentials/{helper.go => toolstore.go} | 18 +-- pkg/credentials/util.go | 49 ------- pkg/engine/cmd.go | 3 + pkg/engine/engine.go | 3 - pkg/gptscript/gptscript.go | 85 ++++++++--- pkg/parser/parser.go | 6 + pkg/repos/get.go | 149 +------------------- pkg/repos/runtimes/golang/golang.go | 13 -- pkg/runner/runner.go | 5 - pkg/runner/runtimemanager.go | 9 -- pkg/sdkserver/credentials.go | 17 +-- pkg/types/tool.go | 4 + 20 files changed, 268 insertions(+), 422 deletions(-) create mode 100644 pkg/credentials/factory.go create mode 100644 pkg/credentials/runnerprogram.go rename pkg/credentials/{helper.go => toolstore.go} (82%) delete mode 100644 pkg/credentials/util.go diff --git a/pkg/cli/credential.go b/pkg/cli/credential.go index a46c483b..eaf7665b 100644 --- a/pkg/cli/credential.go +++ b/pkg/cli/credential.go @@ -9,10 +9,7 @@ import ( "time" cmd2 "github.com/gptscript-ai/cmd" - 
"github.com/gptscript-ai/gptscript/pkg/config" - "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/gptscript" - "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" "github.com/spf13/cobra" ) @@ -37,33 +34,19 @@ func (c *Credential) Customize(cmd *cobra.Command) { } func (c *Credential) Run(cmd *cobra.Command, _ []string) error { - cfg, err := config.ReadCLIConfig(c.root.ConfigFile) - if err != nil { - return fmt.Errorf("failed to read CLI config: %w", err) - } - opts, err := c.root.NewGPTScriptOpts() if err != nil { return err } - opts = gptscript.Complete(opts) - if opts.Runner.RuntimeManager == nil { - opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir, opts.SystemToolsDir) - } - - ctxs := opts.CredentialContexts - if c.AllContexts { - ctxs = []string{credentials.AllCredentialContexts} - } - - if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg); err != nil { + gptScript, err := gptscript.New(cmd.Context(), opts) + if err != nil { return err } + defer gptScript.Close(true) - // Initialize the credential store and get all the credentials. 
- store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, ctxs, opts.Cache.CacheDir) + store, err := gptScript.CredentialStoreFactory.NewStore(gptScript.DefaultCredentialContexts) if err != nil { - return fmt.Errorf("failed to get credentials store: %w", err) + return err } creds, err := store.List(cmd.Context()) diff --git a/pkg/cli/credential_delete.go b/pkg/cli/credential_delete.go index 81392f36..6c43a41b 100644 --- a/pkg/cli/credential_delete.go +++ b/pkg/cli/credential_delete.go @@ -3,10 +3,7 @@ package cli import ( "fmt" - "github.com/gptscript-ai/gptscript/pkg/config" - "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/gptscript" - "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" "github.com/spf13/cobra" ) @@ -28,23 +25,15 @@ func (c *Delete) Run(cmd *cobra.Command, args []string) error { return err } - cfg, err := config.ReadCLIConfig(c.root.ConfigFile) + gptScript, err := gptscript.New(cmd.Context(), opts) if err != nil { - return fmt.Errorf("failed to read CLI config: %w", err) - } - - opts = gptscript.Complete(opts) - if opts.Runner.RuntimeManager == nil { - opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir, opts.SystemToolsDir) - } - - if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg); err != nil { return err } + defer gptScript.Close(true) - store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, opts.CredentialContexts, opts.Cache.CacheDir) + store, err := gptScript.CredentialStoreFactory.NewStore(gptScript.DefaultCredentialContexts) if err != nil { - return fmt.Errorf("failed to get credentials store: %w", err) + return err } if err = store.Remove(cmd.Context(), args[0]); err != nil { diff --git a/pkg/cli/credential_show.go b/pkg/cli/credential_show.go index ab2e9cd1..95cb4f11 100644 --- a/pkg/cli/credential_show.go +++ b/pkg/cli/credential_show.go @@ -5,10 +5,7 @@ import ( "os" "text/tabwriter" - 
"github.com/gptscript-ai/gptscript/pkg/config" - "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/gptscript" - "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" "github.com/spf13/cobra" ) @@ -30,23 +27,15 @@ func (c *Show) Run(cmd *cobra.Command, args []string) error { return err } - cfg, err := config.ReadCLIConfig(c.root.ConfigFile) + gptScript, err := gptscript.New(cmd.Context(), opts) if err != nil { - return fmt.Errorf("failed to read CLI config: %w", err) - } - - opts = gptscript.Complete(opts) - if opts.Runner.RuntimeManager == nil { - opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir, opts.SystemToolsDir) - } - - if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg); err != nil { return err } + defer gptScript.Close(true) - store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, opts.CredentialContexts, opts.Cache.CacheDir) + store, err := gptScript.CredentialStoreFactory.NewStore(gptScript.DefaultCredentialContexts) if err != nil { - return fmt.Errorf("failed to get credentials store: %w", err) + return err } cred, exists, err := store.Get(cmd.Context(), args[0]) diff --git a/pkg/config/cliconfig.go b/pkg/config/cliconfig.go index 215e2f39..73741ab3 100644 --- a/pkg/config/cliconfig.go +++ b/pkg/config/cliconfig.go @@ -3,11 +3,9 @@ package config import ( "encoding/base64" "encoding/json" - "errors" "fmt" "os" "runtime" - "slices" "strings" "sync" @@ -21,28 +19,13 @@ const ( SecretserviceCredHelper = "secretservice" PassCredHelper = "pass" FileCredHelper = "file" - SqliteCredHelper = "sqlite" - PostgresCredHelper = "postgres" - - GPTScriptHelperPrefix = "gptscript-credential-" ) var ( - darwinHelpers = []string{OsxkeychainCredHelper, FileCredHelper, SqliteCredHelper, PostgresCredHelper} - windowsHelpers = []string{WincredCredHelper, FileCredHelper} - linuxHelpers = []string{SecretserviceCredHelper, PassCredHelper, FileCredHelper, SqliteCredHelper, 
PostgresCredHelper} + // Helpers is a list of all supported credential helpers from github.com/gptscript-ai/gptscript-credential-helpers + Helpers = []string{WincredCredHelper, OsxkeychainCredHelper, SecretserviceCredHelper, PassCredHelper} ) -func listAsString(helpers []string) string { - if len(helpers) == 0 { - return "" - } else if len(helpers) == 1 { - return helpers[0] - } - - return strings.Join(helpers[:len(helpers)-1], ", ") + " or " + helpers[len(helpers)-1] -} - type AuthConfig types.AuthConfig func (a AuthConfig) MarshalJSON() ([]byte, error) { @@ -74,8 +57,8 @@ func (a *AuthConfig) UnmarshalJSON(data []byte) error { type CLIConfig struct { Auths map[string]AuthConfig `json:"auths,omitempty"` CredentialsStore string `json:"credsStore,omitempty"` - Integrations map[string]string `json:"integrations,omitempty"` + raw []byte auths map[string]types.AuthConfig authsLock *sync.Mutex location string @@ -108,7 +91,19 @@ func (c *CLIConfig) Save() error { } c.auths = nil } - data, err := json.Marshal(c) + + // This is to not overwrite additional fields that might be the config file + out := map[string]any{} + if len(c.raw) > 0 { + err := json.Unmarshal(c.raw, &out) + if err != nil { + return err + } + } + out["auths"] = c.Auths + out["credsStore"] = c.CredentialsStore + + data, err := json.Marshal(out) if err != nil { return err } @@ -154,34 +149,22 @@ func ReadCLIConfig(gptscriptConfigFile string) (*CLIConfig, error) { result := &CLIConfig{ authsLock: &sync.Mutex{}, location: gptscriptConfigFile, + raw: data, } if err := json.Unmarshal(data, result); err != nil { return nil, fmt.Errorf("failed to unmarshal %s: %v", gptscriptConfigFile, err) } + if store := os.Getenv("GPTSCRIPT_CREDENTIAL_STORE"); store != "" { + result.CredentialsStore = store + } + if result.CredentialsStore == "" { if err := result.setDefaultCredentialsStore(); err != nil { return nil, err } } - if !isValidCredentialHelper(result.CredentialsStore) { - errMsg := fmt.Sprintf("invalid credential 
store '%s'", result.CredentialsStore) - switch runtime.GOOS { - case "darwin": - errMsg += fmt.Sprintf(" (use %s)", listAsString(darwinHelpers)) - case "windows": - errMsg += fmt.Sprintf(" (use %s)", listAsString(windowsHelpers)) - case "linux": - errMsg += fmt.Sprintf(" (use %s)", listAsString(linuxHelpers)) - default: - errMsg += " (use file)" - } - errMsg += fmt.Sprintf("\nPlease edit your config file at %s to fix this.", result.location) - - return nil, errors.New(errMsg) - } - return result, nil } @@ -197,19 +180,6 @@ func (c *CLIConfig) setDefaultCredentialsStore() error { return c.Save() } -func isValidCredentialHelper(helper string) bool { - switch runtime.GOOS { - case "darwin": - return slices.Contains(darwinHelpers, helper) - case "windows": - return slices.Contains(windowsHelpers, helper) - case "linux": - return slices.Contains(linuxHelpers, helper) - default: - return helper == FileCredHelper - } -} - func readFile(path string) ([]byte, error) { data, err := os.ReadFile(path) if os.IsNotExist(err) { diff --git a/pkg/credentials/factory.go b/pkg/credentials/factory.go new file mode 100644 index 00000000..ca6f1d18 --- /dev/null +++ b/pkg/credentials/factory.go @@ -0,0 +1,77 @@ +package credentials + +import ( + "context" + + "github.com/docker/docker-credential-helpers/client" + "github.com/gptscript-ai/gptscript/pkg/config" + "github.com/gptscript-ai/gptscript/pkg/types" +) + +type ProgramLoaderRunner interface { + Load(ctx context.Context, toolName string) (prg types.Program, err error) + Run(ctx context.Context, prg types.Program, input string) (output string, err error) +} + +func NewFactory(ctx context.Context, cfg *config.CLIConfig, plr ProgramLoaderRunner) (StoreFactory, error) { + toolName := translateToolName(cfg.CredentialsStore) + if toolName == config.FileCredHelper { + return StoreFactory{ + file: true, + cfg: cfg, + }, nil + } + + prg, err := plr.Load(ctx, toolName) + if err != nil { + return StoreFactory{}, err + } + + return 
StoreFactory{ + ctx: ctx, + prg: prg, + runner: plr, + cfg: cfg, + }, nil +} + +type StoreFactory struct { + ctx context.Context + prg types.Program + file bool + runner ProgramLoaderRunner + cfg *config.CLIConfig +} + +func (s *StoreFactory) NewStore(credCtxs []string) (CredentialStore, error) { + if err := validateCredentialCtx(credCtxs); err != nil { + return nil, err + } + if s.file { + return Store{ + credCtxs: credCtxs, + cfg: s.cfg, + }, nil + } + return Store{ + credCtxs: credCtxs, + cfg: s.cfg, + program: s.program, + }, nil +} + +func (s *StoreFactory) program(args ...string) client.Program { + return &runnerProgram{ + factory: s, + action: args[0], + } +} + +func translateToolName(toolName string) string { + for _, helper := range config.Helpers { + if helper == toolName { + return "github.com/gptscript-ai/gptscript-credential-helpers/" + toolName + "/cmd" + } + } + return toolName +} diff --git a/pkg/credentials/noop.go b/pkg/credentials/noop.go index 3a13b907..414f8a12 100644 --- a/pkg/credentials/noop.go +++ b/pkg/credentials/noop.go @@ -1,6 +1,8 @@ package credentials -import "context" +import ( + "context" +) type NoopStore struct{} diff --git a/pkg/credentials/runnerprogram.go b/pkg/credentials/runnerprogram.go new file mode 100644 index 00000000..4ae123a0 --- /dev/null +++ b/pkg/credentials/runnerprogram.go @@ -0,0 +1,29 @@ +package credentials + +import ( + "io" +) + +type runnerProgram struct { + factory *StoreFactory + action string + output string + err error +} + +func (r *runnerProgram) Output() ([]byte, error) { + return []byte(r.output), r.err +} + +func (r *runnerProgram) Input(in io.Reader) { + input, err := io.ReadAll(in) + if err != nil { + r.err = err + return + } + + prg := r.factory.prg + prg.EntryToolID = prg.ToolSet[prg.EntryToolID].LocalTools[r.action] + + r.output, r.err = r.factory.runner.Run(r.factory.ctx, prg, string(input)) +} diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index 1843cd8d..56555cd4 100644 
--- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -3,13 +3,12 @@ package credentials import ( "context" "fmt" - "path/filepath" "regexp" "slices" - "strings" "github.com/docker/cli/cli/config/credentials" "github.com/docker/cli/cli/config/types" + "github.com/docker/docker-credential-helpers/client" credentials2 "github.com/docker/docker-credential-helpers/credentials" "github.com/gptscript-ai/gptscript/pkg/config" "golang.org/x/exp/maps" @@ -20,10 +19,6 @@ const ( AllCredentialContexts = "*" ) -type CredentialBuilder interface { - EnsureCredentialHelpers(ctx context.Context) error -} - type CredentialStore interface { Get(ctx context.Context, toolName string) (*Credential, bool, error) Add(ctx context.Context, cred Credential) error @@ -33,30 +28,17 @@ type CredentialStore interface { } type Store struct { - credCtxs []string - credBuilder CredentialBuilder - credHelperDirs CredentialHelperDirs - cfg *config.CLIConfig + credCtxs []string + cfg *config.CLIConfig + program client.ProgramFunc } -func NewStore(cfg *config.CLIConfig, credentialBuilder CredentialBuilder, credCtxs []string, cacheDir string) (CredentialStore, error) { - if err := validateCredentialCtx(credCtxs); err != nil { - return nil, err - } - return Store{ - credCtxs: credCtxs, - credBuilder: credentialBuilder, - credHelperDirs: GetCredentialHelperDirs(cacheDir, cfg.CredentialsStore), - cfg: cfg, - }, nil -} - -func (s Store) Get(ctx context.Context, toolName string) (*Credential, bool, error) { - if first(s.credCtxs) == AllCredentialContexts { +func (s Store) Get(_ context.Context, toolName string) (*Credential, bool, error) { + if len(s.credCtxs) > 0 && s.credCtxs[0] == AllCredentialContexts { return nil, false, fmt.Errorf("cannot get a credential with context %q", AllCredentialContexts) } - store, err := s.getStore(ctx) + store, err := s.getStore() if err != nil { return nil, false, err } @@ -99,14 +81,14 @@ func (s Store) Get(ctx context.Context, toolName string) (*Credential, 
bool, err // Add adds a new credential to the credential store. // Any context set on the credential object will be overwritten with the first context of the credential store. -func (s Store) Add(ctx context.Context, cred Credential) error { +func (s Store) Add(_ context.Context, cred Credential) error { first := first(s.credCtxs) if first == AllCredentialContexts { return fmt.Errorf("cannot add a credential with context %q", AllCredentialContexts) } cred.Context = first - store, err := s.getStore(ctx) + store, err := s.getStore() if err != nil { return err } @@ -118,12 +100,12 @@ func (s Store) Add(ctx context.Context, cred Credential) error { } // Refresh updates an existing credential in the credential store. -func (s Store) Refresh(ctx context.Context, cred Credential) error { +func (s Store) Refresh(_ context.Context, cred Credential) error { if !slices.Contains(s.credCtxs, cred.Context) { return fmt.Errorf("context %q not in list of valid contexts for this credential store", cred.Context) } - store, err := s.getStore(ctx) + store, err := s.getStore() if err != nil { return err } @@ -134,13 +116,13 @@ func (s Store) Refresh(ctx context.Context, cred Credential) error { return store.Store(auth) } -func (s Store) Remove(ctx context.Context, toolName string) error { +func (s Store) Remove(_ context.Context, toolName string) error { first := first(s.credCtxs) if len(s.credCtxs) > 1 || first == AllCredentialContexts { return fmt.Errorf("error: credential deletion is not supported when multiple credential contexts are provided") } - store, err := s.getStore(ctx) + store, err := s.getStore() if err != nil { return err } @@ -148,8 +130,8 @@ func (s Store) Remove(ctx context.Context, toolName string) error { return store.Erase(toolNameWithCtx(toolName, first)) } -func (s Store) List(ctx context.Context) ([]Credential, error) { - store, err := s.getStore(ctx) +func (s Store) List(_ context.Context) ([]Credential, error) { + store, err := s.getStore() if err != nil { 
return nil, err } @@ -179,7 +161,7 @@ func (s Store) List(ctx context.Context) ([]Credential, error) { } } - if first(s.credCtxs) == AllCredentialContexts { + if len(s.credCtxs) > 0 && s.credCtxs[0] == AllCredentialContexts { return allCreds, nil } @@ -194,25 +176,14 @@ func (s Store) List(ctx context.Context) ([]Credential, error) { return maps.Values(credsByName), nil } -func (s *Store) getStore(ctx context.Context) (credentials.Store, error) { - return s.getStoreByHelper(ctx, config.GPTScriptHelperPrefix+s.cfg.CredentialsStore) -} - -func (s *Store) getStoreByHelper(ctx context.Context, helper string) (credentials.Store, error) { - if helper == "" || helper == config.GPTScriptHelperPrefix+config.FileCredHelper { - return credentials.NewFileStore(s.cfg), nil +func (s *Store) getStore() (credentials.Store, error) { + if s.program != nil { + return &toolCredentialStore{ + file: credentials.NewFileStore(s.cfg), + program: s.program, + }, nil } - - // If the helper is referencing one of the credential helper programs, then reference the full path. 
- if strings.HasPrefix(helper, "gptscript-credential-") { - if err := s.credBuilder.EnsureCredentialHelpers(ctx); err != nil { - return nil, err - } - - helper = filepath.Join(s.credHelperDirs.BinDir, helper) - } - - return NewHelper(s.cfg, helper) + return credentials.NewFileStore(s.cfg), nil } func validateCredentialCtx(ctxs []string) error { @@ -234,3 +205,10 @@ func validateCredentialCtx(ctxs []string) error { return nil } + +func first(s []string) string { + if len(s) == 0 { + return "" + } + return s[0] +} diff --git a/pkg/credentials/helper.go b/pkg/credentials/toolstore.go similarity index 82% rename from pkg/credentials/helper.go rename to pkg/credentials/toolstore.go index e5cd34f6..3e31ea12 100644 --- a/pkg/credentials/helper.go +++ b/pkg/credentials/toolstore.go @@ -10,22 +10,14 @@ import ( "github.com/docker/cli/cli/config/types" "github.com/docker/docker-credential-helpers/client" credentials2 "github.com/docker/docker-credential-helpers/credentials" - "github.com/gptscript-ai/gptscript/pkg/config" ) -func NewHelper(c *config.CLIConfig, helper string) (credentials.Store, error) { - return &HelperStore{ - file: credentials.NewFileStore(c), - program: client.NewShellProgramFunc(helper), - }, nil -} - -type HelperStore struct { +type toolCredentialStore struct { file credentials.Store program client.ProgramFunc } -func (h *HelperStore) Erase(serverAddress string) error { +func (h *toolCredentialStore) Erase(serverAddress string) error { var errs []error if err := client.Erase(h.program, serverAddress); err != nil { errs = append(errs, err) @@ -36,7 +28,7 @@ func (h *HelperStore) Erase(serverAddress string) error { return errors.Join(errs...) 
} -func (h *HelperStore) Get(serverAddress string) (types.AuthConfig, error) { +func (h *toolCredentialStore) Get(serverAddress string) (types.AuthConfig, error) { creds, err := client.Get(h.program, serverAddress) if credentials2.IsErrCredentialsNotFound(err) { return h.file.Get(serverAddress) @@ -50,7 +42,7 @@ func (h *HelperStore) Get(serverAddress string) (types.AuthConfig, error) { }, nil } -func (h *HelperStore) GetAll() (map[string]types.AuthConfig, error) { +func (h *toolCredentialStore) GetAll() (map[string]types.AuthConfig, error) { result := map[string]types.AuthConfig{} serverAddresses, err := client.List(h.program) @@ -103,7 +95,7 @@ func (h *HelperStore) GetAll() (map[string]types.AuthConfig, error) { return result, nil } -func (h *HelperStore) Store(authConfig types.AuthConfig) error { +func (h *toolCredentialStore) Store(authConfig types.AuthConfig) error { return client.Store(h.program, &credentials2.Credentials{ ServerURL: authConfig.ServerAddress, Username: authConfig.Username, diff --git a/pkg/credentials/util.go b/pkg/credentials/util.go deleted file mode 100644 index 2a4ad96b..00000000 --- a/pkg/credentials/util.go +++ /dev/null @@ -1,49 +0,0 @@ -package credentials - -import ( - "fmt" - "path/filepath" - - "github.com/gptscript-ai/gptscript/pkg/config" - runtimeEnv "github.com/gptscript-ai/gptscript/pkg/env" -) - -type CredentialHelperDirs struct { - RevisionFile, LastCheckedFile, BinDir string -} - -func RepoNameForCredentialStore(store string) string { - switch store { - case config.SqliteCredHelper, config.PostgresCredHelper: - return "gptscript-credential-database" - default: - return "gptscript-credential-helpers" - } -} - -func GitURLForRepoName(repoName string) (string, error) { - switch repoName { - case "gptscript-credential-database": - return runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_SQLITE_ROOT", "https://github.com/gptscript-ai/gptscript-credential-database.git"), nil - case "gptscript-credential-helpers": - return 
runtimeEnv.VarOrDefault("GPTSCRIPT_CRED_HELPERS_ROOT", "https://github.com/gptscript-ai/gptscript-credential-helpers.git"), nil - default: - return "", fmt.Errorf("unknown repo name: %s", repoName) - } -} - -func GetCredentialHelperDirs(cacheDir, store string) CredentialHelperDirs { - repoName := RepoNameForCredentialStore(store) - return CredentialHelperDirs{ - RevisionFile: filepath.Join(cacheDir, "repos", repoName, "revision"), - LastCheckedFile: filepath.Join(cacheDir, "repos", repoName, "last-checked"), - BinDir: filepath.Join(cacheDir, "repos", repoName, "bin"), - } -} - -func first(s []string) string { - if len(s) == 0 { - return "" - } - return s[0] -} diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index a4f6d3ed..b0c1ab4b 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -149,6 +149,9 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate result *bytes.Buffer ) + if tool.Stdin { + cmd.Stdin = strings.NewReader(input) + } cmd.Stdout = io.MultiWriter(stdout, stdoutAndErr, progressOut) cmd.Stderr = io.MultiWriter(stdoutAndErr, progressOut, os.Stderr) result = stdout diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 44ed50bb..7232157e 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -7,7 +7,6 @@ import ( "strings" "sync" - "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/counter" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/gptscript-ai/gptscript/pkg/version" @@ -20,8 +19,6 @@ type Model interface { type RuntimeManager interface { GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) - EnsureCredentialHelpers(ctx context.Context) error - SetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig) error } type Engine struct { diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index df4b0792..63a67647 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ 
-2,6 +2,7 @@ package gptscript import ( "context" + "errors" "fmt" "os" "os/user" @@ -16,6 +17,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/llm" + "github.com/gptscript-ai/gptscript/pkg/loader" "github.com/gptscript-ai/gptscript/pkg/monitor" "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/openai" @@ -32,13 +34,15 @@ import ( var log = mvl.Package() type GPTScript struct { - Registry *llm.Registry - Runner *runner.Runner - Cache *cache.Client - WorkspacePath string - DeleteWorkspaceOnClose bool - ExtraEnv []string - close func() + Registry *llm.Registry + Runner *runner.Runner + Cache *cache.Client + CredentialStoreFactory credentials.StoreFactory + DefaultCredentialContexts []string + WorkspacePath string + DeleteWorkspaceOnClose bool + ExtraEnv []string + close func() } type Options struct { @@ -103,11 +107,17 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { opts.Runner.RuntimeManager = runtimes.Default(cacheClient.CacheDir(), opts.SystemToolsDir) } - if err := opts.Runner.RuntimeManager.SetUpCredentialHelpers(context.Background(), cliCfg); err != nil { + simplerRunner, err := newSimpleRunner(cacheClient, opts.Runner.RuntimeManager, opts.Env) + if err != nil { + return nil, err + } + + storeFactory, err := credentials.NewFactory(ctx, cliCfg, simplerRunner) + if err != nil { return nil, err } - credStore, err := credentials.NewStore(cliCfg, opts.Runner.RuntimeManager, opts.CredentialContexts, cacheClient.CacheDir()) + credStore, err := storeFactory.NewStore(opts.CredentialContexts) if err != nil { return nil, err } @@ -158,13 +168,15 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { } return &GPTScript{ - Registry: registry, - Runner: runner, - Cache: cacheClient, - WorkspacePath: opts.Workspace, - DeleteWorkspaceOnClose: opts.Workspace == "", - ExtraEnv: extraEnv, - close: closeServer, 
+ Registry: registry, + Runner: runner, + Cache: cacheClient, + CredentialStoreFactory: storeFactory, + DefaultCredentialContexts: opts.CredentialContexts, + WorkspacePath: opts.Workspace, + DeleteWorkspaceOnClose: opts.Workspace == "", + ExtraEnv: extraEnv, + close: closeServer, }, nil } @@ -266,3 +278,44 @@ func (g *GPTScript) ListTools(_ context.Context, prg types.Program) []types.Tool func (g *GPTScript) ListModels(ctx context.Context, providers ...string) ([]string, error) { return g.Registry.ListModels(ctx, providers...) } + +type simpleRunner struct { + cache *cache.Client + runner *runner.Runner + env []string +} + +func newSimpleRunner(cache *cache.Client, rm engine.RuntimeManager, env []string) (*simpleRunner, error) { + runner, err := runner.New(noopModel{}, credentials.NoopStore{}, runner.Options{ + RuntimeManager: rm, + }) + if err != nil { + return nil, err + } + return &simpleRunner{ + cache: cache, + runner: runner, + env: env, + }, nil +} + +func (s *simpleRunner) Load(ctx context.Context, toolName string) (prg types.Program, err error) { + return loader.Program(ctx, toolName, "", loader.Options{ + Cache: s.cache, + }) +} + +func (s *simpleRunner) Run(ctx context.Context, prg types.Program, input string) (output string, err error) { + return s.runner.Run(ctx, prg, s.env, input) +} + +type noopModel struct { +} + +func (n noopModel) Call(_ context.Context, _ types.CompletionRequest, _ []string, _ chan<- types.CompletionStatus) (*types.CompletionMessage, error) { + return nil, errors.New("unsupported") +} + +func (n noopModel) ProxyInfo() (string, string, error) { + return "", "", errors.New("unsupported") +} diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index c0beb8f2..626056a7 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -124,6 +124,12 @@ func isParam(line string, tool *types.Tool, scan *simplescanner) (_ bool, err er tool.Parameters.ExportContext = append(tool.Parameters.ExportContext, 
csv(scan.AddMultiline(value))...) case "context": tool.Parameters.Context = append(tool.Parameters.Context, csv(scan.AddMultiline(value))...) + case "stdin": + b, err := toBool(value) + if err != nil { + return false, err + } + tool.Parameters.Stdin = b case "metadata": mkey, mvalue, _ := strings.Cut(scan.AddMultiline(value), ":") if tool.MetaData == nil { diff --git a/pkg/repos/get.go b/pkg/repos/get.go index 0a50ce15..d77fb526 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -4,22 +4,15 @@ import ( "context" "encoding/json" "errors" - "fmt" "io/fs" "os" "path/filepath" "regexp" - "runtime" "strings" - "sync" - "time" "github.com/BurntSushi/locker" - "github.com/gptscript-ai/gptscript/pkg/config" - "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/repos/git" - "github.com/gptscript-ai/gptscript/pkg/repos/runtimes/golang" "github.com/gptscript-ai/gptscript/pkg/types" ) @@ -55,19 +48,12 @@ func (n noopRuntime) Setup(_ context.Context, _ types.Tool, _, _ string, _ []str } type Manager struct { - cacheDir string - storageDir string - gitDir string - runtimeDir string - systemDirs []string - runtimes []Runtime - credHelperConfig *credHelperConfig -} - -type credHelperConfig struct { - lock sync.Mutex - initialized bool - cliCfg *config.CLIConfig + cacheDir string + storageDir string + gitDir string + runtimeDir string + systemDirs []string + runtimes []Runtime } func New(cacheDir, systemDir string, runtimes ...Runtime) *Manager { @@ -90,129 +76,6 @@ func New(cacheDir, systemDir string, runtimes ...Runtime) *Manager { } } -func (m *Manager) EnsureCredentialHelpers(ctx context.Context) error { - if m.credHelperConfig == nil { - return nil - } - m.credHelperConfig.lock.Lock() - defer m.credHelperConfig.lock.Unlock() - - if !m.credHelperConfig.initialized { - if err := m.deferredSetUpCredentialHelpers(ctx, m.credHelperConfig.cliCfg); err != nil { - return err - } - 
m.credHelperConfig.initialized = true - } - - return nil -} - -func (m *Manager) SetUpCredentialHelpers(_ context.Context, cliCfg *config.CLIConfig) error { - m.credHelperConfig = &credHelperConfig{ - cliCfg: cliCfg, - } - return nil -} - -func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *config.CLIConfig) error { - var ( - helperName = cliCfg.CredentialsStore - distInfo, suffix string - ) - // The file helper is built-in and does not need to be downloaded. - if helperName == config.FileCredHelper { - return nil - } - switch helperName { - case config.WincredCredHelper: - suffix = ".exe" - default: - distInfo = fmt.Sprintf("-%s-%s", runtime.GOOS, runtime.GOARCH) - } - - repoName := credentials.RepoNameForCredentialStore(helperName) - - locker.Lock(repoName) - defer locker.Unlock(repoName) - - credHelperDirs := credentials.GetCredentialHelperDirs(m.cacheDir, helperName) - - // Load the last-checked file to make sure we haven't checked the repo in the last 24 hours. - now := time.Now() - lastChecked, err := os.ReadFile(credHelperDirs.LastCheckedFile) - if err == nil { - if t, err := time.Parse(time.RFC3339, strings.TrimSpace(string(lastChecked))); err == nil && now.Sub(t) < 24*time.Hour { - // Make sure the binary still exists, and if it does, return. - if _, err := os.Stat(filepath.Join(credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix)); err == nil { - log.Debugf("Credential helper %s up-to-date as of %v, checking for updates after %v", helperName, t, t.Add(24*time.Hour)) - return nil - } - } - } - - if err := os.MkdirAll(filepath.Dir(credHelperDirs.LastCheckedFile), 0755); err != nil { - return err - } - - // Update the last-checked file. 
- if err := os.WriteFile(credHelperDirs.LastCheckedFile, []byte(now.Format(time.RFC3339)), 0644); err != nil { - return err - } - - gitURL, err := credentials.GitURLForRepoName(repoName) - if err != nil { - return err - } - - tool := types.Tool{ - ToolDef: types.ToolDef{ - Parameters: types.Parameters{ - Name: repoName, - }, - }, - Source: types.ToolSource{ - Repo: &types.Repo{ - Root: gitURL, - }, - }, - } - tag, err := golang.GetLatestTag(tool) - if err != nil { - return err - } - - var needsDownloaded bool - // Check the last revision shasum and see if it is different from the current one. - lastRevision, err := os.ReadFile(credHelperDirs.RevisionFile) - if (err == nil && strings.TrimSpace(string(lastRevision)) != tool.Source.Repo.Root+tag) || errors.Is(err, fs.ErrNotExist) { - // Need to pull the latest version. - needsDownloaded = true - // Update the revision file to the new revision. - if err = os.WriteFile(credHelperDirs.RevisionFile, []byte(tool.Source.Repo.Root+tag), 0644); err != nil { - return err - } - } else if err != nil { - return err - } - - if !needsDownloaded { - // Check for the existence of the credential helper binary. - // If it's there, we have no need to download it and can just return. - if _, err = os.Stat(filepath.Join(credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix)); err == nil { - return nil - } - } - - // Find the Go runtime and use it to build the credential helper. 
- for _, rt := range m.runtimes { - if strings.HasPrefix(rt.ID(), "go") { - return rt.(*golang.Runtime).DownloadCredentialHelper(ctx, tool, helperName, distInfo, suffix, credHelperDirs.BinDir) - } - } - - return fmt.Errorf("no Go runtime found to build the credential helper") -} - func (m *Manager) setup(ctx context.Context, runtime Runtime, tool types.Tool, env []string) (string, []string, error) { locker.Lock(tool.ID) defer locker.Unlock(tool.ID) diff --git a/pkg/repos/runtimes/golang/golang.go b/pkg/repos/runtimes/golang/golang.go index 23c12f1a..5cba4779 100644 --- a/pkg/repos/runtimes/golang/golang.go +++ b/pkg/repos/runtimes/golang/golang.go @@ -100,19 +100,6 @@ type tag struct { } `json:"commit"` } -func GetLatestTag(tool types.Tool) (string, error) { - r, ok, err := getLatestRelease(tool) - if err != nil { - return "", err - } - - if !ok { - return "", fmt.Errorf("failed to get latest release for %s", tool.Name) - } - - return r.label, nil -} - func getLatestRelease(tool types.Tool) (*release, bool, error) { if tool.Source.Repo == nil || !strings.HasPrefix(tool.Source.Repo.Root, "https://github.com/") { return nil, false, nil diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 18bc1bc4..37a90d48 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -862,11 +862,6 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env refresh bool ) - rm := runtimeWithLogger(callCtx, monitor, r.runtimeManager) - if err := rm.EnsureCredentialHelpers(callCtx.Ctx); err != nil { - return nil, fmt.Errorf("failed to setup credential helpers: %w", err) - } - // Only try to look up the cred if the tool is on GitHub or has an alias. // If it is a GitHub tool and has an alias, the alias overrides the tool name, so we use it as the credential name. 
if isGitHubTool(toolName) && credentialAlias == "" { diff --git a/pkg/runner/runtimemanager.go b/pkg/runner/runtimemanager.go index ed191d15..1c293215 100644 --- a/pkg/runner/runtimemanager.go +++ b/pkg/runner/runtimemanager.go @@ -5,7 +5,6 @@ import ( "fmt" "time" - "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/types" @@ -40,11 +39,3 @@ func (r runtimeManagerLogger) Infof(msg string, args ...any) { func (r runtimeManagerLogger) GetContext(ctx context.Context, tool types.Tool, cmd, env []string) (string, []string, error) { return r.rm.GetContext(mvl.WithInfo(ctx, r), tool, cmd, env) } - -func (r runtimeManagerLogger) EnsureCredentialHelpers(ctx context.Context) error { - return r.rm.EnsureCredentialHelpers(mvl.WithInfo(ctx, r)) -} - -func (r runtimeManagerLogger) SetUpCredentialHelpers(_ context.Context, _ *config.CLIConfig) error { - panic("not implemented") -} diff --git a/pkg/sdkserver/credentials.go b/pkg/sdkserver/credentials.go index b0246621..2b527b2b 100644 --- a/pkg/sdkserver/credentials.go +++ b/pkg/sdkserver/credentials.go @@ -7,25 +7,12 @@ import ( "net/http" "slices" - "github.com/gptscript-ai/gptscript/pkg/config" gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/credentials" ) -func (s *server) initializeCredentialStore(ctx context.Context, credCtxs []string) (credentials.CredentialStore, error) { - cfg, err := config.ReadCLIConfig(s.gptscriptOpts.OpenAI.ConfigFile) - if err != nil { - return nil, fmt.Errorf("failed to read CLI config: %w", err) - } - - if err := s.runtimeManager.SetUpCredentialHelpers(ctx, cfg); err != nil { - return nil, fmt.Errorf("failed to set up credential helpers: %w", err) - } - if err := s.runtimeManager.EnsureCredentialHelpers(ctx); err != nil { - return nil, fmt.Errorf("failed to ensure credential helpers: %w", err) - } - - store, err := 
credentials.NewStore(cfg, s.runtimeManager, credCtxs, s.gptscriptOpts.Cache.CacheDir) +func (s *server) initializeCredentialStore(_ context.Context, credCtxs []string) (credentials.CredentialStore, error) { + store, err := s.client.CredentialStoreFactory.NewStore(credCtxs) if err != nil { return nil, fmt.Errorf("failed to initialize credential store: %w", err) } diff --git a/pkg/types/tool.go b/pkg/types/tool.go index cefbd311..e6cbf37f 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -141,6 +141,7 @@ type Parameters struct { OutputFilters []string `json:"outputFilters,omitempty"` ExportOutputFilters []string `json:"exportOutputFilters,omitempty"` Blocking bool `json:"-"` + Stdin bool `json:"stdin,omitempty"` Type ToolType `json:"type,omitempty"` } @@ -445,6 +446,9 @@ func (t ToolDef) String() string { if t.Parameters.Cache != nil && !*t.Parameters.Cache { _, _ = fmt.Fprintln(buf, "Cache: false") } + if t.Parameters.Stdin { + _, _ = fmt.Fprintln(buf, "Stdin: true") + } if t.Parameters.Temperature != nil { _, _ = fmt.Fprintf(buf, "Temperature: %f\n", *t.Parameters.Temperature) } From 2a9f664b71f9ee021d7585b12b633aa49da03474 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 5 Nov 2024 11:00:36 -0700 Subject: [PATCH 183/270] chore: add simple logging for cred stores --- pkg/gptscript/gptscript.go | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 63a67647..65aa2209 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -288,6 +288,7 @@ type simpleRunner struct { func newSimpleRunner(cache *cache.Client, rm engine.RuntimeManager, env []string) (*simpleRunner, error) { runner, err := runner.New(noopModel{}, credentials.NoopStore{}, runner.Options{ RuntimeManager: rm, + MonitorFactory: simpleMonitorFactory{}, }) if err != nil { return nil, err @@ -319,3 +320,33 @@ func (n noopModel) Call(_ context.Context, _ types.CompletionRequest, _ 
[]string func (n noopModel) ProxyInfo() (string, string, error) { return "", "", errors.New("unsupported") } + +type simpleMonitorFactory struct { +} + +func (s simpleMonitorFactory) Start(_ context.Context, _ *types.Program, _ []string, _ string) (runner.Monitor, error) { + return simpleMonitor{}, nil +} + +func (s simpleMonitorFactory) Pause() func() { + //TODO implement me + panic("implement me") +} + +type simpleMonitor struct { +} + +func (s simpleMonitor) Stop(_ context.Context, _ string, _ error) { +} + +func (s simpleMonitor) Event(event runner.Event) { + if event.Type == runner.EventTypeCallProgress { + if !strings.HasPrefix(event.Content, "{") { + fmt.Println(event.Content) + } + } +} + +func (s simpleMonitor) Pause() func() { + return func() {} +} From d21c001097efae157b7a5e2d8133aa1b41590b70 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 5 Nov 2024 19:59:14 -0500 Subject: [PATCH 184/270] chore: allow setting of dataset tool in SDK server config Signed-off-by: Donnie Adams --- pkg/cli/sdk_server.go | 2 ++ pkg/sdkserver/datasets.go | 27 ++++++++++++++------------- pkg/sdkserver/routes.go | 10 +++++----- pkg/sdkserver/server.go | 13 +++++++++---- 4 files changed, 30 insertions(+), 22 deletions(-) diff --git a/pkg/cli/sdk_server.go b/pkg/cli/sdk_server.go index 5ce65305..42f0f949 100644 --- a/pkg/cli/sdk_server.go +++ b/pkg/cli/sdk_server.go @@ -11,6 +11,7 @@ import ( type SDKServer struct { *GPTScript + DatasetTool string `usage:"Tool to use for datasets"` WorkspaceTool string `usage:"Tool to use for workspace"` } @@ -38,6 +39,7 @@ func (c *SDKServer) Run(cmd *cobra.Command, _ []string) error { Options: opts, ListenAddress: c.ListenAddress, Debug: c.Debug, + DatasetTool: c.DatasetTool, WorkspaceTool: c.WorkspaceTool, }) } diff --git a/pkg/sdkserver/datasets.go b/pkg/sdkserver/datasets.go index 1a547953..5db90bf7 100644 --- a/pkg/sdkserver/datasets.go +++ b/pkg/sdkserver/datasets.go @@ -10,6 +10,14 @@ import ( 
"github.com/gptscript-ai/gptscript/pkg/loader" ) +func (s *server) getDatasetTool(req datasetRequest) string { + if req.DatasetToolRepo != "" { + return req.DatasetToolRepo + } + + return s.datasetTool +} + type datasetRequest struct { Input string `json:"input"` WorkspaceID string `json:"workspaceID"` @@ -38,13 +46,6 @@ func (r datasetRequest) opts(o gptscript.Options) gptscript.Options { return opts } -func (r datasetRequest) getToolRepo() string { - if r.DatasetToolRepo != "" { - return r.DatasetToolRepo - } - return "github.com/otto8-ai/datasets" -} - func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { logger := gcontext.GetLogger(r.Context()) @@ -65,7 +66,7 @@ func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), req.getToolRepo(), "List Datasets", loader.Options{ + prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "List Datasets", loader.Options{ Cache: g.Cache, }) @@ -126,7 +127,7 @@ func (s *server) createDataset(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), req.getToolRepo(), "Create Dataset", loader.Options{ + prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "Create Dataset", loader.Options{ Cache: g.Cache, }) @@ -195,7 +196,7 @@ func (s *server) addDatasetElement(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), req.getToolRepo(), "Add Element", loader.Options{ + prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "Add Element", loader.Options{ Cache: g.Cache, }) if err != nil { @@ -262,7 +263,7 @@ func (s *server) addDatasetElements(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), req.getToolRepo(), "Add Elements", loader.Options{ + prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "Add Elements", loader.Options{ Cache: g.Cache, }) if err != nil { @@ -327,7 +328,7 @@ func (s *server) 
listDatasetElements(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), req.getToolRepo(), "List Elements", loader.Options{ + prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "List Elements", loader.Options{ Cache: g.Cache, }) if err != nil { @@ -390,7 +391,7 @@ func (s *server) getDatasetElement(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), req.getToolRepo(), "Get Element SDK", loader.Options{ + prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "Get Element SDK", loader.Options{ Cache: g.Cache, }) if err != nil { diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index e9b1cca8..ea7fdb09 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -26,11 +26,11 @@ import ( ) type server struct { - gptscriptOpts gptscript.Options - address, token string - workspaceTool string - client *gptscript.GPTScript - events *broadcaster.Broadcaster[event] + gptscriptOpts gptscript.Options + address, token string + datasetTool, workspaceTool string + client *gptscript.GPTScript + events *broadcaster.Broadcaster[event] runtimeManager engine.RuntimeManager diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index f0c61940..7d98ae60 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -26,10 +26,10 @@ import ( type Options struct { gptscript.Options - ListenAddress string - WorkspaceTool string - Debug bool - DisableServerErrorLogging bool + ListenAddress string + DatasetTool, WorkspaceTool string + Debug bool + DisableServerErrorLogging bool } // Run will start the server and block until the server is shut down. 
@@ -108,6 +108,7 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { gptscriptOpts: opts.Options, address: listener.Addr().String(), token: token, + datasetTool: opts.DatasetTool, workspaceTool: opts.WorkspaceTool, client: g, events: events, @@ -159,6 +160,7 @@ func complete(opts ...Options) Options { for _, opt := range opts { result.Options = gptscript.Complete(result.Options, opt.Options) result.ListenAddress = types.FirstSet(opt.ListenAddress, result.ListenAddress) + result.DatasetTool = types.FirstSet(opt.DatasetTool, result.DatasetTool) result.WorkspaceTool = types.FirstSet(opt.WorkspaceTool, result.WorkspaceTool) result.Debug = types.FirstSet(opt.Debug, result.Debug) result.DisableServerErrorLogging = types.FirstSet(opt.DisableServerErrorLogging, result.DisableServerErrorLogging) @@ -171,6 +173,9 @@ func complete(opts ...Options) Options { if result.WorkspaceTool == "" { result.WorkspaceTool = "github.com/gptscript-ai/workspace-provider" } + if result.DatasetTool == "" { + result.DatasetTool = "github.com/otto8-ai/datasets" + } return result } From 1c2d1de86a398e6883a459b9a6ae7b1b3f1b05d5 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 4 Nov 2024 15:05:20 -0500 Subject: [PATCH 185/270] chore: sdkserver: update dataset methods for the rewrite Signed-off-by: Grant Linville --- pkg/sdkserver/datasets.go | 186 +++++--------------------------------- pkg/sdkserver/routes.go | 2 - 2 files changed, 23 insertions(+), 165 deletions(-) diff --git a/pkg/sdkserver/datasets.go b/pkg/sdkserver/datasets.go index 5db90bf7..e922cd97 100644 --- a/pkg/sdkserver/datasets.go +++ b/pkg/sdkserver/datasets.go @@ -11,24 +11,21 @@ import ( ) func (s *server) getDatasetTool(req datasetRequest) string { - if req.DatasetToolRepo != "" { - return req.DatasetToolRepo + if req.DatasetTool != "" { + return req.DatasetTool } return s.datasetTool } type datasetRequest struct { - Input string `json:"input"` - WorkspaceID string `json:"workspaceID"` - 
DatasetToolRepo string `json:"datasetToolRepo"` - Env []string `json:"env"` + Input string `json:"input"` + DatasetTool string `json:"datasetTool"` + Env []string `json:"env"` } func (r datasetRequest) validate(requireInput bool) error { - if r.WorkspaceID == "" { - return fmt.Errorf("workspaceID is required") - } else if requireInput && r.Input == "" { + if requireInput && r.Input == "" { return fmt.Errorf("input is required") } else if len(r.Env) == 0 { return fmt.Errorf("env is required") @@ -38,10 +35,9 @@ func (r datasetRequest) validate(requireInput bool) error { func (r datasetRequest) opts(o gptscript.Options) gptscript.Options { opts := gptscript.Options{ - Cache: o.Cache, - Monitor: o.Monitor, - Runner: o.Runner, - Workspace: r.WorkspaceID, + Cache: o.Cache, + Monitor: o.Monitor, + Runner: o.Runner, } return opts } @@ -84,148 +80,19 @@ func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { writeResponse(logger, w, map[string]any{"stdout": result}) } -type createDatasetArgs struct { - Name string `json:"datasetName"` - Description string `json:"datasetDescription"` -} - -func (a createDatasetArgs) validate() error { - if a.Name == "" { - return fmt.Errorf("datasetName is required") - } - return nil -} - -func (s *server) createDataset(w http.ResponseWriter, r *http.Request) { - logger := gcontext.GetLogger(r.Context()) - - var req datasetRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) - return - } - - if err := req.validate(true); err != nil { - writeError(logger, w, http.StatusBadRequest, err) - return - } - - g, err := gptscript.New(r.Context(), req.opts(s.gptscriptOpts)) - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) - return - } - - var args createDatasetArgs - if err := json.Unmarshal([]byte(req.Input), &args); err != nil { - 
writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to unmarshal input: %w", err)) - return - } - - if err := args.validate(); err != nil { - writeError(logger, w, http.StatusBadRequest, err) - return - } - - prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "Create Dataset", loader.Options{ - Cache: g.Cache, - }) - - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) - return - } - - result, err := g.Run(r.Context(), prg, req.Env, req.Input) - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) - return - } - - writeResponse(logger, w, map[string]any{"stdout": result}) -} - -type addDatasetElementArgs struct { - DatasetID string `json:"datasetID"` - ElementName string `json:"elementName"` - ElementDescription string `json:"elementDescription"` - ElementContent string `json:"elementContent"` -} - -func (a addDatasetElementArgs) validate() error { - if a.DatasetID == "" { - return fmt.Errorf("datasetID is required") - } - if a.ElementName == "" { - return fmt.Errorf("elementName is required") - } - if a.ElementContent == "" { - return fmt.Errorf("elementContent is required") - } - return nil -} - -func (s *server) addDatasetElement(w http.ResponseWriter, r *http.Request) { - logger := gcontext.GetLogger(r.Context()) - - var req datasetRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to decode request body: %w", err)) - return - } - - if err := req.validate(true); err != nil { - writeError(logger, w, http.StatusBadRequest, err) - return - } - - g, err := gptscript.New(r.Context(), req.opts(s.gptscriptOpts)) - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) - return - } - - var args addDatasetElementArgs - if err := 
json.Unmarshal([]byte(req.Input), &args); err != nil { - writeError(logger, w, http.StatusBadRequest, fmt.Errorf("failed to unmarshal input: %w", err)) - return - } - - if err := args.validate(); err != nil { - writeError(logger, w, http.StatusBadRequest, err) - return - } - - prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "Add Element", loader.Options{ - Cache: g.Cache, - }) - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) - return - } - - result, err := g.Run(r.Context(), prg, req.Env, req.Input) - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) - return - } - - writeResponse(logger, w, map[string]any{"stdout": result}) -} - type addDatasetElementsArgs struct { - DatasetID string `json:"datasetID"` - Elements []struct { - Name string `json:"name"` - Description string `json:"description"` - Contents string `json:"contents"` - } + DatasetID string `json:"datasetID"` + Name string `json:"name"` + Description string `json:"description"` + Elements []struct { + Name string `json:"name"` + Description string `json:"description"` + Contents string `json:"contents"` + BinaryContents []byte `json:"binaryContents"` + } `json:"elements"` } func (a addDatasetElementsArgs) validate() error { - if a.DatasetID == "" { - return fmt.Errorf("datasetID is required") - } if len(a.Elements) == 0 { return fmt.Errorf("elements is required") } @@ -271,13 +138,7 @@ func (s *server) addDatasetElements(w http.ResponseWriter, r *http.Request) { return } - elementsJSON, err := json.Marshal(args.Elements) - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to marshal elements: %w", err)) - return - } - - result, err := g.Run(r.Context(), prg, req.Env, fmt.Sprintf(`{"datasetID":%q, "elements":%q}`, args.DatasetID, string(elementsJSON))) + result, err := g.Run(r.Context(), prg, req.Env, req.Input) 
if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -347,15 +208,14 @@ func (s *server) listDatasetElements(w http.ResponseWriter, r *http.Request) { type getDatasetElementArgs struct { DatasetID string `json:"datasetID"` - Element string `json:"element"` + Name string `json:"name"` } func (a getDatasetElementArgs) validate() error { if a.DatasetID == "" { return fmt.Errorf("datasetID is required") - } - if a.Element == "" { - return fmt.Errorf("element is required") + } else if a.Name == "" { + return fmt.Errorf("name is required") } return nil } @@ -391,7 +251,7 @@ func (s *server) getDatasetElement(w http.ResponseWriter, r *http.Request) { return } - prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "Get Element SDK", loader.Options{ + prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "Get Element", loader.Options{ Cache: g.Cache, }) if err != nil { diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index ea7fdb09..73bf5d58 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -69,10 +69,8 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /credentials/delete", s.deleteCredential) mux.HandleFunc("POST /datasets", s.listDatasets) - mux.HandleFunc("POST /datasets/create", s.createDataset) mux.HandleFunc("POST /datasets/list-elements", s.listDatasetElements) mux.HandleFunc("POST /datasets/get-element", s.getDatasetElement) - mux.HandleFunc("POST /datasets/add-element", s.addDatasetElement) mux.HandleFunc("POST /datasets/add-elements", s.addDatasetElements) mux.HandleFunc("POST /workspaces/create", s.createWorkspace) From 2e26a109ad7cffc3d580fbdd3732139581c1af8d Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 5 Nov 2024 20:51:22 -0700 Subject: [PATCH 186/270] bug: fix dataset leaking workspace directories --- pkg/engine/http.go | 8 +++++--- pkg/loader/loader.go | 25 ++++++++++++++++++++++++- 
pkg/openai/client.go | 2 +- pkg/runner/input.go | 3 +++ pkg/runner/output.go | 3 +++ pkg/sdkserver/datasets.go | 11 +++++++++++ 6 files changed, 47 insertions(+), 5 deletions(-) diff --git a/pkg/engine/http.go b/pkg/engine/http.go index 87348ce9..a30c01e1 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -5,9 +5,11 @@ import ( "encoding/json" "fmt" "io" + "maps" "net/http" "net/url" "os" + "slices" "strings" "github.com/gptscript-ai/gptscript/pkg/types" @@ -75,9 +77,9 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too return nil, err } - for _, env := range e.Env { - if strings.HasPrefix(env, "GPTSCRIPT_WORKSPACE_") { - req.Header.Add("X-GPTScript-Env", env) + for _, k := range slices.Sorted(maps.Keys(envMap)) { + if strings.HasPrefix(k, "GPTSCRIPT_WORKSPACE_") { + req.Header.Add("X-GPTScript-Env", k+"="+envMap[k]) } } diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index 80342f2b..f2679c6f 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -29,6 +29,18 @@ import ( const CacheTimeout = time.Hour +var Remap = map[string]string{} + +func init() { + remap := os.Getenv("GPTSCRIPT_TOOL_REMAP") + for _, pair := range strings.Split(remap, ",") { + k, v, ok := strings.Cut(pair, "=") + if ok { + Remap[k] = v + } + } +} + type source struct { // Content The content of the source Content []byte @@ -68,8 +80,19 @@ func openFile(path string) (io.ReadCloser, bool, error) { } func loadLocal(base *source, name string) (*source, bool, error) { + var remapped bool + if !strings.HasPrefix(name, ".") { + for k, v := range Remap { + if strings.HasPrefix(name, k) { + name = v + name[len(k):] + remapped = true + break + } + } + } + filePath := name - if !filepath.IsAbs(name) { + if !remapped && !filepath.IsAbs(name) { // We want to keep all strings in / format, and only convert to platform specific when reading // This is why we use path instead of filepath. 
filePath = path.Join(base.Path, name) diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 6178c997..3dbbca44 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -617,7 +617,7 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, } partialMessage = appendMessage(partialMessage, response) if partial != nil { - if time.Since(start) > 500*time.Millisecond { + if time.Since(start) > 100*time.Millisecond { last = last[:0] partial <- types.CompletionStatus{ CompletionID: transactionID, diff --git a/pkg/runner/input.go b/pkg/runner/input.go index 360e6274..04d17cc3 100644 --- a/pkg/runner/input.go +++ b/pkg/runner/input.go @@ -15,6 +15,9 @@ func (r *Runner) handleInput(callCtx engine.Context, monitor Monitor, env []stri } for _, inputToolRef := range inputToolRefs { + if callCtx.Program.ToolSet[inputToolRef.ToolID].IsNoop() { + continue + } data := map[string]any{} _ = json.Unmarshal([]byte(input), &data) data["input"] = input diff --git a/pkg/runner/output.go b/pkg/runner/output.go index 5f1d2818..87e9670f 100644 --- a/pkg/runner/output.go +++ b/pkg/runner/output.go @@ -76,6 +76,9 @@ func (r *Runner) handleOutput(callCtx engine.Context, monitor Monitor, env []str } for _, outputToolRef := range outputToolRefs { + if callCtx.Program.ToolSet[outputToolRef.ToolID].IsNoop() { + continue + } inputData, err := argsForFilters(callCtx.Program, outputToolRef, startState, map[string]any{ "output": output, "continuation": continuation, diff --git a/pkg/sdkserver/datasets.go b/pkg/sdkserver/datasets.go index e922cd97..c00308e7 100644 --- a/pkg/sdkserver/datasets.go +++ b/pkg/sdkserver/datasets.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "strings" gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/gptscript" @@ -39,6 +40,12 @@ func (r datasetRequest) opts(o gptscript.Options) gptscript.Options { Monitor: o.Monitor, Runner: o.Runner, } + for _, e := range r.Env { + v, ok 
:= strings.CutPrefix(e, "GPTSCRIPT_WORKSPACE_ID=") + if ok { + opts.Workspace = v + } + } return opts } @@ -61,6 +68,7 @@ func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) return } + defer g.Close(false) prg, err := loader.Program(r.Context(), s.getDatasetTool(req), "List Datasets", loader.Options{ Cache: g.Cache, @@ -118,6 +126,7 @@ func (s *server) addDatasetElements(w http.ResponseWriter, r *http.Request) { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) return } + defer g.Close(false) var args addDatasetElementsArgs if err := json.Unmarshal([]byte(req.Input), &args); err != nil { @@ -177,6 +186,7 @@ func (s *server) listDatasetElements(w http.ResponseWriter, r *http.Request) { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) return } + defer g.Close(false) var args listDatasetElementsArgs if err := json.Unmarshal([]byte(req.Input), &args); err != nil { @@ -239,6 +249,7 @@ func (s *server) getDatasetElement(w http.ResponseWriter, r *http.Request) { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) return } + defer g.Close(false) var args getDatasetElementArgs if err := json.Unmarshal([]byte(req.Input), &args); err != nil { From 53a6ae019c8633d03f0e433db552d1c4d8d227e4 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 6 Nov 2024 12:53:16 -0500 Subject: [PATCH 187/270] fix: credentials: check for not found errors properly (#899) Signed-off-by: Grant Linville --- pkg/credentials/error.go | 12 ++++++++++++ pkg/credentials/store.go | 3 +-- pkg/credentials/toolstore.go | 2 +- pkg/runner/runner.go | 2 +- 4 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 pkg/credentials/error.go diff --git a/pkg/credentials/error.go 
b/pkg/credentials/error.go new file mode 100644 index 00000000..f3990f7c --- /dev/null +++ b/pkg/credentials/error.go @@ -0,0 +1,12 @@ +package credentials + +import ( + "strings" +) + +func IsCredentialsNotFoundError(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "credentials not found in native keychain") +} diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index 56555cd4..53e81f6b 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -9,7 +9,6 @@ import ( "github.com/docker/cli/cli/config/credentials" "github.com/docker/cli/cli/config/types" "github.com/docker/docker-credential-helpers/client" - credentials2 "github.com/docker/docker-credential-helpers/credentials" "github.com/gptscript-ai/gptscript/pkg/config" "golang.org/x/exp/maps" ) @@ -50,7 +49,7 @@ func (s Store) Get(_ context.Context, toolName string) (*Credential, bool, error for _, c := range s.credCtxs { auth, err := store.Get(toolNameWithCtx(toolName, c)) if err != nil { - if credentials2.IsErrCredentialsNotFound(err) { + if IsCredentialsNotFoundError(err) { continue } return nil, false, err diff --git a/pkg/credentials/toolstore.go b/pkg/credentials/toolstore.go index 3e31ea12..5f910069 100644 --- a/pkg/credentials/toolstore.go +++ b/pkg/credentials/toolstore.go @@ -30,7 +30,7 @@ func (h *toolCredentialStore) Erase(serverAddress string) error { func (h *toolCredentialStore) Get(serverAddress string) (types.AuthConfig, error) { creds, err := client.Get(h.program, serverAddress) - if credentials2.IsErrCredentialsNotFound(err) { + if IsCredentialsNotFoundError(err) { return h.file.Get(serverAddress) } else if err != nil { return types.AuthConfig{}, err diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 37a90d48..fc5737ef 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -872,7 +872,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } else if credentialAlias != "" { 
c, exists, err = r.credStore.Get(callCtx.Ctx, credentialAlias) if err != nil { - return nil, fmt.Errorf("failed to get credentials for tool %s: %w", credentialAlias, err) + return nil, fmt.Errorf("failed to get credential %s: %w", credentialAlias, err) } } From 266bc58956c6446dadf69c8456c5f6b747151505 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 6 Nov 2024 13:08:31 -0500 Subject: [PATCH 188/270] feat: allow bypassing of built-in proxy If specific proxy environment variables are set, the built-in proxy will be bypassed. Signed-off-by: Donnie Adams --- pkg/builtin/builtin.go | 4 ++-- pkg/engine/engine.go | 2 +- pkg/gptscript/gptscript.go | 2 +- pkg/llm/proxy.go | 17 +++++++++++++++-- pkg/openai/client.go | 2 +- pkg/tests/tester/runner.go | 2 +- 6 files changed, 21 insertions(+), 8 deletions(-) diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index c17f2c80..90f36919 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -695,9 +695,9 @@ func invalidArgument(input string, err error) string { return fmt.Sprintf("Failed to parse arguments %s: %v", input, err) } -func SysModelProviderCredential(ctx context.Context, _ []string, _ string, _ chan<- string) (string, error) { +func SysModelProviderCredential(ctx context.Context, env []string, _ string, _ chan<- string) (string, error) { engineContext, _ := engine.FromContext(ctx) - auth, url, err := engineContext.Engine.Model.ProxyInfo() + auth, url, err := engineContext.Engine.Model.ProxyInfo(env) if err != nil { return "", err } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 7232157e..ae5a1368 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -14,7 +14,7 @@ import ( type Model interface { Call(ctx context.Context, messageRequest types.CompletionRequest, env []string, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) - ProxyInfo() (string, string, error) + ProxyInfo([]string) (string, string, error) } type RuntimeManager interface { diff 
--git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 65aa2209..dfb1771a 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -317,7 +317,7 @@ func (n noopModel) Call(_ context.Context, _ types.CompletionRequest, _ []string return nil, errors.New("unsupported") } -func (n noopModel) ProxyInfo() (string, string, error) { +func (n noopModel) ProxyInfo([]string) (string, string, error) { return "", "", errors.New("unsupported") } diff --git a/pkg/llm/proxy.go b/pkg/llm/proxy.go index aa8802be..82b33d02 100644 --- a/pkg/llm/proxy.go +++ b/pkg/llm/proxy.go @@ -15,7 +15,20 @@ import ( "github.com/gptscript-ai/gptscript/pkg/openai" ) -func (r *Registry) ProxyInfo() (string, string, error) { +func (r *Registry) ProxyInfo(env []string) (string, string, error) { + var proxyURL, proxyToken string + for _, e := range env { + if url, ok := strings.CutPrefix(e, "GPTSCRIPT_MODEL_PROVIDER_PROXY_URL="); ok { + proxyURL = url + } else if token, ok := strings.CutPrefix(e, "GPTSCRIPT_MODEL_PROVIDER_PROXY_TOKEN="); ok { + proxyToken = token + } + } + + if proxyToken != "" && proxyURL != "" { + return proxyToken, proxyURL, nil + } + r.proxyLock.Lock() defer r.proxyLock.Unlock() @@ -77,7 +90,7 @@ func (r *Registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } - auth, targetURL := oai.ProxyInfo() + auth, targetURL := oai.ProxyInfo(nil) if targetURL == "" { http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) return diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 3dbbca44..1894bdda 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -132,7 +132,7 @@ func NewClient(ctx context.Context, credStore credentials.CredentialStore, opts }, nil } -func (c *Client) ProxyInfo() (token, urlBase string) { +func (c *Client) ProxyInfo([]string) (token, urlBase string) { if c.invalidAuth { return "", "" } diff --git a/pkg/tests/tester/runner.go b/pkg/tests/tester/runner.go index 1f59ea03..44ec4e3c 100644 
--- a/pkg/tests/tester/runner.go +++ b/pkg/tests/tester/runner.go @@ -31,7 +31,7 @@ type Result struct { Err error } -func (c *Client) ProxyInfo() (string, string, error) { +func (c *Client) ProxyInfo([]string) (string, string, error) { return "test-auth", "test-url", nil } From 61e6ded094aa40e21c70bac579719ce96eaea30b Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 7 Nov 2024 13:08:22 -0700 Subject: [PATCH 189/270] chore: allow dot in credential context --- pkg/credentials/store.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index 53e81f6b..e602d074 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -195,7 +195,7 @@ func validateCredentialCtx(ctxs []string) error { } // check alphanumeric - r := regexp.MustCompile("^[-a-zA-Z0-9]+$") + r := regexp.MustCompile("^[-a-zA-Z0-9.]+$") for _, c := range ctxs { if !r.MatchString(c) { return fmt.Errorf("credential contexts must be alphanumeric") From 02bcf6cd869ead7643074762b71714bfdfa8a38d Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 8 Nov 2024 14:40:13 -0500 Subject: [PATCH 190/270] fix: add default model to the loader The tool loader will set the models on the tools if none is set. The way that that happens works for the CLI, but is not compatible with the SDK. This change makes the default model logic work with the SDK server. 
Signed-off-by: Donnie Adams --- pkg/builtin/builtin.go | 5 +++++ pkg/loader/loader.go | 44 +++++++++++++++++++++++--------------- pkg/loader/openapi_test.go | 18 ++++++++-------- pkg/sdkserver/run.go | 6 +++++- 4 files changed, 46 insertions(+), 27 deletions(-) diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index 90f36919..42ff373b 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -277,10 +277,15 @@ func ListTools() (result []types.Tool) { } func Builtin(name string) (types.Tool, bool) { + return BuiltinWithDefaultModel(name, "") +} + +func BuiltinWithDefaultModel(name, defaultModel string) (types.Tool, bool) { // Legacy syntax not used anymore name = strings.TrimSuffix(name, "?") t, ok := tools[name] t.Parameters.Name = name + t.Parameters.ModelName = defaultModel t.ID = name t.Instructions = "#!" + name return SetDefaults(t), ok diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index f2679c6f..5a907f5b 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -132,7 +132,7 @@ func loadLocal(base *source, name string) (*source, bool, error) { }, true, nil } -func loadProgram(data []byte, into *types.Program, targetToolName string) (types.Tool, error) { +func loadProgram(data []byte, into *types.Program, targetToolName, defaultModel string) (types.Tool, error) { var ext types.Program if err := json.Unmarshal(data[len(assemble.Header):], &ext); err != nil { @@ -141,7 +141,7 @@ func loadProgram(data []byte, into *types.Program, targetToolName string) (types into.ToolSet = make(map[string]types.Tool, len(ext.ToolSet)) for k, v := range ext.ToolSet { - if builtinTool, ok := builtin.Builtin(k); ok { + if builtinTool, ok := builtin.BuiltinWithDefaultModel(k, defaultModel); ok { v = builtinTool } into.ToolSet[k] = v @@ -186,11 +186,11 @@ func loadOpenAPI(prg *types.Program, data []byte) *openapi3.T { return openAPIDocument } -func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, 
targetToolName string) ([]types.Tool, error) { +func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, targetToolName, defaultModel string) ([]types.Tool, error) { data := base.Content if bytes.HasPrefix(data, assemble.Header) { - tool, err := loadProgram(data, prg, targetToolName) + tool, err := loadProgram(data, prg, targetToolName, defaultModel) if err != nil { return nil, err } @@ -310,17 +310,17 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base localTools[strings.ToLower(tool.Parameters.Name)] = tool } - return linkAll(ctx, cache, prg, base, targetTools, localTools) + return linkAll(ctx, cache, prg, base, targetTools, localTools, defaultModel) } -func linkAll(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, tools []types.Tool, localTools types.ToolSet) (result []types.Tool, _ error) { +func linkAll(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, tools []types.Tool, localTools types.ToolSet, defaultModel string) (result []types.Tool, _ error) { localToolsMapping := make(map[string]string, len(tools)) for _, localTool := range localTools { localToolsMapping[strings.ToLower(localTool.Parameters.Name)] = localTool.ID } for _, tool := range tools { - tool, err := link(ctx, cache, prg, base, tool, localTools, localToolsMapping) + tool, err := link(ctx, cache, prg, base, tool, localTools, localToolsMapping, defaultModel) if err != nil { return nil, err } @@ -329,7 +329,7 @@ func linkAll(ctx context.Context, cache *cache.Client, prg *types.Program, base return } -func link(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, tool types.Tool, localTools types.ToolSet, localToolsMapping map[string]string) (types.Tool, error) { +func link(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, tool types.Tool, localTools types.ToolSet, localToolsMapping map[string]string, defaultModel string) (types.Tool, 
error) { if existing, ok := prg.ToolSet[tool.ID]; ok { return existing, nil } @@ -354,7 +354,7 @@ func link(ctx context.Context, cache *cache.Client, prg *types.Program, base *so linkedTool = existing } else { var err error - linkedTool, err = link(ctx, cache, prg, base, localTool, localTools, localToolsMapping) + linkedTool, err = link(ctx, cache, prg, base, localTool, localTools, localToolsMapping, defaultModel) if err != nil { return types.Tool{}, fmt.Errorf("failed linking %s at %s: %w", targetToolName, base, err) } @@ -364,7 +364,7 @@ func link(ctx context.Context, cache *cache.Client, prg *types.Program, base *so toolNames[targetToolName] = struct{}{} } else { toolName, subTool := types.SplitToolRef(targetToolName) - resolvedTools, err := resolve(ctx, cache, prg, base, toolName, subTool) + resolvedTools, err := resolve(ctx, cache, prg, base, toolName, subTool, defaultModel) if err != nil { return types.Tool{}, fmt.Errorf("failed resolving %s from %s: %w", targetToolName, base, err) } @@ -376,6 +376,10 @@ func link(ctx context.Context, cache *cache.Client, prg *types.Program, base *so tool.LocalTools = localToolsMapping + if tool.ModelName == "" { + tool.ModelName = defaultModel + } + tool = builtin.SetDefaults(tool) prg.ToolSet[tool.ID] = tool @@ -405,7 +409,7 @@ func ProgramFromSource(ctx context.Context, content, subToolName string, opts .. Path: locationPath, Name: locationName, Location: opt.Location, - }, subToolName) + }, subToolName, opt.DefaultModel) if err != nil { return types.Program{}, err } @@ -414,20 +418,26 @@ func ProgramFromSource(ctx context.Context, content, subToolName string, opts .. 
} type Options struct { - Cache *cache.Client - Location string + Cache *cache.Client + Location string + DefaultModel string } func complete(opts ...Options) (result Options) { for _, opt := range opts { result.Cache = types.FirstSet(opt.Cache, result.Cache) result.Location = types.FirstSet(opt.Location, result.Location) + result.DefaultModel = types.FirstSet(opt.DefaultModel, result.DefaultModel) } if result.Location == "" { result.Location = "inline" } + if result.DefaultModel == "" { + result.DefaultModel = builtin.GetDefaultModel() + } + return } @@ -451,7 +461,7 @@ func Program(ctx context.Context, name, subToolName string, opts ...Options) (ty Name: name, ToolSet: types.ToolSet{}, } - tools, err := resolve(ctx, opt.Cache, &prg, &source{}, name, subToolName) + tools, err := resolve(ctx, opt.Cache, &prg, &source{}, name, subToolName, opt.DefaultModel) if err != nil { return types.Program{}, err } @@ -459,9 +469,9 @@ func Program(ctx context.Context, name, subToolName string, opts ...Options) (ty return prg, nil } -func resolve(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, name, subTool string) ([]types.Tool, error) { +func resolve(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, name, subTool, defaultModel string) ([]types.Tool, error) { if subTool == "" { - t, ok := builtin.Builtin(name) + t, ok := builtin.BuiltinWithDefaultModel(name, defaultModel) if ok { prg.ToolSet[t.ID] = t return []types.Tool{t}, nil @@ -473,7 +483,7 @@ func resolve(ctx context.Context, cache *cache.Client, prg *types.Program, base return nil, err } - result, err := readTool(ctx, cache, prg, s, subTool) + result, err := readTool(ctx, cache, prg, s, subTool, defaultModel) if err != nil { return nil, err } diff --git a/pkg/loader/openapi_test.go b/pkg/loader/openapi_test.go index 1a7eaa76..423246d1 100644 --- a/pkg/loader/openapi_test.go +++ b/pkg/loader/openapi_test.go @@ -26,7 +26,7 @@ func TestLoadOpenAPI(t *testing.T) { } 
datav3, err := os.ReadFile("testdata/openapi_v3.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "", "") require.NoError(t, err, "failed to read openapi v3") require.Equal(t, 3, numOpenAPITools(prgv3.ToolSet), "expected 3 openapi tools") @@ -35,7 +35,7 @@ func TestLoadOpenAPI(t *testing.T) { } datav2, err := os.ReadFile("testdata/openapi_v2.json") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2json, &source{Content: datav2}, "") + _, err = readTool(context.Background(), nil, &prgv2json, &source{Content: datav2}, "", "") require.NoError(t, err, "failed to read openapi v2") require.Equal(t, 3, numOpenAPITools(prgv2json.ToolSet), "expected 3 openapi tools") @@ -44,7 +44,7 @@ func TestLoadOpenAPI(t *testing.T) { } datav2, err = os.ReadFile("testdata/openapi_v2.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2yaml, &source{Content: datav2}, "") + _, err = readTool(context.Background(), nil, &prgv2yaml, &source{Content: datav2}, "", "") require.NoError(t, err, "failed to read openapi v2 (yaml)") require.Equal(t, 3, numOpenAPITools(prgv2yaml.ToolSet), "expected 3 openapi tools") @@ -57,7 +57,7 @@ func TestOpenAPIv3(t *testing.T) { } datav3, err := os.ReadFile("testdata/openapi_v3.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) @@ -69,7 +69,7 @@ func TestOpenAPIv3NoOperationIDs(t *testing.T) { } datav3, err := os.ReadFile("testdata/openapi_v3_no_operation_ids.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + _, err = readTool(context.Background(), 
nil, &prgv3, &source{Content: datav3}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) @@ -81,7 +81,7 @@ func TestOpenAPIv2(t *testing.T) { } datav2, err := os.ReadFile("testdata/openapi_v2.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2, &source{Content: datav2}, "") + _, err = readTool(context.Background(), nil, &prgv2, &source{Content: datav2}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv2.ToolSet, autogold.Dir("testdata/openapi")) @@ -94,7 +94,7 @@ func TestOpenAPIv3Revamp(t *testing.T) { } datav3, err := os.ReadFile("testdata/openapi_v3.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) @@ -107,7 +107,7 @@ func TestOpenAPIv3NoOperationIDsRevamp(t *testing.T) { } datav3, err := os.ReadFile("testdata/openapi_v3_no_operation_ids.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) @@ -120,7 +120,7 @@ func TestOpenAPIv2Revamp(t *testing.T) { } datav2, err := os.ReadFile("testdata/openapi_v2.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2, &source{Content: datav2}, "") + _, err = readTool(context.Background(), nil, &prgv2, &source{Content: datav2}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv2.ToolSet, autogold.Dir("testdata/openapi")) diff --git a/pkg/sdkserver/run.go b/pkg/sdkserver/run.go index b6b5a049..1c0f7c4b 100644 --- a/pkg/sdkserver/run.go +++ b/pkg/sdkserver/run.go @@ -32,7 +32,11 @@ func (s *server) 
execAndStream(ctx context.Context, programLoader loaderFunc, lo } defer g.Close(false) - prg, err := programLoader(ctx, toolDef.String(), subTool, loader.Options{Cache: g.Cache}) + defaultModel := opts.OpenAI.DefaultModel + if defaultModel == "" { + defaultModel = s.gptscriptOpts.OpenAI.DefaultModel + } + prg, err := programLoader(ctx, toolDef.String(), subTool, loader.Options{Cache: g.Cache, DefaultModel: defaultModel}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return From 176942ab4a49cfe938f55ee91b12fd9f6b271b03 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 11 Nov 2024 15:00:30 -0700 Subject: [PATCH 191/270] chore: Add GPTSCRIPT_HTTP_ENV_PREFIX to specify http env vars to send --- pkg/engine/http.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/engine/http.go b/pkg/engine/http.go index a30c01e1..c1dd67e7 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -82,6 +82,16 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too req.Header.Add("X-GPTScript-Env", k+"="+envMap[k]) } } + for _, prefix := range strings.Split(os.Getenv("GPTSCRIPT_HTTP_ENV_PREFIX"), ",") { + if prefix == "" { + continue + } + for _, k := range slices.Sorted(maps.Keys(envMap)) { + if strings.HasPrefix(k, prefix) { + req.Header.Add("X-GPTScript-Env", k+"="+envMap[k]) + } + } + } req.Header.Set("X-GPTScript-Tool-Name", tool.Parameters.Name) From 31f2edfbe970a88d6609dc1b096c6a06b8b5ad76 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 11 Nov 2024 15:13:03 -0700 Subject: [PATCH 192/270] chore: add key for exact match --- pkg/engine/http.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pkg/engine/http.go b/pkg/engine/http.go index c1dd67e7..109db559 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -82,7 +82,8 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too 
req.Header.Add("X-GPTScript-Env", k+"="+envMap[k]) } } - for _, prefix := range strings.Split(os.Getenv("GPTSCRIPT_HTTP_ENV_PREFIX"), ",") { + + for _, prefix := range strings.Split(envMap["GPTSCRIPT_HTTP_ENV_PREFIX"], ",") { if prefix == "" { continue } @@ -93,6 +94,16 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too } } + for _, k := range strings.Split(envMap["GPTSCRIPT_HTTP_ENV"], ",") { + if k == "" { + continue + } + v := envMap[k] + if v != "" { + req.Header.Add("X-GPTScript-Env", k+"="+v) + } + } + req.Header.Set("X-GPTScript-Tool-Name", tool.Parameters.Name) if err := json.Unmarshal([]byte(input), &map[string]any{}); err == nil { From 4e1c10cac779db2e7a78c9f692d74f1d3cfe3da5 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 12 Nov 2024 07:44:19 -0500 Subject: [PATCH 193/270] chore: change the dataset tool to the gptscript-ai org This tool was moved back to the gptscript-ai org. Signed-off-by: Donnie Adams --- pkg/sdkserver/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 7d98ae60..04bff085 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -174,7 +174,7 @@ func complete(opts ...Options) Options { result.WorkspaceTool = "github.com/gptscript-ai/workspace-provider" } if result.DatasetTool == "" { - result.DatasetTool = "github.com/otto8-ai/datasets" + result.DatasetTool = "github.com/gptscript-ai/datasets" } return result From 164d6a499f26354dcdb45b87d59612387cc7b4e2 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 15 Nov 2024 08:59:40 -0700 Subject: [PATCH 194/270] chore: add more appropriate error message --- pkg/engine/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index ae5a1368..a195a8b4 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -387,7 +387,7 @@ func (e *Engine) complete(ctx context.Context, state *State) 
(*Return, error) { resp, err := e.Model.Call(ctx, state.Completion, e.Env, progress) if err != nil { - return nil, err + return nil, fmt.Errorf("failed calling model for completion: %w", err) } state.Completion.Messages = append(state.Completion.Messages, *resp) From b1c9204444bae5f01295b4f7d403001b9aa73031 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 18 Nov 2024 14:12:11 -0500 Subject: [PATCH 195/270] fix: credentials: only decrypt credentials in the context(s) needed Signed-off-by: Grant Linville --- pkg/credentials/store.go | 58 +++++++++++++++++++++++++----------- pkg/credentials/toolstore.go | 14 +++------ 2 files changed, 45 insertions(+), 27 deletions(-) diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index e602d074..b839ad6d 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -139,36 +139,60 @@ func (s Store) List(_ context.Context) ([]Credential, error) { return nil, err } - credsByContext := make(map[string][]Credential) - allCreds := make([]Credential, 0) - for serverAddress, authCfg := range list { - if authCfg.ServerAddress == "" { - authCfg.ServerAddress = serverAddress // Not sure why we have to do this, but we do. 
+ if len(s.credCtxs) > 0 && s.credCtxs[0] == AllCredentialContexts { + allCreds := make([]Credential, len(list)) + for serverAddress := range list { + ac, err := store.Get(serverAddress) + if err != nil { + return nil, err + } + ac.ServerAddress = serverAddress + + cred, err := credentialFromDockerAuthConfig(ac) + if err != nil { + return nil, err + } + allCreds = append(allCreds, cred) } - c, err := credentialFromDockerAuthConfig(authCfg) + return allCreds, nil + } + + serverAddressesByContext := make(map[string][]string) + for serverAddress := range list { + _, ctx, err := toolNameAndCtxFromAddress(serverAddress) if err != nil { return nil, err } - allCreds = append(allCreds, c) - - if credsByContext[c.Context] == nil { - credsByContext[c.Context] = []Credential{c} + if serverAddressesByContext[ctx] == nil { + serverAddressesByContext[ctx] = []string{serverAddress} } else { - credsByContext[c.Context] = append(credsByContext[c.Context], c) + serverAddressesByContext[ctx] = append(serverAddressesByContext[ctx], serverAddress) } } - if len(s.credCtxs) > 0 && s.credCtxs[0] == AllCredentialContexts { - return allCreds, nil - } - // Go through the contexts in reverse order so that higher priority contexts override lower ones. 
credsByName := make(map[string]Credential) for i := len(s.credCtxs) - 1; i >= 0; i-- { - for _, c := range credsByContext[s.credCtxs[i]] { - credsByName[c.ToolName] = c + for _, serverAddress := range serverAddressesByContext[s.credCtxs[i]] { + ac, err := store.Get(serverAddress) + if err != nil { + return nil, err + } + ac.ServerAddress = serverAddress + + cred, err := credentialFromDockerAuthConfig(ac) + if err != nil { + return nil, err + } + + toolName, _, err := toolNameAndCtxFromAddress(serverAddress) + if err != nil { + return nil, err + } + + credsByName[toolName] = cred } } diff --git a/pkg/credentials/toolstore.go b/pkg/credentials/toolstore.go index 5f910069..d66b0fd7 100644 --- a/pkg/credentials/toolstore.go +++ b/pkg/credentials/toolstore.go @@ -50,7 +50,7 @@ func (h *toolCredentialStore) GetAll() (map[string]types.AuthConfig, error) { return nil, err } - newCredAddresses := make(map[string]string, len(serverAddresses)) + result = make(map[string]types.AuthConfig, len(serverAddresses)) for serverAddress, val := range serverAddresses { // If the serverAddress contains a port, we need to put it back in the right spot. 
// For some reason, even when a credential is stored properly as http://hostname:8080///credctx, @@ -80,16 +80,10 @@ func (h *toolCredentialStore) GetAll() (map[string]types.AuthConfig, error) { } } - newCredAddresses[toolNameWithCtx(toolName, ctx)] = val - delete(serverAddresses, serverAddress) - } - - for serverAddress := range newCredAddresses { - ac, err := h.Get(serverAddress) - if err != nil { - return nil, err + result[toolNameWithCtx(toolName, ctx)] = types.AuthConfig{ + Username: val, + ServerAddress: serverAddress, } - result[serverAddress] = ac } return result, nil From 51c4b5bd45edaaadc1728b6065dffaf03ef615ba Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 18 Nov 2024 14:15:32 -0500 Subject: [PATCH 196/270] fix lint issue Signed-off-by: Grant Linville --- pkg/credentials/toolstore.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/credentials/toolstore.go b/pkg/credentials/toolstore.go index d66b0fd7..536e2aa1 100644 --- a/pkg/credentials/toolstore.go +++ b/pkg/credentials/toolstore.go @@ -43,14 +43,12 @@ func (h *toolCredentialStore) Get(serverAddress string) (types.AuthConfig, error } func (h *toolCredentialStore) GetAll() (map[string]types.AuthConfig, error) { - result := map[string]types.AuthConfig{} - serverAddresses, err := client.List(h.program) if err != nil { return nil, err } - result = make(map[string]types.AuthConfig, len(serverAddresses)) + result := make(map[string]types.AuthConfig, len(serverAddresses)) for serverAddress, val := range serverAddresses { // If the serverAddress contains a port, we need to put it back in the right spot. 
// For some reason, even when a credential is stored properly as http://hostname:8080///credctx, From 33c2995ee1247648d26d60bea9d8b9b78deeddf3 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 21 Nov 2024 10:21:02 -0500 Subject: [PATCH 197/270] fix: allow listing of credentials in all contexts Signed-off-by: Donnie Adams --- pkg/cli/credential.go | 8 +++++++- pkg/credentials/store.go | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/cli/credential.go b/pkg/cli/credential.go index eaf7665b..866ed4e5 100644 --- a/pkg/cli/credential.go +++ b/pkg/cli/credential.go @@ -9,6 +9,7 @@ import ( "time" cmd2 "github.com/gptscript-ai/cmd" + "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/gptscript" "github.com/spf13/cobra" ) @@ -44,7 +45,12 @@ func (c *Credential) Run(cmd *cobra.Command, _ []string) error { } defer gptScript.Close(true) - store, err := gptScript.CredentialStoreFactory.NewStore(gptScript.DefaultCredentialContexts) + credCtxs := gptScript.DefaultCredentialContexts + if c.AllContexts { + credCtxs = []string{credentials.AllCredentialContexts} + } + + store, err := gptScript.CredentialStoreFactory.NewStore(credCtxs) if err != nil { return err } diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index b839ad6d..be4be183 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -140,7 +140,7 @@ func (s Store) List(_ context.Context) ([]Credential, error) { } if len(s.credCtxs) > 0 && s.credCtxs[0] == AllCredentialContexts { - allCreds := make([]Credential, len(list)) + allCreds := make([]Credential, 0, len(list)) for serverAddress := range list { ac, err := store.Get(serverAddress) if err != nil { From e5fe428c685875482bf0e78d1210d070a2e176f1 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 21 Nov 2024 10:23:12 -0500 Subject: [PATCH 198/270] fix: reload CLI config when getting credentials When running in SDK server mode and using the file cred store, we must reload 
the credentials in the file each time. Typically, these credentials will be added via a tool call, which would invalidate the credentials held in memory. Signed-off-by: Donnie Adams --- pkg/config/cliconfig.go | 47 +++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/pkg/config/cliconfig.go b/pkg/config/cliconfig.go index 73741ab3..d7944d8e 100644 --- a/pkg/config/cliconfig.go +++ b/pkg/config/cliconfig.go @@ -11,6 +11,7 @@ import ( "github.com/adrg/xdg" "github.com/docker/cli/cli/config/types" + "github.com/gptscript-ai/gptscript/pkg/mvl" ) const ( @@ -24,6 +25,7 @@ const ( var ( // Helpers is a list of all supported credential helpers from github.com/gptscript-ai/gptscript-credential-helpers Helpers = []string{WincredCredHelper, OsxkeychainCredHelper, SecretserviceCredHelper, PassCredHelper} + log = mvl.Package() ) type AuthConfig types.AuthConfig @@ -85,9 +87,9 @@ func (c *CLIConfig) Save() error { } if c.auths != nil { - c.Auths = map[string]AuthConfig{} + c.Auths = make(map[string]AuthConfig, len(c.auths)) for k, v := range c.auths { - c.Auths[k] = (AuthConfig)(v) + c.Auths[k] = AuthConfig(v) } c.auths = nil } @@ -116,13 +118,21 @@ func (c *CLIConfig) GetAuthConfigs() map[string]types.AuthConfig { defer c.authsLock.Unlock() } + if err := c.readFileIntoConfig(c.location); err != nil { + // This is implementing an interface, so we can't return this error. + log.Warnf("Failed to read config file: %v", err) + } + if c.auths == nil { - c.auths = map[string]types.AuthConfig{} - for k, v := range c.Auths { - authConfig := (types.AuthConfig)(v) - c.auths[k] = authConfig - } + c.auths = make(map[string]types.AuthConfig, len(c.Auths)) + } + + // Assume that whatever was pulled from the file is more recent. + // The docker creds framework will save the file after creating or updating a credential. 
+ for k, v := range c.Auths { + c.auths[k] = types.AuthConfig(v) } + return c.auths } @@ -142,17 +152,13 @@ func ReadCLIConfig(gptscriptConfigFile string) (*CLIConfig, error) { } } - data, err := readFile(gptscriptConfigFile) - if err != nil { - return nil, err - } result := &CLIConfig{ authsLock: &sync.Mutex{}, location: gptscriptConfigFile, - raw: data, } - if err := json.Unmarshal(data, result); err != nil { - return nil, fmt.Errorf("failed to unmarshal %s: %v", gptscriptConfigFile, err) + + if err := result.readFileIntoConfig(gptscriptConfigFile); err != nil { + return nil, err } if store := os.Getenv("GPTSCRIPT_CREDENTIAL_STORE"); store != "" { @@ -180,13 +186,18 @@ func (c *CLIConfig) setDefaultCredentialsStore() error { return c.Save() } -func readFile(path string) ([]byte, error) { +func (c *CLIConfig) readFileIntoConfig(path string) error { data, err := os.ReadFile(path) if os.IsNotExist(err) { - return []byte("{}"), nil + return nil } else if err != nil { - return nil, fmt.Errorf("failed to read user config %s: %w", path, err) + return fmt.Errorf("failed to read user config %s: %w", path, err) } - return data, nil + c.raw = data + if err := json.Unmarshal(data, c); err != nil { + return fmt.Errorf("failed to unmarshal %s: %v", path, err) + } + + return nil } From c39a0693ee94f5e9ab7114ab049ba3aef8689a2d Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 4 Dec 2024 11:23:58 -0500 Subject: [PATCH 199/270] feat: add ability to stop individual daemons Signed-off-by: Donnie Adams --- pkg/engine/cmd.go | 6 +++--- pkg/engine/daemon.go | 25 ++++++++++++++++++------- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index b0c1ab4b..010c1ace 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -292,9 +292,8 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T args = args[1:] } - var ( - stop = func() {} - ) + ctx, cancel := context.WithCancel(ctx) + stop := cancel if 
strings.TrimSpace(rest) != "" { f, err := os.CreateTemp(env.Getenv("GPTSCRIPT_TMPDIR", envvars), version.ProgramName+requiredFileExtensions[args[0]]) @@ -303,6 +302,7 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T } stop = func() { _ = os.Remove(f.Name()) + cancel() } _, err = f.Write([]byte(rest)) diff --git a/pkg/engine/daemon.go b/pkg/engine/daemon.go index f0a1c10c..b7877da3 100644 --- a/pkg/engine/daemon.go +++ b/pkg/engine/daemon.go @@ -19,7 +19,7 @@ var ports Ports type Ports struct { daemonPorts map[string]int64 - daemonsRunning map[string]struct{} + daemonsRunning map[string]func() daemonLock sync.Mutex startPort, endPort int64 @@ -57,6 +57,17 @@ func CloseDaemons() { ports.daemonWG.Wait() } +func StopDaemon(url string) { + ports.daemonLock.Lock() + defer ports.daemonLock.Unlock() + + if stop := ports.daemonsRunning[url]; stop != nil { + stop() + } + + delete(ports.daemonsRunning, url) +} + func nextPort() int64 { if ports.startPort == 0 { ports.startPort = 10240 @@ -118,7 +129,7 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { port, ok := ports.daemonPorts[tool.ID] url := fmt.Sprintf("http://127.0.0.1:%d%s", port, path) - if ok { + if ok && ports.daemonsRunning[url] != nil { return url, nil } @@ -172,13 +183,13 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { if ports.daemonPorts == nil { ports.daemonPorts = map[string]int64{} - ports.daemonsRunning = map[string]struct{}{} + ports.daemonsRunning = map[string]func(){} } ports.daemonPorts[tool.ID] = port - ports.daemonsRunning[url] = struct{}{} + ports.daemonsRunning[url] = stop - killedCtx, cancel := context.WithCancelCause(ctx) - defer cancel(nil) + killedCtx, killedCancel := context.WithCancelCause(ctx) + defer killedCancel(nil) ports.daemonWG.Add(1) go func() { @@ -189,7 +200,7 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { _ = r.Close() _ = w.Close() - cancel(err) + killedCancel(err) stop() 
ports.daemonLock.Lock() defer ports.daemonLock.Unlock() From c5d85f1722136da53f935b4383e764975f88b436 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 4 Dec 2024 13:45:35 -0500 Subject: [PATCH 200/270] chore: openapi: remove https restriction (#916) Signed-off-by: Grant Linville --- docs/docs/03-tools/03-openapi.md | 6 ------ pkg/engine/openapi.go | 20 +++++++++----------- pkg/openapi/run.go | 23 +++++++++-------------- 3 files changed, 18 insertions(+), 31 deletions(-) diff --git a/docs/docs/03-tools/03-openapi.md b/docs/docs/03-tools/03-openapi.md index 0b0f4961..e99172eb 100644 --- a/docs/docs/03-tools/03-openapi.md +++ b/docs/docs/03-tools/03-openapi.md @@ -41,12 +41,6 @@ Will be resolved as `https://api.example.com/v1`. ## Authentication -:::warning -All authentication options will be completely ignored if the server uses HTTP and not HTTPS, unless the request is for `localhost` or 127.0.0.1. -This is to protect users from accidentally sending credentials in plain text. -HTTP is only OK, if it's on localhost/127.0.0.1. -::: - ### 1. Security Schemes GPTScript will read the defined [security schemes](https://swagger.io/docs/specification/authentication/) in the OpenAPI definition. The currently supported types are `apiKey` and `http`. 
diff --git a/pkg/engine/openapi.go b/pkg/engine/openapi.go index a951bd37..a9a1a644 100644 --- a/pkg/engine/openapi.go +++ b/pkg/engine/openapi.go @@ -197,19 +197,17 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { return nil, fmt.Errorf("failed to create request: %w", err) } - // Check for authentication (only if using HTTPS or localhost) - if u.Scheme == "https" || u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" { - if len(instructions.SecurityInfos) > 0 { - if err := openapi.HandleAuths(req, envMap, instructions.SecurityInfos); err != nil { - return nil, fmt.Errorf("error setting up authentication: %w", err) - } + // Check for authentication + if len(instructions.SecurityInfos) > 0 { + if err := openapi.HandleAuths(req, envMap, instructions.SecurityInfos); err != nil { + return nil, fmt.Errorf("error setting up authentication: %w", err) } + } - // If there is a bearer token set for the whole server, and no Authorization header has been defined, use it. - if token, ok := envMap["GPTSCRIPT_"+env.ToEnvLike(u.Hostname())+"_BEARER_TOKEN"]; ok { - if req.Header.Get("Authorization") == "" { - req.Header.Set("Authorization", "Bearer "+token) - } + // If there is a bearer token set for the whole server, and no Authorization header has been defined, use it. 
+ if token, ok := envMap["GPTSCRIPT_"+env.ToEnvLike(u.Hostname())+"_BEARER_TOKEN"]; ok { + if req.Header.Get("Authorization") == "" { + req.Header.Set("Authorization", "Bearer "+token) } } diff --git a/pkg/openapi/run.go b/pkg/openapi/run.go index ac1ec660..237d8b57 100644 --- a/pkg/openapi/run.go +++ b/pkg/openapi/run.go @@ -8,7 +8,6 @@ import ( "mime/multipart" "net/http" "net/url" - "os" "strings" "github.com/getkin/kin-openapi/openapi3" @@ -69,22 +68,18 @@ func Run(operationID, defaultHost, args string, t *openapi3.T, envs []string) (s return "", false, fmt.Errorf("failed to create request: %w", err) } - // Check for authentication (only if using HTTPS or localhost) - if u.Scheme == "https" || u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" { - if len(opInfo.SecurityInfos) > 0 { - if err := HandleAuths(req, envMap, opInfo.SecurityInfos); err != nil { - return "", false, fmt.Errorf("error setting up authentication: %w", err) - } + // Check for authentication + if len(opInfo.SecurityInfos) > 0 { + if err := HandleAuths(req, envMap, opInfo.SecurityInfos); err != nil { + return "", false, fmt.Errorf("error setting up authentication: %w", err) } + } - // If there is a bearer token set for the whole server, and no Authorization header has been defined, use it. - if token, ok := envMap["GPTSCRIPT_"+env.ToEnvLike(u.Hostname())+"_BEARER_TOKEN"]; ok { - if req.Header.Get("Authorization") == "" { - req.Header.Set("Authorization", "Bearer "+token) - } + // If there is a bearer token set for the whole server, and no Authorization header has been defined, use it. 
+ if token, ok := envMap["GPTSCRIPT_"+env.ToEnvLike(u.Hostname())+"_BEARER_TOKEN"]; ok { + if req.Header.Get("Authorization") == "" { + req.Header.Set("Authorization", "Bearer "+token) } - } else { - fmt.Fprintf(os.Stderr, "no auth") } // Handle query parameters From e758b54cf59d41357dfa829b5856fbed5e62cefd Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Fri, 13 Dec 2024 09:55:09 -0700 Subject: [PATCH 201/270] bug: set output from outputfilter in callFinish events --- pkg/runner/runner.go | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index fc5737ef..272e5a99 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -493,8 +493,12 @@ func (s State) ContinuationContent() (string, error) { } func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, state *State) (retState *State, retErr error) { + handleOutput := true + defer func() { - retState, retErr = r.handleOutput(callCtx, monitor, env, state, retState, retErr) + if handleOutput { + retState, retErr = r.handleOutput(callCtx, monitor, env, state, retState, retErr) + } }() if state.Continuation == nil { @@ -521,21 +525,33 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s if state.Continuation.Result != nil && len(state.Continuation.Calls) == 0 && state.SubCallID == "" && state.ResumeInput == nil { progressClose() - monitor.Event(Event{ - Time: time.Now(), - CallContext: callCtx.GetCallContext(), - Type: EventTypeCallFinish, - Content: getEventContent(*state.Continuation.Result, callCtx), - }) if callCtx.Tool.Chat { - return &State{ + retState = &State{ Continuation: state.Continuation, ContinuationToolID: callCtx.Tool.ID, - }, nil + } + } else { + retState = &State{ + Result: state.Continuation.Result, + } } - return &State{ - Result: state.Continuation.Result, - }, nil + handleOutput = false + retState, retErr = r.handleOutput(callCtx, 
monitor, env, state, retState, nil) + if retErr == nil { + var content string + if retState.Continuation != nil && retState.Continuation.Result != nil { + content = *retState.Continuation.Result + } else if retState.Result != nil { + content = *retState.Result + } + monitor.Event(Event{ + Time: time.Now(), + CallContext: callCtx.GetCallContext(), + Type: EventTypeCallFinish, + Content: getEventContent(content, callCtx), + }) + } + return retState, retErr } monitor.Event(Event{ From eb036809105cf871e0c8e4f46361f54476dcab9e Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Mon, 16 Dec 2024 22:07:44 +0100 Subject: [PATCH 202/270] feat: chat-completion-client retries by default - to disable via env (#917) * feat: chat-completion-client retries by default - to disable via env --- go.mod | 2 +- go.sum | 4 ++-- pkg/openai/client.go | 13 +++++++++++-- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 5fa1a5c8..fb81ca10 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 - github.com/gptscript-ai/chat-completion-client v0.0.0-20241104122544-5fe75f07c131 + github.com/gptscript-ai/chat-completion-client v0.0.0-20241216203633-5c0178fb89ed github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 diff --git a/go.sum b/go.sum index 3661a6c6..85184dc1 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 h1:m9yLtIEd0z1ia8qFjq3u0Ozb6QKwidyL856JLJp6nbA= github.com/gptscript-ai/broadcaster 
v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= -github.com/gptscript-ai/chat-completion-client v0.0.0-20241104122544-5fe75f07c131 h1:y2FcmT4X8U606gUS0teX5+JWX9K/NclsLEhHiyrd+EU= -github.com/gptscript-ai/chat-completion-client v0.0.0-20241104122544-5fe75f07c131/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= +github.com/gptscript-ai/chat-completion-client v0.0.0-20241216203633-5c0178fb89ed h1:qMHm0IYpKgmw4KHX76RMB/duSICxo7IZuimPCKb0qG4= +github.com/gptscript-ai/chat-completion-client v0.0.0-20241216203633-5c0178fb89ed/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e h1:WpNae0NBx+Ri8RB3SxF8DhadDKU7h+jfWPQterDpbJA= diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 1894bdda..dea234a9 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -555,10 +555,19 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, var ( headers map[string]string modelProviderEnv []string + retryOpts = []openai.RetryOptions{ + { + Retries: 5, + RetryAboveCode: 499, // 5xx errors + RetryCodes: []int{429}, // 429 Too Many Requests (ratelimit) + }, + } ) for _, e := range env { if strings.HasPrefix(e, "GPTSCRIPT_MODEL_PROVIDER_") { modelProviderEnv = append(modelProviderEnv, e) + } else if strings.HasPrefix(e, "GPTSCRIPT_DISABLE_RETRIES") { + retryOpts = nil } } @@ -572,7 +581,7 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, if !streamResponse { request.StreamOptions = nil - resp, err := c.c.CreateChatCompletion(ctx, request, headers) + resp, err := c.c.CreateChatCompletion(ctx, request, headers, retryOpts...) 
if err != nil { return types.CompletionMessage{}, err } @@ -597,7 +606,7 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, }), nil } - stream, err := c.c.CreateChatCompletionStream(ctx, request, headers) + stream, err := c.c.CreateChatCompletionStream(ctx, request, headers, retryOpts...) if err != nil { return types.CompletionMessage{}, err } From badb126db3f5801ffad9788882a12236215ee9c9 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 18 Dec 2024 11:34:47 -0500 Subject: [PATCH 203/270] enhance: sdk: list full model objects, instead of just names (#919) Signed-off-by: Grant Linville --- pkg/cli/gptscript.go | 5 ++++- pkg/gptscript/gptscript.go | 3 ++- pkg/llm/registry.go | 9 ++++++--- pkg/openai/client.go | 18 +++++++++++------- pkg/remote/remote.go | 13 +++++++++---- pkg/sdkserver/routes.go | 2 +- 6 files changed, 33 insertions(+), 17 deletions(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index d0481ec8..a3454dd5 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -276,7 +276,10 @@ func (r *GPTScript) listModels(ctx context.Context, gptScript *gptscript.GPTScri if err != nil { return err } - fmt.Println(strings.Join(models, "\n")) + + for _, model := range models { + fmt.Println(model.ID) + } return nil } diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index dfb1771a..5a7229a1 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -10,6 +10,7 @@ import ( "slices" "strings" + openai2 "github.com/gptscript-ai/chat-completion-client" "github.com/gptscript-ai/gptscript/pkg/builtin" "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/config" @@ -275,7 +276,7 @@ func (g *GPTScript) ListTools(_ context.Context, prg types.Program) []types.Tool return prg.TopLevelTools() } -func (g *GPTScript) ListModels(ctx context.Context, providers ...string) ([]string, error) { +func (g *GPTScript) ListModels(ctx context.Context, providers 
...string) ([]openai2.Model, error) { return g.Registry.ListModels(ctx, providers...) } diff --git a/pkg/llm/registry.go b/pkg/llm/registry.go index 09fe1dce..d53d96b9 100644 --- a/pkg/llm/registry.go +++ b/pkg/llm/registry.go @@ -8,6 +8,7 @@ import ( "sync" "github.com/google/uuid" + openai2 "github.com/gptscript-ai/chat-completion-client" "github.com/gptscript-ai/gptscript/pkg/env" "github.com/gptscript-ai/gptscript/pkg/openai" "github.com/gptscript-ai/gptscript/pkg/remote" @@ -16,7 +17,7 @@ import ( type Client interface { Call(ctx context.Context, messageRequest types.CompletionRequest, env []string, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) - ListModels(ctx context.Context, providers ...string) (result []string, _ error) + ListModels(ctx context.Context, providers ...string) (result []openai2.Model, _ error) Supports(ctx context.Context, modelName string) (bool, error) } @@ -38,7 +39,7 @@ func (r *Registry) AddClient(client Client) error { return nil } -func (r *Registry) ListModels(ctx context.Context, providers ...string) (result []string, _ error) { +func (r *Registry) ListModels(ctx context.Context, providers ...string) (result []openai2.Model, _ error) { for _, v := range r.clients { models, err := v.ListModels(ctx, providers...) if err != nil { @@ -46,7 +47,9 @@ func (r *Registry) ListModels(ctx context.Context, providers ...string) (result } result = append(result, models...) 
} - sort.Strings(result) + sort.Slice(result, func(i, j int) bool { + return result[i].ID < result[j].ID + }) return result, nil } diff --git a/pkg/openai/client.go b/pkg/openai/client.go index dea234a9..db911962 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -157,10 +157,15 @@ func (c *Client) Supports(ctx context.Context, modelName string) (bool, error) { return false, InvalidAuthError{} } - return slices.Contains(models, modelName), nil + for _, model := range models { + if model.ID == modelName { + return true, nil + } + } + return false, nil } -func (c *Client) ListModels(ctx context.Context, providers ...string) (result []string, _ error) { +func (c *Client) ListModels(ctx context.Context, providers ...string) ([]openai.Model, error) { // Only serve if providers is empty or "" is in the list if len(providers) != 0 && !slices.Contains(providers, "") { return nil, nil @@ -179,11 +184,10 @@ func (c *Client) ListModels(ctx context.Context, providers ...string) (result [] if err != nil { return nil, err } - for _, model := range models.Models { - result = append(result, model.ID) - } - sort.Strings(result) - return result, nil + sort.Slice(models.Models, func(i, j int) bool { + return models.Models[i].ID < models.Models[j].ID + }) + return models.Models, nil } func (c *Client) cacheKey(request openai.ChatCompletionRequest) any { diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 5542372b..93f612ef 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -9,6 +9,7 @@ import ( "strings" "sync" + openai2 "github.com/gptscript-ai/chat-completion-client" "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/engine" @@ -62,7 +63,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques return client.Call(ctx, messageRequest, env, status) } -func (c *Client) ListModels(ctx context.Context, providers ...string) (result 
[]string, _ error) { +func (c *Client) ListModels(ctx context.Context, providers ...string) (result []openai2.Model, _ error) { for _, provider := range providers { client, err := c.load(ctx, provider) if err != nil { @@ -72,12 +73,16 @@ func (c *Client) ListModels(ctx context.Context, providers ...string) (result [] if err != nil { return nil, err } - for _, model := range models { - result = append(result, model+" from "+provider) + for i := range models { + models[i].ID = fmt.Sprintf("%s from %s", models[i].ID, provider) } + + result = append(result, models...) } - sort.Strings(result) + sort.Slice(result, func(i, j int) bool { + return result[i].ID < result[j].ID + }) return } diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 73bf5d58..801227a1 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -145,7 +145,7 @@ func (s *server) listModels(w http.ResponseWriter, r *http.Request) { return } - writeResponse(logger, w, map[string]any{"stdout": strings.Join(out, "\n")}) + writeResponse(logger, w, map[string]any{"stdout": out}) } // execHandler is a general handler for executing tools with gptscript. This is mainly responsible for parsing the request body. 
From e7b2e64afd431efd5e5463558d469ab1958199b4 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Thu, 19 Dec 2024 20:32:18 +0100 Subject: [PATCH 204/270] chore: update chat-completion-client for retry logging fix (#920) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fb81ca10..2c59b5d0 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 - github.com/gptscript-ai/chat-completion-client v0.0.0-20241216203633-5c0178fb89ed + github.com/gptscript-ai/chat-completion-client v0.0.0-20241219123536-85c44096bc10 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 diff --git a/go.sum b/go.sum index 85184dc1..a05b77e7 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 h1:m9yLtIEd0z1ia8qFjq3u0Ozb6QKwidyL856JLJp6nbA= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= -github.com/gptscript-ai/chat-completion-client v0.0.0-20241216203633-5c0178fb89ed h1:qMHm0IYpKgmw4KHX76RMB/duSICxo7IZuimPCKb0qG4= -github.com/gptscript-ai/chat-completion-client v0.0.0-20241216203633-5c0178fb89ed/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= +github.com/gptscript-ai/chat-completion-client v0.0.0-20241219123536-85c44096bc10 h1:v251qdhjAE+mCi3s+ekmGbqV9BurrMTl0Vd8/0MvsTY= +github.com/gptscript-ai/chat-completion-client v0.0.0-20241219123536-85c44096bc10/go.mod 
h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e h1:WpNae0NBx+Ri8RB3SxF8DhadDKU7h+jfWPQterDpbJA= From 0be969a104a52f0b0c5d7c7d4fdbafb315aeed73 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 2 Jan 2025 22:41:34 -0700 Subject: [PATCH 205/270] chore: add sys.call --- pkg/chat/chat.go | 13 ++-- pkg/cli/eval.go | 4 +- pkg/credentials/credential.go | 9 +-- pkg/engine/call.go | 92 ++++++++++++++++++++++++ pkg/engine/engine.go | 2 + pkg/runner/output.go | 2 +- pkg/runner/runner.go | 131 ++++------------------------------ pkg/types/args.go | 98 +++++++++++++++++++++++++ pkg/types/tool.go | 5 ++ pkg/types/tool_test.go | 20 ++++++ 10 files changed, 245 insertions(+), 131 deletions(-) create mode 100644 pkg/engine/call.go create mode 100644 pkg/types/args.go diff --git a/pkg/chat/chat.go b/pkg/chat/chat.go index a3fdb97a..1e1fe63f 100644 --- a/pkg/chat/chat.go +++ b/pkg/chat/chat.go @@ -51,25 +51,30 @@ func Start(ctx context.Context, prevState runner.ChatState, chatter Chatter, prg resp runner.ChatResponse ) - prg, err := prg() + prog, err := prg() if err != nil { return err } - prompter.SetPrompt(getPrompt(prg, prevResp)) + prompter.SetPrompt(getPrompt(prog, prevResp)) if startInput != "" { input = startInput startInput = "" - } else if targetTool := prg.ToolSet[prg.EntryToolID]; !((prevState == nil || prevState == "") && targetTool.Arguments == nil && targetTool.Instructions != "") { + } else if targetTool := prog.ToolSet[prog.EntryToolID]; !((prevState == nil || prevState == "") && targetTool.Arguments == nil && targetTool.Instructions != "") { // The above logic will skip prompting if this is the first loop and the chat expects no args input, ok, err = 
prompter.Readline() if !ok || err != nil { return err } + + prog, err = prg() + if err != nil { + return err + } } - resp, err = chatter.Chat(ctx, prevState, prg, env, input) + resp, err = chatter.Chat(ctx, prevState, prog, env, input) if err != nil { return err } diff --git a/pkg/cli/eval.go b/pkg/cli/eval.go index 2cd4b1b5..c649a505 100644 --- a/pkg/cli/eval.go +++ b/pkg/cli/eval.go @@ -75,7 +75,9 @@ func (e *Eval) Run(cmd *cobra.Command, args []string) error { if e.Chat { return chat.Start(cmd.Context(), nil, runner, func() (types.Program, error) { - return prg, nil + return loader.ProgramFromSource(cmd.Context(), tool.String(), "", loader.Options{ + Cache: runner.Cache, + }) }, os.Environ(), toolInput, "") } diff --git a/pkg/credentials/credential.go b/pkg/credentials/credential.go index f589a065..e458cb9f 100644 --- a/pkg/credentials/credential.go +++ b/pkg/credentials/credential.go @@ -55,13 +55,8 @@ func (c Credential) toDockerAuthConfig() (types.AuthConfig, error) { func credentialFromDockerAuthConfig(authCfg types.AuthConfig) (Credential, error) { var cred Credential if authCfg.Password != "" { - if err := json.Unmarshal([]byte(authCfg.Password), &cred); err != nil || len(cred.Env) == 0 { - // Legacy: try unmarshalling into just an env map - var env map[string]string - if err := json.Unmarshal([]byte(authCfg.Password), &env); err != nil { - return Credential{}, err - } - cred.Env = env + if err := json.Unmarshal([]byte(authCfg.Password), &cred); err != nil { + return cred, fmt.Errorf("failed to unmarshal credential: %w", err) } } diff --git a/pkg/engine/call.go b/pkg/engine/call.go new file mode 100644 index 00000000..048d51ba --- /dev/null +++ b/pkg/engine/call.go @@ -0,0 +1,92 @@ +package engine + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/gptscript-ai/gptscript/pkg/types" +) + +func (e *Engine) runCall(ctx Context, tool types.Tool, input string) (*Return, error) { + interpreter, body, _ := strings.Cut(tool.Instructions, "\n") + + 
fields := strings.Fields(interpreter) + if len(fields) < 2 { + return nil, fmt.Errorf("invalid tool call, no target tool found in %s", tool.Instructions) + } + toolRef := strings.Join(fields[1:], " ") + + toolName, args := types.SplitArg(toolRef) + + toolNameParts := strings.Fields(toolName) + + toolName = toolNameParts[0] + toolNameArgs := toolNameParts[1:] + + targetTools, ok := tool.ToolMapping[toolName] + if !ok || len(targetTools) == 0 { + return nil, fmt.Errorf("target tool %s not found, must reference in `tools:` fields", toolName) + } + + ref := types.ToolReference{ + Reference: toolName, + Arg: args, + ToolID: targetTools[0].ToolID, + } + + newInput, err := types.GetToolRefInput(ctx.Program, ref, input) + if err != nil { + return nil, err + } + + newInput, err = mergeInputs(input, newInput) + if err != nil { + return nil, fmt.Errorf("failed to merge inputs: %w", err) + } + + newInput, err = mergeInputs(newInput, toString(map[string]string{ + "TOOL_CALL_ARGS": strings.Join(toolNameArgs, " "), + "TOOL_CALL_BODY": body, + })) + + newCtx := ctx + newCtx.Tool = ctx.Program.ToolSet[ref.ToolID] + + return e.Start(newCtx, newInput) +} + +func toString(data map[string]string) string { + out, err := json.Marshal(data) + if err != nil { + // this will never happen + panic(err) + } + return string(out) +} + +func mergeInputs(base, overlay string) (string, error) { + baseMap := map[string]interface{}{} + overlayMap := map[string]interface{}{} + + if overlay == "" || overlay == "{}" { + return base, nil + } + + err := json.Unmarshal([]byte(base), &baseMap) + if err != nil { + return "", fmt.Errorf("failed to unmarshal base input: %w", err) + } + + err = json.Unmarshal([]byte(overlay), &overlayMap) + if err != nil { + return "", fmt.Errorf("failed to unmarshal overlay input: %w", err) + } + + for k, v := range overlayMap { + baseMap[k] = v + } + + out, err := json.Marshal(baseMap) + return string(out), err +} diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 
a195a8b4..654d8654 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -286,6 +286,8 @@ func (e *Engine) runCommandTools(ctx Context, tool types.Tool, input string) (*R return e.runOpenAPI(tool, input) } else if tool.IsEcho() { return e.runEcho(tool) + } else if tool.IsCall() { + return e.runCall(ctx, tool, input) } s, err := e.runCommand(ctx, tool, input, ctx.ToolCategory) if err != nil { diff --git a/pkg/runner/output.go b/pkg/runner/output.go index 87e9670f..e48891fd 100644 --- a/pkg/runner/output.go +++ b/pkg/runner/output.go @@ -19,7 +19,7 @@ func argsForFilters(prg *types.Program, tool types.ToolReference, startState *St startInput = *startState.StartInput } - parsedArgs, err := getToolRefInput(prg, tool, startInput) + parsedArgs, err := types.GetToolRefInput(prg, tool, startInput) if err != nil { return "", err } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 272e5a99..62ca91c9 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "sort" - "strings" "sync" "time" @@ -245,92 +244,6 @@ var ( EventTypeRunFinish EventType = "runFinish" ) -func getToolRefInput(prg *types.Program, ref types.ToolReference, input string) (string, error) { - if ref.Arg == "" { - return "", nil - } - - targetArgs := prg.ToolSet[ref.ToolID].Arguments - targetKeys := map[string]string{} - - if ref.Arg == "*" { - return input, nil - } - - if targetArgs == nil { - return "", nil - } - - for targetKey := range targetArgs.Properties { - targetKeys[strings.ToLower(targetKey)] = targetKey - } - - inputMap := map[string]interface{}{} - outputMap := map[string]interface{}{} - - _ = json.Unmarshal([]byte(input), &inputMap) - for k, v := range inputMap { - inputMap[strings.ToLower(k)] = v - } - - fields := strings.Fields(ref.Arg) - - for i := 0; i < len(fields); i++ { - field := fields[i] - if field == "and" { - continue - } - if field == "as" { - i++ - continue - } - - var ( - keyName string - val any - ) - - if 
strings.HasPrefix(field, "$") { - key := strings.TrimPrefix(field, "$") - key = strings.TrimPrefix(key, "{") - key = strings.TrimSuffix(key, "}") - val = inputMap[strings.ToLower(key)] - } else { - val = field - } - - if len(fields) > i+1 && fields[i+1] == "as" { - keyName = strings.ToLower(fields[i+2]) - } - - if len(targetKeys) == 0 { - return "", fmt.Errorf("can not assign arg to context because target tool [%s] has no defined args", ref.ToolID) - } - - if keyName == "" { - if len(targetKeys) != 1 { - return "", fmt.Errorf("can not assign arg to context because target tool [%s] has does not have one args. You must use \"as\" syntax to map the arg to a key %v", ref.ToolID, targetKeys) - } - for k := range targetKeys { - keyName = k - } - } - - if targetKey, ok := targetKeys[strings.ToLower(keyName)]; ok { - outputMap[targetKey] = val - } else { - return "", fmt.Errorf("can not assign arg to context because target tool [%s] has does not args [%s]", ref.ToolID, keyName) - } - } - - if len(outputMap) == 0 { - return "", nil - } - - output, err := json.Marshal(outputMap) - return string(output), err -} - func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monitor, env []string, input string) (result []engine.InputContext, _ error) { toolRefs, err := callCtx.Tool.GetToolsByType(callCtx.Program, types.ToolTypeContext) if err != nil { @@ -343,7 +256,7 @@ func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monito continue } - contextInput, err := getToolRefInput(callCtx.Program, toolRef, input) + contextInput, err := types.GetToolRefInput(callCtx.Program, toolRef, input) if err != nil { return nil, err } @@ -878,18 +791,9 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env refresh bool ) - // Only try to look up the cred if the tool is on GitHub or has an alias. - // If it is a GitHub tool and has an alias, the alias overrides the tool name, so we use it as the credential name. 
- if isGitHubTool(toolName) && credentialAlias == "" { - c, exists, err = r.credStore.Get(callCtx.Ctx, toolName) - if err != nil { - return nil, fmt.Errorf("failed to get credentials for tool %s: %w", toolName, err) - } - } else if credentialAlias != "" { - c, exists, err = r.credStore.Get(callCtx.Ctx, credentialAlias) - if err != nil { - return nil, fmt.Errorf("failed to get credential %s: %w", credentialAlias, err) - } + c, exists, err = r.credStore.Get(callCtx.Ctx, credName) + if err != nil { + return nil, fmt.Errorf("failed to get credentials for tool %s: %w", toolName, err) } if c == nil { @@ -955,22 +859,17 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } if !resultCredential.Ephemeral { - // Only store the credential if the tool is on GitHub or has an alias, and the credential is non-empty. - if (isGitHubTool(toolName) && callCtx.Program.ToolSet[ref.ToolID].Source.Repo != nil) || credentialAlias != "" { - if isEmpty { - log.Warnf("Not saving empty credential for tool %s", toolName) + if isEmpty { + log.Warnf("Not saving empty credential for tool %s", toolName) + } else { + if refresh { + err = r.credStore.Refresh(callCtx.Ctx, resultCredential) } else { - if refresh { - err = r.credStore.Refresh(callCtx.Ctx, resultCredential) - } else { - err = r.credStore.Add(callCtx.Ctx, resultCredential) - } - if err != nil { - return nil, fmt.Errorf("failed to save credential for tool %s: %w", toolName, err) - } + err = r.credStore.Add(callCtx.Ctx, resultCredential) + } + if err != nil { + return nil, fmt.Errorf("failed to save credential for tool %s: %w", toolName, err) } - } else { - log.Warnf("Not saving credential for tool %s - credentials will only be saved for tools from GitHub, or tools that use aliases.", toolName) } } } else { @@ -992,7 +891,3 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env return env, nil } - -func isGitHubTool(toolName string) bool { - return strings.HasPrefix(toolName, 
"github.com") -} diff --git a/pkg/types/args.go b/pkg/types/args.go new file mode 100644 index 00000000..fa9c82b2 --- /dev/null +++ b/pkg/types/args.go @@ -0,0 +1,98 @@ +package types + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/google/shlex" +) + +func GetToolRefInput(prg *Program, ref ToolReference, input string) (string, error) { + if ref.Arg == "" { + return "", nil + } + + targetArgs := prg.ToolSet[ref.ToolID].Arguments + targetKeys := map[string]string{} + + if ref.Arg == "*" { + return input, nil + } + + if targetArgs == nil { + return "", nil + } + + for targetKey := range targetArgs.Properties { + targetKeys[strings.ToLower(targetKey)] = targetKey + } + + inputMap := map[string]interface{}{} + outputMap := map[string]interface{}{} + + _ = json.Unmarshal([]byte(input), &inputMap) + for k, v := range inputMap { + inputMap[strings.ToLower(k)] = v + } + + fields, err := shlex.Split(ref.Arg) + if err != nil { + return "", fmt.Errorf("invalid tool args %q: %v", ref.Arg, err) + } + + for i := 0; i < len(fields); i++ { + field := fields[i] + if field == "and" { + continue + } + if field == "as" { + i++ + continue + } + + var ( + keyName string + val any + ) + + if strings.HasPrefix(field, "$") { + key := strings.TrimPrefix(field, "$") + key = strings.TrimPrefix(key, "{") + key = strings.TrimSuffix(key, "}") + val = inputMap[strings.ToLower(key)] + } else { + val = field + } + + if len(fields) > i+1 && fields[i+1] == "as" { + keyName = strings.ToLower(fields[i+2]) + } + + if len(targetKeys) == 0 { + return "", fmt.Errorf("can not assign arg to context because target tool [%s] has no defined args", ref.ToolID) + } + + if keyName == "" { + if len(targetKeys) != 1 { + return "", fmt.Errorf("can not assign arg to context because target tool [%s] does not have one args. 
You must use \"as\" syntax to map the arg to a key %v", ref.ToolID, targetKeys) + } + for k := range targetKeys { + keyName = k + } + } + + if targetKey, ok := targetKeys[strings.ToLower(keyName)]; ok { + outputMap[targetKey] = val + } else { + return "", fmt.Errorf("can not assign arg to context because target tool [%s] does not args [%s]", ref.ToolID, keyName) + } + } + + if len(outputMap) == 0 { + return "", nil + } + + output, err := json.Marshal(outputMap) + return string(output), err +} diff --git a/pkg/types/tool.go b/pkg/types/tool.go index e6cbf37f..fcd0d53d 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -19,6 +19,7 @@ const ( DaemonPrefix = "#!sys.daemon" OpenAPIPrefix = "#!sys.openapi" EchoPrefix = "#!sys.echo" + CallPrefix = "#!sys.call" CommandPrefix = "#!" ) @@ -864,6 +865,10 @@ func (t Tool) IsEcho() bool { return strings.HasPrefix(t.Instructions, EchoPrefix) } +func (t Tool) IsCall() bool { + return strings.HasPrefix(t.Instructions, CallPrefix) +} + func (t Tool) IsHTTP() bool { return strings.HasPrefix(t.Instructions, "#!http://") || strings.HasPrefix(t.Instructions, "#!https://") diff --git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index 89c36ac8..a146955e 100644 --- a/pkg/types/tool_test.go +++ b/pkg/types/tool_test.go @@ -94,3 +94,23 @@ func float32Ptr(f float32) *float32 { func boolPtr(b bool) *bool { return &b } + +func TestSplitArg(t *testing.T) { + prefix, arg := SplitArg("") + autogold.Expect([]string{"", ""}).Equal(t, []string{prefix, arg}) + + prefix, arg = SplitArg("toolName") + autogold.Expect([]string{"toolName", ""}).Equal(t, []string{prefix, arg}) + + prefix, arg = SplitArg("toolName as myAlias") + autogold.Expect([]string{"toolName", "as myAlias"}).Equal(t, []string{prefix, arg}) + + prefix, arg = SplitArg("toolName with value1 as arg1 and value2 as arg2") + autogold.Expect([]string{"toolName", "value1 as arg1 and value2 as arg2"}).Equal(t, []string{prefix, arg}) + + prefix, arg = SplitArg("toolName as myAlias 
with value1 as arg1 and value2 as arg2") + autogold.Expect([]string{"toolName", "value1 as arg1 and value2 as arg2"}).Equal(t, []string{prefix, arg}) + + prefix, arg = SplitArg("toolName with value1 as arg1 and value2 as arg2 as myAlias") + autogold.Expect([]string{"toolName", "value1 as arg1 and value2 as arg2 as myAlias"}).Equal(t, []string{prefix, arg}) +} From 59ad4c0b0d64df738a96bf16f311621e2b839655 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 2 Jan 2025 23:06:43 -0700 Subject: [PATCH 206/270] bug: revert removing github cred check --- pkg/engine/call.go | 3 +++ pkg/runner/runner.go | 43 +++++++++++++++++++++++++++++++------------ 2 files changed, 34 insertions(+), 12 deletions(-) diff --git a/pkg/engine/call.go b/pkg/engine/call.go index 048d51ba..d116d0fa 100644 --- a/pkg/engine/call.go +++ b/pkg/engine/call.go @@ -49,6 +49,9 @@ func (e *Engine) runCall(ctx Context, tool types.Tool, input string) (*Return, e "TOOL_CALL_ARGS": strings.Join(toolNameArgs, " "), "TOOL_CALL_BODY": body, })) + if err != nil { + return nil, fmt.Errorf("failed to merge inputs for tool calls: %w", err) + } newCtx := ctx newCtx.Tool = ctx.Program.ToolSet[ref.ToolID] diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 62ca91c9..09b242f7 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "sort" + "strings" "sync" "time" @@ -791,9 +792,18 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env refresh bool ) - c, exists, err = r.credStore.Get(callCtx.Ctx, credName) - if err != nil { - return nil, fmt.Errorf("failed to get credentials for tool %s: %w", toolName, err) + // Only try to look up the cred if the tool is on GitHub or has an alias. + // If it is a GitHub tool and has an alias, the alias overrides the tool name, so we use it as the credential name. 
+ if isGitHubTool(toolName) && credentialAlias == "" { + c, exists, err = r.credStore.Get(callCtx.Ctx, toolName) + if err != nil { + return nil, fmt.Errorf("failed to get credentials for tool %s: %w", toolName, err) + } + } else if credentialAlias != "" { + c, exists, err = r.credStore.Get(callCtx.Ctx, credentialAlias) + if err != nil { + return nil, fmt.Errorf("failed to get credential %s: %w", credentialAlias, err) + } } if c == nil { @@ -859,17 +869,22 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env } if !resultCredential.Ephemeral { - if isEmpty { - log.Warnf("Not saving empty credential for tool %s", toolName) - } else { - if refresh { - err = r.credStore.Refresh(callCtx.Ctx, resultCredential) + // Only store the credential if the tool is on GitHub or has an alias, and the credential is non-empty. + if (isGitHubTool(toolName) && callCtx.Program.ToolSet[ref.ToolID].Source.Repo != nil) || credentialAlias != "" { + if isEmpty { + log.Warnf("Not saving empty credential for tool %s", toolName) } else { - err = r.credStore.Add(callCtx.Ctx, resultCredential) - } - if err != nil { - return nil, fmt.Errorf("failed to save credential for tool %s: %w", toolName, err) + if refresh { + err = r.credStore.Refresh(callCtx.Ctx, resultCredential) + } else { + err = r.credStore.Add(callCtx.Ctx, resultCredential) + } + if err != nil { + return nil, fmt.Errorf("failed to save credential for tool %s: %w", toolName, err) + } } + } else { + log.Warnf("Not saving credential for tool %s - credentials will only be saved for tools from GitHub, or tools that use aliases.", toolName) } } } else { @@ -891,3 +906,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env return env, nil } + +func isGitHubTool(toolName string) bool { + return strings.HasPrefix(toolName, "github.com") +} From a89c4428d9db50bb8769906b95287449fc8eb571 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 2 Jan 2025 16:41:47 -0500 Subject: [PATCH 
207/270] enhance: add a maximum consecutive tools calls restriction This is a safety net for instances when the LLM goes awry. There is an undocumented environment variable to change the maximum number of consecutive tool calls from the default of 10. Signed-off-by: Donnie Adams --- pkg/engine/engine.go | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 654d8654..69e16b68 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -4,6 +4,9 @@ import ( "context" "encoding/json" "fmt" + "os" + "slices" + "strconv" "strings" "sync" @@ -12,6 +15,16 @@ import ( "github.com/gptscript-ai/gptscript/pkg/version" ) +var maxConsecutiveToolCalls = 10 + +func init() { + if val := os.Getenv("GPTSCRIPT_MAX_CONSECUTIVE_TOOL_CALLS"); val != "" { + if i, err := strconv.Atoi(val); err == nil && i > 0 { + maxConsecutiveToolCalls = i + } + } +} + type Model interface { Call(ctx context.Context, messageRequest types.CompletionRequest, env []string, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) ProxyInfo([]string) (string, string, error) @@ -387,6 +400,28 @@ func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { } }() + // Limit the number of consecutive tool calls and responses. + // We don't want the LLM to call tools unrestricted or get stuck in an error loop. + var messagesSinceLastUserMessage int + for _, msg := range slices.Backward(state.Completion.Messages) { + if msg.Role == types.CompletionMessageRoleTypeUser { + break + } + messagesSinceLastUserMessage++ + } + // Divide by 2 because tool calls come in pairs: call and response. 
+ if messagesSinceLastUserMessage/2 > maxConsecutiveToolCalls { + msg := fmt.Sprintf("We cannot continue because the number of consecutive tool calls is limited to %d.", maxConsecutiveToolCalls) + ret.State.Completion.Messages = append(state.Completion.Messages, types.CompletionMessage{ + Role: types.CompletionMessageRoleTypeAssistant, + Content: []types.ContentPart{{Text: msg}}, + }) + // Setting this ensures that chat continues as expected when we hit this problem. + state.Pending = map[string]types.CompletionToolCall{} + ret.Result = &msg + return &ret, nil + } + resp, err := e.Model.Call(ctx, state.Completion, e.Env, progress) if err != nil { return nil, fmt.Errorf("failed calling model for completion: %w", err) From 78f07b0c18b803702c4a6d65025b05b0a396c153 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Sat, 4 Jan 2025 21:00:38 -0500 Subject: [PATCH 208/270] fix: only count assistant messages when limiting consecutive tool calls Before this change, the number of consecutive tool calls was counted by looking at all messages since the last time a message with role "user" was sent. This was bad logic because the LLM could ask for many parallel tool calls. This change addresses this problem by only considering messages with role "assistant" with tool calls since the last user message. Signed-off-by: Donnie Adams --- pkg/engine/engine.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 69e16b68..bd7b09ab 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -400,17 +400,24 @@ func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { } }() - // Limit the number of consecutive tool calls and responses. + // Limit the number of consecutive tool calls. // We don't want the LLM to call tools unrestricted or get stuck in an error loop. 
var messagesSinceLastUserMessage int for _, msg := range slices.Backward(state.Completion.Messages) { if msg.Role == types.CompletionMessageRoleTypeUser { break + } else if msg.Role == types.CompletionMessageRoleTypeAssistant { + for _, content := range msg.Content { + // If this message is requesting that a tool call be made, then count it towards the limit. + if content.ToolCall != nil { + messagesSinceLastUserMessage++ + break + } + } } - messagesSinceLastUserMessage++ } - // Divide by 2 because tool calls come in pairs: call and response. - if messagesSinceLastUserMessage/2 > maxConsecutiveToolCalls { + + if messagesSinceLastUserMessage > maxConsecutiveToolCalls { msg := fmt.Sprintf("We cannot continue because the number of consecutive tool calls is limited to %d.", maxConsecutiveToolCalls) ret.State.Completion.Messages = append(state.Completion.Messages, types.CompletionMessage{ Role: types.CompletionMessageRoleTypeAssistant, From c4812c6df093b733069d98390ba9a95e15b9a5cc Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 13 Jan 2025 09:17:57 -0500 Subject: [PATCH 209/270] enhance: clear cache on load with disabled cache A change was made such that the cache key was removed when a tool was run with cache disabled. This change connects the cache disabled feature when running tools with the cache disabled when loading tools. That way, if a tool is loaded with cache disabled, then this newer version is used when running the tool the next time. Signed-off-by: Donnie Adams --- pkg/loader/url.go | 3 +++ pkg/sdkserver/routes.go | 15 ++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/pkg/loader/url.go b/pkg/loader/url.go index 72970546..d84e08be 100644 --- a/pkg/loader/url.go +++ b/pkg/loader/url.go @@ -63,6 +63,9 @@ func loadURL(ctx context.Context, cache *cache.Client, base *source, name string } } } + if cachedKey.Path == "" { + cachedKey.Path = "." 
+ } if ok, err := cache.Get(ctx, cachedKey, &cachedValue); err != nil { return nil, false, err diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 801227a1..41ee68e0 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -232,21 +232,22 @@ func (s *server) load(w http.ResponseWriter, r *http.Request) { logger.Debugf("parsing file: file=%s, content=%s", reqObject.File, reqObject.Content) var ( - prg types.Program - err error - cache = s.client.Cache + prg types.Program + err error + + ctx = r.Context() ) if reqObject.DisableCache { - cache = nil + ctx = cache.WithNoCache(ctx) } if reqObject.Content != "" { - prg, err = loader.ProgramFromSource(r.Context(), reqObject.Content, reqObject.SubTool, loader.Options{Cache: cache}) + prg, err = loader.ProgramFromSource(ctx, reqObject.Content, reqObject.SubTool, loader.Options{Cache: s.client.Cache}) } else if reqObject.File != "" { - prg, err = loader.Program(r.Context(), reqObject.File, reqObject.SubTool, loader.Options{Cache: cache}) + prg, err = loader.Program(ctx, reqObject.File, reqObject.SubTool, loader.Options{Cache: s.client.Cache}) } else { - prg, err = loader.ProgramFromSource(r.Context(), reqObject.ToolDefs.String(), reqObject.SubTool, loader.Options{Cache: cache}) + prg, err = loader.ProgramFromSource(ctx, reqObject.ToolDefs.String(), reqObject.SubTool, loader.Options{Cache: s.client.Cache}) } if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) From a8208bb110f7caa48a07c6cfb6b3a074292b41a1 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 13 Jan 2025 20:07:07 -0700 Subject: [PATCH 210/270] chore: don't always copy the current env in the sdkserver --- pkg/sdkserver/routes.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 41ee68e0..c01eeb21 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "net/http" 
- "os" "sort" "strings" "sync" @@ -172,7 +171,6 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { reqObject.ChatState = "null" } - reqObject.Env = append(os.Environ(), reqObject.Env...) // Don't overwrite the PromptURLEnvVar if it is already set in the environment. var promptTokenAlreadySet bool for _, env := range reqObject.Env { From 9cb92bc3378a6e5b93806df08908c857624c33af Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 13 Jan 2025 21:36:43 -0700 Subject: [PATCH 211/270] chore: allow cred store env to be protected --- pkg/gptscript/gptscript.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 5a7229a1..6b7ceb04 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -58,6 +58,8 @@ type Options struct { DisablePromptServer bool SystemToolsDir string Env []string + CredentialStore string + CredentialToolsEnv []string } func Complete(opts ...Options) Options { @@ -73,8 +75,10 @@ func Complete(opts ...Options) Options { result.Quiet = types.FirstSet(opt.Quiet, result.Quiet) result.Workspace = types.FirstSet(opt.Workspace, result.Workspace) result.Env = append(result.Env, opt.Env...) + result.CredentialToolsEnv = append(result.CredentialToolsEnv, opt.CredentialToolsEnv...) 
result.DisablePromptServer = types.FirstSet(opt.DisablePromptServer, result.DisablePromptServer) result.DefaultModelProvider = types.FirstSet(opt.DefaultModelProvider, result.DefaultModelProvider) + result.CredentialStore = types.FirstSet(opt.CredentialStore, result.CredentialStore) } if result.Quiet == nil { @@ -83,6 +87,9 @@ func Complete(opts ...Options) Options { if len(result.Env) == 0 { result.Env = os.Environ() } + if len(result.CredentialToolsEnv) == 0 { + result.CredentialToolsEnv = result.Env + } if len(result.CredentialContexts) == 0 { result.CredentialContexts = []string{credentials.DefaultCredentialContext} } @@ -104,11 +111,15 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { return nil, err } + if opts.CredentialStore != "" { + cliCfg.CredentialsStore = opts.CredentialStore + } + if opts.Runner.RuntimeManager == nil { opts.Runner.RuntimeManager = runtimes.Default(cacheClient.CacheDir(), opts.SystemToolsDir) } - simplerRunner, err := newSimpleRunner(cacheClient, opts.Runner.RuntimeManager, opts.Env) + simplerRunner, err := newSimpleRunner(cacheClient, opts.Runner.RuntimeManager, opts.CredentialToolsEnv) if err != nil { return nil, err } From 73a9ffeb1cc8c027cba02ea5df7066470644c040 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 13 Jan 2025 22:39:12 -0700 Subject: [PATCH 212/270] chore: seperate out env for dataset/workspace tools --- pkg/sdkserver/datasets.go | 8 ++++---- pkg/sdkserver/routes.go | 1 + pkg/sdkserver/server.go | 16 +++++++++++----- pkg/sdkserver/workspaces.go | 20 ++++++++++++-------- 4 files changed, 28 insertions(+), 17 deletions(-) diff --git a/pkg/sdkserver/datasets.go b/pkg/sdkserver/datasets.go index c00308e7..c4178801 100644 --- a/pkg/sdkserver/datasets.go +++ b/pkg/sdkserver/datasets.go @@ -79,7 +79,7 @@ func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, req.Env, req.Input) + result, err := g.Run(r.Context(), prg, 
s.getServerToolsEnv(req.Env), req.Input) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -147,7 +147,7 @@ func (s *server) addDatasetElements(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, req.Env, req.Input) + result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -207,7 +207,7 @@ func (s *server) listDatasetElements(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, req.Env, req.Input) + result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -270,7 +270,7 @@ func (s *server) getDatasetElement(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, req.Env, req.Input) + result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index c01eeb21..dfad4a18 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -28,6 +28,7 @@ type server struct { gptscriptOpts gptscript.Options address, token string datasetTool, workspaceTool string + serverToolsEnv []string client *gptscript.GPTScript events *broadcaster.Broadcaster[event] diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 04bff085..79d6daf7 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -28,6 +28,7 @@ type Options struct { ListenAddress string DatasetTool, WorkspaceTool string + ServerToolsEnv []string Debug bool DisableServerErrorLogging bool } @@ -105,11 +106,13 @@ 
func run(ctx context.Context, listener net.Listener, opts Options) error { } s := &server{ - gptscriptOpts: opts.Options, - address: listener.Addr().String(), - token: token, - datasetTool: opts.DatasetTool, - workspaceTool: opts.WorkspaceTool, + gptscriptOpts: opts.Options, + address: listener.Addr().String(), + token: token, + datasetTool: opts.DatasetTool, + workspaceTool: opts.WorkspaceTool, + serverToolsEnv: opts.ServerToolsEnv, + client: g, events: events, runtimeManager: runtimes.Default(opts.Options.Cache.CacheDir, opts.SystemToolsDir), @@ -176,6 +179,9 @@ func complete(opts ...Options) Options { if result.DatasetTool == "" { result.DatasetTool = "github.com/gptscript-ai/datasets" } + if len(result.ServerToolsEnv) == 0 { + result.ServerToolsEnv = os.Environ() + } return result } diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index ed6602ea..f0d7ef00 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -30,6 +30,10 @@ type createWorkspaceRequest struct { FromWorkspaceIDs []string `json:"fromWorkspaceIDs"` } +func (s *server) getServerToolsEnv(env []string) []string { + return append(s.serverToolsEnv, env...) 
+} + func (s *server) createWorkspace(w http.ResponseWriter, r *http.Request) { logger := gcontext.GetLogger(r.Context()) var reqObject createWorkspaceRequest @@ -51,7 +55,7 @@ func (s *server) createWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - reqObject.Env, + s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( `{"provider": "%s", "workspace_ids": "%s"}`, reqObject.ProviderType, strings.Join(reqObject.FromWorkspaceIDs, ","), @@ -86,7 +90,7 @@ func (s *server) deleteWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - reqObject.Env, + s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( `{"workspace_id": "%s"}`, reqObject.ID, @@ -123,7 +127,7 @@ func (s *server) listWorkspaceContents(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - reqObject.Env, + s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( `{"workspace_id": "%s", "ls_prefix": "%s"}`, reqObject.ID, reqObject.Prefix, @@ -159,7 +163,7 @@ func (s *server) removeAllWithPrefixInWorkspace(w http.ResponseWriter, r *http.R out, err := s.client.Run( r.Context(), prg, - reqObject.Env, + s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( `{"workspace_id": "%s", "prefix": "%s"}`, reqObject.ID, reqObject.Prefix, @@ -196,7 +200,7 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - reqObject.Env, + s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( `{"workspace_id": "%s", "file_path": "%s", "body": "%s"}`, reqObject.ID, reqObject.FilePath, reqObject.Contents, @@ -232,7 +236,7 @@ func (s *server) removeFileInWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - reqObject.Env, + s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( `{"workspace_id": "%s", "file_path": "%s"}`, reqObject.ID, reqObject.FilePath, @@ -268,7 +272,7 @@ func (s *server) readFileInWorkspace(w http.ResponseWriter, 
r *http.Request) { out, err := s.client.Run( r.Context(), prg, - reqObject.Env, + s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( `{"workspace_id": "%s", "file_path": "%s"}`, reqObject.ID, reqObject.FilePath, @@ -304,7 +308,7 @@ func (s *server) statFileInWorkspace(w http.ResponseWriter, r *http.Request) { out, err := s.client.Run( r.Context(), prg, - reqObject.Env, + s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( `{"workspace_id": "%s", "file_path": "%s"}`, reqObject.ID, reqObject.FilePath, From f8b95c9ffab6f70556f5734f078438390d0287d8 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 14 Jan 2025 08:06:48 -0500 Subject: [PATCH 213/270] feat: add file revision API for workspaces Signed-off-by: Donnie Adams --- pkg/sdkserver/routes.go | 11 ++-- pkg/sdkserver/workspaces.go | 109 ++++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+), 7 deletions(-) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index dfad4a18..c4b45e92 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -3,7 +3,6 @@ package sdkserver import ( "encoding/json" "fmt" - "io" "net/http" "sort" "strings" @@ -81,6 +80,9 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /workspaces/delete-file", s.removeFileInWorkspace) mux.HandleFunc("POST /workspaces/read-file", s.readFileInWorkspace) mux.HandleFunc("POST /workspaces/stat-file", s.statFileInWorkspace) + mux.HandleFunc("POST /workspaces/list-revisions", s.listRevisions) + mux.HandleFunc("POST /workspaces/get-revision", s.getRevisionForFileInWorkspace) + mux.HandleFunc("POST /workspaces/delete-revision", s.deleteRevisionForFileInWorkspace) } // health just provides an endpoint for checking whether the server is running and accessible. @@ -152,14 +154,9 @@ func (s *server) listModels(w http.ResponseWriter, r *http.Request) { // Then the options and tool are passed to the process function. 
func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { logger := gcontext.GetLogger(r.Context()) - body, err := io.ReadAll(r.Body) - if err != nil { - writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to read request body: %w", err)) - return - } reqObject := new(toolOrFileRequest) - if err := json.Unmarshal(body, reqObject); err != nil { + if err := json.NewDecoder(r.Body).Decode(reqObject); err != nil { writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index f0d7ef00..bde59974 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -321,3 +321,112 @@ func (s *server) statFileInWorkspace(w http.ResponseWriter, r *http.Request) { writeResponse(logger, w, map[string]any{"stdout": out}) } + +type listRevisionsRequest struct { + workspaceCommonRequest `json:",inline"` + FilePath string `json:"filePath"` +} + +func (s *server) listRevisions(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject listRevisionsRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + } + + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "List Revisions for File in Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.getServerToolsEnv(reqObject.Env), + fmt.Sprintf( + `{"workspace_id": "%s", "file_path": "%s"}`, + reqObject.ID, reqObject.FilePath, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, 
map[string]any{"stdout": out}) +} + +type getRevisionForFileInWorkspaceRequest struct { + workspaceCommonRequest `json:",inline"` + FilePath string `json:"filePath"` + RevisionID string `json:"revisionID"` +} + +func (s *server) getRevisionForFileInWorkspace(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject getRevisionForFileInWorkspaceRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Get a Revision for File in Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.getServerToolsEnv(reqObject.Env), + fmt.Sprintf( + `{"workspace_id": "%s", "file_path": "%s", "revision_id": "%s"}`, + reqObject.ID, reqObject.FilePath, reqObject.RevisionID, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} + +type deleteRevisionForFileInWorkspaceRequest struct { + workspaceCommonRequest `json:",inline"` + FilePath string `json:"filePath"` + RevisionID string `json:"revisionID"` +} + +func (s *server) deleteRevisionForFileInWorkspace(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject deleteRevisionForFileInWorkspaceRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Delete a Revision for File in 
Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.getServerToolsEnv(reqObject.Env), + fmt.Sprintf( + `{"workspace_id": "%s", "file_path": "%s", "revision_id": "%s"}`, + reqObject.ID, reqObject.FilePath, reqObject.RevisionID, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} From ccc2ea4e75886ac3594a5b75fe2a433029592cc8 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 20 Jan 2025 08:04:23 -0500 Subject: [PATCH 214/270] fix: send the correct payload when creating workspaces from workspaces The workspace-provider daemon expects fromWorkspaceIDs instead of workspace_ids. Signed-off-by: Donnie Adams --- pkg/sdkserver/workspaces.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index bde59974..4389ff3b 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "net/http" - "strings" gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/loader" @@ -52,14 +51,20 @@ func (s *server) createWorkspace(w http.ResponseWriter, r *http.Request) { reqObject.ProviderType = "directory" } + b, err := json.Marshal(map[string]any{ + "provider": reqObject.ProviderType, + "fromWorkspaceIDs": reqObject.FromWorkspaceIDs, + }) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to marshal request body: %w", err)) + return + } + out, err := s.client.Run( r.Context(), prg, s.getServerToolsEnv(reqObject.Env), - fmt.Sprintf( - `{"provider": "%s", "workspace_ids": "%s"}`, - reqObject.ProviderType, 
strings.Join(reqObject.FromWorkspaceIDs, ","), - ), + string(b), ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) From 3f876b2ef42b1d8e199b706b8d08baf10244957f Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 20 Jan 2025 10:10:03 -0700 Subject: [PATCH 215/270] bug: don't drop valid json that doesn't have a message --- pkg/builtin/builtin.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index 42ff373b..ccbda66a 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -789,7 +789,7 @@ func SysChatFinish(_ context.Context, _ []string, input string, _ chan<- string) var params struct { Message string `json:"return,omitempty"` } - if err := json.Unmarshal([]byte(input), ¶ms); err != nil { + if err := json.Unmarshal([]byte(input), ¶ms); err != nil || params.Message == "" { return "", &engine.ErrChatFinish{ Message: input, } From c02c4cbaa1cc9f65c85ed54473e1d87379c6ebca Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 21 Jan 2025 20:25:36 -0700 Subject: [PATCH 216/270] chore: make credential overrides cred context aware --- pkg/credentials/factory.go | 60 +++++++++---- pkg/credentials/overrides.go | 149 +++++++++++++++++++++++++++++++++ pkg/gptscript/gptscript.go | 2 +- pkg/runner/credentials.go | 43 ---------- pkg/runner/credentials_test.go | 3 +- pkg/runner/runner.go | 2 +- 6 files changed, 199 insertions(+), 60 deletions(-) create mode 100644 pkg/credentials/overrides.go delete mode 100644 pkg/runner/credentials.go diff --git a/pkg/credentials/factory.go b/pkg/credentials/factory.go index ca6f1d18..42295fc8 100644 --- a/pkg/credentials/factory.go +++ b/pkg/credentials/factory.go @@ -2,6 +2,7 @@ package credentials import ( "context" + "strings" "github.com/docker/docker-credential-helpers/client" "github.com/gptscript-ai/gptscript/pkg/config" @@ -13,12 +14,32 @@ type ProgramLoaderRunner interface { Run(ctx 
context.Context, prg types.Program, input string) (output string, err error) } -func NewFactory(ctx context.Context, cfg *config.CLIConfig, plr ProgramLoaderRunner) (StoreFactory, error) { +func NewFactory(ctx context.Context, cfg *config.CLIConfig, overrides []string, plr ProgramLoaderRunner) (StoreFactory, error) { + creds, err := ParseCredentialOverrides(overrides) + if err != nil { + return StoreFactory{}, err + } + + overrideMap := make(map[string]map[string]map[string]string) + for k, v := range creds { + contextName, toolName, ok := strings.Cut(k, ctxSeparator) + if !ok { + continue + } + toolMap, ok := overrideMap[contextName] + if !ok { + toolMap = make(map[string]map[string]string) + } + toolMap[toolName] = v + overrideMap[contextName] = toolMap + } + toolName := translateToolName(cfg.CredentialsStore) if toolName == config.FileCredHelper { return StoreFactory{ - file: true, - cfg: cfg, + file: true, + cfg: cfg, + overrides: overrideMap, }, nil } @@ -28,10 +49,11 @@ func NewFactory(ctx context.Context, cfg *config.CLIConfig, plr ProgramLoaderRun } return StoreFactory{ - ctx: ctx, - prg: prg, - runner: plr, - cfg: cfg, + ctx: ctx, + prg: prg, + runner: plr, + cfg: cfg, + overrides: overrideMap, }, nil } @@ -41,6 +63,8 @@ type StoreFactory struct { file bool runner ProgramLoaderRunner cfg *config.CLIConfig + // That's a lot of maps: context -> toolName -> key -> value + overrides map[string]map[string]map[string]string } func (s *StoreFactory) NewStore(credCtxs []string) (CredentialStore, error) { @@ -48,15 +72,23 @@ func (s *StoreFactory) NewStore(credCtxs []string) (CredentialStore, error) { return nil, err } if s.file { - return Store{ - credCtxs: credCtxs, - cfg: s.cfg, + return withOverride{ + target: Store{ + credCtxs: credCtxs, + cfg: s.cfg, + }, + overrides: s.overrides, + credContext: credCtxs, }, nil } - return Store{ - credCtxs: credCtxs, - cfg: s.cfg, - program: s.program, + return withOverride{ + target: Store{ + credCtxs: credCtxs, + cfg: 
s.cfg, + program: s.program, + }, + overrides: s.overrides, + credContext: credCtxs, }, nil } diff --git a/pkg/credentials/overrides.go b/pkg/credentials/overrides.go new file mode 100644 index 00000000..0911cac5 --- /dev/null +++ b/pkg/credentials/overrides.go @@ -0,0 +1,149 @@ +package credentials + +import ( + "context" + "fmt" + "maps" + "os" + "strings" +) + +// ParseCredentialOverrides parses a string of credential overrides that the user provided as a command line arg. +// The format of credential overrides can be one of two things: +// cred1:ENV1,ENV2 (direct mapping of environment variables) +// cred1:ENV1=VALUE1,ENV2=VALUE2 (key-value pairs) +// +// This function turns it into a map[string]map[string]string like this: +// +// { +// "cred1": { +// "ENV1": "VALUE1", +// "ENV2": "VALUE2", +// } +// } +func ParseCredentialOverrides(overrides []string) (map[string]map[string]string, error) { + credentialOverrides := make(map[string]map[string]string) + + for _, o := range overrides { + credName, envs, found := strings.Cut(o, ":") + if !found { + return nil, fmt.Errorf("invalid credential override: %s", o) + } + envMap, ok := credentialOverrides[credName] + if !ok { + envMap = make(map[string]string) + } + for _, env := range strings.Split(envs, ",") { + for _, env := range strings.Split(env, "|") { + key, value, found := strings.Cut(env, "=") + if !found { + // User just passed an env var name as the key, so look up the value. 
+ value = os.Getenv(key) + } + envMap[key] = value + } + } + credentialOverrides[credName] = envMap + } + + return credentialOverrides, nil +} + +type withOverride struct { + target CredentialStore + credContext []string + overrides map[string]map[string]map[string]string +} + +func (w withOverride) Get(ctx context.Context, toolName string) (*Credential, bool, error) { + for _, credCtx := range w.credContext { + overrides, ok := w.overrides[credCtx] + if !ok { + continue + } + override, ok := overrides[toolName] + if !ok { + continue + } + + return &Credential{ + Context: credCtx, + ToolName: toolName, + Type: CredentialTypeTool, + Env: maps.Clone(override), + }, true, nil + } + + return w.target.Get(ctx, toolName) +} + +func (w withOverride) Add(ctx context.Context, cred Credential) error { + for _, credCtx := range w.credContext { + if override, ok := w.overrides[credCtx]; ok { + if _, ok := override[cred.ToolName]; ok { + return fmt.Errorf("cannot add credential with context %q and tool %q because it is statically configure", cred.Context, cred.ToolName) + } + } + } + return w.target.Add(ctx, cred) +} + +func (w withOverride) Refresh(ctx context.Context, cred Credential) error { + if override, ok := w.overrides[cred.Context]; ok { + if _, ok := override[cred.ToolName]; ok { + return nil + } + } + return w.target.Refresh(ctx, cred) +} + +func (w withOverride) Remove(ctx context.Context, toolName string) error { + for _, credCtx := range w.credContext { + if override, ok := w.overrides[credCtx]; ok { + if _, ok := override[toolName]; ok { + return fmt.Errorf("cannot remove credential with context %q and tool %q because it is statically configure", credCtx, toolName) + } + } + } + return w.target.Remove(ctx, toolName) +} + +func (w withOverride) List(ctx context.Context) ([]Credential, error) { + creds, err := w.target.List(ctx) + if err != nil { + return nil, err + } + + added := make(map[string]map[string]bool) + for i, cred := range creds { + if override, ok := 
w.overrides[cred.Context]; ok { + if _, ok := override[cred.ToolName]; ok { + creds[i].Type = CredentialTypeTool + creds[i].Env = maps.Clone(override[cred.ToolName]) + } + } + tools, ok := added[cred.Context] + if !ok { + tools = make(map[string]bool) + } + tools[cred.ToolName] = true + added[cred.Context] = tools + } + + for _, credCtx := range w.credContext { + tools := w.overrides[credCtx] + for toolName := range tools { + if _, ok := added[credCtx][toolName]; ok { + continue + } + creds = append(creds, Credential{ + Context: credCtx, + ToolName: toolName, + Type: CredentialTypeTool, + Env: maps.Clone(tools[toolName]), + }) + } + } + + return creds, nil +} diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 6b7ceb04..4669e5ab 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -124,7 +124,7 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { return nil, err } - storeFactory, err := credentials.NewFactory(ctx, cliCfg, simplerRunner) + storeFactory, err := credentials.NewFactory(ctx, cliCfg, opts.Runner.CredentialOverrides, simplerRunner) if err != nil { return nil, err } diff --git a/pkg/runner/credentials.go b/pkg/runner/credentials.go deleted file mode 100644 index d2fbb00e..00000000 --- a/pkg/runner/credentials.go +++ /dev/null @@ -1,43 +0,0 @@ -package runner - -import ( - "fmt" - "os" - "strings" -) - -// parseCredentialOverrides parses a string of credential overrides that the user provided as a command line arg. 
-// The format of credential overrides can be one of two things: -// cred1:ENV1,ENV2 (direct mapping of environment variables) -// cred1:ENV1=VALUE1,ENV2=VALUE2 (key-value pairs) -// -// This function turns it into a map[string]map[string]string like this: -// -// { -// "cred1": { -// "ENV1": "VALUE1", -// "ENV2": "VALUE2", -// } -// } -func parseCredentialOverrides(overrides []string) (map[string]map[string]string, error) { - credentialOverrides := make(map[string]map[string]string) - - for _, o := range overrides { - credName, envs, found := strings.Cut(o, ":") - if !found { - return nil, fmt.Errorf("invalid credential override: %s", o) - } - envMap := make(map[string]string) - for _, env := range strings.Split(envs, ",") { - key, value, found := strings.Cut(env, "=") - if !found { - // User just passed an env var name as the key, so look up the value. - value = os.Getenv(key) - } - envMap[key] = value - } - credentialOverrides[credName] = envMap - } - - return credentialOverrides, nil -} diff --git a/pkg/runner/credentials_test.go b/pkg/runner/credentials_test.go index c568d6be..74fa9353 100644 --- a/pkg/runner/credentials_test.go +++ b/pkg/runner/credentials_test.go @@ -4,6 +4,7 @@ import ( "os" "testing" + "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/stretchr/testify/require" ) @@ -119,7 +120,7 @@ func TestParseCredentialOverrides(t *testing.T) { _ = os.Setenv(k, v) } - out, err := parseCredentialOverrides(tc.in) + out, err := credentials.ParseCredentialOverrides(tc.in) if tc.expectErr { require.Error(t, err) return diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 09b242f7..e2699cf6 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -754,7 +754,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env err error ) if r.credOverrides != nil { - credOverrides, err = parseCredentialOverrides(r.credOverrides) + credOverrides, err = credentials.ParseCredentialOverrides(r.credOverrides) if err 
!= nil { return nil, fmt.Errorf("failed to parse credential overrides: %w", err) } From 45323ce8428858abb28024f9dcad4b0737ac178e Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Thu, 23 Jan 2025 17:51:39 +0100 Subject: [PATCH 217/270] chore: update chat-completion-client dep to include error log fix (#938) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2c59b5d0..31ad1fae 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 - github.com/gptscript-ai/chat-completion-client v0.0.0-20241219123536-85c44096bc10 + github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 diff --git a/go.sum b/go.sum index a05b77e7..c98a8f98 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 h1:m9yLtIEd0z1ia8qFjq3u0Ozb6QKwidyL856JLJp6nbA= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= -github.com/gptscript-ai/chat-completion-client v0.0.0-20241219123536-85c44096bc10 h1:v251qdhjAE+mCi3s+ekmGbqV9BurrMTl0Vd8/0MvsTY= -github.com/gptscript-ai/chat-completion-client v0.0.0-20241219123536-85c44096bc10/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= +github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 h1:rfriXe+FFqZ5fZ+wGzLUivrq7Fyj2xfRdZjDsHf6Ps0= 
+github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e h1:WpNae0NBx+Ri8RB3SxF8DhadDKU7h+jfWPQterDpbJA= From 1b6eba965d7497167a988c10fca488b6de5f33ea Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 27 Jan 2025 08:01:13 -0500 Subject: [PATCH 218/270] enhance: add support for disabling file revisions for workspace provider Signed-off-by: Donnie Adams --- pkg/sdkserver/workspaces.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index 4389ff3b..e5f03ad1 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -186,6 +186,7 @@ type writeFileInWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` FilePath string `json:"filePath"` Contents string `json:"contents"` + CreateRevision *bool `json:"createRevision"` } func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { @@ -207,8 +208,8 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { prg, s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( - `{"workspace_id": "%s", "file_path": "%s", "body": "%s"}`, - reqObject.ID, reqObject.FilePath, reqObject.Contents, + `{"workspace_id": "%s", "file_path": "%s", "body": "%s", "create_revision": %t}`, + reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.CreateRevision == nil || *reqObject.CreateRevision, ), ) if err != nil { From e09b5564965445a18cb7b7ac796980ed293198e6 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Tue, 28 Jan 2025 19:28:31 +0100 Subject: [PATCH 219/270] chore: bump chat-completion-client version 
to include error formatting fix (#940) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 31ad1fae..3ce270a6 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 - github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 + github.com/gptscript-ai/chat-completion-client v0.0.0-20250128181713-57857b74f9f1 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 diff --git a/go.sum b/go.sum index c98a8f98..7d74e86d 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 h1:m9yLtIEd0z1ia8qFjq3u0Ozb6QKwidyL856JLJp6nbA= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= -github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 h1:rfriXe+FFqZ5fZ+wGzLUivrq7Fyj2xfRdZjDsHf6Ps0= -github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= +github.com/gptscript-ai/chat-completion-client v0.0.0-20250128181713-57857b74f9f1 h1:D8VmhL68Fm6YI7fue4wkzd1TqODn//LtcJtPvWk8BQ8= +github.com/gptscript-ai/chat-completion-client v0.0.0-20250128181713-57857b74f9f1/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd 
v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e h1:WpNae0NBx+Ri8RB3SxF8DhadDKU7h+jfWPQterDpbJA= From 7ee5c807d2b9567b0f5078aaea7ff085d6b8e340 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Tue, 28 Jan 2025 20:09:59 +0100 Subject: [PATCH 220/270] chore: switch from deprecated mholt/archiver/v4 to mholt/archives (#941) --- go.mod | 35 +++++++-------- go.sum | 82 +++++++++++++++++++---------------- pkg/repos/download/extract.go | 8 ++-- 3 files changed, 67 insertions(+), 58 deletions(-) diff --git a/go.mod b/go.mod index 3ce270a6..72e43a50 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 - github.com/mholt/archiver/v4 v4.0.0-alpha.8 + github.com/mholt/archives v0.1.0 github.com/rs/cors v1.11.0 github.com/samber/lo v1.38.1 github.com/sirupsen/logrus v1.9.3 @@ -32,7 +32,7 @@ require ( github.com/tidwall/gjson v1.17.1 github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc - golang.org/x/sync v0.7.0 + golang.org/x/sync v0.9.0 golang.org/x/term v0.22.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.5.1 @@ -46,36 +46,36 @@ require ( dario.cat/mergo v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ProtonMail/go-crypto v1.0.0 // indirect + github.com/STARRY-S/zip v0.2.1 // indirect github.com/alecthomas/chroma/v2 v2.8.0 // indirect - github.com/andybalholm/brotli v1.0.4 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/aymerick/douceur v0.2.0 // indirect - github.com/bodgit/plumbing v1.2.0 // indirect - github.com/bodgit/sevenzip v1.3.0 // indirect - github.com/bodgit/windows v1.0.0 // indirect + github.com/bodgit/plumbing v1.3.0 // indirect + github.com/bodgit/sevenzip 
v1.6.0 // indirect + github.com/bodgit/windows v1.0.1 // indirect github.com/charmbracelet/glamour v0.7.0 // indirect github.com/charmbracelet/lipgloss v0.11.0 // indirect github.com/charmbracelet/x/ansi v0.1.1 // indirect github.com/cloudflare/circl v1.3.7 // indirect - github.com/connesc/cipherio v0.2.1 // indirect github.com/containerd/console v1.0.4 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dlclark/regexp2 v1.4.0 // indirect - github.com/dsnet/compress v0.0.1 // indirect + github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/swag v0.22.8 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.4 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/gookit/color v1.5.4 // indirect github.com/gorilla/css v1.0.0 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hexops/autogold v1.3.1 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -84,8 +84,8 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.16.5 // indirect - github.com/klauspost/pgzip v1.2.5 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect 
github.com/lithammer/fuzzysearch v1.1.8 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -98,10 +98,10 @@ require ( github.com/muesli/reflow v0.3.0 // indirect github.com/muesli/termenv v0.15.2 // indirect github.com/nightlyone/lockfile v1.0.0 // indirect - github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect + github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pterm/pterm v0.12.79 // indirect @@ -109,24 +109,25 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/skeema/knownhosts v1.2.2 // indirect + github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e // indirect github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect - github.com/ulikunitz/xz v0.5.10 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect - go4.org v0.0.0-20200411211856-f5505b9728dd // indirect + go4.org v0.0.0-20230225012048-214862532bf5 // indirect golang.org/x/crypto v0.25.0 // 
indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.27.0 // indirect golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/tools v0.23.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect mvdan.cc/gofumpt v0.6.0 // indirect diff --git a/go.sum b/go.sum index 7d74e86d..ff7e21e9 100644 --- a/go.sum +++ b/go.sum @@ -47,6 +47,8 @@ github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63n github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg= +github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4= github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= github.com/alecthomas/assert/v2 v2.2.1 h1:XivOgYcduV98QCahG8T5XTezV5bylXe+lBxLG2K2ink= @@ -55,8 +57,8 @@ github.com/alecthomas/chroma/v2 v2.8.0 h1:w9WJUjFFmHHB2e8mRpL9jjy3alYDlU0QLDezj1 github.com/alecthomas/chroma/v2 v2.8.0/go.mod h1:yrkMI9807G1ROx13fhe1v6PN2DDeaR73L3d+1nmYQtw= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be 
h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= @@ -66,12 +68,12 @@ github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiE github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= -github.com/bodgit/plumbing v1.2.0 h1:gg4haxoKphLjml+tgnecR4yLBV5zo4HAZGCtAh3xCzM= -github.com/bodgit/plumbing v1.2.0/go.mod h1:b9TeRi7Hvc6Y05rjm8VML3+47n4XTZPtQ/5ghqic2n8= -github.com/bodgit/sevenzip v1.3.0 h1:1ljgELgtHqvgIp8W8kgeEGHIWP4ch3xGI8uOBZgLVKY= -github.com/bodgit/sevenzip v1.3.0/go.mod h1:omwNcgZTEooWM8gA/IJ2Nk/+ZQ94+GsytRzOJJ8FBlM= -github.com/bodgit/windows v1.0.0 h1:rLQ/XjsleZvx4fR1tB/UxQrK+SJ2OFHzfPjLWWOhDIA= -github.com/bodgit/windows v1.0.0/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= +github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU= +github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs= +github.com/bodgit/sevenzip v1.6.0 h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A= +github.com/bodgit/sevenzip v1.6.0/go.mod h1:zOBh9nJUof7tcrlqJFv1koWRrhz3LbDbUNngkuZxLMc= +github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= +github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/charmbracelet/glamour v0.7.0 h1:2BtKGZ4iVJCDfMF229EzbeR1QRKLWztO9dMtjmqZSng= @@ -93,8 +95,6 @@ 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= -github.com/connesc/cipherio v0.2.1 h1:FGtpTPMbKNNWByNrr9aEBtaJtXjqOzkIXNYJp6OEycw= -github.com/connesc/cipherio v0.2.1/go.mod h1:ukY0MWJDFnJEbXMQtOcn2VmTpRfzcTz4OoVrWGGJZcA= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= @@ -114,8 +114,8 @@ github.com/docker/cli v26.0.0+incompatible h1:90BKrx1a1HKYpSnnBFR6AgDq/FqkHxwlUy github.com/docker/cli v26.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= -github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= -github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4= +github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= @@ -161,14 +161,10 @@ github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -176,6 +172,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -208,12 
+205,15 @@ github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e h1 github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 h1:vkgNZVWQgbE33VD3z9WKDwuu7B/eJVVMMPM62ixfCR8= github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6/go.mod h1:frrl/B+ZH3VSs3Tqk2qxEIIWTONExX3tuUa4JsVnqx4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hexops/autogold v0.8.1/go.mod h1:97HLDXyG23akzAoRYJh/2OBs3kd80eHyKPvZw0S5ZBY= github.com/hexops/autogold v1.3.1 h1:YgxF9OHWbEIUjhDbpnLhgVsjUDsiHDTyDfy2lrfdlzo= github.com/hexops/autogold v1.3.1/go.mod h1:sQO+mQUCVfxOKPht+ipDSkJ2SCJ7BNJVHZexsXqWMx4= @@ -245,8 +245,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod 
h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -254,8 +254,8 @@ github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuOb github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -289,8 +289,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod 
h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/mholt/archiver/v4 v4.0.0-alpha.8 h1:tRGQuDVPh66WCOelqe6LIGh0gwmfwxUrSSDunscGsRM= -github.com/mholt/archiver/v4 v4.0.0-alpha.8/go.mod h1:5f7FUYGXdJWUjESffJaYR4R60VhnHxb2X3T1teMyv5A= +github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q= +github.com/mholt/archives v0.1.0/go.mod h1:j/Ire/jm42GN7h90F5kzj6hf6ZFzEH66de+hmjEKu+I= github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= @@ -301,16 +301,16 @@ github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAmxBiA= github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= -github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk= -github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= +github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= +github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77 h1:3bMMZ1f+GPXFQ1uNaYbO/uECWvSfqEA+ZEXn1rFAT88= github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77/go.mod h1:8Hf+pH6thup1sPZPD+NLg7d6vbpsdilu9CPIeikvgMQ= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/perimeterx/marshmallow v1.1.5 
h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -353,6 +353,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg= +github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e h1:H+jDTUeF+SVd4ApwnSFoew8ZwGNRfgb9EsZc7LcocAg= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e/go.mod h1:VsUklG6OQo7Ctunu0gS3AtEOCEc2kMB6r5rKzxAes58= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= @@ -362,12 +364,16 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf h1:pvbZ0lM0XWPBqUKqFU8cmavspvIl9nulOYwdy6IFRRo= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= @@ -380,9 +386,9 @@ github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= -github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= -github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= 
+github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= @@ -394,6 +400,8 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/yuin/goldmark v1.3.7/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.5.4 h1:2uY/xC0roWy8IBEGLgB1ywIoEJFGmRrX21YQcvGZzjU= @@ -404,8 +412,8 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= -go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= +go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -464,13 +472,13 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= @@ -492,8 +500,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -561,8 +569,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/repos/download/extract.go b/pkg/repos/download/extract.go index 4cf09f0c..9615d372 100644 --- a/pkg/repos/download/extract.go +++ b/pkg/repos/download/extract.go @@ -14,7 +14,7 @@ import ( "strings" "time" - "github.com/mholt/archiver/v4" + 
"github.com/mholt/archives" ) func Extract(ctx context.Context, downloadURL, digest, targetDir string) error { @@ -74,17 +74,17 @@ func Extract(ctx context.Context, downloadURL, digest, targetDir string) error { return err } - format, input, err := archiver.Identify(filepath.Base(parsedURL.Path), tmpFile) + format, input, err := archives.Identify(ctx, filepath.Base(parsedURL.Path), tmpFile) if err != nil { return err } - ex, ok := format.(archiver.Extractor) + ex, ok := format.(archives.Extractor) if !ok { return fmt.Errorf("failed to detect proper archive for extraction from %s got: %v", downloadURL, ex) } - err = ex.Extract(ctx, input, nil, func(_ context.Context, f archiver.File) error { + err = ex.Extract(ctx, input, func(_ context.Context, f archives.FileInfo) error { target := filepath.Join(targetDir, f.NameInArchive) if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { return err From 468c4f1a3cd98c1ebc0c3672d65a2a4e3318217b Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 4 Feb 2025 09:58:59 -0500 Subject: [PATCH 221/270] enhance: add field-level sensitivity for prompts Additionally, each field can now also have a description. This change is made such that all existing tools will work. However, existing code will need to be updated to support the new types. 
Signed-off-by: Donnie Adams --- go.mod | 4 +- go.sum | 8 +-- pkg/cli/gptscript.go | 1 - pkg/engine/call.go | 10 +-- pkg/prompt/prompt.go | 25 ++++--- pkg/types/prompt.go | 63 ++++++++++++++++- pkg/types/prompt_test.go | 142 +++++++++++++++++++++++++++++++++++++++ 7 files changed, 227 insertions(+), 26 deletions(-) create mode 100644 pkg/types/prompt_test.go diff --git a/go.mod b/go.mod index 72e43a50..bc80c47e 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20250128181713-57857b74f9f1 github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb - github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e - github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 + github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61 + github.com/gptscript-ai/tui v0.0.0-20250204145344-33cd15de4cee github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 diff --git a/go.sum b/go.sum index ff7e21e9..7ed757bd 100644 --- a/go.sum +++ b/go.sum @@ -201,10 +201,10 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20250128181713-57857b74f9f github.com/gptscript-ai/chat-completion-client v0.0.0-20250128181713-57857b74f9f1/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e h1:WpNae0NBx+Ri8RB3SxF8DhadDKU7h+jfWPQterDpbJA= -github.com/gptscript-ai/go-gptscript v0.9.5-rc5.0.20240927213153-2af51434b93e/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= -github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6 
h1:vkgNZVWQgbE33VD3z9WKDwuu7B/eJVVMMPM62ixfCR8= -github.com/gptscript-ai/tui v0.0.0-20240923192013-172e51ccf1d6/go.mod h1:frrl/B+ZH3VSs3Tqk2qxEIIWTONExX3tuUa4JsVnqx4= +github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61 h1:QxLjsLOYlsVLPwuRkP0Q8EcAoZT1s8vU2ZBSX0+R6CI= +github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= +github.com/gptscript-ai/tui v0.0.0-20250204145344-33cd15de4cee h1:70PHW6Xw70yNNZ5aX936XqcMLwNmfMZpCV3FCOGKpxE= +github.com/gptscript-ai/tui v0.0.0-20250204145344-33cd15de4cee/go.mod h1:iwHxuueg2paOak7zIg0ESBWx7A0wIHGopAratbgaPNY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index a3454dd5..4bd04509 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -494,7 +494,6 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { DisableCache: r.DisableCache, CredentialOverrides: r.CredentialOverride, Input: toolInput, - CacheDir: r.CacheDir, SubTool: r.SubTool, Workspace: r.Workspace, SaveChatStateFile: r.SaveChatStateFile, diff --git a/pkg/engine/call.go b/pkg/engine/call.go index d116d0fa..4a3b70b5 100644 --- a/pkg/engine/call.go +++ b/pkg/engine/call.go @@ -76,13 +76,13 @@ func mergeInputs(base, overlay string) (string, error) { return base, nil } - err := json.Unmarshal([]byte(base), &baseMap) - if err != nil { - return "", fmt.Errorf("failed to unmarshal base input: %w", err) + if base != "" { + if err := json.Unmarshal([]byte(base), &baseMap); err != nil { + return "", fmt.Errorf("failed to unmarshal base input: %w", err) + } } - err = json.Unmarshal([]byte(overlay), &overlayMap) - if err != nil { + if err := json.Unmarshal([]byte(overlay), 
&overlayMap); err != nil { return "", fmt.Errorf("failed to unmarshal overlay input: %w", err) } diff --git a/pkg/prompt/prompt.go b/pkg/prompt/prompt.go index f91a04b6..fa4beeb6 100644 --- a/pkg/prompt/prompt.go +++ b/pkg/prompt/prompt.go @@ -52,7 +52,7 @@ func sysPromptHTTP(ctx context.Context, envs []string, url string, prompt types. func SysPrompt(ctx context.Context, envs []string, input string, _ chan<- string) (_ string, err error) { var params struct { Message string `json:"message,omitempty"` - Fields string `json:"fields,omitempty"` + Fields types.Fields `json:"fields,omitempty"` Sensitive string `json:"sensitive,omitempty"` Metadata map[string]string `json:"metadata,omitempty"` } @@ -60,16 +60,11 @@ func SysPrompt(ctx context.Context, envs []string, input string, _ chan<- string return "", err } - var fields []string for _, env := range envs { if url, ok := strings.CutPrefix(env, types.PromptURLEnvVar+"="); ok { - if params.Fields != "" { - fields = strings.Split(params.Fields, ",") - } - httpPrompt := types.Prompt{ Message: params.Message, - Fields: fields, + Fields: params.Fields, Sensitive: params.Sensitive == "true", Metadata: params.Metadata, } @@ -102,21 +97,25 @@ func sysPrompt(ctx context.Context, req types.Prompt) (_ string, err error) { results := map[string]string{} for _, f := range req.Fields { var ( - value string - msg = f + value string + msg = f.Name + sensitive = req.Sensitive ) + if f.Sensitive != nil { + sensitive = *f.Sensitive + } if len(req.Fields) == 1 && req.Message != "" { msg = req.Message } - if req.Sensitive { - err = survey.AskOne(&survey.Password{Message: msg}, &value, survey.WithStdio(os.Stdin, os.Stderr, os.Stderr)) + if sensitive { + err = survey.AskOne(&survey.Password{Message: msg, Help: f.Description}, &value, survey.WithStdio(os.Stdin, os.Stderr, os.Stderr)) } else { - err = survey.AskOne(&survey.Input{Message: msg}, &value, survey.WithStdio(os.Stdin, os.Stderr, os.Stderr)) + err = 
survey.AskOne(&survey.Input{Message: msg, Help: f.Description}, &value, survey.WithStdio(os.Stdin, os.Stderr, os.Stderr)) } if err != nil { return "", err } - results[f] = value + results[f.Name] = value } resultsStr, err := json.Marshal(results) diff --git a/pkg/types/prompt.go b/pkg/types/prompt.go index 653ad066..3da40a2e 100644 --- a/pkg/types/prompt.go +++ b/pkg/types/prompt.go @@ -1,5 +1,10 @@ package types +import ( + "encoding/json" + "strings" +) + const ( PromptURLEnvVar = "GPTSCRIPT_PROMPT_URL" PromptTokenEnvVar = "GPTSCRIPT_PROMPT_TOKEN" @@ -7,7 +12,63 @@ const ( type Prompt struct { Message string `json:"message,omitempty"` - Fields []string `json:"fields,omitempty"` + Fields Fields `json:"fields,omitempty"` Sensitive bool `json:"sensitive,omitempty"` Metadata map[string]string `json:"metadata,omitempty"` } + +type Field struct { + Name string `json:"name,omitempty"` + Sensitive *bool `json:"sensitive,omitempty"` + Description string `json:"description,omitempty"` +} + +type Fields []Field + +// UnmarshalJSON will unmarshal the corresponding JSON object for Fields, +// or a comma-separated strings (for backwards compatibility). +func (f *Fields) UnmarshalJSON(b []byte) error { + if len(b) == 0 || f == nil { + return nil + } + + if b[0] == '[' { + var arr []Field + if err := json.Unmarshal(b, &arr); err != nil { + return err + } + *f = arr + return nil + } + + var fields string + if err := json.Unmarshal(b, &fields); err != nil { + return err + } + + if fields != "" { + fieldsArr := strings.Split(fields, ",") + *f = make([]Field, 0, len(fieldsArr)) + for _, field := range fieldsArr { + *f = append(*f, Field{Name: strings.TrimSpace(field)}) + } + } + + return nil +} + +type field *Field + +// UnmarshalJSON will unmarshal the corresponding JSON object for a Field, +// or a string (for backwards compatibility). 
+func (f *Field) UnmarshalJSON(b []byte) error { + if len(b) == 0 || f == nil { + return nil + } + + if b[0] == '{' { + return json.Unmarshal(b, field(f)) + } + + return json.Unmarshal(b, &f.Name) +} diff --git a/pkg/types/prompt_test.go b/pkg/types/prompt_test.go new file mode 100644 index 00000000..f2d911ef --- /dev/null +++ b/pkg/types/prompt_test.go @@ -0,0 +1,142 @@ +package types + +import ( + "reflect" + "testing" +) + +func TestFieldUnmarshalJSON(t *testing.T) { + tests := []struct { + name string + input []byte + expected Field + expectErr bool + }{ + { + name: "valid single Field object JSON", + input: []byte(`{"name":"field1","sensitive":true,"description":"A test field"}`), + expected: Field{Name: "field1", Sensitive: boolPtr(true), Description: "A test field"}, + expectErr: false, + }, + { + name: "valid Field name as string", + input: []byte(`"field1"`), + expected: Field{Name: "field1"}, + expectErr: false, + }, + { + name: "empty input", + input: []byte(``), + expected: Field{}, + expectErr: false, + }, + { + name: "invalid JSON object", + input: []byte(`{"name":"field1","sensitive":"not_boolean"}`), + expected: Field{Name: "field1", Sensitive: new(bool)}, + expectErr: true, + }, + { + name: "extra unknown fields in JSON object", + input: []byte(`{"name":"field1","unknown":"field","sensitive":false}`), + expected: Field{Name: "field1", Sensitive: boolPtr(false)}, + expectErr: false, + }, + { + name: "malformed JSON", + input: []byte(`{"name":"field1","sensitive":true`), + expected: Field{}, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var field Field + err := field.UnmarshalJSON(tt.input) + if (err != nil) != tt.expectErr { + t.Errorf("UnmarshalJSON() error = %v, expectErr %v", err, tt.expectErr) + } + if !reflect.DeepEqual(field, tt.expected) { + t.Errorf("UnmarshalJSON() = %v, expected %v", field, tt.expected) + } + }) + } +} + +func TestFieldsUnmarshalJSON(t *testing.T) { + tests := []struct { 
+ name string + input []byte + expected Fields + expectErr bool + }{ + { + name: "empty input", + input: nil, + expected: nil, + expectErr: false, + }, + { + name: "nil pointer", + input: nil, + expected: nil, + expectErr: false, + }, + { + name: "valid JSON array", + input: []byte(`[{"Name":"field1"},{"Name":"field2"}]`), + expected: Fields{{Name: "field1"}, {Name: "field2"}}, + expectErr: false, + }, + { + name: "single string input", + input: []byte(`"field1,field2,field3"`), + expected: Fields{{Name: "field1"}, {Name: "field2"}, {Name: "field3"}}, + expectErr: false, + }, + { + name: "trim spaces in single string input", + input: []byte(`"field1, field2 , field3 "`), + expected: Fields{{Name: "field1"}, {Name: "field2"}, {Name: "field3"}}, + expectErr: false, + }, + { + name: "invalid JSON array", + input: []byte(`[{"Name":"field1"},{"Name":field2}]`), + expected: nil, + expectErr: true, + }, + { + name: "invalid single string", + input: []byte(`1234`), + expected: nil, + expectErr: true, + }, + { + name: "empty array", + input: []byte(`[]`), + expected: Fields{}, + expectErr: false, + }, + { + name: "empty string", + input: []byte(`""`), + expected: nil, + expectErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var fields Fields + err := fields.UnmarshalJSON(tt.input) + if (err != nil) != tt.expectErr { + t.Errorf("UnmarshalJSON() error = %v, expectErr %v", err, tt.expectErr) + } + if !reflect.DeepEqual(fields, tt.expected) { + t.Errorf("UnmarshalJSON() = %v, expected %v", fields, tt.expected) + } + }) + } +} From bb6456f024cc884ea67911346e0d529d5d309d8e Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 6 Feb 2025 11:42:31 -0500 Subject: [PATCH 222/270] chore: bump the consecutive tool calls limit to 50 Signed-off-by: Donnie Adams --- pkg/engine/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index bd7b09ab..7b0d86d0 100644 --- 
a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -15,7 +15,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/version" ) -var maxConsecutiveToolCalls = 10 +var maxConsecutiveToolCalls = 50 func init() { if val := os.Getenv("GPTSCRIPT_MAX_CONSECUTIVE_TOOL_CALLS"); val != "" { From bf1b000271934c78e814046afd27acbc353f150c Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 6 Feb 2025 07:42:03 -0500 Subject: [PATCH 223/270] feat: allow daemons to request environment variables Daemon tools can now request which environment variables it needs from the calling tool. These environment variables will be sent in the X-GPTScript-Env header. Signed-off-by: Donnie Adams --- pkg/engine/http.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/engine/http.go b/pkg/engine/http.go index 109db559..f301f978 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -40,6 +40,7 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too return nil, err } + var requestedEnvVars map[string]struct{} if strings.HasSuffix(parsed.Hostname(), DaemonURLSuffix) { referencedToolName := strings.TrimSuffix(parsed.Hostname(), DaemonURLSuffix) referencedToolRefs, ok := tool.ToolMapping[referencedToolName] @@ -60,6 +61,14 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too } parsed.Host = toolURLParsed.Host toolURL = parsed.String() + + metadataEnvVars := strings.Split(referencedTool.MetaData["requestedEnvVars"], ",") + requestedEnvVars = make(map[string]struct{}, len(metadataEnvVars)) + for _, e := range metadataEnvVars { + if e != "" { + requestedEnvVars[e] = struct{}{} + } + } } if tool.Blocking { @@ -78,7 +87,7 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too } for _, k := range slices.Sorted(maps.Keys(envMap)) { - if strings.HasPrefix(k, "GPTSCRIPT_WORKSPACE_") { + if _, ok := requestedEnvVars[k]; ok || strings.HasPrefix(k, "GPTSCRIPT_WORKSPACE_") { 
req.Header.Add("X-GPTScript-Env", k+"="+envMap[k]) } } From ed71575d4ed6f3bb29785d2965f47d5be76101f3 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 11 Feb 2025 10:03:59 -0500 Subject: [PATCH 224/270] enhance: add latest_revision field to writing files in a workspace This field implements an optimistic locking behavior in the workspace-provider. Signed-off-by: Donnie Adams --- pkg/sdkserver/workspaces.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index e5f03ad1..3b38c759 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -187,6 +187,7 @@ type writeFileInWorkspaceRequest struct { FilePath string `json:"filePath"` Contents string `json:"contents"` CreateRevision *bool `json:"createRevision"` + LatestRevision string `json:"latestRevision"` } func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { @@ -208,8 +209,8 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { prg, s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( - `{"workspace_id": "%s", "file_path": "%s", "body": "%s", "create_revision": %t}`, - reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.CreateRevision == nil || *reqObject.CreateRevision, + `{"workspace_id": "%s", "file_path": "%s", "body": "%s", "create_revision": %t, "latest_revision": "%s"}`, + reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.CreateRevision == nil || *reqObject.CreateRevision, reqObject.LatestRevision, ), ) if err != nil { From 86e85f032cc92af730b66e8748275c1a101eec72 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Sat, 22 Feb 2025 11:59:00 -0500 Subject: [PATCH 225/270] enhance: add option to return revision ID when reading file in workspace (#949) * enhance: add option to return revision ID when reading file in workspace Signed-off-by: Donnie Adams --- pkg/sdkserver/routes.go | 1 + pkg/sdkserver/workspaces.go | 42 
++++++++++++++++++++++++++++++++----- 2 files changed, 38 insertions(+), 5 deletions(-) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index c4b45e92..1431b73b 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -79,6 +79,7 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /workspaces/write-file", s.writeFileInWorkspace) mux.HandleFunc("POST /workspaces/delete-file", s.removeFileInWorkspace) mux.HandleFunc("POST /workspaces/read-file", s.readFileInWorkspace) + mux.HandleFunc("POST /workspaces/read-file-with-revision", s.readFileWithRevisionInWorkspace) mux.HandleFunc("POST /workspaces/stat-file", s.statFileInWorkspace) mux.HandleFunc("POST /workspaces/list-revisions", s.listRevisions) mux.HandleFunc("POST /workspaces/get-revision", s.getRevisionForFileInWorkspace) diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index 3b38c759..162853f7 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -187,7 +187,7 @@ type writeFileInWorkspaceRequest struct { FilePath string `json:"filePath"` Contents string `json:"contents"` CreateRevision *bool `json:"createRevision"` - LatestRevision string `json:"latestRevision"` + LatestRevisionID string `json:"latestRevisionID"` } func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { @@ -209,8 +209,8 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { prg, s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( - `{"workspace_id": "%s", "file_path": "%s", "body": "%s", "create_revision": %t, "latest_revision": "%s"}`, - reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.CreateRevision == nil || *reqObject.CreateRevision, reqObject.LatestRevision, + `{"workspace_id": "%s", "file_path": "%s", "body": "%s", "create_revision": %t, "latest_revision_id": "%s"}`, + reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.CreateRevision == nil || *reqObject.CreateRevision, 
reqObject.LatestRevisionID, ), ) if err != nil { @@ -293,9 +293,41 @@ func (s *server) readFileInWorkspace(w http.ResponseWriter, r *http.Request) { writeResponse(logger, w, map[string]any{"stdout": out}) } +func (s *server) readFileWithRevisionInWorkspace(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + var reqObject readFileInWorkspaceRequest + if err := json.NewDecoder(r.Body).Decode(&reqObject); err != nil { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) + return + } + + prg, err := loader.Program(r.Context(), s.getWorkspaceTool(reqObject.workspaceCommonRequest), "Read File With Revision In Workspace", loader.Options{Cache: s.client.Cache}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) + return + } + + out, err := s.client.Run( + r.Context(), + prg, + s.getServerToolsEnv(reqObject.Env), + fmt.Sprintf( + `{"workspace_id": "%s", "file_path": "%s", "with_latest_revision_id": "true"}`, + reqObject.ID, reqObject.FilePath, + ), + ) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": out}) +} + type statFileInWorkspaceRequest struct { workspaceCommonRequest `json:",inline"` FilePath string `json:"filePath"` + WithLatestRevisionID bool `json:"withLatestRevisionID"` } func (s *server) statFileInWorkspace(w http.ResponseWriter, r *http.Request) { @@ -317,8 +349,8 @@ func (s *server) statFileInWorkspace(w http.ResponseWriter, r *http.Request) { prg, s.getServerToolsEnv(reqObject.Env), fmt.Sprintf( - `{"workspace_id": "%s", "file_path": "%s"}`, - reqObject.ID, reqObject.FilePath, + `{"workspace_id": "%s", "file_path": "%s", "with_latest_revision_id": "%v"}`, + reqObject.ID, reqObject.FilePath, reqObject.WithLatestRevisionID, ), ) if err != nil { From 
0d20be17f5d8a6eb97477cbdbc40300879da5c30 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 24 Feb 2025 14:41:56 -0500 Subject: [PATCH 226/270] feat: add ability to gracefully stop runs (#950) Various parts of a tool execution cannot be stopped gracefully. For example, non-streamed HTTP request can't be stopped gracefully. However, commands and chat completions can be gracefully stopped by a user and the result returned. An "ABORTED BY USER" message is added to such messages. Additionally, aborted chat completion responses are not stored in the cache. Signed-off-by: Donnie Adams --- .vscode/launch.json | 4 +-- go.mod | 2 +- go.sum | 4 +-- pkg/builtin/builtin.go | 27 +++++++------- pkg/cache/cache.go | 7 ++++ pkg/chat/chat.go | 4 +-- pkg/cli/eval.go | 11 +++--- pkg/cli/gptscript.go | 4 +-- pkg/engine/cmd.go | 18 +++++++--- pkg/engine/daemon.go | 4 +-- pkg/engine/engine.go | 70 ++++++++++++++++++++++++++----------- pkg/engine/http.go | 14 +++++--- pkg/engine/openapi.go | 11 ++++-- pkg/gptscript/gptscript.go | 10 +++--- pkg/loader/loader.go | 4 +-- pkg/openai/client.go | 21 +++++++++-- pkg/remote/remote.go | 2 +- pkg/runner/runner.go | 21 ++++++----- pkg/sdkserver/datasets.go | 9 ++--- pkg/sdkserver/routes.go | 40 ++++++++++++++++++++- pkg/sdkserver/run.go | 62 +++++++++++--------------------- pkg/sdkserver/server.go | 1 + pkg/sdkserver/workspaces.go | 13 +++++++ pkg/tests/runner2_test.go | 15 ++++---- pkg/tests/runner_test.go | 39 +++++++++++---------- pkg/tests/tester/runner.go | 2 +- 26 files changed, 268 insertions(+), 151 deletions(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index cc84991c..669016b3 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -15,12 +15,12 @@ ] }, { - "name": "Launch Server", + "name": "Clicky Serves", "type": "go", "request": "launch", "mode": "debug", "program": "main.go", - "args": ["--server"] + "args": ["--debug", "--listen-address", "127.0.0.1:63774", "sys.sdkserver"] } ] } diff --git a/go.mod b/go.mod 
index bc80c47e..15f88d5f 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 - github.com/gptscript-ai/chat-completion-client v0.0.0-20250128181713-57857b74f9f1 + github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61 github.com/gptscript-ai/tui v0.0.0-20250204145344-33cd15de4cee diff --git a/go.sum b/go.sum index 7ed757bd..07d8d500 100644 --- a/go.sum +++ b/go.sum @@ -197,8 +197,8 @@ github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 h1:m9yLtIEd0z1ia8qFjq3u0Ozb6QKwidyL856JLJp6nbA= github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= -github.com/gptscript-ai/chat-completion-client v0.0.0-20250128181713-57857b74f9f1 h1:D8VmhL68Fm6YI7fue4wkzd1TqODn//LtcJtPvWk8BQ8= -github.com/gptscript-ai/chat-completion-client v0.0.0-20250128181713-57857b74f9f1/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= +github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d h1:p5uqZufDIMQzAALblZFkr8fwbnZbFXbBCR1ZMAFylXk= +github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61 
h1:QxLjsLOYlsVLPwuRkP0Q8EcAoZT1s8vU2ZBSX0+R6CI= diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index ccbda66a..d14fe7c7 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -269,18 +269,14 @@ func ListTools() (result []types.Tool) { sort.Strings(keys) for _, key := range keys { - t, _ := Builtin(key) + t, _ := DefaultModel(key, "") result = append(result, t) } return } -func Builtin(name string) (types.Tool, bool) { - return BuiltinWithDefaultModel(name, "") -} - -func BuiltinWithDefaultModel(name, defaultModel string) (types.Tool, bool) { +func DefaultModel(name, defaultModel string) (types.Tool, bool) { // Legacy syntax not used anymore name = strings.TrimSuffix(name, "?") t, ok := tools[name] @@ -332,7 +328,7 @@ func SysFind(_ context.Context, _ []string, input string, _ chan<- string) (stri return strings.Join(result, "\n"), nil } -func SysExec(_ context.Context, env []string, input string, progress chan<- string) (string, error) { +func SysExec(ctx context.Context, env []string, input string, progress chan<- string) (string, error) { var params struct { Command string `json:"command,omitempty"` Directory string `json:"directory,omitempty"` @@ -345,14 +341,20 @@ func SysExec(_ context.Context, env []string, input string, progress chan<- stri params.Directory = "." 
} + commandCtx, _ := engine.FromContext(ctx) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + commandCtx.OnUserCancel(ctx, cancel) + log.Debugf("Running %s in %s", params.Command, params.Directory) var cmd *exec.Cmd - if runtime.GOOS == "windows" { - cmd = exec.Command("cmd.exe", "/c", params.Command) + cmd = exec.CommandContext(ctx, "cmd.exe", "/c", params.Command) } else { - cmd = exec.Command("/bin/sh", "-c", params.Command) + cmd = exec.CommandContext(ctx, "/bin/sh", "-c", params.Command) } var ( @@ -371,7 +373,8 @@ func SysExec(_ context.Context, env []string, input string, progress chan<- stri cmd.Dir = params.Directory cmd.Stdout = combined cmd.Stderr = combined - if err := cmd.Run(); err != nil { + if err := cmd.Run(); err != nil && (ctx.Err() == nil || commandCtx.Ctx.Err() != nil) { + // If the command failed and the context hasn't been canceled, then return the error. return fmt.Sprintf("ERROR: %s\nOUTPUT:\n%s", err, &out), nil } return out.String(), nil @@ -420,7 +423,6 @@ func getWorkspaceEnvFileContents(envs []string) ([]string, error) { } return envContents, nil - } func getWorkspaceDir(envs []string) (string, error) { @@ -665,6 +667,7 @@ func DiscardProgress() (progress chan<- string, closeFunc func()) { ch := make(chan string) go func() { for range ch { + continue } }() return ch, func() { diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 90e8ee10..e5b4494e 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -105,6 +105,13 @@ func (c *Client) Store(ctx context.Context, key, value any) error { return nil } + select { + // If the context has been canceled, then don't try to save. 
+ case <-ctx.Done(): + return nil + default: + } + if c.noop || IsNoCache(ctx) { keyValue, err := c.cacheKey(key) if err == nil { diff --git a/pkg/chat/chat.go b/pkg/chat/chat.go index 1e1fe63f..e36f107b 100644 --- a/pkg/chat/chat.go +++ b/pkg/chat/chat.go @@ -17,7 +17,7 @@ type Prompter interface { } type Chatter interface { - Chat(ctx context.Context, prevState runner.ChatState, prg types.Program, env []string, input string) (resp runner.ChatResponse, err error) + Chat(ctx context.Context, prevState runner.ChatState, prg types.Program, env []string, input string, opts runner.RunOptions) (resp runner.ChatResponse, err error) } type GetProgram func() (types.Program, error) @@ -74,7 +74,7 @@ func Start(ctx context.Context, prevState runner.ChatState, chatter Chatter, prg } } - resp, err = chatter.Chat(ctx, prevState, prog, env, input) + resp, err = chatter.Chat(ctx, prevState, prog, env, input, runner.RunOptions{}) if err != nil { return err } diff --git a/pkg/cli/eval.go b/pkg/cli/eval.go index c649a505..4afdf112 100644 --- a/pkg/cli/eval.go +++ b/pkg/cli/eval.go @@ -10,6 +10,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/gptscript" "github.com/gptscript-ai/gptscript/pkg/input" "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/spf13/cobra" ) @@ -56,13 +57,13 @@ func (e *Eval) Run(cmd *cobra.Command, args []string) error { return err } - runner, err := gptscript.New(cmd.Context(), opts) + g, err := gptscript.New(cmd.Context(), opts) if err != nil { return err } prg, err := loader.ProgramFromSource(cmd.Context(), tool.String(), "", loader.Options{ - Cache: runner.Cache, + Cache: g.Cache, }) if err != nil { return err @@ -74,14 +75,14 @@ func (e *Eval) Run(cmd *cobra.Command, args []string) error { } if e.Chat { - return chat.Start(cmd.Context(), nil, runner, func() (types.Program, error) { + return chat.Start(cmd.Context(), nil, g, func() (types.Program, 
error) { return loader.ProgramFromSource(cmd.Context(), tool.String(), "", loader.Options{ - Cache: runner.Cache, + Cache: g.Cache, }) }, os.Environ(), toolInput, "") } - toolOutput, err := runner.Run(cmd.Context(), prg, opts.Env, toolInput) + toolOutput, err := g.Run(cmd.Context(), prg, opts.Env, toolInput, runner.RunOptions{}) if err != nil { return err } diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 4bd04509..16f9152d 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -469,7 +469,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // This chat in a stateless mode if r.SaveChatStateFile == "-" || r.SaveChatStateFile == "stdout" { - resp, err := gptScript.Chat(cmd.Context(), chatState, prg, gptOpt.Env, toolInput) + resp, err := gptScript.Chat(cmd.Context(), chatState, prg, gptOpt.Env, toolInput, runner.RunOptions{}) if err != nil { return err } @@ -511,7 +511,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { gptScript.ExtraEnv = nil } - s, err := gptScript.Run(cmd.Context(), prg, gptOpt.Env, toolInput) + s, err := gptScript.Run(cmd.Context(), prg, gptOpt.Env, toolInput, runner.RunOptions{}) if err != nil { return err } diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 010c1ace..368b1c98 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -119,10 +119,14 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate instructions = append(instructions, inputContext.Content) } - var extraEnv = []string{ + extraEnv := []string{ strings.TrimSpace("GPTSCRIPT_CONTEXT=" + strings.Join(instructions, "\n")), } - cmd, stop, err := e.newCommand(ctx.Ctx, extraEnv, tool, input, true) + + commandCtx, cancel := context.WithCancel(ctx.Ctx) + defer cancel() + + cmd, stop, err := e.newCommand(commandCtx, extraEnv, tool, input, true) if err != nil { if toolCategory == NoCategory && ctx.Parent != nil { return fmt.Sprintf("ERROR: got (%v) while parsing command", 
err), nil @@ -155,18 +159,22 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate cmd.Stdout = io.MultiWriter(stdout, stdoutAndErr, progressOut) cmd.Stderr = io.MultiWriter(stdoutAndErr, progressOut, os.Stderr) result = stdout + defer func() { + combinedOutput = stdoutAndErr.String() + }() + + ctx.OnUserCancel(commandCtx, cancel) - if err := cmd.Run(); err != nil { + if err := cmd.Run(); err != nil && (commandCtx.Err() == nil || ctx.Ctx.Err() != nil) { + // If the command failed and the context hasn't been canceled, then return the error. if toolCategory == NoCategory && ctx.Parent != nil { // If this is a sub-call, then don't return the error; return the error as a message so that the LLM can retry. return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, stdoutAndErr), nil } log.Errorf("failed to run tool [%s] cmd %v: %v", tool.Parameters.Name, cmd.Args, err) - combinedOutput = stdoutAndErr.String() return "", fmt.Errorf("ERROR: %s: %w", stdoutAndErr, err) } - combinedOutput = stdoutAndErr.String() return result.String(), IsChatFinishMessage(result.String()) } diff --git a/pkg/engine/daemon.go b/pkg/engine/daemon.go index b7877da3..6f991be0 100644 --- a/pkg/engine/daemon.go +++ b/pkg/engine/daemon.go @@ -229,7 +229,7 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { return url, fmt.Errorf("timeout waiting for 200 response from GET %s", url) } -func (e *Engine) runDaemon(ctx context.Context, prg *types.Program, tool types.Tool, input string) (cmdRet *Return, cmdErr error) { +func (e *Engine) runDaemon(ctx Context, tool types.Tool, input string) (cmdRet *Return, cmdErr error) { url, err := e.startDaemon(tool) if err != nil { return nil, err @@ -238,5 +238,5 @@ func (e *Engine) runDaemon(ctx context.Context, prg *types.Program, tool types.T tool.Instructions = strings.Join(append([]string{ types.CommandPrefix + url, }, strings.Split(tool.Instructions, "\n")[1:]...), "\n") - return e.runHTTP(ctx, prg, 
tool, input) + return e.runHTTP(ctx, tool, input) } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 7b0d86d0..778b1e7e 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -92,7 +92,8 @@ type Context struct { Engine *Engine Program *types.Program // Input is saved only so that we can render display text, don't use otherwise - Input string + Input string + userCancel <-chan struct{} } type ChatHistory struct { @@ -188,6 +189,18 @@ func (c *Context) MarshalJSON() ([]byte, error) { return json.Marshal(c.GetCallContext()) } +func (c *Context) OnUserCancel(ctx context.Context, cancel func()) { + go func() { + select { + case <-ctx.Done(): + // If the context is canceled, then nothing to do. + case <-c.userCancel: + // If the user is requesting a cancel, then cancel the context. + cancel() + } + }() +} + type toolCategoryKey struct{} func WithToolCategory(ctx context.Context, toolCategory ToolCategory) context.Context { @@ -199,7 +212,7 @@ func ToolCategoryFromContext(ctx context.Context) ToolCategory { return category } -func NewContext(ctx context.Context, prg *types.Program, input string) (Context, error) { +func NewContext(ctx context.Context, prg *types.Program, input string, userCancel <-chan struct{}) (Context, error) { category := ToolCategoryFromContext(ctx) callCtx := Context{ @@ -208,9 +221,10 @@ func NewContext(ctx context.Context, prg *types.Program, input string) (Context, Tool: prg.ToolSet[prg.EntryToolID], ToolCategory: category, }, - Ctx: ctx, - Program: prg, - Input: input, + Ctx: ctx, + Program: prg, + Input: input, + userCancel: userCancel, } agentGroup, err := callCtx.Tool.GetToolsByType(prg, types.ToolTypeAgent) @@ -251,6 +265,7 @@ func (c *Context) SubCallContext(ctx context.Context, input, toolID, callID stri Program: c.Program, CurrentReturn: c.CurrentReturn, Input: input, + userCancel: c.userCancel, }, nil } @@ -292,32 +307,37 @@ func populateMessageParams(ctx Context, completion *types.CompletionRequest, too func (e 
*Engine) runCommandTools(ctx Context, tool types.Tool, input string) (*Return, error) { if tool.IsHTTP() { - return e.runHTTP(ctx.Ctx, ctx.Program, tool, input) + return e.runHTTP(ctx, tool, input) } else if tool.IsDaemon() { - return e.runDaemon(ctx.Ctx, ctx.Program, tool, input) + return e.runDaemon(ctx, tool, input) } else if tool.IsOpenAPI() { - return e.runOpenAPI(tool, input) + return e.runOpenAPI(ctx, tool, input) } else if tool.IsEcho() { return e.runEcho(tool) } else if tool.IsCall() { return e.runCall(ctx, tool, input) } s, err := e.runCommand(ctx, tool, input, ctx.ToolCategory) - if err != nil { - return nil, err - } return &Return{ Result: &s, - }, nil + }, err } -func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { +func (e *Engine) Start(ctx Context, input string) (ret *Return, err error) { tool := ctx.Tool defer func() { if ret != nil && ret.State != nil { ret.State.Input = input } + select { + case <-ctx.userCancel: + if ret.Result == nil { + ret.Result = new(string) + } + *ret.Result += "\n\nABORTED BY USER" + default: + } }() if tool.IsCommand() { @@ -344,7 +364,7 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { }) } - return e.complete(ctx.Ctx, &State{ + return e.complete(ctx, &State{ Completion: completion, }) } @@ -376,7 +396,7 @@ func addUpdateSystem(ctx Context, tool types.Tool, msgs []types.CompletionMessag return append([]types.CompletionMessage{msg}, msgs...) 
} -func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { +func (e *Engine) complete(ctx Context, state *State) (*Return, error) { var ( progress = make(chan types.CompletionStatus) ret = Return{ @@ -429,7 +449,7 @@ func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { return &ret, nil } - resp, err := e.Model.Call(ctx, state.Completion, e.Env, progress) + resp, err := e.Model.Call(ctx.WrappedContext(e), state.Completion, e.Env, progress) if err != nil { return nil, fmt.Errorf("failed calling model for completion: %w", err) } @@ -474,7 +494,17 @@ func (e *Engine) complete(ctx context.Context, state *State) (*Return, error) { return &ret, nil } -func (e *Engine) Continue(ctx Context, state *State, results ...CallResult) (*Return, error) { +func (e *Engine) Continue(ctx Context, state *State, results ...CallResult) (ret *Return, _ error) { + defer func() { + select { + case <-ctx.userCancel: + if ret.Result == nil { + ret.Result = new(string) + } + *ret.Result += "\n\nABORTED BY USER" + default: + } + }() if ctx.Tool.IsCommand() { var input string if len(results) == 1 { @@ -508,7 +538,7 @@ func (e *Engine) Continue(ctx Context, state *State, results ...CallResult) (*Re } } - ret := Return{ + ret = &Return{ State: state, Calls: map[string]Call{}, } @@ -524,7 +554,7 @@ func (e *Engine) Continue(ctx Context, state *State, results ...CallResult) (*Re if len(ret.Calls) > 0 { // Outstanding tool calls still pending - return &ret, nil + return ret, nil } for _, content := range state.Completion.Messages[len(state.Completion.Messages)-1].Content { @@ -559,5 +589,5 @@ func (e *Engine) Continue(ctx Context, state *State, results ...CallResult) (*Re return nil, err } - return e.complete(ctx.Ctx, state) + return e.complete(ctx, state) } diff --git a/pkg/engine/http.go b/pkg/engine/http.go index f301f978..9e59b70a 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -1,7 +1,6 @@ package engine import ( - "context" 
"encoding/json" "fmt" "io" @@ -17,7 +16,7 @@ import ( const DaemonURLSuffix = ".daemon.gptscript.local" -func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Tool, input string) (cmdRet *Return, cmdErr error) { +func (e *Engine) runHTTP(ctx Context, tool types.Tool, input string) (cmdRet *Return, cmdErr error) { envMap := map[string]string{} for _, env := range appendInputAsEnv(nil, input) { @@ -47,7 +46,7 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too if !ok || len(referencedToolRefs) != 1 { return nil, fmt.Errorf("invalid reference [%s] to tool [%s] from [%s], missing \"tools: %s\" parameter", toolURL, referencedToolName, tool.Source, referencedToolName) } - referencedTool, ok := prg.ToolSet[referencedToolRefs[0].ToolID] + referencedTool, ok := ctx.Program.ToolSet[referencedToolRefs[0].ToolID] if !ok { return nil, fmt.Errorf("failed to find tool [%s] for [%s]", referencedToolName, parsed.Hostname()) } @@ -81,7 +80,7 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too input = body } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, toolURL, strings.NewReader(input)) + req, err := http.NewRequestWithContext(ctx.Ctx, http.MethodPost, toolURL, strings.NewReader(input)) if err != nil { return nil, err } @@ -121,6 +120,13 @@ func (e *Engine) runHTTP(ctx context.Context, prg *types.Program, tool types.Too req.Header.Set("Content-Type", "text/plain") } + // If the user canceled the run, then don't make the request. 
+ select { + case <-ctx.userCancel: + return &Return{}, nil + default: + } + resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err diff --git a/pkg/engine/openapi.go b/pkg/engine/openapi.go index a9a1a644..2e79bc38 100644 --- a/pkg/engine/openapi.go +++ b/pkg/engine/openapi.go @@ -145,7 +145,7 @@ func (e *Engine) runOpenAPIRevamp(tool types.Tool, input string) (*Return, error } res = &Return{ - Result: ptr(result), + Result: &result, } } @@ -156,7 +156,7 @@ func (e *Engine) runOpenAPIRevamp(tool types.Tool, input string) (*Return, error // The tool itself will have instructions regarding the HTTP request that needs to be made. // The tools Instructions field will be in the format "#!sys.openapi '{Instructions JSON}'", // where {Instructions JSON} is a JSON string of type OpenAPIInstructions. -func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { +func (e *Engine) runOpenAPI(ctx Context, tool types.Tool, input string) (*Return, error) { if os.Getenv("GPTSCRIPT_OPENAPI_REVAMP") == "true" { return e.runOpenAPIRevamp(tool, input) } @@ -266,6 +266,13 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { req.Body = io.NopCloser(&body) } + // If the user canceled the run, then don't make the request. 
+ select { + case <-ctx.userCancel: + return &Return{}, nil + default: + } + // Make the request resp, err := http.DefaultClient.Do(req) if err != nil { diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 4669e5ab..f92f9324 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -244,22 +244,22 @@ func makeAbsolute(path string) (string, error) { return filepath.Abs(path) } -func (g *GPTScript) Chat(ctx context.Context, prevState runner.ChatState, prg types.Program, envs []string, input string) (runner.ChatResponse, error) { +func (g *GPTScript) Chat(ctx context.Context, prevState runner.ChatState, prg types.Program, envs []string, input string, opts runner.RunOptions) (runner.ChatResponse, error) { envs, err := g.getEnv(envs) if err != nil { return runner.ChatResponse{}, err } - return g.Runner.Chat(ctx, prevState, prg, envs, input) + return g.Runner.Chat(ctx, prevState, prg, envs, input, opts) } -func (g *GPTScript) Run(ctx context.Context, prg types.Program, envs []string, input string) (string, error) { +func (g *GPTScript) Run(ctx context.Context, prg types.Program, envs []string, input string, opts runner.RunOptions) (string, error) { envs, err := g.getEnv(envs) if err != nil { return "", err } - return g.Runner.Run(ctx, prg, envs, input) + return g.Runner.Run(ctx, prg, envs, input, opts) } func (g *GPTScript) Close(closeDaemons bool) { @@ -319,7 +319,7 @@ func (s *simpleRunner) Load(ctx context.Context, toolName string) (prg types.Pro } func (s *simpleRunner) Run(ctx context.Context, prg types.Program, input string) (output string, err error) { - return s.runner.Run(ctx, prg, s.env, input) + return s.runner.Run(ctx, prg, s.env, input, runner.RunOptions{}) } type noopModel struct { diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index 5a907f5b..902d0ed9 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -141,7 +141,7 @@ func loadProgram(data []byte, into *types.Program, targetToolName, 
defaultModel into.ToolSet = make(map[string]types.Tool, len(ext.ToolSet)) for k, v := range ext.ToolSet { - if builtinTool, ok := builtin.BuiltinWithDefaultModel(k, defaultModel); ok { + if builtinTool, ok := builtin.DefaultModel(k, defaultModel); ok { v = builtinTool } into.ToolSet[k] = v @@ -471,7 +471,7 @@ func Program(ctx context.Context, name, subToolName string, opts ...Options) (ty func resolve(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, name, subTool, defaultModel string) ([]types.Tool, error) { if subTool == "" { - t, ok := builtin.BuiltinWithDefaultModel(name, defaultModel) + t, ok := builtin.DefaultModel(name, defaultModel) if ok { prg.ToolSet[t.ID] = t return []types.Tool{t}, nil diff --git a/pkg/openai/client.go b/pkg/openai/client.go index db911962..ec6b2668 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -15,6 +15,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/counter" "github.com/gptscript-ai/gptscript/pkg/credentials" + "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/prompt" @@ -583,10 +584,21 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, slog.Debug("calling openai", "message", request.Messages) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + engineCtx, ok := engine.FromContext(ctx) + if ok { + engineCtx.OnUserCancel(ctx, cancel) + } + if !streamResponse { request.StreamOptions = nil resp, err := c.c.CreateChatCompletion(ctx, request, headers, retryOpts...) 
if err != nil { + if errors.Is(err, context.Canceled) { + err = nil + } return types.CompletionMessage{}, err } return appendMessage(types.CompletionMessage{}, openai.ChatCompletionStreamResponse{ @@ -612,6 +624,9 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, stream, err := c.c.CreateChatCompletionStream(ctx, request, headers, retryOpts...) if err != nil { + if errors.Is(err, context.Canceled) { + err = nil + } return types.CompletionMessage{}, err } defer stream.Close() @@ -619,11 +634,12 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, var ( partialMessage types.CompletionMessage start = time.Now() - last []string ) for { response, err := stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { + // If the stream is finished, either because we got an EOF or the context was canceled, + // then we're done. The cache won't save the response if the context was canceled. return partialMessage, c.cache.Store(ctx, c.cacheKey(request), partialMessage) } else if err != nil { return types.CompletionMessage{}, err @@ -631,7 +647,6 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, partialMessage = appendMessage(partialMessage, response) if partial != nil { if time.Since(start) > 100*time.Millisecond { - last = last[:0] partial <- types.CompletionStatus{ CompletionID: transactionID, PartialResponse: &partialMessage, diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 93f612ef..441a01dd 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -166,7 +166,7 @@ func (c *Client) load(ctx context.Context, toolName string, env ...string) (*ope return nil, err } - url, err := c.runner.Run(engine.WithToolCategory(ctx, engine.ProviderToolCategory), prg.SetBlocking(), c.envs, "") + url, err := c.runner.Run(engine.WithToolCategory(ctx, engine.ProviderToolCategory), prg.SetBlocking(), c.envs, "", runner.RunOptions{}) if 
err != nil { return nil, err } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index e2699cf6..df3ef172 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -39,6 +39,10 @@ type Options struct { Authorizer AuthorizerFunc `usage:"-"` } +type RunOptions struct { + UserCancel <-chan struct{} +} + type AuthorizerResponse struct { Accept bool Message string @@ -130,7 +134,7 @@ type ChatResponse struct { type ChatState interface{} -func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Program, env []string, input string) (resp ChatResponse, err error) { +func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Program, env []string, input string, opts RunOptions) (resp ChatResponse, err error) { var state *State defer func() { @@ -167,7 +171,7 @@ func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Progra monitor.Stop(ctx, resp.Content, err) }() - callCtx, err := engine.NewContext(ctx, &prg, input) + callCtx, err := engine.NewContext(ctx, &prg, input, opts.UserCancel) if err != nil { return resp, err } @@ -210,8 +214,8 @@ func (r *Runner) Chat(ctx context.Context, prevState ChatState, prg types.Progra }, nil } -func (r *Runner) Run(ctx context.Context, prg types.Program, env []string, input string) (output string, err error) { - resp, err := r.Chat(ctx, nil, prg, env, input) +func (r *Runner) Run(ctx context.Context, prg types.Program, env []string, input string, opts RunOptions) (output string, err error) { + resp, err := r.Chat(ctx, nil, prg, env, input, opts) if err != nil { return "", err } @@ -651,8 +655,11 @@ func (r *Runner) newDispatcher(ctx context.Context) dispatcher { return newParallelDispatcher(ctx) } -func (r *Runner) subCalls(callCtx engine.Context, monitor Monitor, env []string, state *State, toolCategory engine.ToolCategory) (_ *State, callResults []SubCallResult, _ error) { - var resultLock sync.Mutex +func (r *Runner) subCalls(callCtx engine.Context, monitor Monitor, env 
[]string, state *State, toolCategory engine.ToolCategory) (*State, []SubCallResult, error) { + var ( + resultLock sync.Mutex + callResults []SubCallResult + ) if state.Continuation != nil { callCtx.LastReturn = state.Continuation @@ -666,8 +673,6 @@ func (r *Runner) subCalls(callCtx engine.Context, monitor Monitor, env []string, for _, subCall := range state.SubCalls { if subCall.CallID == state.SubCallID { found = true - subState := *subCall.State - subState.ResumeInput = state.ResumeInput result, err := r.subCallResume(callCtx.Ctx, callCtx, monitor, env, subCall.ToolID, subCall.CallID, subCall.State.WithResumeInput(state.ResumeInput), toolCategory) if err != nil { return nil, nil, err diff --git a/pkg/sdkserver/datasets.go b/pkg/sdkserver/datasets.go index c4178801..b923490b 100644 --- a/pkg/sdkserver/datasets.go +++ b/pkg/sdkserver/datasets.go @@ -9,6 +9,7 @@ import ( gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/gptscript" "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/runner" ) func (s *server) getDatasetTool(req datasetRequest) string { @@ -79,7 +80,7 @@ func (s *server) listDatasets(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input) + result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input, runner.RunOptions{}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -147,7 +148,7 @@ func (s *server) addDatasetElements(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input) + result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input, runner.RunOptions{}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -207,7 +208,7 @@ func (s 
*server) listDatasetElements(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input) + result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input, runner.RunOptions{}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return @@ -270,7 +271,7 @@ func (s *server) getDatasetElement(w http.ResponseWriter, r *http.Request) { return } - result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input) + result, err := g.Run(r.Context(), prg, s.getServerToolsEnv(req.Env), req.Input, runner.RunOptions{}) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) return diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 1431b73b..d520e97a 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -36,6 +36,9 @@ type server struct { lock sync.RWMutex waitingToConfirm map[string]chan runner.AuthorizerResponse waitingToPrompt map[string]chan map[string]string + + runningLock sync.Mutex + running map[string]chan struct{} } func (s *server) addRoutes(mux *http.ServeMux) { @@ -52,6 +55,7 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /run", s.execHandler) mux.HandleFunc("POST /evaluate", s.execHandler) + mux.HandleFunc("POST /abort/{run_id}", s.abort) mux.HandleFunc("POST /load", s.load) @@ -164,6 +168,17 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { ctx := gserver.ContextWithNewRunID(r.Context()) runID := gserver.RunIDFromContext(ctx) + cancel := make(chan struct{}) + s.runningLock.Lock() + s.running[runID] = cancel + s.runningLock.Unlock() + + defer func() { + s.runningLock.Lock() + delete(s.running, runID) + s.runningLock.Unlock() + close(cancel) + }() // Ensure chat state is not empty. 
if reqObject.ChatState == "" { @@ -214,7 +229,30 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { opts.Runner.Authorizer = s.authorize } - s.execAndStream(ctx, programLoader, logger, w, opts, reqObject.ChatState, reqObject.Input, reqObject.SubTool, def) + s.execAndStream(ctx, programLoader, logger, w, opts, reqObject.ChatState, reqObject.Input, reqObject.SubTool, def, cancel) +} + +// abort will abort the run in a way such that the chat state will be returned. +func (s *server) abort(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + runID := r.PathValue("run_id") + if runID == "" { + writeError(logger, w, http.StatusBadRequest, fmt.Errorf("run_id is required")) + return + } + + s.runningLock.Lock() + cancel := s.running[runID] + delete(s.running, runID) + s.runningLock.Unlock() + + if cancel == nil { + writeResponse(logger, w, "run not found") + return + } + + close(cancel) + writeResponse(logger, w, "run aborted") } // load will load the file and return the corresponding Program. 
diff --git a/pkg/sdkserver/run.go b/pkg/sdkserver/run.go index 1c0f7c4b..93c9996b 100644 --- a/pkg/sdkserver/run.go +++ b/pkg/sdkserver/run.go @@ -24,7 +24,7 @@ func loaderWithLocation(f loaderFunc, loc string) loaderFunc { } } -func (s *server) execAndStream(ctx context.Context, programLoader loaderFunc, logger mvl.Logger, w http.ResponseWriter, opts gptscript.Options, chatState, input, subTool string, toolDef fmt.Stringer) { +func (s *server) execAndStream(ctx context.Context, programLoader loaderFunc, logger mvl.Logger, w http.ResponseWriter, opts gptscript.Options, chatState, input, subTool string, toolDef fmt.Stringer, cancel <-chan struct{}) { g, err := gptscript.New(ctx, s.gptscriptOpts, opts) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) @@ -48,7 +48,9 @@ func (s *server) execAndStream(ctx context.Context, programLoader loaderFunc, lo defer events.Close() go func() { - run, err := g.Chat(ctx, chatState, prg, opts.Env, input) + run, err := g.Chat(ctx, chatState, prg, opts.Env, input, runner.RunOptions{ + UserCancel: cancel, + }) if err != nil { errChan <- err } else { @@ -58,21 +60,19 @@ func (s *server) execAndStream(ctx context.Context, programLoader loaderFunc, lo close(programOutput) }() - processEventStreamOutput(ctx, logger, w, gserver.RunIDFromContext(ctx), events.C, programOutput, errChan) + processEventStreamOutput(logger, w, gserver.RunIDFromContext(ctx), events.C, programOutput, errChan) } // processEventStreamOutput will stream the events of the tool to the response as server sent events. // If an error occurs, then an event with the error will also be sent. 
-func processEventStreamOutput(ctx context.Context, logger mvl.Logger, w http.ResponseWriter, id string, events <-chan event, output <-chan runner.ChatResponse, errChan chan error) { +func processEventStreamOutput(logger mvl.Logger, w http.ResponseWriter, id string, events <-chan event, output <-chan runner.ChatResponse, errChan chan error) { run := newRun(id) setStreamingHeaders(w) - streamEvents(ctx, logger, w, run, events) + streamEvents(logger, w, run, events) - var out runner.ChatResponse select { - case <-ctx.Done(): - case out = <-output: + case out := <-output: run.processStdout(out) writeServerSentEvent(logger, w, map[string]any{ @@ -85,47 +85,27 @@ func processEventStreamOutput(ctx context.Context, logger mvl.Logger, w http.Res } // Now that we have received all events, send the DONE event. - _, err := w.Write([]byte("data: [DONE]\n\n")) - if err == nil { - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - } + writeServerSentEvent(logger, w, "[DONE]") logger.Debugf("wrote DONE event") } // streamEvents will stream the events of the tool to the response as server sent events. 
-func streamEvents(ctx context.Context, logger mvl.Logger, w http.ResponseWriter, run *runInfo, events <-chan event) { +func streamEvents(logger mvl.Logger, w http.ResponseWriter, run *runInfo, events <-chan event) { logger.Debugf("receiving events") - for { - select { - case <-ctx.Done(): - logger.Debugf("context canceled while receiving events") - go func() { - //nolint:revive - for range events { - } - }() - return - case e, ok := <-events: - if ok && e.RunID != run.ID { - continue - } - - if !ok { - logger.Debugf("done receiving events") - return - } - - writeServerSentEvent(logger, w, run.process(e)) - - if e.Type == runner.EventTypeRunFinish { - logger.Debugf("finished receiving events") - return - } + for e := range events { + if e.RunID != run.ID { + continue + } + + writeServerSentEvent(logger, w, run.process(e)) + + if e.Type == runner.EventTypeRunFinish { + break } } + + logger.Debugf("done receiving events") } func writeResponse(logger mvl.Logger, w http.ResponseWriter, v any) { diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 79d6daf7..41066d30 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -118,6 +118,7 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { runtimeManager: runtimes.Default(opts.Options.Cache.CacheDir, opts.SystemToolsDir), waitingToConfirm: make(map[string]chan runner.AuthorizerResponse), waitingToPrompt: make(map[string]chan map[string]string), + running: make(map[string]chan struct{}), } defer s.close() diff --git a/pkg/sdkserver/workspaces.go b/pkg/sdkserver/workspaces.go index 162853f7..f1846051 100644 --- a/pkg/sdkserver/workspaces.go +++ b/pkg/sdkserver/workspaces.go @@ -7,6 +7,7 @@ import ( gcontext "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/runner" ) func (s *server) getWorkspaceTool(req workspaceCommonRequest) string { @@ -65,6 +66,7 @@ func (s *server) 
createWorkspace(w http.ResponseWriter, r *http.Request) { prg, s.getServerToolsEnv(reqObject.Env), string(b), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -100,6 +102,7 @@ func (s *server) deleteWorkspace(w http.ResponseWriter, r *http.Request) { `{"workspace_id": "%s"}`, reqObject.ID, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -137,6 +140,7 @@ func (s *server) listWorkspaceContents(w http.ResponseWriter, r *http.Request) { `{"workspace_id": "%s", "ls_prefix": "%s"}`, reqObject.ID, reqObject.Prefix, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -173,6 +177,7 @@ func (s *server) removeAllWithPrefixInWorkspace(w http.ResponseWriter, r *http.R `{"workspace_id": "%s", "prefix": "%s"}`, reqObject.ID, reqObject.Prefix, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -212,6 +217,7 @@ func (s *server) writeFileInWorkspace(w http.ResponseWriter, r *http.Request) { `{"workspace_id": "%s", "file_path": "%s", "body": "%s", "create_revision": %t, "latest_revision_id": "%s"}`, reqObject.ID, reqObject.FilePath, reqObject.Contents, reqObject.CreateRevision == nil || *reqObject.CreateRevision, reqObject.LatestRevisionID, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -248,6 +254,7 @@ func (s *server) removeFileInWorkspace(w http.ResponseWriter, r *http.Request) { `{"workspace_id": "%s", "file_path": "%s"}`, reqObject.ID, reqObject.FilePath, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ 
-284,6 +291,7 @@ func (s *server) readFileInWorkspace(w http.ResponseWriter, r *http.Request) { `{"workspace_id": "%s", "file_path": "%s"}`, reqObject.ID, reqObject.FilePath, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -315,6 +323,7 @@ func (s *server) readFileWithRevisionInWorkspace(w http.ResponseWriter, r *http. `{"workspace_id": "%s", "file_path": "%s", "with_latest_revision_id": "true"}`, reqObject.ID, reqObject.FilePath, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -352,6 +361,7 @@ func (s *server) statFileInWorkspace(w http.ResponseWriter, r *http.Request) { `{"workspace_id": "%s", "file_path": "%s", "with_latest_revision_id": "%v"}`, reqObject.ID, reqObject.FilePath, reqObject.WithLatestRevisionID, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -387,6 +397,7 @@ func (s *server) listRevisions(w http.ResponseWriter, r *http.Request) { `{"workspace_id": "%s", "file_path": "%s"}`, reqObject.ID, reqObject.FilePath, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -424,6 +435,7 @@ func (s *server) getRevisionForFileInWorkspace(w http.ResponseWriter, r *http.Re `{"workspace_id": "%s", "file_path": "%s", "revision_id": "%s"}`, reqObject.ID, reqObject.FilePath, reqObject.RevisionID, ), + runner.RunOptions{}, ) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) @@ -461,6 +473,7 @@ func (s *server) deleteRevisionForFileInWorkspace(w http.ResponseWriter, r *http `{"workspace_id": "%s", "file_path": "%s", "revision_id": "%s"}`, reqObject.ID, reqObject.FilePath, reqObject.RevisionID, ), + runner.RunOptions{}, ) 
if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to run program: %w", err)) diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index 165f86c8..f5de8e10 100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/gptscript-ai/gptscript/pkg/tests/tester" "github.com/hexops/autogold/v2" "github.com/stretchr/testify/require" @@ -28,10 +29,10 @@ echo This is the input: ${GPTSCRIPT_INPUT} `, "") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") + resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1", runner.RunOptions{}) r.AssertStep(t, resp, err) - resp, err = r.Chat(context.Background(), resp.State, prg, nil, "input 2") + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "input 2", runner.RunOptions{}) r.AssertStep(t, resp, err) } @@ -54,7 +55,7 @@ name: realcontext Yo dawg`, "") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") + resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1", runner.RunOptions{}) r.AssertStep(t, resp, err) } @@ -76,9 +77,9 @@ echo ${FOO}:${INPUT} `, "") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, nil, `{"foo":"123"}`) + resp, err := r.Chat(context.Background(), nil, prg, nil, `{"foo":"123"}`, runner.RunOptions{}) r.AssertStep(t, resp, err) - resp, err = r.Chat(context.Background(), nil, prg, nil, `"foo":"123"}`) + resp, err = r.Chat(context.Background(), nil, prg, nil, `"foo":"123"}`, runner.RunOptions{}) r.AssertStep(t, resp, err) } @@ -110,7 +111,7 @@ echo '{"env": {"CRED2": "that also worked"}}' `, "") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, nil, "") + resp, err := r.Chat(context.Background(), nil, prg, nil, "", runner.RunOptions{}) 
r.AssertStep(t, resp, err) } @@ -144,7 +145,7 @@ echo "${GPTSCRIPT_INPUT}" `, "") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, nil, `{"foo":"baz", "start": true}`) + resp, err := r.Chat(context.Background(), nil, prg, nil, `{"foo":"baz", "start": true}`, runner.RunOptions{}) r.AssertStep(t, resp, err) data := map[string]any{} diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 18871ed6..ce3cebe6 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -12,6 +12,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/loader" + "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/gptscript-ai/gptscript/pkg/tests/tester" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/hexops/autogold/v2" @@ -143,7 +144,7 @@ func TestDualSubChat(t *testing.T) { prg, err := r.Load("") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, os.Environ(), "User 1") + resp, err := r.Chat(context.Background(), nil, prg, os.Environ(), "User 1", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -157,7 +158,7 @@ func TestDualSubChat(t *testing.T) { }, }) - resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 2") + resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 2", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -168,7 +169,7 @@ func TestDualSubChat(t *testing.T) { Text: "Assistant 3", }) - resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 3") + resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 3", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -184,7 +185,7 @@ func TestDualSubChat(t *testing.T) { Text: "And we're done", }) - resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), 
"User 4") + resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 4", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.True(t, resp.Done) @@ -213,7 +214,7 @@ func TestContextSubChat(t *testing.T) { prg, err := r.Load("") require.NoError(t, err) - _, err = r.Chat(context.Background(), nil, prg, os.Environ(), "User 1") + _, err = r.Chat(context.Background(), nil, prg, os.Environ(), "User 1", runner.RunOptions{}) autogold.Expect("invalid state: context tool [testdata/TestContextSubChat/test.gpt:subtool] can not result in a continuation").Equal(t, err.Error()) } @@ -232,7 +233,7 @@ func TestSubChat(t *testing.T) { prg, err := r.Load("") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, os.Environ(), "Hello") + resp, err := r.Chat(context.Background(), nil, prg, os.Environ(), "Hello", runner.RunOptions{}) require.NoError(t, err) autogold.Expect(`{ @@ -357,7 +358,7 @@ func TestSubChat(t *testing.T) { } }`).Equal(t, toJSONString(t, resp)) - resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 1") + resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 1", runner.RunOptions{}) require.NoError(t, err) autogold.Expect(`{ @@ -512,7 +513,7 @@ func TestChat(t *testing.T) { prg, err := r.Load("") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, os.Environ(), "Hello") + resp, err := r.Chat(context.Background(), nil, prg, os.Environ(), "Hello", runner.RunOptions{}) require.NoError(t, err) autogold.Expect(`{ @@ -564,7 +565,7 @@ func TestChat(t *testing.T) { } }`).Equal(t, toJSONString(t, resp)) - resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 1") + resp, err = r.Chat(context.Background(), resp.State, prg, os.Environ(), "User 1", runner.RunOptions{}) require.NoError(t, err) autogold.Expect(`{ @@ -740,7 +741,7 @@ func TestAgentOnly(t *testing.T) { }, }) - resp, err := r.Chat(context.Background(), nil, 
prg, nil, "Input 1") + resp, err := r.Chat(context.Background(), nil, prg, nil, "Input 1", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -767,7 +768,7 @@ func TestAgents(t *testing.T) { }, }) - resp, err := r.Chat(context.Background(), nil, prg, nil, "Input 1") + resp, err := r.Chat(context.Background(), nil, prg, nil, "Input 1", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -785,14 +786,14 @@ func TestInput(t *testing.T) { prg, err := r.Load("") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, nil, "You're stupid") + resp, err := r.Chat(context.Background(), nil, prg, nil, "You're stupid", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) autogold.Expect("TEST RESULT CALL: 1").Equal(t, resp.Content) autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) - resp, err = r.Chat(context.Background(), resp.State, prg, nil, "You're ugly") + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "You're ugly", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -813,7 +814,7 @@ func TestOutput(t *testing.T) { prg, err := r.Load("") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, nil, "Input 1") + resp, err := r.Chat(context.Background(), nil, prg, nil, "Input 1", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -824,7 +825,7 @@ func TestOutput(t *testing.T) { r.RespondWith(tester.Result{ Text: "Response 2", }) - resp, err = r.Chat(context.Background(), resp.State, prg, nil, "Input 2") + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "Input 2", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -837,7 +838,7 @@ func TestOutput(t *testing.T) { Message: "Chat Done", }, }) - resp, err = 
r.Chat(context.Background(), resp.State, prg, nil, "Input 3") + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "Input 3", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.True(t, resp.Done) @@ -885,7 +886,7 @@ func TestSysContext(t *testing.T) { prg, err := r.Load("") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") + resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -977,7 +978,7 @@ tools: sys.ls, sys.read, sys.write `, "") require.NoError(t, err) - resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1") + resp, err := r.Chat(context.Background(), nil, prg, nil, "input 1", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) @@ -991,7 +992,7 @@ tools: sys.ls, sys.write `, "") require.NoError(t, err) - resp, err = r.Chat(context.Background(), resp.State, prg, nil, "input 2") + resp, err = r.Chat(context.Background(), resp.State, prg, nil, "input 2", runner.RunOptions{}) require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) diff --git a/pkg/tests/tester/runner.go b/pkg/tests/tester/runner.go index 44ec4e3c..f59c0b14 100644 --- a/pkg/tests/tester/runner.go +++ b/pkg/tests/tester/runner.go @@ -159,7 +159,7 @@ func (r *Runner) Run(script, input string) (string, error) { return "", err } - return r.Runner.Run(context.Background(), prg, os.Environ(), input) + return r.Runner.Run(context.Background(), prg, os.Environ(), input, runner.RunOptions{}) } func (r *Runner) AssertResponded(t *testing.T) { From cee1a753e1675807757812dbfc55a6c911ba294b Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 14 Mar 2025 11:00:52 -0400 Subject: [PATCH 227/270] enhance: add method for recreating all credentials (#951) Signed-off-by: Grant Linville --- pkg/credentials/factory.go | 8 ++-- pkg/credentials/noop.go | 4 ++ 
pkg/credentials/overrides.go | 4 ++ pkg/credentials/store.go | 83 ++++++++++++++++++++++++++++++++---- pkg/sdkserver/credentials.go | 17 ++++++++ pkg/sdkserver/routes.go | 1 + 6 files changed, 105 insertions(+), 12 deletions(-) diff --git a/pkg/credentials/factory.go b/pkg/credentials/factory.go index 42295fc8..60f1c838 100644 --- a/pkg/credentials/factory.go +++ b/pkg/credentials/factory.go @@ -72,8 +72,8 @@ func (s *StoreFactory) NewStore(credCtxs []string) (CredentialStore, error) { return nil, err } if s.file { - return withOverride{ - target: Store{ + return &withOverride{ + target: &Store{ credCtxs: credCtxs, cfg: s.cfg, }, @@ -81,8 +81,8 @@ func (s *StoreFactory) NewStore(credCtxs []string) (CredentialStore, error) { credContext: credCtxs, }, nil } - return withOverride{ - target: Store{ + return &withOverride{ + target: &Store{ credCtxs: credCtxs, cfg: s.cfg, program: s.program, diff --git a/pkg/credentials/noop.go b/pkg/credentials/noop.go index 414f8a12..540d80aa 100644 --- a/pkg/credentials/noop.go +++ b/pkg/credentials/noop.go @@ -25,3 +25,7 @@ func (s NoopStore) Remove(context.Context, string) error { func (s NoopStore) List(context.Context) ([]Credential, error) { return nil, nil } + +func (s NoopStore) RecreateAll(context.Context) error { + return nil +} diff --git a/pkg/credentials/overrides.go b/pkg/credentials/overrides.go index 0911cac5..747909d7 100644 --- a/pkg/credentials/overrides.go +++ b/pkg/credentials/overrides.go @@ -147,3 +147,7 @@ func (w withOverride) List(ctx context.Context) ([]Credential, error) { return creds, nil } + +func (w withOverride) RecreateAll(ctx context.Context) error { + return w.target.RecreateAll(ctx) +} diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index be4be183..def6ff89 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -5,6 +5,7 @@ import ( "fmt" "regexp" "slices" + "sync" "github.com/docker/cli/cli/config/credentials" "github.com/docker/cli/cli/config/types" @@ -24,15 
+25,20 @@ type CredentialStore interface { Refresh(ctx context.Context, cred Credential) error Remove(ctx context.Context, toolName string) error List(ctx context.Context) ([]Credential, error) + RecreateAll(ctx context.Context) error } type Store struct { - credCtxs []string - cfg *config.CLIConfig - program client.ProgramFunc + credCtxs []string + cfg *config.CLIConfig + program client.ProgramFunc + recreateAllLock sync.RWMutex } -func (s Store) Get(_ context.Context, toolName string) (*Credential, bool, error) { +func (s *Store) Get(_ context.Context, toolName string) (*Credential, bool, error) { + s.recreateAllLock.RLock() + defer s.recreateAllLock.RUnlock() + if len(s.credCtxs) > 0 && s.credCtxs[0] == AllCredentialContexts { return nil, false, fmt.Errorf("cannot get a credential with context %q", AllCredentialContexts) } @@ -80,7 +86,10 @@ func (s Store) Get(_ context.Context, toolName string) (*Credential, bool, error // Add adds a new credential to the credential store. // Any context set on the credential object will be overwritten with the first context of the credential store. -func (s Store) Add(_ context.Context, cred Credential) error { +func (s *Store) Add(_ context.Context, cred Credential) error { + s.recreateAllLock.RLock() + defer s.recreateAllLock.RUnlock() + first := first(s.credCtxs) if first == AllCredentialContexts { return fmt.Errorf("cannot add a credential with context %q", AllCredentialContexts) @@ -99,7 +108,10 @@ func (s Store) Add(_ context.Context, cred Credential) error { } // Refresh updates an existing credential in the credential store. 
-func (s Store) Refresh(_ context.Context, cred Credential) error { +func (s *Store) Refresh(_ context.Context, cred Credential) error { + s.recreateAllLock.RLock() + defer s.recreateAllLock.RUnlock() + if !slices.Contains(s.credCtxs, cred.Context) { return fmt.Errorf("context %q not in list of valid contexts for this credential store", cred.Context) } @@ -115,7 +127,10 @@ func (s Store) Refresh(_ context.Context, cred Credential) error { return store.Store(auth) } -func (s Store) Remove(_ context.Context, toolName string) error { +func (s *Store) Remove(_ context.Context, toolName string) error { + s.recreateAllLock.RLock() + defer s.recreateAllLock.RUnlock() + first := first(s.credCtxs) if len(s.credCtxs) > 1 || first == AllCredentialContexts { return fmt.Errorf("error: credential deletion is not supported when multiple credential contexts are provided") @@ -129,7 +144,10 @@ func (s Store) Remove(_ context.Context, toolName string) error { return store.Erase(toolNameWithCtx(toolName, first)) } -func (s Store) List(_ context.Context) ([]Credential, error) { +func (s *Store) List(_ context.Context) ([]Credential, error) { + s.recreateAllLock.RLock() + defer s.recreateAllLock.RUnlock() + store, err := s.getStore() if err != nil { return nil, err @@ -199,6 +217,55 @@ func (s Store) List(_ context.Context) ([]Credential, error) { return maps.Values(credsByName), nil } +func (s *Store) RecreateAll(_ context.Context) error { + store, err := s.getStore() + if err != nil { + return err + } + + // New credentials might be created after our GetAll, but they will be created with the current encryption configuration, + // so it's okay that they are skipped by this function. + s.recreateAllLock.Lock() + all, err := store.GetAll() + s.recreateAllLock.Unlock() + if err != nil { + return err + } + + // Loop through and recreate each individual credential. 
+ for serverAddress := range all { + if err := s.recreateCredential(store, serverAddress); err != nil { + return err + } + } + + return nil +} + +func (s *Store) recreateCredential(store credentials.Store, serverAddress string) error { + s.recreateAllLock.Lock() + defer s.recreateAllLock.Unlock() + + authConfig, err := store.Get(serverAddress) + if err != nil { + if IsCredentialsNotFoundError(err) { + // This can happen if the credential was deleted between the GetAll and the Get by another thread. + return nil + } + return err + } + + if err := store.Erase(serverAddress); err != nil { + return err + } + + if err := store.Store(authConfig); err != nil { + return err + } + + return nil +} + func (s *Store) getStore() (credentials.Store, error) { if s.program != nil { return &toolCredentialStore{ diff --git a/pkg/sdkserver/credentials.go b/pkg/sdkserver/credentials.go index 2b527b2b..adf86bc7 100644 --- a/pkg/sdkserver/credentials.go +++ b/pkg/sdkserver/credentials.go @@ -20,6 +20,23 @@ func (s *server) initializeCredentialStore(_ context.Context, credCtxs []string) return store, nil } +func (s *server) recreateAllCredentials(w http.ResponseWriter, r *http.Request) { + logger := gcontext.GetLogger(r.Context()) + + store, err := s.initializeCredentialStore(r.Context(), []string{credentials.AllCredentialContexts}) + if err != nil { + writeError(logger, w, http.StatusInternalServerError, err) + return + } + + if err := store.RecreateAll(r.Context()); err != nil { + writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to recreate all credentials: %w", err)) + return + } + + writeResponse(logger, w, map[string]any{"stdout": "All credentials recreated successfully"}) +} + func (s *server) listCredentials(w http.ResponseWriter, r *http.Request) { logger := gcontext.GetLogger(r.Context()) req := new(credentialsRequest) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index d520e97a..b1bd4c3b 100644 --- a/pkg/sdkserver/routes.go +++ 
b/pkg/sdkserver/routes.go @@ -70,6 +70,7 @@ func (s *server) addRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /credentials/create", s.createCredential) mux.HandleFunc("POST /credentials/reveal", s.revealCredential) mux.HandleFunc("POST /credentials/delete", s.deleteCredential) + mux.HandleFunc("POST /credentials/recreate-all", s.recreateAllCredentials) mux.HandleFunc("POST /datasets", s.listDatasets) mux.HandleFunc("POST /datasets/list-elements", s.listDatasetElements) From f9c09f128b0fc1cd3478dd022f7cb7bf016353d7 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Sun, 16 Mar 2025 22:20:06 -0700 Subject: [PATCH 228/270] chore: move string to constant for easier reuse --- pkg/openai/client.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/openai/client.go b/pkg/openai/client.go index ec6b2668..4862271b 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -546,6 +546,8 @@ func override(left, right string) string { return left } +const WaitingMessage = "Waiting for model response..." 
+ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, transactionID string, env []string, partial chan<- types.CompletionStatus) (types.CompletionMessage, error) { streamResponse := os.Getenv("GPTSCRIPT_INTERNAL_OPENAI_STREAMING") != "false" @@ -553,7 +555,7 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, CompletionID: transactionID, PartialResponse: &types.CompletionMessage{ Role: types.CompletionMessageRoleTypeAssistant, - Content: types.Text("Waiting for model response..."), + Content: types.Text(WaitingMessage), }, } From 6093748cd8ef50a473beb9e57b083550cba697d5 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 11 Feb 2025 15:24:10 -0700 Subject: [PATCH 229/270] chore: allow image data to be in prompt input --- pkg/openai/client.go | 34 ++++++++++++++++++++++++++++++---- pkg/openai/client_test.go | 38 ++++++++++++++++++++++++++++++++++++++ pkg/runner/runner.go | 15 ++++++++++++++- 3 files changed, 82 insertions(+), 5 deletions(-) diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 4862271b..65bc2ae8 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -282,10 +282,7 @@ func toMessages(request types.CompletionRequest, compat bool) (result []openai.C chatMessage.ToolCalls = append(chatMessage.ToolCalls, toToolCall(*content.ToolCall)) } if content.Text != "" { - chatMessage.MultiContent = append(chatMessage.MultiContent, openai.ChatMessagePart{ - Type: openai.ChatMessagePartTypeText, - Text: content.Text, - }) + chatMessage.MultiContent = append(chatMessage.MultiContent, textToMultiContent(content.Text)...) 
} } @@ -307,6 +304,35 @@ func toMessages(request types.CompletionRequest, compat bool) (result []openai.C return } +const imagePrefix = "data:image/png;base64," + +func textToMultiContent(text string) []openai.ChatMessagePart { + var chatParts []openai.ChatMessagePart + parts := strings.Split(text, "\n") + for i := len(parts) - 1; i >= 0; i-- { + if strings.HasPrefix(parts[i], imagePrefix) { + chatParts = append(chatParts, openai.ChatMessagePart{ + Type: openai.ChatMessagePartTypeImageURL, + ImageURL: &openai.ChatMessageImageURL{ + URL: parts[i], + }, + }) + parts = parts[:i] + } else { + break + } + } + if len(parts) > 0 { + chatParts = append(chatParts, openai.ChatMessagePart{ + Type: openai.ChatMessagePartTypeText, + Text: strings.Join(parts, "\n"), + }) + } + + slices.Reverse(chatParts) + return chatParts +} + func (c *Client) Call(ctx context.Context, messageRequest types.CompletionRequest, env []string, status chan<- types.CompletionStatus) (*types.CompletionMessage, error) { if err := c.ValidAuth(); err != nil { if err := c.RetrieveAPIKey(ctx, env); err != nil { diff --git a/pkg/openai/client_test.go b/pkg/openai/client_test.go index 30f1705b..78f3eac2 100644 --- a/pkg/openai/client_test.go +++ b/pkg/openai/client_test.go @@ -9,6 +9,44 @@ import ( "github.com/hexops/valast" ) +func TestTextToMultiContent(t *testing.T) { + autogold.Expect([]openai.ChatMessagePart{{ + Type: "text", + Text: "hi\ndata:image/png;base64,xxxxx\n", + }}).Equal(t, textToMultiContent("hi\ndata:image/png;base64,xxxxx\n")) + + autogold.Expect([]openai.ChatMessagePart{ + { + Type: "text", + Text: "hi", + }, + { + Type: "image_url", + ImageURL: &openai.ChatMessageImageURL{URL: "data:image/png;base64,xxxxx"}, + }, + }).Equal(t, textToMultiContent("hi\ndata:image/png;base64,xxxxx")) + + autogold.Expect([]openai.ChatMessagePart{{ + Type: "image_url", + ImageURL: &openai.ChatMessageImageURL{URL: "data:image/png;base64,xxxxx"}, + }}).Equal(t, textToMultiContent("data:image/png;base64,xxxxx")) 
+ + autogold.Expect([]openai.ChatMessagePart{ + { + Type: "text", + Text: "\none\ntwo", + }, + { + Type: "image_url", + ImageURL: &openai.ChatMessageImageURL{URL: "data:image/png;base64,xxxxx"}, + }, + { + Type: "image_url", + ImageURL: &openai.ChatMessageImageURL{URL: "data:image/png;base64,yyyyy"}, + }, + }).Equal(t, textToMultiContent("\none\ntwo\ndata:image/png;base64,xxxxx\ndata:image/png;base64,yyyyy")) +} + func Test_appendMessage(t *testing.T) { autogold.Expect(types.CompletionMessage{Content: []types.ContentPart{ {ToolCall: &types.CompletionToolCall{ diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index df3ef172..bd05e96a 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -655,6 +655,17 @@ func (r *Runner) newDispatcher(ctx context.Context) dispatcher { return newParallelDispatcher(ctx) } +func idForToolCall(id string, state *engine.Return) string { + if state == nil || state.State == nil { + return id + } + tc, ok := state.State.Pending[id] + if !ok || tc.Index == nil { + return id + } + return fmt.Sprintf("%03d", *tc.Index) +} + func (r *Runner) subCalls(callCtx engine.Context, monitor Monitor, env []string, state *State, toolCategory engine.ToolCategory) (*State, []SubCallResult, error) { var ( resultLock sync.Mutex @@ -698,7 +709,9 @@ func (r *Runner) subCalls(callCtx engine.Context, monitor Monitor, env []string, // Sort the id so if sequential the results are predictable ids := maps.Keys(state.Continuation.Calls) - sort.Strings(ids) + sort.Slice(ids, func(i, j int) bool { + return idForToolCall(ids[i], state.Continuation) < idForToolCall(ids[j], state.Continuation) + }) for _, id := range ids { call := state.Continuation.Calls[id] From 31ce029b1eb65434798a52cc7a4337443564a388 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Sat, 22 Mar 2025 11:13:19 -0700 Subject: [PATCH 230/270] bug: String() is unsafe, so rename to Print() and make a safer version The syntax of GPTScript is not well defined enough to safely parse at the 
moment. :/ --- pkg/assemble/assemble.go | 17 ------------ pkg/auth/auth.go | 2 +- pkg/cli/fmt.go | 4 +-- pkg/cli/gptscript.go | 16 ----------- pkg/loader/loader.go | 57 +++++++++------------------------------ pkg/parser/parser.go | 4 +-- pkg/parser/parser_test.go | 6 ++--- pkg/sdkserver/routes.go | 4 +-- pkg/sdkserver/types.go | 13 ++++----- pkg/types/tool.go | 8 ++++++ pkg/types/tool_test.go | 4 +-- 11 files changed, 38 insertions(+), 97 deletions(-) delete mode 100644 pkg/assemble/assemble.go diff --git a/pkg/assemble/assemble.go b/pkg/assemble/assemble.go deleted file mode 100644 index ad44d1dd..00000000 --- a/pkg/assemble/assemble.go +++ /dev/null @@ -1,17 +0,0 @@ -package assemble - -import ( - "encoding/json" - "io" - - "github.com/gptscript-ai/gptscript/pkg/types" -) - -var Header = []byte("GPTSCRIPT!") - -func Assemble(prg types.Program, output io.Writer) error { - if _, err := output.Write(Header); err != nil { - return err - } - return json.NewEncoder(output).Encode(prg) -} diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go index 5c818902..0a664b4d 100644 --- a/pkg/auth/auth.go +++ b/pkg/auth/auth.go @@ -23,7 +23,7 @@ func Authorize(ctx engine.Context, input string) (runner.AuthorizerResponse, err var result bool err := survey.AskOne(&survey.Confirm{ - Help: fmt.Sprintf("The full source of the tools is as follows:\n\n%s", ctx.Tool.String()), + Help: fmt.Sprintf("The full source of the tools is as follows:\n\n%s", ctx.Tool.Print()), Default: true, Message: ConfirmMessage(ctx, input), }, &result) diff --git a/pkg/cli/fmt.go b/pkg/cli/fmt.go index 72696756..8e669349 100644 --- a/pkg/cli/fmt.go +++ b/pkg/cli/fmt.go @@ -43,9 +43,9 @@ func (e *Fmt) Run(_ *cobra.Command, args []string) error { } if e.Write && loc != "" { - return os.WriteFile(loc, []byte(doc.String()), 0644) + return os.WriteFile(loc, []byte(doc.Print()), 0644) } - fmt.Print(doc.String()) + fmt.Print(doc.Print()) return nil } diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 
16f9152d..718c6f90 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -14,7 +14,6 @@ import ( "github.com/fatih/color" "github.com/gptscript-ai/cmd" gptscript2 "github.com/gptscript-ai/go-gptscript" - "github.com/gptscript-ai/gptscript/pkg/assemble" "github.com/gptscript-ai/gptscript/pkg/auth" "github.com/gptscript-ai/gptscript/pkg/builtin" "github.com/gptscript-ai/gptscript/pkg/cache" @@ -58,7 +57,6 @@ type GPTScript struct { // Input should not be using GPTSCRIPT_INPUT env var because that is the same value that is set in tool executions Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"` SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` - Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` ListModels bool `usage:"List the models available and exit" local:"true"` ListTools bool `usage:"List built-in tools and exit" local:"true"` ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"` @@ -439,20 +437,6 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { return cmd.Help() } - if r.Assemble { - var out io.Writer = os.Stdout - if r.Output != "" && r.Output != "-" { - f, err := os.Create(r.Output) - if err != nil { - return fmt.Errorf("opening %s: %w", r.Output, err) - } - defer f.Close() - out = f - } - - return assemble.Assemble(prg, out) - } - toolInput, err := input.FromCLI(r.Input, args) if err != nil { return err diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index 902d0ed9..8e3914b5 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -17,7 +17,6 @@ import ( "github.com/getkin/kin-openapi/openapi3" "github.com/gptscript-ai/gptscript/internal" - "github.com/gptscript-ai/gptscript/pkg/assemble" "github.com/gptscript-ai/gptscript/pkg/builtin" "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/hash" @@ 
-132,36 +131,6 @@ func loadLocal(base *source, name string) (*source, bool, error) { }, true, nil } -func loadProgram(data []byte, into *types.Program, targetToolName, defaultModel string) (types.Tool, error) { - var ext types.Program - - if err := json.Unmarshal(data[len(assemble.Header):], &ext); err != nil { - return types.Tool{}, err - } - - into.ToolSet = make(map[string]types.Tool, len(ext.ToolSet)) - for k, v := range ext.ToolSet { - if builtinTool, ok := builtin.DefaultModel(k, defaultModel); ok { - v = builtinTool - } - into.ToolSet[k] = v - } - - tool := into.ToolSet[ext.EntryToolID] - if targetToolName == "" { - return tool, nil - } - - tool, ok := into.ToolSet[tool.LocalTools[strings.ToLower(targetToolName)]] - if !ok { - return tool, &types.ErrToolNotFound{ - ToolName: targetToolName, - } - } - - return tool, nil -} - func loadOpenAPI(prg *types.Program, data []byte) *openapi3.T { var ( openAPICacheKey = hash.Digest(data) @@ -189,14 +158,6 @@ func loadOpenAPI(prg *types.Program, data []byte) *openapi3.T { func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, targetToolName, defaultModel string) ([]types.Tool, error) { data := base.Content - if bytes.HasPrefix(data, assemble.Header) { - tool, err := loadProgram(data, prg, targetToolName, defaultModel) - if err != nil { - return nil, err - } - return []types.Tool{tool}, nil - } - var ( tools []types.Tool isOpenAPI bool @@ -231,11 +192,19 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base // If we didn't get any tools from trying to parse it as OpenAPI, try to parse it as a GPTScript if len(tools) == 0 { var err error - tools, err = parser.ParseTools(bytes.NewReader(data), parser.Options{ - AssignGlobals: true, - }) - if err != nil { - return nil, err + _, marshaled, ok := strings.Cut(string(data), "#!GPTSCRIPT") + if ok { + err = json.Unmarshal([]byte(marshaled), &tools) + if err != nil { + return nil, fmt.Errorf("error parsing 
marshalled script: %w", err) + } + } else { + tools, err = parser.ParseTools(bytes.NewReader(data), parser.Options{ + AssignGlobals: true, + }) + if err != nil { + return nil, err + } } } diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index 626056a7..e1b0b9fa 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -263,7 +263,7 @@ func writeSep(buf *strings.Builder, lastText bool) { } } -func (d Document) String() string { +func (d Document) Print() string { buf := strings.Builder{} lastText := false for _, node := range d.Nodes { @@ -274,7 +274,7 @@ func (d Document) String() string { } if node.ToolNode != nil { writeSep(&buf, lastText) - buf.WriteString(node.ToolNode.Tool.String()) + buf.WriteString(node.ToolNode.Tool.Print()) lastText = false } } diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index 7e1282ca..a3263539 100644 --- a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -304,7 +304,7 @@ body !metadata:first:package.json foo=base f -`).Equal(t, tools[0].String()) +`).Equal(t, tools[0].Print()) } func TestFormatWithBadInstruction(t *testing.T) { @@ -316,9 +316,9 @@ func TestFormatWithBadInstruction(t *testing.T) { Instructions: "foo: bar", }, } - autogold.Expect("Name: foo\n===\nfoo: bar\n").Equal(t, input.String()) + autogold.Expect("Name: foo\n===\nfoo: bar\n").Equal(t, input.Print()) - tools, err := ParseTools(strings.NewReader(input.String())) + tools, err := ParseTools(strings.NewReader(input.Print())) require.NoError(t, err) if reflect.DeepEqual(input, tools[0]) { t.Errorf("expected %v, got %v", input, tools[0]) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index b1bd4c3b..66449749 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -114,7 +114,7 @@ func (s *server) listTools(w http.ResponseWriter, r *http.Request) { // Don't print instructions tool.Instructions = "" - lines = append(lines, tool.String()) + lines = append(lines, tool.Print()) } writeResponse(logger, 
w, map[string]any{"stdout": strings.Join(lines, "\n---\n")}) @@ -339,5 +339,5 @@ func (s *server) fmtDocument(w http.ResponseWriter, r *http.Request) { return } - writeResponse(logger, w, map[string]string{"stdout": doc.String()}) + writeResponse(logger, w, map[string]string{"stdout": doc.Print()}) } diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index a4332557..278a8c78 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -1,8 +1,8 @@ package sdkserver import ( + "encoding/json" "maps" - "strings" "time" "github.com/gptscript-ai/gptscript/pkg/cache" @@ -30,15 +30,12 @@ const ( type toolDefs []types.ToolDef func (t toolDefs) String() string { - s := new(strings.Builder) - for i, tool := range t { - s.WriteString(tool.String()) - if i != len(t)-1 { - s.WriteString("\n\n---\n\n") - } + data, err := json.Marshal(t) + if err != nil { + panic(err) } - return s.String() + return "#!GPTSCRIPT" + string(data) } type ( diff --git a/pkg/types/tool.go b/pkg/types/tool.go index fcd0d53d..0c8bd77c 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -389,6 +389,14 @@ func (t Tool) GetToolRefsFromNames(names []string) (result []ToolReference, _ er } func (t ToolDef) String() string { + data, err := json.Marshal([]any{t}) + if err != nil { + panic(err) + } + return "#!GPTSCRIPT" + string(data) +} + +func (t ToolDef) Print() string { buf := &strings.Builder{} if t.Parameters.GlobalModelName != "" { _, _ = fmt.Fprintf(buf, "Global Model Name: %s\n", t.Parameters.GlobalModelName) diff --git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index a146955e..1160b4f8 100644 --- a/pkg/types/tool_test.go +++ b/pkg/types/tool_test.go @@ -6,7 +6,7 @@ import ( "github.com/hexops/autogold/v2" ) -func TestToolDef_String(t *testing.T) { +func TestToolDef_Print(t *testing.T) { tool := ToolDef{ Parameters: Parameters{ Name: "Tool Sample", @@ -82,7 +82,7 @@ This is a sample instruction // blah blah some ugly JSON } -`).Equal(t, tool.String()) +`).Equal(t, 
tool.Print()) } // float32Ptr is used to return a pointer to a given float32 value From cb3d0de9be0d8c28383207e3b8c0677f3402d0ea Mon Sep 17 00:00:00 2001 From: Daishan Peng Date: Fri, 28 Mar 2025 08:56:01 -0700 Subject: [PATCH 231/270] Fix: fix close bug when run is aborted (#956) Signed-off-by: Daishan Peng --- pkg/sdkserver/routes.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 66449749..1a4e28ea 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -176,9 +176,13 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { defer func() { s.runningLock.Lock() + // Need to check if the cancel is still in map. In case when user abort, the channel will be deleted from map and closed already, and closing it again will panic + _, ok := s.running[runID] + if ok { + close(cancel) + } delete(s.running, runID) s.runningLock.Unlock() - close(cancel) }() // Ensure chat state is not empty. 
From 550d649b5fe1ff6c0fc4de302437063c4d755a1c Mon Sep 17 00:00:00 2001 From: Daishan Peng Date: Fri, 28 Mar 2025 10:17:03 -0700 Subject: [PATCH 232/270] Fix: make sure to not proceed with cred after abort (#957) Signed-off-by: Daishan Peng --- pkg/engine/engine.go | 6 ++++-- pkg/runner/runner.go | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 778b1e7e..c55f092d 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -17,6 +17,8 @@ import ( var maxConsecutiveToolCalls = 50 +const AbortedSuffix = "\n\nABORTED BY USER" + func init() { if val := os.Getenv("GPTSCRIPT_MAX_CONSECUTIVE_TOOL_CALLS"); val != "" { if i, err := strconv.Atoi(val); err == nil && i > 0 { @@ -335,7 +337,7 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, err error) { if ret.Result == nil { ret.Result = new(string) } - *ret.Result += "\n\nABORTED BY USER" + *ret.Result += AbortedSuffix default: } }() @@ -501,7 +503,7 @@ func (e *Engine) Continue(ctx Context, state *State, results ...CallResult) (ret if ret.Result == nil { ret.Result = new(string) } - *ret.Result += "\n\nABORTED BY USER" + *ret.Result += AbortedSuffix default: } }() diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index bd05e96a..d61b3a33 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -864,6 +864,10 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env continue } + if strings.HasSuffix(*res.Result, engine.AbortedSuffix) { + continue + } + if err := json.Unmarshal([]byte(*res.Result), &resultCredential); err != nil { return nil, fmt.Errorf("failed to unmarshal credential tool %s response: %w", ref.Reference, err) } From ce3b7262ed0e4b2ed7cfb3135b1e1efc7e74a395 Mon Sep 17 00:00:00 2001 From: Daishan Peng Date: Fri, 28 Mar 2025 12:41:44 -0700 Subject: [PATCH 233/270] Enhance: add ability to do inline prompting to choose oauth/pat (#955) Signed-off-by: Daishan Peng --- 
pkg/types/prompt.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/types/prompt.go b/pkg/types/prompt.go index 3da40a2e..f36ea566 100644 --- a/pkg/types/prompt.go +++ b/pkg/types/prompt.go @@ -18,9 +18,10 @@ type Prompt struct { } type Field struct { - Name string `json:"name,omitempty"` - Sensitive *bool `json:"sensitive,omitempty"` - Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + Sensitive *bool `json:"sensitive,omitempty"` + Description string `json:"description,omitempty"` + Options []string `json:"options,omitempty"` } type Fields []Field From 9abfd87cb0af10781788794c3100bc6df7873730 Mon Sep 17 00:00:00 2001 From: Craig Jellick Date: Mon, 7 Apr 2025 11:44:14 -0700 Subject: [PATCH 234/270] Chore: Drop tools.gptscript.ai Signed-off-by: Craig Jellick --- docs/docs/03-tools/02-authoring.md | 12 ------------ docs/docusaurus.config.js | 5 ----- 2 files changed, 17 deletions(-) diff --git a/docs/docs/03-tools/02-authoring.md b/docs/docs/03-tools/02-authoring.md index b8757440..423147ab 100644 --- a/docs/docs/03-tools/02-authoring.md +++ b/docs/docs/03-tools/02-authoring.md @@ -90,15 +90,3 @@ Here are the supported languages and examples of tools written in those language | `Python` | [Image Generation](https://github.com/gptscript-ai/dalle-image-generation) - Generate images based on a prompt | | `Node.js` | [Vision](https://github.com/gptscript-ai/gpt4-v-vision) - Analyze and interpret images | | `Golang` | [Search](https://github.com/gptscript-ai/search) - Use various providers to search the internet | - - -## Automatic Documentation - -Each GPTScript tool is self-documented using the `tool.gpt` file. -You can automatically generate documentation for your tools by visiting `https://tools.gptscript.ai/`. -This documentation site allows others to easily search and explore the tools that have been created. 
- -You can add more information about how to use your tool by adding an `examples` directory to your repository and adding a collection of `.gpt` files that demonstrate how to use your tool. -These examples will be automatically included in the documentation. - -For more information and to explore existing tools, visit [tools.gptscript.ai](https://tools.gptscript.ai). diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 6a0eb3bc..f3344bca 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -70,11 +70,6 @@ const config = { label: "Contact Us", position: "right", }, - { - href: "https://tools.gptscript.ai/", - label: "Tool Search", - position: "right", - }, ], }, footer: { From c519c63e1d4fb84a61f7c5d55775f585a71c9188 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 10 Apr 2025 11:35:09 -0400 Subject: [PATCH 235/270] fix: estimate tokens using tiktoken (#959) Signed-off-by: Grant Linville --- go.mod | 4 ++- go.sum | 8 ++++-- pkg/openai/client.go | 27 ++++++++++++++---- pkg/openai/count.go | 67 ++++++++++++++++++++++++++++++++++---------- 4 files changed, 83 insertions(+), 23 deletions(-) diff --git a/go.mod b/go.mod index 15f88d5f..f803a3b9 100644 --- a/go.mod +++ b/go.mod @@ -23,6 +23,8 @@ require ( github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 github.com/mholt/archives v0.1.0 + github.com/pkoukk/tiktoken-go v0.1.7 + github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 github.com/rs/cors v1.11.0 github.com/samber/lo v1.38.1 github.com/sirupsen/logrus v1.9.3 @@ -62,7 +64,7 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dlclark/regexp2 v1.4.0 // indirect + github.com/dlclark/regexp2 v1.10.0 // indirect github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect github.com/emirpasic/gods v1.18.1 // 
indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect diff --git a/go.sum b/go.sum index 07d8d500..74341af5 100644 --- a/go.sum +++ b/go.sum @@ -108,8 +108,8 @@ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= -github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/cli v26.0.0+incompatible h1:90BKrx1a1HKYpSnnBFR6AgDq/FqkHxwlUyzJVPxD30I= github.com/docker/cli v26.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= @@ -316,6 +316,10 @@ github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFz github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkoukk/tiktoken-go v0.1.7 h1:qOBHXX4PHtvIvmOtyg1EeKlwFRiMKAcoMp4Q+bLQDmw= +github.com/pkoukk/tiktoken-go v0.1.7/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= +github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 h1:Sp8yiuxsitkmCfEvUnmNf8wzuZwlGNkRjI2yF0C3QUQ= +github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699/go.mod h1:4mIkYyZooFlnenDlormIo6cd5wrlUKNr97wp9nGgEKo= github.com/pmezard/go-difflib 
v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 65bc2ae8..7715c657 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -349,16 +349,29 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques return nil, err } + toolTokenCount, err := countTools(messageRequest.Tools) + if err != nil { + return nil, err + } + if messageRequest.Chat { // Check the last message. If it is from a tool call, and if it takes up more than 80% of the budget on its own, reject it. lastMessage := msgs[len(msgs)-1] - if lastMessage.Role == string(types.CompletionMessageRoleTypeTool) && countMessage(lastMessage) > int(float64(getBudget(messageRequest.MaxTokens))*0.8) { + lastMessageCount, err := countMessage(lastMessage) + if err != nil { + return nil, err + } + + if lastMessage.Role == string(types.CompletionMessageRoleTypeTool) && lastMessageCount+toolTokenCount > int(float64(getBudget(messageRequest.MaxTokens))*0.8) { // We need to update it in the msgs slice for right now and in the messageRequest for future calls. msgs[len(msgs)-1].Content = TooLongMessage messageRequest.Messages[len(messageRequest.Messages)-1].Content = types.Text(TooLongMessage) } - msgs = dropMessagesOverCount(messageRequest.MaxTokens, msgs) + msgs, err = dropMessagesOverCount(messageRequest.MaxTokens, toolTokenCount, msgs) + if err != nil { + return nil, err + } } if len(msgs) == 0 { @@ -439,7 +452,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques // Decrease maxTokens by 10% to make garbage collection more aggressive. // The retry loop will further decrease maxTokens if needed. 
maxTokens := decreaseTenPercent(messageRequest.MaxTokens) - result, err = c.contextLimitRetryLoop(ctx, request, id, env, maxTokens, status) + result, err = c.contextLimitRetryLoop(ctx, request, id, env, maxTokens, toolTokenCount, status) } if err != nil { return nil, err @@ -473,7 +486,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques return &result, nil } -func (c *Client) contextLimitRetryLoop(ctx context.Context, request openai.ChatCompletionRequest, id string, env []string, maxTokens int, status chan<- types.CompletionStatus) (types.CompletionMessage, error) { +func (c *Client) contextLimitRetryLoop(ctx context.Context, request openai.ChatCompletionRequest, id string, env []string, maxTokens int, toolTokenCount int, status chan<- types.CompletionStatus) (types.CompletionMessage, error) { var ( response types.CompletionMessage err error @@ -481,7 +494,11 @@ func (c *Client) contextLimitRetryLoop(ctx context.Context, request openai.ChatC for range 10 { // maximum 10 tries // Try to drop older messages again, with a decreased max tokens. 
- request.Messages = dropMessagesOverCount(maxTokens, request.Messages) + request.Messages, err = dropMessagesOverCount(maxTokens, toolTokenCount, request.Messages) + if err != nil { + return types.CompletionMessage{}, err + } + response, err = c.call(ctx, request, id, env, status) if err == nil { return response, nil diff --git a/pkg/openai/count.go b/pkg/openai/count.go index ffd902e5..d8f2ca36 100644 --- a/pkg/openai/count.go +++ b/pkg/openai/count.go @@ -1,9 +1,18 @@ package openai import ( + "encoding/json" + openai "github.com/gptscript-ai/chat-completion-client" + "github.com/gptscript-ai/gptscript/pkg/types" + "github.com/pkoukk/tiktoken-go" + tiktoken_loader "github.com/pkoukk/tiktoken-go-loader" ) +func init() { + tiktoken.SetBpeLoader(tiktoken_loader.NewOfflineLoader()) +} + const DefaultMaxTokens = 128_000 func decreaseTenPercent(maxTokens int) int { @@ -12,22 +21,26 @@ func decreaseTenPercent(maxTokens int) int { } func getBudget(maxTokens int) int { - if maxTokens == 0 { + if maxTokens <= 0 { return DefaultMaxTokens } return maxTokens } -func dropMessagesOverCount(maxTokens int, msgs []openai.ChatCompletionMessage) (result []openai.ChatCompletionMessage) { +func dropMessagesOverCount(maxTokens, toolTokenCount int, msgs []openai.ChatCompletionMessage) (result []openai.ChatCompletionMessage, err error) { var ( lastSystem int withinBudget int - budget = getBudget(maxTokens) + budget = getBudget(maxTokens) - toolTokenCount ) for i, msg := range msgs { if msg.Role == openai.ChatMessageRoleSystem { - budget -= countMessage(msg) + count, err := countMessage(msg) + if err != nil { + return nil, err + } + budget -= count lastSystem = i result = append(result, msg) } else { @@ -37,7 +50,11 @@ func dropMessagesOverCount(maxTokens int, msgs []openai.ChatCompletionMessage) ( for i := len(msgs) - 1; i > lastSystem; i-- { withinBudget = i - budget -= countMessage(msgs[i]) + count, err := countMessage(msgs[i]) + if err != nil { + return nil, err + } + budget -= count 
if budget <= 0 { break } @@ -54,22 +71,42 @@ func dropMessagesOverCount(maxTokens int, msgs []openai.ChatCompletionMessage) ( if withinBudget == len(msgs)-1 { // We are going to drop all non system messages, which seems useless, so just return them // all and let it fail - return msgs + return msgs, nil } - return append(result, msgs[withinBudget:]...) + return append(result, msgs[withinBudget:]...), nil } -func countMessage(msg openai.ChatCompletionMessage) (count int) { - count += len(msg.Role) - count += len(msg.Content) +func countMessage(msg openai.ChatCompletionMessage) (int, error) { + encoding, err := tiktoken.GetEncoding("o200k_base") + if err != nil { + return 0, err + } + + count := len(encoding.Encode(msg.Role, nil, nil)) + count += len(encoding.Encode(msg.Content, nil, nil)) for _, content := range msg.MultiContent { - count += len(content.Text) + count += len(encoding.Encode(content.Text, nil, nil)) } for _, tool := range msg.ToolCalls { - count += len(tool.Function.Name) - count += len(tool.Function.Arguments) + count += len(encoding.Encode(tool.Function.Name, nil, nil)) + count += len(encoding.Encode(tool.Function.Arguments, nil, nil)) } - count += len(msg.ToolCallID) - return count / 3 + count += len(encoding.Encode(msg.ToolCallID, nil, nil)) + + return count, nil +} + +func countTools(tools []types.ChatCompletionTool) (int, error) { + encoding, err := tiktoken.GetEncoding("o200k_base") + if err != nil { + return 0, err + } + + toolJSON, err := json.Marshal(tools) + if err != nil { + return 0, err + } + + return len(encoding.Encode(string(toolJSON), nil, nil)), nil } From 4b87a83c9191d1dfd92be8511c9dcabfd55e0f73 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 14 Apr 2025 20:40:47 -0400 Subject: [PATCH 236/270] fix: remove unnecessary error wrapping (#961) Signed-off-by: Grant Linville --- pkg/runner/runner.go | 2 +- pkg/sdkserver/run.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/runner.go 
b/pkg/runner/runner.go index d61b3a33..aea91b34 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -853,7 +853,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env res, err := r.subCall(callCtx.Ctx, callCtx, monitor, env, ref.ToolID, input, "", engine.CredentialToolCategory) if err != nil { - return nil, fmt.Errorf("failed to run credential tool %s: %w", ref.Reference, err) + return nil, err } if res.Result == nil { diff --git a/pkg/sdkserver/run.go b/pkg/sdkserver/run.go index 93c9996b..94bdf93c 100644 --- a/pkg/sdkserver/run.go +++ b/pkg/sdkserver/run.go @@ -80,7 +80,7 @@ func processEventStreamOutput(logger mvl.Logger, w http.ResponseWriter, id strin }) case err := <-errChan: writeServerSentEvent(logger, w, map[string]any{ - "stderr": fmt.Sprintf("failed to run: %v", err), + "stderr": err.Error(), }) } From 7e668d50020b27b939771159d6719633c925a072 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 16 Apr 2025 13:57:58 -0400 Subject: [PATCH 237/270] chore: migrate to golangci-lint v2 (#962) Signed-off-by: Donnie Adams --- .golangci.yml | 51 +++++++++++++------- Makefile | 2 +- pkg/builtin/builtin.go | 4 +- pkg/builtin/defaults.go | 4 +- pkg/chat/chat.go | 2 +- pkg/cli/gptscript.go | 6 +-- pkg/engine/cmd.go | 2 +- pkg/engine/daemon.go | 4 +- pkg/engine/engine.go | 14 +++--- pkg/engine/http.go | 2 +- pkg/loader/loader.go | 22 ++++----- pkg/loader/openapi.go | 10 ++-- pkg/monitor/display.go | 2 +- pkg/parser/parser.go | 56 +++++++++++----------- pkg/sdkserver/run.go | 4 +- pkg/sdkserver/server.go | 4 +- pkg/types/tool.go | 103 ++++++++++++++++++++-------------------- pkg/types/toolstring.go | 3 +- static/fs.go | 5 +- tools/gendocs/main.go | 2 +- 20 files changed, 158 insertions(+), 144 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e91a9ccc..0f992df5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,24 +1,41 @@ +version: "2" run: timeout: 5m - output: formats: - - format: colored-line-number - + 
text: + path: stdout linters: - disable-all: true + default: none enable: - - errcheck - - gofmt - - gosimple - - govet - - ineffassign - - staticcheck - - typecheck - - thelper - - unused - - goimports - - whitespace - - revive - fast: false + - errcheck + - govet + - ineffassign + - revive + - staticcheck + - thelper + - unused + - whitespace + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +issues: max-same-issues: 50 +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/Makefile b/Makefile index 4a52694a..b2e8482a 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ smoke: build smoke: go test -v -tags='smoke' ./pkg/tests/smoke/... -GOLANGCI_LINT_VERSION ?= v1.60.1 +GOLANGCI_LINT_VERSION ?= v2.1.2 lint: if ! command -v golangci-lint &> /dev/null; then \ echo "Could not find golangci-lint, installing version $(GOLANGCI_LINT_VERSION)."; \ diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index d14fe7c7..b339b52a 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -280,8 +280,8 @@ func DefaultModel(name, defaultModel string) (types.Tool, bool) { // Legacy syntax not used anymore name = strings.TrimSuffix(name, "?") t, ok := tools[name] - t.Parameters.Name = name - t.Parameters.ModelName = defaultModel + t.Name = name + t.ModelName = defaultModel t.ID = name t.Instructions = "#!" 
+ name return SetDefaults(t), ok diff --git a/pkg/builtin/defaults.go b/pkg/builtin/defaults.go index ac264ae6..85bdf485 100644 --- a/pkg/builtin/defaults.go +++ b/pkg/builtin/defaults.go @@ -18,8 +18,8 @@ func SetDefaultModel(model string) { } func SetDefaults(tool types.Tool) types.Tool { - if tool.Parameters.ModelName == "" { - tool.Parameters.ModelName = GetDefaultModel() + if tool.ModelName == "" { + tool.ModelName = GetDefaultModel() } return tool } diff --git a/pkg/chat/chat.go b/pkg/chat/chat.go index e36f107b..5adc0676 100644 --- a/pkg/chat/chat.go +++ b/pkg/chat/chat.go @@ -61,7 +61,7 @@ func Start(ctx context.Context, prevState runner.ChatState, chatter Chatter, prg if startInput != "" { input = startInput startInput = "" - } else if targetTool := prog.ToolSet[prog.EntryToolID]; !((prevState == nil || prevState == "") && targetTool.Arguments == nil && targetTool.Instructions != "") { + } else if targetTool := prog.ToolSet[prog.EntryToolID]; prevState != nil && prevState != "" || targetTool.Arguments != nil || targetTool.Instructions == "" { // The above logic will skip prompting if this is the first loop and the chat expects no args input, ok, err = prompter.Readline() if !ok || err != nil { diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 718c6f90..4b0642d2 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -366,7 +366,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // If the file is external, then set the SCRIPTS_PATH to the current working directory. Otherwise, // set it to the directory of the script and set the file to the base. 
- if !(strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "github.com")) { + if !strings.HasPrefix(file, "http://") && !strings.HasPrefix(file, "https://") && !strings.HasPrefix(file, "github.com") { absPathToScript, err := filepath.Abs(file) if err != nil { return fmt.Errorf("cannot determine absolute path to script %s: %v", file, err) @@ -469,8 +469,8 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // Don't use cmd.Context() because then sigint will cancel everything return tui.Run(context.Background(), args[0], tui.RunOptions{ ClientOpts: &gptscript2.GlobalOptions{ - OpenAIAPIKey: r.OpenAIOptions.APIKey, - OpenAIBaseURL: r.OpenAIOptions.BaseURL, + OpenAIAPIKey: r.APIKey, + OpenAIBaseURL: r.BaseURL, DefaultModel: r.DefaultModel, DefaultModelProvider: r.DefaultModelProvider, }, diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 368b1c98..5fb340c5 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -171,7 +171,7 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate // If this is a sub-call, then don't return the error; return the error as a message so that the LLM can retry. 
return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, stdoutAndErr), nil } - log.Errorf("failed to run tool [%s] cmd %v: %v", tool.Parameters.Name, cmd.Args, err) + log.Errorf("failed to run tool [%s] cmd %v: %v", tool.Name, cmd.Args, err) return "", fmt.Errorf("ERROR: %s: %w", stdoutAndErr, err) } diff --git a/pkg/engine/daemon.go b/pkg/engine/daemon.go index 6f991be0..58de592b 100644 --- a/pkg/engine/daemon.go +++ b/pkg/engine/daemon.go @@ -175,7 +175,7 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { return w.Close() } - log.Infof("launched [%s][%s] port [%d] %v", tool.Parameters.Name, tool.ID, port, cmd.Args) + log.Infof("launched [%s][%s] port [%d] %v", tool.Name, tool.ID, port, cmd.Args) if err := cmd.Start(); err != nil { stop() return url, err @@ -195,7 +195,7 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { go func() { err := cmd.Wait() if err != nil { - log.Debugf("daemon exited tool [%s] %v: %v", tool.Parameters.Name, cmd.Args, err) + log.Debugf("daemon exited tool [%s] %v: %v", tool.Name, cmd.Args, err) } _ = r.Close() _ = w.Close() diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index c55f092d..abf45e8c 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -285,13 +285,13 @@ func (c *Context) WrappedContext(e *Engine) context.Context { } func populateMessageParams(ctx Context, completion *types.CompletionRequest, tool types.Tool) error { - completion.Model = tool.Parameters.ModelName - completion.MaxTokens = tool.Parameters.MaxTokens - completion.JSONResponse = tool.Parameters.JSONResponse - completion.Cache = tool.Parameters.Cache - completion.Chat = tool.Parameters.Chat - completion.Temperature = tool.Parameters.Temperature - completion.InternalSystemPrompt = tool.Parameters.InternalPrompt + completion.Model = tool.ModelName + completion.MaxTokens = tool.MaxTokens + completion.JSONResponse = tool.JSONResponse + completion.Cache = tool.Cache + completion.Chat = tool.Chat + 
completion.Temperature = tool.Temperature + completion.InternalSystemPrompt = tool.InternalPrompt if tool.Chat && completion.InternalSystemPrompt == nil { completion.InternalSystemPrompt = new(bool) diff --git a/pkg/engine/http.go b/pkg/engine/http.go index 9e59b70a..49738b1a 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -112,7 +112,7 @@ func (e *Engine) runHTTP(ctx Context, tool types.Tool, input string) (cmdRet *Re } } - req.Header.Set("X-GPTScript-Tool-Name", tool.Parameters.Name) + req.Header.Set("X-GPTScript-Tool-Name", tool.Name) if err := json.Unmarshal([]byte(input), &map[string]any{}); err == nil { req.Header.Set("Content-Type", "application/json") diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index 8e3914b5..e70827c6 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -225,15 +225,15 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base // Probably a better way to come up with an ID tool.ID = tool.Source.Location + ":" + tool.Name - if i != 0 && tool.Parameters.Name == "" { + if i != 0 && tool.Name == "" { return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, fmt.Errorf("only the first tool in a file can have no name")) } - if i != 0 && tool.Parameters.GlobalModelName != "" { + if i != 0 && tool.GlobalModelName != "" { return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, fmt.Errorf("only the first tool in a file can have global model name")) } - if i != 0 && len(tool.Parameters.GlobalTools) > 0 { + if i != 0 && len(tool.GlobalTools) > 0 { return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, fmt.Errorf("only the first tool in a file can have global tools")) } @@ -245,8 +245,8 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base targetTools = append(targetTools, tool) } - if targetToolName != "" && tool.Parameters.Name != "" { - if strings.EqualFold(tool.Parameters.Name, targetToolName) { + if 
targetToolName != "" && tool.Name != "" { + if strings.EqualFold(tool.Name, targetToolName) { targetTools = append(targetTools, tool) } else if strings.Contains(targetToolName, "*") { var patterns []string @@ -257,7 +257,7 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base } for _, pattern := range patterns { - match, err := filepath.Match(strings.ToLower(pattern), strings.ToLower(tool.Parameters.Name)) + match, err := filepath.Match(strings.ToLower(pattern), strings.ToLower(tool.Name)) if err != nil { return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, err) } @@ -270,13 +270,13 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base } } - if existing, ok := localTools[strings.ToLower(tool.Parameters.Name)]; ok { + if existing, ok := localTools[strings.ToLower(tool.Name)]; ok { return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, - fmt.Errorf("duplicate tool name [%s] in %s found at lines %d and %d", tool.Parameters.Name, tool.Source.Location, + fmt.Errorf("duplicate tool name [%s] in %s found at lines %d and %d", tool.Name, tool.Source.Location, tool.Source.LineNo, existing.Source.LineNo)) } - localTools[strings.ToLower(tool.Parameters.Name)] = tool + localTools[strings.ToLower(tool.Name)] = tool } return linkAll(ctx, cache, prg, base, targetTools, localTools, defaultModel) @@ -285,7 +285,7 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base func linkAll(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, tools []types.Tool, localTools types.ToolSet, defaultModel string) (result []types.Tool, _ error) { localToolsMapping := make(map[string]string, len(tools)) for _, localTool := range localTools { - localToolsMapping[strings.ToLower(localTool.Parameters.Name)] = localTool.ID + localToolsMapping[strings.ToLower(localTool.Name)] = localTool.ID } for _, tool := range tools { @@ -314,7 +314,7 @@ func link(ctx 
context.Context, cache *cache.Client, prg *types.Program, base *so // The below is done in two loops so that local names stay as the tool names // and don't get mangled by external references - for _, targetToolName := range tool.Parameters.ToolRefNames() { + for _, targetToolName := range tool.ToolRefNames() { noArgs, _ := types.SplitArg(targetToolName) localTool, ok := localTools[strings.ToLower(noArgs)] if ok { diff --git a/pkg/loader/openapi.go b/pkg/loader/openapi.go index e62fc5ef..3ab564e5 100644 --- a/pkg/loader/openapi.go +++ b/pkg/loader/openapi.go @@ -174,11 +174,11 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) } // Add the new arg to the tool's arguments - tool.Parameters.Arguments.Properties[param.Value.Name] = &openapi3.SchemaRef{Value: arg} + tool.Arguments.Properties[param.Value.Name] = &openapi3.SchemaRef{Value: arg} // Check whether it is required if param.Value.Required { - tool.Parameters.Arguments.Required = append(tool.Parameters.Arguments.Required, param.Value.Name) + tool.Arguments.Required = append(tool.Arguments.Required, param.Value.Name) } // Add the parameter to the appropriate list for the tool's instructions @@ -227,7 +227,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) } // Unfortunately, the request body doesn't contain any good descriptor for it, // so we just use "requestBodyContent" as the name of the arg. 
- tool.Parameters.Arguments.Properties["requestBodyContent"] = &openapi3.SchemaRef{Value: arg} + tool.Arguments.Properties["requestBodyContent"] = &openapi3.SchemaRef{Value: arg} break } @@ -310,7 +310,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) } // Register - toolNames = append(toolNames, tool.Parameters.Name) + toolNames = append(toolNames, tool.Name) tools = append(tools, tool) operationNum++ } @@ -457,7 +457,7 @@ func getOpenAPIToolsRevamp(t *openapi3.T, source, targetToolName string) ([]type exportTool := types.Tool{ ToolDef: types.ToolDef{ Parameters: types.Parameters{ - Export: []string{list.Parameters.Name, getSchema.Parameters.Name, run.Parameters.Name}, + Export: []string{list.Name, getSchema.Name, run.Name}, }, }, } diff --git a/pkg/monitor/display.go b/pkg/monitor/display.go index 73a15006..6bc6e9f3 100644 --- a/pkg/monitor/display.go +++ b/pkg/monitor/display.go @@ -386,7 +386,7 @@ func (c callName) String() string { for { tool := c.prg.ToolSet[currentCall.ToolID] - name := tool.Parameters.Name + name := tool.Name if name == "" { name = tool.Source.Location } diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index e1b0b9fa..b00b1506 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -53,8 +53,8 @@ func csv(line string) (result []string) { } func addArg(line string, tool *types.Tool) error { - if tool.Parameters.Arguments == nil { - tool.Parameters.Arguments = &openapi3.Schema{ + if tool.Arguments == nil { + tool.Arguments = &openapi3.Schema{ Type: &openapi3.Types{"object"}, Properties: openapi3.Schemas{}, } @@ -65,7 +65,7 @@ func addArg(line string, tool *types.Tool) error { return fmt.Errorf("invalid arg format: %s", line) } - tool.Parameters.Arguments.Properties[key] = &openapi3.SchemaRef{ + tool.Arguments.Properties[key] = &openapi3.SchemaRef{ Value: &openapi3.Schema{ Description: strings.TrimSpace(value), Type: &openapi3.Types{"string"}, @@ -83,53 +83,53 @@ func isParam(line string, tool 
*types.Tool, scan *simplescanner) (_ bool, err er value = strings.TrimSpace(value) switch normalize(key) { case "name": - tool.Parameters.Name = value + tool.Name = value case "modelprovider": - tool.Parameters.ModelProvider = true + tool.ModelProvider = true case "model", "modelname": - tool.Parameters.ModelName = value + tool.ModelName = value case "globalmodel", "globalmodelname": - tool.Parameters.GlobalModelName = value + tool.GlobalModelName = value case "description": - tool.Parameters.Description = scan.AddMultiline(value) + tool.Description = scan.AddMultiline(value) case "internalprompt": v, err := toBool(value) if err != nil { return false, err } - tool.Parameters.InternalPrompt = &v + tool.InternalPrompt = &v case "chat": v, err := toBool(value) if err != nil { return false, err } - tool.Parameters.Chat = v + tool.Chat = v case "export", "exporttool", "exports", "exporttools", "sharetool", "sharetools", "sharedtool", "sharedtools": - tool.Parameters.Export = append(tool.Parameters.Export, csv(scan.AddMultiline(value))...) + tool.Export = append(tool.Export, csv(scan.AddMultiline(value))...) case "tool", "tools": - tool.Parameters.Tools = append(tool.Parameters.Tools, csv(scan.AddMultiline(value))...) + tool.Tools = append(tool.Tools, csv(scan.AddMultiline(value))...) case "inputfilter", "inputfilters": - tool.Parameters.InputFilters = append(tool.Parameters.InputFilters, csv(scan.AddMultiline(value))...) + tool.InputFilters = append(tool.InputFilters, csv(scan.AddMultiline(value))...) case "shareinputfilter", "shareinputfilters", "sharedinputfilter", "sharedinputfilters": - tool.Parameters.ExportInputFilters = append(tool.Parameters.ExportInputFilters, csv(scan.AddMultiline(value))...) + tool.ExportInputFilters = append(tool.ExportInputFilters, csv(scan.AddMultiline(value))...) case "outputfilter", "outputfilters": - tool.Parameters.OutputFilters = append(tool.Parameters.OutputFilters, csv(scan.AddMultiline(value))...) 
+ tool.OutputFilters = append(tool.OutputFilters, csv(scan.AddMultiline(value))...) case "shareoutputfilter", "shareoutputfilters", "sharedoutputfilter", "sharedoutputfilters": - tool.Parameters.ExportOutputFilters = append(tool.Parameters.ExportOutputFilters, csv(scan.AddMultiline(value))...) + tool.ExportOutputFilters = append(tool.ExportOutputFilters, csv(scan.AddMultiline(value))...) case "agent", "agents": - tool.Parameters.Agents = append(tool.Parameters.Agents, csv(scan.AddMultiline(value))...) + tool.Agents = append(tool.Agents, csv(scan.AddMultiline(value))...) case "globaltool", "globaltools": - tool.Parameters.GlobalTools = append(tool.Parameters.GlobalTools, csv(scan.AddMultiline(value))...) + tool.GlobalTools = append(tool.GlobalTools, csv(scan.AddMultiline(value))...) case "exportcontext", "exportcontexts", "sharecontext", "sharecontexts", "sharedcontext", "sharedcontexts": - tool.Parameters.ExportContext = append(tool.Parameters.ExportContext, csv(scan.AddMultiline(value))...) + tool.ExportContext = append(tool.ExportContext, csv(scan.AddMultiline(value))...) case "context": - tool.Parameters.Context = append(tool.Parameters.Context, csv(scan.AddMultiline(value))...) + tool.Context = append(tool.Context, csv(scan.AddMultiline(value))...) 
case "stdin": b, err := toBool(value) if err != nil { return false, err } - tool.Parameters.Stdin = b + tool.Stdin = b case "metadata": mkey, mvalue, _ := strings.Cut(scan.AddMultiline(value), ":") if tool.MetaData == nil { @@ -141,7 +141,7 @@ func isParam(line string, tool *types.Tool, scan *simplescanner) (_ bool, err er return false, err } case "maxtoken", "maxtokens": - tool.Parameters.MaxTokens, err = strconv.Atoi(value) + tool.MaxTokens, err = strconv.Atoi(value) if err != nil { return false, err } @@ -150,21 +150,21 @@ func isParam(line string, tool *types.Tool, scan *simplescanner) (_ bool, err er if err != nil { return false, err } - tool.Parameters.Cache = &b + tool.Cache = &b case "jsonmode", "json", "jsonoutput", "jsonformat", "jsonresponse": - tool.Parameters.JSONResponse, err = toBool(value) + tool.JSONResponse, err = toBool(value) if err != nil { return false, err } case "temperature": - tool.Parameters.Temperature, err = toFloatPtr(value) + tool.Temperature, err = toFloatPtr(value) if err != nil { return false, err } case "credentials", "creds", "credential", "cred": - tool.Parameters.Credentials = append(tool.Parameters.Credentials, csv(scan.AddMultiline(value))...) + tool.Credentials = append(tool.Credentials, csv(scan.AddMultiline(value))...) 
case "sharecredentials", "sharecreds", "sharecredential", "sharecred", "sharedcredentials", "sharedcreds", "sharedcredential", "sharedcred": - tool.Parameters.ExportCredentials = append(tool.Parameters.ExportCredentials, scan.AddMultiline(value)) + tool.ExportCredentials = append(tool.ExportCredentials, scan.AddMultiline(value)) case "type": tool.Type = types.ToolType(strings.ToLower(value)) default: @@ -211,7 +211,7 @@ type context struct { func (c *context) finish(tools *[]Node) { c.tool.Instructions = strings.TrimSpace(strings.Join(c.instructions, "")) if c.tool.Instructions != "" || - c.tool.Parameters.Name != "" || + c.tool.Name != "" || len(c.tool.Export) > 0 || len(c.tool.Tools) > 0 || c.tool.GlobalModelName != "" || diff --git a/pkg/sdkserver/run.go b/pkg/sdkserver/run.go index 94bdf93c..fda4a215 100644 --- a/pkg/sdkserver/run.go +++ b/pkg/sdkserver/run.go @@ -131,7 +131,7 @@ func writeError(logger mvl.Logger, w http.ResponseWriter, code int, err error) { b, err := json.Marshal(resp) if err != nil { - _, _ = w.Write([]byte(fmt.Sprintf(`{"stderr": "%s"}`, err.Error()))) + _, _ = fmt.Fprintf(w, `{"stderr": "%s"}`, err.Error()) return } @@ -148,7 +148,7 @@ func writeServerSentEvent(logger mvl.Logger, w http.ResponseWriter, event any) { return } - _, err = w.Write([]byte(fmt.Sprintf("data: %s\n\n", ev))) + _, err = fmt.Fprintf(w, "data: %s\n\n", ev) if err == nil { if f, ok := w.(http.Flusher); ok { f.Flush() diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 41066d30..f15cc68f 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -92,7 +92,7 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { } events := broadcaster.New[event]() - opts.Options.Runner.MonitorFactory = NewSessionFactory(events) + opts.Runner.MonitorFactory = NewSessionFactory(events) go events.Start(ctx) token := uuid.NewString() @@ -115,7 +115,7 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { client: g, 
events: events, - runtimeManager: runtimes.Default(opts.Options.Cache.CacheDir, opts.SystemToolsDir), + runtimeManager: runtimes.Default(opts.Cache.CacheDir, opts.SystemToolsDir), waitingToConfirm: make(map[string]chan runner.AuthorizerResponse), waitingToPrompt: make(map[string]chan map[string]string), running: make(map[string]chan struct{}), diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 0c8bd77c..54780278 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -398,94 +398,94 @@ func (t ToolDef) String() string { func (t ToolDef) Print() string { buf := &strings.Builder{} - if t.Parameters.GlobalModelName != "" { - _, _ = fmt.Fprintf(buf, "Global Model Name: %s\n", t.Parameters.GlobalModelName) + if t.GlobalModelName != "" { + _, _ = fmt.Fprintf(buf, "Global Model Name: %s\n", t.GlobalModelName) } - if len(t.Parameters.GlobalTools) != 0 { - _, _ = fmt.Fprintf(buf, "Global Tools: %s\n", strings.Join(t.Parameters.GlobalTools, ", ")) + if len(t.GlobalTools) != 0 { + _, _ = fmt.Fprintf(buf, "Global Tools: %s\n", strings.Join(t.GlobalTools, ", ")) } - if t.Parameters.Name != "" { - _, _ = fmt.Fprintf(buf, "Name: %s\n", t.Parameters.Name) + if t.Name != "" { + _, _ = fmt.Fprintf(buf, "Name: %s\n", t.Name) } - if t.Parameters.Description != "" { - _, _ = fmt.Fprintf(buf, "Description: %s\n", t.Parameters.Description) + if t.Description != "" { + _, _ = fmt.Fprintf(buf, "Description: %s\n", t.Description) } - if t.Parameters.Type != ToolTypeDefault { + if t.Type != ToolTypeDefault { _, _ = fmt.Fprintf(buf, "Type: %s\n", strings.ToUpper(string(t.Type[0]))+string(t.Type[1:])) } - if len(t.Parameters.Agents) != 0 { - _, _ = fmt.Fprintf(buf, "Agents: %s\n", strings.Join(t.Parameters.Agents, ", ")) + if len(t.Agents) != 0 { + _, _ = fmt.Fprintf(buf, "Agents: %s\n", strings.Join(t.Agents, ", ")) } - if len(t.Parameters.Tools) != 0 { - _, _ = fmt.Fprintf(buf, "Tools: %s\n", strings.Join(t.Parameters.Tools, ", ")) + if len(t.Tools) != 0 { + _, _ = fmt.Fprintf(buf, 
"Tools: %s\n", strings.Join(t.Tools, ", ")) } - if len(t.Parameters.Export) != 0 { - _, _ = fmt.Fprintf(buf, "Share Tools: %s\n", strings.Join(t.Parameters.Export, ", ")) + if len(t.Export) != 0 { + _, _ = fmt.Fprintf(buf, "Share Tools: %s\n", strings.Join(t.Export, ", ")) } - if len(t.Parameters.Context) != 0 { - _, _ = fmt.Fprintf(buf, "Context: %s\n", strings.Join(t.Parameters.Context, ", ")) + if len(t.Context) != 0 { + _, _ = fmt.Fprintf(buf, "Context: %s\n", strings.Join(t.Context, ", ")) } - if len(t.Parameters.ExportContext) != 0 { - _, _ = fmt.Fprintf(buf, "Share Context: %s\n", strings.Join(t.Parameters.ExportContext, ", ")) + if len(t.ExportContext) != 0 { + _, _ = fmt.Fprintf(buf, "Share Context: %s\n", strings.Join(t.ExportContext, ", ")) } - if len(t.Parameters.InputFilters) != 0 { - _, _ = fmt.Fprintf(buf, "Input Filters: %s\n", strings.Join(t.Parameters.InputFilters, ", ")) + if len(t.InputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Input Filters: %s\n", strings.Join(t.InputFilters, ", ")) } - if len(t.Parameters.ExportInputFilters) != 0 { - _, _ = fmt.Fprintf(buf, "Share Input Filters: %s\n", strings.Join(t.Parameters.ExportInputFilters, ", ")) + if len(t.ExportInputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Share Input Filters: %s\n", strings.Join(t.ExportInputFilters, ", ")) } - if len(t.Parameters.OutputFilters) != 0 { - _, _ = fmt.Fprintf(buf, "Output Filters: %s\n", strings.Join(t.Parameters.OutputFilters, ", ")) + if len(t.OutputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Output Filters: %s\n", strings.Join(t.OutputFilters, ", ")) } - if len(t.Parameters.ExportOutputFilters) != 0 { - _, _ = fmt.Fprintf(buf, "Share Output Filters: %s\n", strings.Join(t.Parameters.ExportOutputFilters, ", ")) + if len(t.ExportOutputFilters) != 0 { + _, _ = fmt.Fprintf(buf, "Share Output Filters: %s\n", strings.Join(t.ExportOutputFilters, ", ")) } - if t.Parameters.MaxTokens != 0 { - _, _ = fmt.Fprintf(buf, "Max Tokens: %d\n", t.Parameters.MaxTokens) + if 
t.MaxTokens != 0 { + _, _ = fmt.Fprintf(buf, "Max Tokens: %d\n", t.MaxTokens) } - if t.Parameters.ModelName != "" { - _, _ = fmt.Fprintf(buf, "Model: %s\n", t.Parameters.ModelName) + if t.ModelName != "" { + _, _ = fmt.Fprintf(buf, "Model: %s\n", t.ModelName) } - if t.Parameters.ModelProvider { + if t.ModelProvider { _, _ = fmt.Fprintf(buf, "Model Provider: true\n") } - if t.Parameters.JSONResponse { + if t.JSONResponse { _, _ = fmt.Fprintln(buf, "JSON Response: true") } - if t.Parameters.Cache != nil && !*t.Parameters.Cache { + if t.Cache != nil && !*t.Cache { _, _ = fmt.Fprintln(buf, "Cache: false") } - if t.Parameters.Stdin { + if t.Stdin { _, _ = fmt.Fprintln(buf, "Stdin: true") } - if t.Parameters.Temperature != nil { - _, _ = fmt.Fprintf(buf, "Temperature: %f\n", *t.Parameters.Temperature) + if t.Temperature != nil { + _, _ = fmt.Fprintf(buf, "Temperature: %f\n", *t.Temperature) } - if t.Parameters.Arguments != nil { + if t.Arguments != nil { var keys []string - for k := range t.Parameters.Arguments.Properties { + for k := range t.Arguments.Properties { keys = append(keys, k) } sort.Strings(keys) for _, key := range keys { - prop := t.Parameters.Arguments.Properties[key] + prop := t.Arguments.Properties[key] _, _ = fmt.Fprintf(buf, "Parameter: %s: %s\n", key, prop.Value.Description) } } - if t.Parameters.InternalPrompt != nil { - _, _ = fmt.Fprintf(buf, "Internal Prompt: %v\n", *t.Parameters.InternalPrompt) + if t.InternalPrompt != nil { + _, _ = fmt.Fprintf(buf, "Internal Prompt: %v\n", *t.InternalPrompt) } - if len(t.Parameters.Credentials) > 0 { - for _, cred := range t.Parameters.Credentials { + if len(t.Credentials) > 0 { + for _, cred := range t.Credentials { _, _ = fmt.Fprintf(buf, "Credential: %s\n", cred) } } - if len(t.Parameters.ExportCredentials) > 0 { - for _, exportCred := range t.Parameters.ExportCredentials { + if len(t.ExportCredentials) > 0 { + for _, exportCred := range t.ExportCredentials { _, _ = fmt.Fprintf(buf, "Share Credential: %s\n", 
exportCred) } } - if t.Parameters.Chat { + if t.Chat { _, _ = fmt.Fprintf(buf, "Chat: true\n") } @@ -618,10 +618,11 @@ func (t Tool) getAgents(prg *Program) (result []ToolReference, _ error) { } func (t Tool) GetToolsByType(prg *Program, toolType ToolType) ([]ToolReference, error) { - if toolType == ToolTypeAgent { + switch toolType { + case ToolTypeAgent: // Agents are special, they can only be sourced from direct references and not the generic 'tool:' or shared by references return t.getAgents(prg) - } else if toolType == ToolTypeCredential { + case ToolTypeCredential: // Credentials are special too, you can only get shared credentials from directly referenced credentials return t.getCredentials(prg) } @@ -784,7 +785,7 @@ func toolRefsToCompletionTools(completionTools []ToolReference, prg Program) (re subToolName = subToolRef.Named } - args := subTool.Parameters.Arguments + args := subTool.Arguments if args == nil && !subTool.IsCommand() && !subTool.Chat { args = &system.DefaultToolSchema } else if args == nil && !subTool.IsCommand() { @@ -798,7 +799,7 @@ func toolRefsToCompletionTools(completionTools []ToolReference, prg Program) (re Function: CompletionFunctionDefinition{ ToolID: subTool.ID, Name: PickToolName(subToolName, toolNames), - Description: subTool.Parameters.Description, + Description: subTool.Description, Parameters: args, }, }) diff --git a/pkg/types/toolstring.go b/pkg/types/toolstring.go index 086ad043..b5e0d1d5 100644 --- a/pkg/types/toolstring.go +++ b/pkg/types/toolstring.go @@ -50,9 +50,8 @@ func ToSysDisplayString(id string, args map[string]string) (string, error) { case "sys.download": if location := args["location"]; location != "" { return fmt.Sprintf("Downloading `%s` to `%s`", args["url"], location), nil - } else { - return fmt.Sprintf("Downloading `%s` to workspace", args["url"]), nil } + return fmt.Sprintf("Downloading `%s` to workspace", args["url"]), nil case "sys.exec": return fmt.Sprintf("Running `%s`", args["command"]), nil case 
"sys.find": diff --git a/static/fs.go b/static/fs.go index 88e5f4ec..aff200c1 100644 --- a/static/fs.go +++ b/static/fs.go @@ -1,9 +1,6 @@ package static -import ( - "embed" - _ "embed" -) +import "embed" //go:embed * ui/_nuxt/* var UI embed.FS diff --git a/tools/gendocs/main.go b/tools/gendocs/main.go index f931bbea..362ea17b 100644 --- a/tools/gendocs/main.go +++ b/tools/gendocs/main.go @@ -40,7 +40,7 @@ func main() { func filePrepender(filename string) string { name := filepath.Base(filename) base := strings.TrimSuffix(name, path.Ext(name)) - return fmt.Sprintf(fmTemplate, strings.Replace(base, "_", " ", -1)) + return fmt.Sprintf(fmTemplate, strings.ReplaceAll(base, "_", " ")) } func linkHandler(name string) string { From 5ff654398726bb94df1f24605e1aa26b3dfd85dd Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 24 Apr 2025 16:17:34 -0400 Subject: [PATCH 238/270] chore: add credential checkParam field (#964) If the check param changes, then the credential will be re-prompted and not used nor refreshed. 
Signed-off-by: Donnie Adams --- pkg/credentials/credential.go | 18 +++++++----- pkg/runner/runner.go | 7 +++-- pkg/types/credential_test.go | 52 +++++++++++++++++++++++++++++------ pkg/types/tool.go | 38 +++++++++++++++++-------- 4 files changed, 85 insertions(+), 30 deletions(-) diff --git a/pkg/credentials/credential.go b/pkg/credentials/credential.go index e458cb9f..9d314a70 100644 --- a/pkg/credentials/credential.go +++ b/pkg/credentials/credential.go @@ -20,13 +20,16 @@ const ( ) type Credential struct { - Context string `json:"context"` - ToolName string `json:"toolName"` - Type CredentialType `json:"type"` - Env map[string]string `json:"env"` - Ephemeral bool `json:"ephemeral,omitempty"` - ExpiresAt *time.Time `json:"expiresAt"` - RefreshToken string `json:"refreshToken"` + Context string `json:"context"` + ToolName string `json:"toolName"` + Type CredentialType `json:"type"` + Env map[string]string `json:"env"` + // If the CheckParam that is stored is different from the param on the tool, + // then the credential will be re-authed as if it does not exist. 
+ CheckParam string `json:"checkParam"` + Ephemeral bool `json:"ephemeral,omitempty"` + ExpiresAt *time.Time `json:"expiresAt"` + RefreshToken string `json:"refreshToken"` } func (c Credential) IsExpired() bool { @@ -82,6 +85,7 @@ func credentialFromDockerAuthConfig(authCfg types.AuthConfig) (Credential, error Context: ctx, ToolName: tool, Type: CredentialType(credType), + CheckParam: cred.CheckParam, Env: cred.Env, ExpiresAt: cred.ExpiresAt, RefreshToken: cred.RefreshToken, diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index aea91b34..6d4e7598 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -780,7 +780,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env var nearestExpiration *time.Time for _, ref := range credToolRefs { - toolName, credentialAlias, args, err := types.ParseCredentialArgs(ref.Reference, callCtx.Input) + toolName, credentialAlias, checkParam, args, err := types.ParseCredentialArgs(ref.Reference, callCtx.Input) if err != nil { return nil, fmt.Errorf("failed to parse credential tool %q: %w", ref.Reference, err) } @@ -830,9 +830,10 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env // If the credential doesn't already exist in the store, run the credential tool in order to get the value, // and save it in the store. - if !exists || c.IsExpired() { + if !exists || c.IsExpired() || checkParam != c.CheckParam { // If the existing credential is expired, we need to provide it to the cred tool through the environment. - if exists && c.IsExpired() { + // If the check parameter is different, then we don't refresh. We should re-auth below. 
+ if exists && c.IsExpired() && checkParam == c.CheckParam { refresh = true credJSON, err := json.Marshal(c) if err != nil { diff --git a/pkg/types/credential_test.go b/pkg/types/credential_test.go index b6f70ee3..530b23f8 100644 --- a/pkg/types/credential_test.go +++ b/pkg/types/credential_test.go @@ -9,13 +9,14 @@ import ( func TestParseCredentialArgs(t *testing.T) { tests := []struct { - name string - toolName string - input string - expectedName string - expectedAlias string - expectedArgs map[string]string - wantErr bool + name string + toolName string + input string + expectedName string + expectedAlias string + expectedCheckParam string + expectedArgs map[string]string + wantErr bool }{ { name: "empty", @@ -94,6 +95,40 @@ func TestParseCredentialArgs(t *testing.T) { "arg2": "value2", }, }, + { + name: "tool name with check parameter", + toolName: `myCredentialTool checked with myCheckParam`, + expectedName: "myCredentialTool", + expectedCheckParam: "myCheckParam", + }, + { + name: "tool name with alias and check parameter", + toolName: `myCredentialTool as myAlias checked with myCheckParam`, + expectedName: "myCredentialTool", + expectedAlias: "myAlias", + expectedCheckParam: "myCheckParam", + }, + { + name: "tool name with alias, check parameter, and args", + toolName: `myCredentialTool as myAlias checked with myCheckParam with value1 as arg1 and value2 as arg2`, + expectedName: "myCredentialTool", + expectedAlias: "myAlias", + expectedCheckParam: "myCheckParam", + expectedArgs: map[string]string{ + "arg1": "value1", + "arg2": "value2", + }, + }, + { + name: "check parameter without with", + toolName: `myCredentialTool checked myCheckParam`, + wantErr: true, + }, + { + name: "invalid check parameter", + toolName: `myCredentialTool checked with`, + wantErr: true, + }, { name: "tool name with alias but no 'as' (invalid)", toolName: "myCredentialTool myAlias", @@ -136,7 +171,7 @@ func TestParseCredentialArgs(t *testing.T) { for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { - originalName, alias, args, err := ParseCredentialArgs(tt.toolName, tt.input) + originalName, alias, checkParam, args, err := ParseCredentialArgs(tt.toolName, tt.input) if tt.wantErr { require.Error(t, err, "expected an error but got none") return @@ -145,6 +180,7 @@ func TestParseCredentialArgs(t *testing.T) { require.NoError(t, err, "did not expect an error but got one") require.Equal(t, tt.expectedName, originalName, "unexpected original name") require.Equal(t, tt.expectedAlias, alias, "unexpected alias") + require.Equal(t, tt.expectedCheckParam, checkParam, "unexpected checkParam") require.Equal(t, len(tt.expectedArgs), len(args), "unexpected number of args") for k, v := range tt.expectedArgs { diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 54780278..3d48c6e1 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -272,9 +272,9 @@ func SplitArg(hasArg string) (prefix, arg string) { // - toolName: "toolName with ${var1} as arg1 and ${var2} as arg2" // - input: `{"var1": "value1", "var2": "value2"}` // result: toolName, "", map[string]any{"arg1": "value1", "arg2": "value2"}, nil -func ParseCredentialArgs(toolName string, input string) (string, string, map[string]any, error) { +func ParseCredentialArgs(toolName string, input string) (string, string, string, map[string]any, error) { if toolName == "" { - return "", "", nil, nil + return "", "", "", nil, nil } inputMap := make(map[string]any) @@ -287,12 +287,12 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str fields, err := shlex.Split(toolName) if err != nil { - return "", "", nil, err + return "", "", "", nil, err } // If it's just the tool name, return it if len(fields) == 1 { - return toolName, "", nil, nil + return toolName, "", "", nil, nil } // Next field is "as" if there is an alias, otherwise it should be "with" @@ -301,25 +301,39 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str fields = 
fields[1:] if fields[0] == "as" { if len(fields) < 2 { - return "", "", nil, fmt.Errorf("expected alias after 'as'") + return "", "", "", nil, fmt.Errorf("expected alias after 'as'") } alias = fields[1] fields = fields[2:] } if len(fields) == 0 { // Nothing left, so just return - return originalName, alias, nil, nil + return originalName, alias, "", nil, nil + } + + var checkParam string + if fields[0] == "checked" { + if len(fields) < 3 || fields[1] != "with" { + return "", "", "", nil, fmt.Errorf("expected 'checked with some_value' but got %v", fields) + } + + checkParam = fields[2] + fields = fields[3:] + } + + if len(fields) == 0 { // Nothing left, so just return + return originalName, alias, checkParam, nil, nil } // Next we should have "with" followed by the args if fields[0] != "with" { - return "", "", nil, fmt.Errorf("expected 'with' but got %s", fields[0]) + return "", "", "", nil, fmt.Errorf("expected 'with' but got %s", fields[0]) } fields = fields[1:] // If there are no args, return an error if len(fields) == 0 { - return "", "", nil, fmt.Errorf("expected args after 'with'") + return "", "", "", nil, fmt.Errorf("expected args after 'with'") } args := make(map[string]any) @@ -332,7 +346,7 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str prev = "value" case "value": if field != "as" { - return "", "", nil, fmt.Errorf("expected 'as' but got %s", field) + return "", "", "", nil, fmt.Errorf("expected 'as' but got %s", field) } prev = "as" case "as": @@ -340,14 +354,14 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str prev = "name" case "name": if field != "and" { - return "", "", nil, fmt.Errorf("expected 'and' but got %s", field) + return "", "", "", nil, fmt.Errorf("expected 'and' but got %s", field) } prev = "and" } } if prev == "and" { - return "", "", nil, fmt.Errorf("expected arg name after 'and'") + return "", "", "", nil, fmt.Errorf("expected arg name after 'and'") } // Check and 
see if any of the arg values are references to an input @@ -360,7 +374,7 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str } } - return originalName, alias, args, nil + return originalName, alias, checkParam, args, nil } func (t Tool) GetToolRefsFromNames(names []string) (result []ToolReference, _ error) { From ed2fc20a9f108adc749c583663932e937e47011c Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 5 May 2025 18:23:05 -0400 Subject: [PATCH 239/270] feat: add support for MCP servers (#965) feat: add support for MCP servers Tools can include #!mcp in their instructions with a JSON blob of the configuration. GPTScript will generate tools dynamically for these MCP servers. Signed-off-by: Donnie Adams Co-authored-by: Darren Shepherd --- go.mod | 5 +- go.sum | 10 +- pkg/cli/gptscript.go | 2 +- pkg/engine/engine.go | 21 ++ pkg/loader/loader.go | 55 ++- pkg/loader/openapi_test.go | 28 +- pkg/mcp/loader.go | 312 +++++++++++++++ pkg/mcp/runner.go | 55 +++ pkg/runner/runner.go | 12 + pkg/sdkserver/routes.go | 16 +- pkg/sdkserver/run.go | 6 +- pkg/sdkserver/server.go | 8 + pkg/tests/runner2_test.go | 357 ++++++++++++++++++ .../testdata/TestMCPLoad/call1-resp.golden | 9 + pkg/tests/testdata/TestMCPLoad/call1.golden | 3 + pkg/tests/testdata/TestMCPLoad/step1.golden | 6 + pkg/types/tool.go | 21 +- pkg/types/toolstring.go | 4 + 18 files changed, 897 insertions(+), 33 deletions(-) create mode 100644 pkg/mcp/loader.go create mode 100644 pkg/mcp/runner.go create mode 100644 pkg/tests/testdata/TestMCPLoad/call1-resp.golden create mode 100644 pkg/tests/testdata/TestMCPLoad/call1.golden create mode 100644 pkg/tests/testdata/TestMCPLoad/step1.golden diff --git a/go.mod b/go.mod index f803a3b9..fc68968e 100644 --- a/go.mod +++ b/go.mod @@ -18,10 +18,11 @@ require ( github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb github.com/gptscript-ai/go-gptscript 
v0.9.6-0.20250204133419-744b25b84a61 - github.com/gptscript-ai/tui v0.0.0-20250204145344-33cd15de4cee + github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 + github.com/mark3labs/mcp-go v0.25.0 github.com/mholt/archives v0.1.0 github.com/pkoukk/tiktoken-go v0.1.7 github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 @@ -113,6 +114,7 @@ require ( github.com/skeema/knownhosts v1.2.2 // indirect github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/tidwall/match v1.1.1 // indirect @@ -122,6 +124,7 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect diff --git a/go.sum b/go.sum index 74341af5..7ce2cd38 100644 --- a/go.sum +++ b/go.sum @@ -203,8 +203,8 @@ github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7J github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61 h1:QxLjsLOYlsVLPwuRkP0Q8EcAoZT1s8vU2ZBSX0+R6CI= github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= -github.com/gptscript-ai/tui v0.0.0-20250204145344-33cd15de4cee 
h1:70PHW6Xw70yNNZ5aX936XqcMLwNmfMZpCV3FCOGKpxE= -github.com/gptscript-ai/tui v0.0.0-20250204145344-33cd15de4cee/go.mod h1:iwHxuueg2paOak7zIg0ESBWx7A0wIHGopAratbgaPNY= +github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 h1:wQC8sKyeGA50WnCEG+Jo5FNRIkuX3HX8d3ubyWCCoI8= +github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9/go.mod h1:iwHxuueg2paOak7zIg0ESBWx7A0wIHGopAratbgaPNY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -270,6 +270,8 @@ github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mark3labs/mcp-go v0.25.0 h1:UUpcMT3L5hIhuDy7aifj4Bphw4Pfx1Rf8mzMXDe8RQw= +github.com/mark3labs/mcp-go v0.25.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -361,6 +363,8 @@ github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e h1:H+jDTUeF+SVd4ApwnSFoew8ZwGNRfgb9EsZc7LcocAg= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e/go.mod h1:VsUklG6OQo7Ctunu0gS3AtEOCEc2kMB6r5rKzxAes58= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= 
+github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -406,6 +410,8 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.3.7/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.5.4 h1:2uY/xC0roWy8IBEGLgB1ywIoEJFGmRrX21YQcvGZzjU= diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 4b0642d2..b5a823b2 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -215,7 +215,7 @@ func (r *GPTScript) listTools(ctx context.Context, gptScript *gptscript.GPTScrip // Don't print instructions tool.Instructions = "" - lines = append(lines, tool.String()) + lines = append(lines, tool.Print()) } fmt.Println(strings.Join(lines, "\n---\n")) return nil diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index abf45e8c..c7867512 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -41,6 +41,11 @@ type Engine struct { RuntimeManager RuntimeManager Env []string Progress chan<- types.CompletionStatus + MCPRunner MCPRunner +} + +type MCPRunner interface { + Run(ctx context.Context, progress chan<- types.CompletionStatus, tool types.Tool, 
input string) (string, error) } type State struct { @@ -307,6 +312,17 @@ func populateMessageParams(ctx Context, completion *types.CompletionRequest, too return nil } +func (e *Engine) runMCPInvoke(ctx Context, tool types.Tool, input string) (*Return, error) { + output, err := e.MCPRunner.Run(ctx.Ctx, e.Progress, tool, input) + if err != nil { + return nil, fmt.Errorf("failed to run MCP invoke: %w", err) + } + + return &Return{ + Result: &output, + }, nil +} + func (e *Engine) runCommandTools(ctx Context, tool types.Tool, input string) (*Return, error) { if tool.IsHTTP() { return e.runHTTP(ctx, tool, input) @@ -342,6 +358,10 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, err error) { } }() + if tool.IsMCPInvoke() { + return e.runMCPInvoke(ctx, tool, input) + } + if tool.IsCommand() { return e.runCommandTools(ctx, tool, input) } @@ -378,6 +398,7 @@ func addUpdateSystem(ctx Context, tool types.Tool, msgs []types.CompletionMessag instructions = append(instructions, context.Content) } + tool.Instructions = strings.TrimPrefix(tool.Instructions, types.PromptPrefix) if tool.Instructions != "" { instructions = append(instructions, tool.Instructions) } diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index e70827c6..626cc87f 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -20,6 +20,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/builtin" "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/hash" + "github.com/gptscript-ai/gptscript/pkg/mcp" "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/gptscript-ai/gptscript/pkg/parser" "github.com/gptscript-ai/gptscript/pkg/system" @@ -155,7 +156,23 @@ func loadOpenAPI(prg *types.Program, data []byte) *openapi3.T { return openAPIDocument } -func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, targetToolName, defaultModel string) ([]types.Tool, error) { +func processMCP(ctx context.Context, tool []types.Tool, 
mcpLoader MCPLoader) (result []types.Tool, _ error) { + for _, t := range tool { + if t.IsMCP() { + mcpTools, err := mcpLoader.Load(ctx, t) + if err != nil { + return nil, fmt.Errorf("error loading MCP tools: %w", err) + } + result = append(result, mcpTools...) + } else { + result = append(result, t) + } + } + + return result, nil +} + +func readTool(ctx context.Context, cache *cache.Client, mcp MCPLoader, prg *types.Program, base *source, targetToolName, defaultModel string) ([]types.Tool, error) { data := base.Content var ( @@ -212,6 +229,11 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base return nil, fmt.Errorf("no tools found in %s", base) } + tools, err := processMCP(ctx, tools, mcp) + if err != nil { + return nil, err + } + var ( localTools = types.ToolSet{} targetTools []types.Tool @@ -279,17 +301,17 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base localTools[strings.ToLower(tool.Name)] = tool } - return linkAll(ctx, cache, prg, base, targetTools, localTools, defaultModel) + return linkAll(ctx, cache, mcp, prg, base, targetTools, localTools, defaultModel) } -func linkAll(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, tools []types.Tool, localTools types.ToolSet, defaultModel string) (result []types.Tool, _ error) { +func linkAll(ctx context.Context, cache *cache.Client, mcp MCPLoader, prg *types.Program, base *source, tools []types.Tool, localTools types.ToolSet, defaultModel string) (result []types.Tool, _ error) { localToolsMapping := make(map[string]string, len(tools)) for _, localTool := range localTools { localToolsMapping[strings.ToLower(localTool.Name)] = localTool.ID } for _, tool := range tools { - tool, err := link(ctx, cache, prg, base, tool, localTools, localToolsMapping, defaultModel) + tool, err := link(ctx, cache, mcp, prg, base, tool, localTools, localToolsMapping, defaultModel) if err != nil { return nil, err } @@ -298,7 +320,7 @@ func 
linkAll(ctx context.Context, cache *cache.Client, prg *types.Program, base return } -func link(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, tool types.Tool, localTools types.ToolSet, localToolsMapping map[string]string, defaultModel string) (types.Tool, error) { +func link(ctx context.Context, cache *cache.Client, mcp MCPLoader, prg *types.Program, base *source, tool types.Tool, localTools types.ToolSet, localToolsMapping map[string]string, defaultModel string) (types.Tool, error) { if existing, ok := prg.ToolSet[tool.ID]; ok { return existing, nil } @@ -323,7 +345,7 @@ func link(ctx context.Context, cache *cache.Client, prg *types.Program, base *so linkedTool = existing } else { var err error - linkedTool, err = link(ctx, cache, prg, base, localTool, localTools, localToolsMapping, defaultModel) + linkedTool, err = link(ctx, cache, mcp, prg, base, localTool, localTools, localToolsMapping, defaultModel) if err != nil { return types.Tool{}, fmt.Errorf("failed linking %s at %s: %w", targetToolName, base, err) } @@ -333,7 +355,7 @@ func link(ctx context.Context, cache *cache.Client, prg *types.Program, base *so toolNames[targetToolName] = struct{}{} } else { toolName, subTool := types.SplitToolRef(targetToolName) - resolvedTools, err := resolve(ctx, cache, prg, base, toolName, subTool, defaultModel) + resolvedTools, err := resolve(ctx, cache, mcp, prg, base, toolName, subTool, defaultModel) if err != nil { return types.Tool{}, fmt.Errorf("failed resolving %s from %s: %w", targetToolName, base, err) } @@ -373,7 +395,7 @@ func ProgramFromSource(ctx context.Context, content, subToolName string, opts .. 
prg := types.Program{ ToolSet: types.ToolSet{}, } - tools, err := readTool(ctx, opt.Cache, &prg, &source{ + tools, err := readTool(ctx, opt.Cache, opt.MCPLoader, &prg, &source{ Content: []byte(content), Path: locationPath, Name: locationName, @@ -390,6 +412,12 @@ type Options struct { Cache *cache.Client Location string DefaultModel string + MCPLoader MCPLoader +} + +type MCPLoader interface { + Load(ctx context.Context, tool types.Tool) ([]types.Tool, error) + Close() error } func complete(opts ...Options) (result Options) { @@ -397,6 +425,7 @@ func complete(opts ...Options) (result Options) { result.Cache = types.FirstSet(opt.Cache, result.Cache) result.Location = types.FirstSet(opt.Location, result.Location) result.DefaultModel = types.FirstSet(opt.DefaultModel, result.DefaultModel) + result.MCPLoader = types.FirstSet(opt.MCPLoader, result.MCPLoader) } if result.Location == "" { @@ -407,6 +436,10 @@ func complete(opts ...Options) (result Options) { result.DefaultModel = builtin.GetDefaultModel() } + if result.MCPLoader == nil { + result.MCPLoader = mcp.DefaultLoader + } + return } @@ -430,7 +463,7 @@ func Program(ctx context.Context, name, subToolName string, opts ...Options) (ty Name: name, ToolSet: types.ToolSet{}, } - tools, err := resolve(ctx, opt.Cache, &prg, &source{}, name, subToolName, opt.DefaultModel) + tools, err := resolve(ctx, opt.Cache, opt.MCPLoader, &prg, &source{}, name, subToolName, opt.DefaultModel) if err != nil { return types.Program{}, err } @@ -438,7 +471,7 @@ func Program(ctx context.Context, name, subToolName string, opts ...Options) (ty return prg, nil } -func resolve(ctx context.Context, cache *cache.Client, prg *types.Program, base *source, name, subTool, defaultModel string) ([]types.Tool, error) { +func resolve(ctx context.Context, cache *cache.Client, mcp MCPLoader, prg *types.Program, base *source, name, subTool, defaultModel string) ([]types.Tool, error) { if subTool == "" { t, ok := builtin.DefaultModel(name, defaultModel) if ok 
{ @@ -452,7 +485,7 @@ func resolve(ctx context.Context, cache *cache.Client, prg *types.Program, base return nil, err } - result, err := readTool(ctx, cache, prg, s, subTool, defaultModel) + result, err := readTool(ctx, cache, mcp, prg, s, subTool, defaultModel) if err != nil { return nil, err } diff --git a/pkg/loader/openapi_test.go b/pkg/loader/openapi_test.go index 423246d1..594d8cf7 100644 --- a/pkg/loader/openapi_test.go +++ b/pkg/loader/openapi_test.go @@ -26,7 +26,7 @@ func TestLoadOpenAPI(t *testing.T) { } datav3, err := os.ReadFile("testdata/openapi_v3.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "", "") + _, err = readTool(context.Background(), nil, fakeMCPLoader{}, &prgv3, &source{Content: datav3}, "", "") require.NoError(t, err, "failed to read openapi v3") require.Equal(t, 3, numOpenAPITools(prgv3.ToolSet), "expected 3 openapi tools") @@ -35,7 +35,7 @@ func TestLoadOpenAPI(t *testing.T) { } datav2, err := os.ReadFile("testdata/openapi_v2.json") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2json, &source{Content: datav2}, "", "") + _, err = readTool(context.Background(), nil, fakeMCPLoader{}, &prgv2json, &source{Content: datav2}, "", "") require.NoError(t, err, "failed to read openapi v2") require.Equal(t, 3, numOpenAPITools(prgv2json.ToolSet), "expected 3 openapi tools") @@ -44,7 +44,7 @@ func TestLoadOpenAPI(t *testing.T) { } datav2, err = os.ReadFile("testdata/openapi_v2.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2yaml, &source{Content: datav2}, "", "") + _, err = readTool(context.Background(), nil, fakeMCPLoader{}, &prgv2yaml, &source{Content: datav2}, "", "") require.NoError(t, err, "failed to read openapi v2 (yaml)") require.Equal(t, 3, numOpenAPITools(prgv2yaml.ToolSet), "expected 3 openapi tools") @@ -57,7 +57,7 @@ func TestOpenAPIv3(t *testing.T) { } datav3, err := os.ReadFile("testdata/openapi_v3.yaml") 
require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "", "") + _, err = readTool(context.Background(), nil, fakeMCPLoader{}, &prgv3, &source{Content: datav3}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) @@ -69,7 +69,7 @@ func TestOpenAPIv3NoOperationIDs(t *testing.T) { } datav3, err := os.ReadFile("testdata/openapi_v3_no_operation_ids.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "", "") + _, err = readTool(context.Background(), nil, fakeMCPLoader{}, &prgv3, &source{Content: datav3}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) @@ -81,7 +81,7 @@ func TestOpenAPIv2(t *testing.T) { } datav2, err := os.ReadFile("testdata/openapi_v2.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2, &source{Content: datav2}, "", "") + _, err = readTool(context.Background(), nil, fakeMCPLoader{}, &prgv2, &source{Content: datav2}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv2.ToolSet, autogold.Dir("testdata/openapi")) @@ -94,7 +94,7 @@ func TestOpenAPIv3Revamp(t *testing.T) { } datav3, err := os.ReadFile("testdata/openapi_v3.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "", "") + _, err = readTool(context.Background(), nil, fakeMCPLoader{}, &prgv3, &source{Content: datav3}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) @@ -107,7 +107,7 @@ func TestOpenAPIv3NoOperationIDsRevamp(t *testing.T) { } datav3, err := os.ReadFile("testdata/openapi_v3_no_operation_ids.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "", "") + _, err = readTool(context.Background(), nil, fakeMCPLoader{}, &prgv3, &source{Content: datav3}, "", "") 
require.NoError(t, err) autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) @@ -120,8 +120,18 @@ func TestOpenAPIv2Revamp(t *testing.T) { } datav2, err := os.ReadFile("testdata/openapi_v2.yaml") require.NoError(t, err) - _, err = readTool(context.Background(), nil, &prgv2, &source{Content: datav2}, "", "") + _, err = readTool(context.Background(), nil, fakeMCPLoader{}, &prgv2, &source{Content: datav2}, "", "") require.NoError(t, err) autogold.ExpectFile(t, prgv2.ToolSet, autogold.Dir("testdata/openapi")) } + +type fakeMCPLoader struct{} + +func (fakeMCPLoader) Load(context.Context, types.Tool) ([]types.Tool, error) { + return nil, nil +} + +func (fakeMCPLoader) Close() error { + return nil +} diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go new file mode 100644 index 00000000..0eb713e5 --- /dev/null +++ b/pkg/mcp/loader.go @@ -0,0 +1,312 @@ +package mcp + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "maps" + "slices" + "strings" + "sync" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/gptscript-ai/gptscript/pkg/hash" + "github.com/gptscript-ai/gptscript/pkg/mvl" + "github.com/gptscript-ai/gptscript/pkg/types" + "github.com/gptscript-ai/gptscript/pkg/version" + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/mcp" +) + +var ( + DefaultLoader = &Local{} + DefaultRunner = DefaultLoader + + logger = mvl.Package() +) + +type Local struct { + lock sync.Mutex + sessions map[string]*Session + sessionCtx context.Context + cancel context.CancelFunc +} + +type Session struct { + ID string + InitResult *mcp.InitializeResult + Client client.MCPClient + Config ServerConfig +} + +type Config struct { + MCPServers map[string]ServerConfig `json:"mcpServers"` +} + +// ServerConfig represents an MCP server configuration for tools calls. +// It is important that this type doesn't have any maps. 
+type ServerConfig struct { + DisableInstruction bool `json:"disableInstruction"` + Command string `json:"command"` + Args []string `json:"args"` + Env []string `json:"env"` + Server string `json:"server"` + URL string `json:"url"` + BaseURL string `json:"baseURL,omitempty"` + Headers []string `json:"headers"` + Scope string `json:"scope"` +} + +func (s *ServerConfig) GetBaseURL() string { + if s.BaseURL != "" { + return s.BaseURL + } + if s.Server != "" { + return s.Server + } + return s.URL +} + +func (l *Local) Load(ctx context.Context, tool types.Tool) (result []types.Tool, _ error) { + if !tool.IsMCP() { + return nil, nil + } + + _, configData, _ := strings.Cut(tool.Instructions, "\n") + + var servers Config + if err := json.Unmarshal([]byte(strings.TrimSpace(configData)), &servers); err != nil { + return nil, fmt.Errorf("failed to parse MCP configuration: %w\n%s", err, configData) + } + + if len(servers.MCPServers) == 0 { + // Try to load just one server + var server ServerConfig + if err := json.Unmarshal([]byte(strings.TrimSpace(configData)), &server); err != nil { + return nil, fmt.Errorf("failed to parse single MCP server configuration: %w\n%s", err, configData) + } + if server.Command == "" && server.URL == "" && server.Server == "" { + return nil, fmt.Errorf("no MCP server configuration found in tool instructions: %s", configData) + } + servers.MCPServers = map[string]ServerConfig{ + "default": server, + } + } + + if len(servers.MCPServers) > 1 { + return nil, fmt.Errorf("only a single MCP server definition is supported") + } + + for server := range maps.Keys(servers.MCPServers) { + session, err := l.loadSession(servers.MCPServers[server]) + if err != nil { + return nil, fmt.Errorf("failed to load MCP session for server %s: %w", server, err) + } + + return l.sessionToTools(ctx, session, tool.Name) + } + + // This should never happen, but just in case + return nil, fmt.Errorf("no MCP server configuration found in tool instructions: %s", configData) +} + 
+func (l *Local) Close() error { + if l == nil { + return nil + } + + l.lock.Lock() + defer l.lock.Unlock() + + if l.sessionCtx == nil { + return nil + } + + defer func() { + l.cancel() + l.sessionCtx = nil + }() + + var errs []error + for id, session := range l.sessions { + logger.Infof("closing MCP session %s", id) + if err := session.Client.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close MCP client %s: %w", id, err)) + } + } + + return errors.Join(errs...) +} + +func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName string) ([]types.Tool, error) { + tools, err := session.Client.ListTools(ctx, mcp.ListToolsRequest{}) + if err != nil { + return nil, fmt.Errorf("failed to list tools: %w", err) + } + + toolDefs := []types.Tool{{ /* this is a placeholder for main tool */ }} + var toolNames []string + + for _, tool := range tools.Tools { + var schema openapi3.Schema + + schemaData, err := json.Marshal(tool.InputSchema) + if err != nil { + panic(err) + } + + if tool.Name == "" { + // I dunno, bad tool? 
+ continue + } + + if err := json.Unmarshal(schemaData, &schema); err != nil { + return nil, fmt.Errorf("failed to unmarshal tool input schema: %w", err) + } + + annotations, err := json.Marshal(tool.Annotations) + if err != nil { + return nil, fmt.Errorf("failed to marshal tool annotations: %w", err) + } + + toolDef := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: tool.Name, + Description: tool.Description, + Arguments: &schema, + }, + Instructions: types.MCPInvokePrefix + tool.Name + " " + session.ID, + }, + } + + if string(annotations) != "{}" { + toolDef.MetaData = map[string]string{ + "mcp-tool-annotations": string(annotations), + } + } + + if tool.Annotations.Title != "" && !slices.Contains(strings.Fields(tool.Annotations.Title), "as") { + toolDef.Name = tool.Annotations.Title + " as " + tool.Name + } + + toolDefs = append(toolDefs, toolDef) + toolNames = append(toolNames, tool.Name) + } + + main := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: toolName, + Description: session.InitResult.ServerInfo.Name, + Export: toolNames, + }, + MetaData: map[string]string{ + "bundle": "true", + }, + }, + } + + if session.InitResult.Instructions != "" { + data, _ := json.Marshal(map[string]any{ + "tools": toolNames, + "instructions": session.InitResult.Instructions, + }) + toolDefs = append(toolDefs, types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: session.ID, + Type: "context", + }, + Instructions: types.EchoPrefix + "\n" + `# START MCP SERVER INFO: ` + session.InitResult.ServerInfo.Name + "\n" + + `You have available the following tools from an MCP Server that has provided the following additional instructions` + "\n" + + string(data) + "\n" + + `# END MCP SERVER INFO` + "\n", + }, + }) + + main.ExportContext = append(main.ExportContext, session.ID) + } + + toolDefs[0] = main + return toolDefs, nil +} + +func (l *Local) loadSession(server ServerConfig) (*Session, error) { + id := 
hash.Digest(server) + l.lock.Lock() + existing, ok := l.sessions[id] + if l.sessionCtx == nil { + l.sessionCtx, l.cancel = context.WithCancel(context.Background()) + } + ctx := l.sessionCtx + l.lock.Unlock() + + if ok { + return existing, nil + } + + var ( + c *client.Client + err error + ) + if server.Command != "" { + c, err = client.NewStdioMCPClient(server.Command, server.Env, server.Args...) + if err != nil { + return nil, fmt.Errorf("failed to create MCP stdio client: %w", err) + } + } else { + url := server.URL + if url == "" { + url = server.Server + } + + headers := make(map[string]string, len(server.Headers)) + for _, h := range server.Headers { + k, v, _ := strings.Cut(h, "=") + headers[k] = v + } + + c, err = client.NewSSEMCPClient(url, client.WithHeaders(headers)) + if err != nil { + return nil, fmt.Errorf("failed to create MCP HTTP client: %w", err) + } + + // We expect the client to outlive this one request. + if err = c.Start(ctx); err != nil { + return nil, fmt.Errorf("failed to start MCP client: %w", err) + } + } + + var initRequest mcp.InitializeRequest + initRequest.Params.ClientInfo = mcp.Implementation{ + Name: version.ProgramName, + Version: version.Get().String(), + } + + initResult, err := c.Initialize(ctx, initRequest) + if err != nil { + return nil, fmt.Errorf("failed to initialize MCP client: %w", err) + } + + result := &Session{ + ID: id, + InitResult: initResult, + Client: c, + Config: server, + } + + l.lock.Lock() + defer l.lock.Unlock() + + if existing, ok = l.sessions[id]; ok { + return existing, c.Close() + } + + if l.sessions == nil { + l.sessions = make(map[string]*Session) + } + l.sessions[id] = result + return result, nil +} diff --git a/pkg/mcp/runner.go b/pkg/mcp/runner.go new file mode 100644 index 00000000..448d58a7 --- /dev/null +++ b/pkg/mcp/runner.go @@ -0,0 +1,55 @@ +package mcp + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/gptscript-ai/gptscript/pkg/types" + 
"github.com/mark3labs/mcp-go/mcp" +) + +func (l *Local) Run(ctx context.Context, _ chan<- types.CompletionStatus, tool types.Tool, input string) (string, error) { + fields := strings.Fields(tool.Instructions) + if len(fields) < 2 { + return "", fmt.Errorf("invalid mcp call, invalid number of fields in %s", tool.Instructions) + } + + id := fields[1] + toolName, ok := strings.CutPrefix(fields[0], types.MCPInvokePrefix) + if !ok { + return "", fmt.Errorf("invalid mcp call, invalid tool name in %s", tool.Instructions) + } + + arguments := map[string]any{} + + if input != "" { + if err := json.Unmarshal([]byte(input), &arguments); err != nil { + return "", fmt.Errorf("failed to unmarshal input: %w", err) + } + } + + l.lock.Lock() + session, ok := l.sessions[id] + l.lock.Unlock() + if !ok { + return "", fmt.Errorf("session not found for MCP server %s", id) + } + + request := mcp.CallToolRequest{} + request.Params.Name = toolName + request.Params.Arguments = arguments + + result, err := session.Client.CallTool(ctx, request) + if err != nil { + return "", fmt.Errorf("failed to call tool %s: %w", toolName, err) + } + + str, err := json.Marshal(result) + if err != nil { + return "", fmt.Errorf("failed to marshal result: %w", err) + } + + return string(str), nil +} diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 6d4e7598..200c453b 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -14,6 +14,7 @@ import ( context2 "github.com/gptscript-ai/gptscript/pkg/context" "github.com/gptscript-ai/gptscript/pkg/credentials" "github.com/gptscript-ai/gptscript/pkg/engine" + "github.com/gptscript-ai/gptscript/pkg/mcp" "github.com/gptscript-ai/gptscript/pkg/types" "golang.org/x/exp/maps" ) @@ -37,6 +38,7 @@ type Options struct { CredentialOverrides []string `usage:"-"` Sequential bool `usage:"-"` Authorizer AuthorizerFunc `usage:"-"` + MCPRunner engine.MCPRunner `usage:"-"` } type RunOptions struct { @@ -69,6 +71,9 @@ func Complete(opts ...Options) (result Options) 
{ if opt.CredentialOverrides != nil { result.CredentialOverrides = append(result.CredentialOverrides, opt.CredentialOverrides...) } + if opt.MCPRunner != nil { + result.MCPRunner = opt.MCPRunner + } } return } @@ -87,6 +92,9 @@ func complete(opts ...Options) Options { if result.Authorizer == nil { result.Authorizer = DefaultAuthorizer } + if result.MCPRunner == nil { + result.MCPRunner = mcp.DefaultRunner + } return result } @@ -99,6 +107,7 @@ type Runner struct { credOverrides []string credStore credentials.CredentialStore sequential bool + mcpRunner engine.MCPRunner } func New(client engine.Model, credStore credentials.CredentialStore, opts ...Options) (*Runner, error) { @@ -113,6 +122,7 @@ func New(client engine.Model, credStore credentials.CredentialStore, opts ...Opt credStore: credStore, sequential: opt.Sequential, auth: opt.Authorizer, + mcpRunner: opt.MCPRunner, } if opt.StartPort != 0 { @@ -326,6 +336,7 @@ func (r *Runner) start(callCtx engine.Context, state *State, monitor Monitor, en e := engine.Engine{ Model: r.c, + MCPRunner: r.mcpRunner, RuntimeManager: runtimeWithLogger(callCtx, monitor, r.runtimeManager), Progress: progress, Env: env, @@ -524,6 +535,7 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s e := engine.Engine{ Model: r.c, + MCPRunner: r.mcpRunner, RuntimeManager: runtimeWithLogger(callCtx, monitor, r.runtimeManager), Progress: progress, Env: env, diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 1a4e28ea..52a06994 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -29,6 +29,7 @@ type server struct { datasetTool, workspaceTool string serverToolsEnv []string client *gptscript.GPTScript + mcpLoader loader.MCPLoader events *broadcaster.Broadcaster[event] runtimeManager engine.RuntimeManager @@ -283,11 +284,20 @@ func (s *server) load(w http.ResponseWriter, r *http.Request) { } if reqObject.Content != "" { - prg, err = loader.ProgramFromSource(ctx, reqObject.Content, 
reqObject.SubTool, loader.Options{Cache: s.client.Cache}) + prg, err = loader.ProgramFromSource(ctx, reqObject.Content, reqObject.SubTool, loader.Options{ + Cache: s.client.Cache, + MCPLoader: s.mcpLoader, + }) } else if reqObject.File != "" { - prg, err = loader.Program(ctx, reqObject.File, reqObject.SubTool, loader.Options{Cache: s.client.Cache}) + prg, err = loader.Program(ctx, reqObject.File, reqObject.SubTool, loader.Options{ + Cache: s.client.Cache, + MCPLoader: s.mcpLoader, + }) } else { - prg, err = loader.ProgramFromSource(ctx, reqObject.ToolDefs.String(), reqObject.SubTool, loader.Options{Cache: s.client.Cache}) + prg, err = loader.ProgramFromSource(ctx, reqObject.ToolDefs.String(), reqObject.SubTool, loader.Options{ + Cache: s.client.Cache, + MCPLoader: s.mcpLoader, + }) } if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) diff --git a/pkg/sdkserver/run.go b/pkg/sdkserver/run.go index fda4a215..a2c0d505 100644 --- a/pkg/sdkserver/run.go +++ b/pkg/sdkserver/run.go @@ -36,7 +36,11 @@ func (s *server) execAndStream(ctx context.Context, programLoader loaderFunc, lo if defaultModel == "" { defaultModel = s.gptscriptOpts.OpenAI.DefaultModel } - prg, err := programLoader(ctx, toolDef.String(), subTool, loader.Options{Cache: g.Cache, DefaultModel: defaultModel}) + prg, err := programLoader(ctx, toolDef.String(), subTool, loader.Options{ + Cache: g.Cache, + DefaultModel: defaultModel, + MCPLoader: s.mcpLoader, + }) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to load program: %w", err)) return diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index f15cc68f..52e9ec1c 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -16,6 +16,8 @@ import ( "github.com/google/uuid" "github.com/gptscript-ai/broadcaster" "github.com/gptscript-ai/gptscript/pkg/gptscript" + "github.com/gptscript-ai/gptscript/pkg/loader" + 
"github.com/gptscript-ai/gptscript/pkg/mcp" "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" "github.com/gptscript-ai/gptscript/pkg/runner" @@ -26,6 +28,7 @@ import ( type Options struct { gptscript.Options + MCPLoader loader.MCPLoader ListenAddress string DatasetTool, WorkspaceTool string ServerToolsEnv []string @@ -114,6 +117,7 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { serverToolsEnv: opts.ServerToolsEnv, client: g, + mcpLoader: opts.MCPLoader, events: events, runtimeManager: runtimes.Default(opts.Cache.CacheDir, opts.SystemToolsDir), waitingToConfirm: make(map[string]chan runner.AuthorizerResponse), @@ -168,6 +172,7 @@ func complete(opts ...Options) Options { result.WorkspaceTool = types.FirstSet(opt.WorkspaceTool, result.WorkspaceTool) result.Debug = types.FirstSet(opt.Debug, result.Debug) result.DisableServerErrorLogging = types.FirstSet(opt.DisableServerErrorLogging, result.DisableServerErrorLogging) + result.MCPLoader = types.FirstSet(opt.MCPLoader, result.MCPLoader) } if result.ListenAddress == "" { @@ -183,6 +188,9 @@ func complete(opts ...Options) Options { if len(result.ServerToolsEnv) == 0 { result.ServerToolsEnv = os.Environ() } + if result.MCPLoader == nil { + result.MCPLoader = mcp.DefaultLoader + } return result } diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index f5de8e10..c531c661 100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -3,11 +3,13 @@ package tests import ( "context" "encoding/json" + "runtime" "testing" "github.com/gptscript-ai/gptscript/pkg/loader" "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/gptscript-ai/gptscript/pkg/tests/tester" + "github.com/gptscript-ai/gptscript/pkg/types" "github.com/hexops/autogold/v2" "github.com/stretchr/testify/require" ) @@ -203,3 +205,358 @@ echo "${GPTSCRIPT_INPUT}" require.NoError(t, err) autogold.Expect(map[string]interface{}{"foo": "baz", "start": 
true}).Equal(t, data) } + +func TestMCPLoad(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on Windows") + } + + r := tester.NewRunner(t) + prg, err := loader.ProgramFromSource(context.Background(), ` +name: mcp + +#!mcp + +{ + "mcpServers": { + "sqlite": { + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "-v", + "mcp-test:/mcp", + "mcp/sqlite@sha256:007ccae941a6f6db15b26ee41d92edda50ce157176d9273449e8b3f51d979c70", + "--db-path", + "/mcp/test.db" + ] + } + } +} +`, "") + require.NoError(t, err) + + autogold.Expect(types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "mcp", + Description: "sqlite", + ModelName: "gpt-4o", + Export: []string{ + "read_query", + "write_query", + "create_table", + "list_tables", + "describe_table", + "append_insight", + }, + }, + MetaData: map[string]string{"bundle": "true"}, + }, + ID: "inline:mcp", + ToolMapping: map[string][]types.ToolReference{ + "append_insight": {{ + Reference: "append_insight", + ToolID: "inline:append_insight", + }}, + "create_table": {{ + Reference: "create_table", + ToolID: "inline:create_table", + }}, + "describe_table": {{ + Reference: "describe_table", + ToolID: "inline:describe_table", + }}, + "list_tables": {{ + Reference: "list_tables", + ToolID: "inline:list_tables", + }}, + "read_query": {{ + Reference: "read_query", + ToolID: "inline:read_query", + }}, + "write_query": {{ + Reference: "write_query", + ToolID: "inline:write_query", + }}, + }, + LocalTools: map[string]string{ + "append_insight": "inline:append_insight", + "create_table": "inline:create_table", + "describe_table": "inline:describe_table", + "list_tables": "inline:list_tables", + "mcp": "inline:mcp", + "read_query": "inline:read_query", + "write_query": "inline:write_query", + }, + Source: types.ToolSource{Location: "inline"}, + WorkingDir: ".", + }).Equal(t, prg.ToolSet[prg.EntryToolID]) + autogold.Expect(7).Equal(t, len(prg.ToolSet[prg.EntryToolID].LocalTools)) + data, _ 
:= json.MarshalIndent(prg.ToolSet, "", " ") + autogold.Expect(`{ + "inline:append_insight": { + "name": "append_insight", + "description": "Add a business insight to the memo", + "modelName": "gpt-4o", + "internalPrompt": null, + "arguments": { + "properties": { + "insight": { + "description": "Business insight discovered from data analysis", + "type": "string" + } + }, + "required": [ + "insight" + ], + "type": "object" + }, + "instructions": "#!sys.mcp.invoke.append_insight 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "id": "inline:append_insight", + "localTools": { + "append_insight": "inline:append_insight", + "create_table": "inline:create_table", + "describe_table": "inline:describe_table", + "list_tables": "inline:list_tables", + "mcp": "inline:mcp", + "read_query": "inline:read_query", + "write_query": "inline:write_query" + }, + "source": { + "location": "inline" + }, + "workingDir": "." + }, + "inline:create_table": { + "name": "create_table", + "description": "Create a new table in the SQLite database", + "modelName": "gpt-4o", + "internalPrompt": null, + "arguments": { + "properties": { + "query": { + "description": "CREATE TABLE SQL statement", + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + }, + "instructions": "#!sys.mcp.invoke.create_table 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "id": "inline:create_table", + "localTools": { + "append_insight": "inline:append_insight", + "create_table": "inline:create_table", + "describe_table": "inline:describe_table", + "list_tables": "inline:list_tables", + "mcp": "inline:mcp", + "read_query": "inline:read_query", + "write_query": "inline:write_query" + }, + "source": { + "location": "inline" + }, + "workingDir": "." 
+ }, + "inline:describe_table": { + "name": "describe_table", + "description": "Get the schema information for a specific table", + "modelName": "gpt-4o", + "internalPrompt": null, + "arguments": { + "properties": { + "table_name": { + "description": "Name of the table to describe", + "type": "string" + } + }, + "required": [ + "table_name" + ], + "type": "object" + }, + "instructions": "#!sys.mcp.invoke.describe_table 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "id": "inline:describe_table", + "localTools": { + "append_insight": "inline:append_insight", + "create_table": "inline:create_table", + "describe_table": "inline:describe_table", + "list_tables": "inline:list_tables", + "mcp": "inline:mcp", + "read_query": "inline:read_query", + "write_query": "inline:write_query" + }, + "source": { + "location": "inline" + }, + "workingDir": "." + }, + "inline:list_tables": { + "name": "list_tables", + "description": "List all tables in the SQLite database", + "modelName": "gpt-4o", + "internalPrompt": null, + "arguments": { + "type": "object" + }, + "instructions": "#!sys.mcp.invoke.list_tables 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "id": "inline:list_tables", + "localTools": { + "append_insight": "inline:append_insight", + "create_table": "inline:create_table", + "describe_table": "inline:describe_table", + "list_tables": "inline:list_tables", + "mcp": "inline:mcp", + "read_query": "inline:read_query", + "write_query": "inline:write_query" + }, + "source": { + "location": "inline" + }, + "workingDir": "." 
+ }, + "inline:mcp": { + "name": "mcp", + "description": "sqlite", + "modelName": "gpt-4o", + "internalPrompt": null, + "export": [ + "read_query", + "write_query", + "create_table", + "list_tables", + "describe_table", + "append_insight" + ], + "metaData": { + "bundle": "true" + }, + "id": "inline:mcp", + "toolMapping": { + "append_insight": [ + { + "reference": "append_insight", + "toolID": "inline:append_insight" + } + ], + "create_table": [ + { + "reference": "create_table", + "toolID": "inline:create_table" + } + ], + "describe_table": [ + { + "reference": "describe_table", + "toolID": "inline:describe_table" + } + ], + "list_tables": [ + { + "reference": "list_tables", + "toolID": "inline:list_tables" + } + ], + "read_query": [ + { + "reference": "read_query", + "toolID": "inline:read_query" + } + ], + "write_query": [ + { + "reference": "write_query", + "toolID": "inline:write_query" + } + ] + }, + "localTools": { + "append_insight": "inline:append_insight", + "create_table": "inline:create_table", + "describe_table": "inline:describe_table", + "list_tables": "inline:list_tables", + "mcp": "inline:mcp", + "read_query": "inline:read_query", + "write_query": "inline:write_query" + }, + "source": { + "location": "inline" + }, + "workingDir": "." 
+ }, + "inline:read_query": { + "name": "read_query", + "description": "Execute a SELECT query on the SQLite database", + "modelName": "gpt-4o", + "internalPrompt": null, + "arguments": { + "properties": { + "query": { + "description": "SELECT SQL query to execute", + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + }, + "instructions": "#!sys.mcp.invoke.read_query 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "id": "inline:read_query", + "localTools": { + "append_insight": "inline:append_insight", + "create_table": "inline:create_table", + "describe_table": "inline:describe_table", + "list_tables": "inline:list_tables", + "mcp": "inline:mcp", + "read_query": "inline:read_query", + "write_query": "inline:write_query" + }, + "source": { + "location": "inline" + }, + "workingDir": "." + }, + "inline:write_query": { + "name": "write_query", + "description": "Execute an INSERT, UPDATE, or DELETE query on the SQLite database", + "modelName": "gpt-4o", + "internalPrompt": null, + "arguments": { + "properties": { + "query": { + "description": "SQL query to execute", + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + }, + "instructions": "#!sys.mcp.invoke.write_query 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "id": "inline:write_query", + "localTools": { + "append_insight": "inline:append_insight", + "create_table": "inline:create_table", + "describe_table": "inline:describe_table", + "list_tables": "inline:list_tables", + "mcp": "inline:mcp", + "read_query": "inline:read_query", + "write_query": "inline:write_query" + }, + "source": { + "location": "inline" + }, + "workingDir": "." 
+ } +}`).Equal(t, string(data)) + + prg.EntryToolID = prg.ToolSet[prg.EntryToolID].LocalTools["read_query"] + resp, err := r.Chat(context.Background(), nil, prg, nil, `{"query": "SELECT 1"}`, runner.RunOptions{}) + r.AssertStep(t, resp, err) +} diff --git a/pkg/tests/testdata/TestMCPLoad/call1-resp.golden b/pkg/tests/testdata/TestMCPLoad/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestMCPLoad/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestMCPLoad/call1.golden b/pkg/tests/testdata/TestMCPLoad/call1.golden new file mode 100644 index 00000000..31048a88 --- /dev/null +++ b/pkg/tests/testdata/TestMCPLoad/call1.golden @@ -0,0 +1,3 @@ +`{ + "model": "gpt-4o" +}` diff --git a/pkg/tests/testdata/TestMCPLoad/step1.golden b/pkg/tests/testdata/TestMCPLoad/step1.golden new file mode 100644 index 00000000..ae20c8ed --- /dev/null +++ b/pkg/tests/testdata/TestMCPLoad/step1.golden @@ -0,0 +1,6 @@ +`{ + "done": true, + "content": "{\"content\":[{\"type\":\"text\",\"text\":\"[{'1': 1}]\"}]}", + "toolID": "", + "state": null +}` diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 3d48c6e1..10b47c77 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -16,11 +16,14 @@ import ( ) const ( - DaemonPrefix = "#!sys.daemon" - OpenAPIPrefix = "#!sys.openapi" - EchoPrefix = "#!sys.echo" - CallPrefix = "#!sys.call" - CommandPrefix = "#!" + DaemonPrefix = "#!sys.daemon" + OpenAPIPrefix = "#!sys.openapi" + EchoPrefix = "#!sys.echo" + CallPrefix = "#!sys.call" + MCPPrefix = "#!mcp" + MCPInvokePrefix = "#!sys.mcp.invoke." + CommandPrefix = "#!" + PromptPrefix = "!!" 
) var ( @@ -876,6 +879,14 @@ func (t Tool) IsDaemon() bool { return strings.HasPrefix(t.Instructions, DaemonPrefix) } +func (t Tool) IsMCP() bool { + return strings.HasPrefix(t.Instructions, MCPPrefix) +} + +func (t Tool) IsMCPInvoke() bool { + return strings.HasPrefix(t.Instructions, MCPInvokePrefix) +} + func (t Tool) IsOpenAPI() bool { return strings.HasPrefix(t.Instructions, OpenAPIPrefix) } diff --git a/pkg/types/toolstring.go b/pkg/types/toolstring.go index b5e0d1d5..8d379f14 100644 --- a/pkg/types/toolstring.go +++ b/pkg/types/toolstring.go @@ -44,6 +44,10 @@ func ToDisplayText(tool Tool, input string) string { } func ToSysDisplayString(id string, args map[string]string) (string, error) { + if suffix, ok := strings.CutPrefix(id, MCPInvokePrefix); ok { + return fmt.Sprintf("Invoking MCP `%s`", suffix), nil + } + switch id { case "sys.append": return fmt.Sprintf("Appending to file `%s`", args["filename"]), nil From 43aca8fc57e3152d4f4d11b2888c26258b0a18ca Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 7 May 2025 18:12:16 -0400 Subject: [PATCH 240/270] chore: separate the LoadSession function in the MCP loader (#966) enhance: add the ability to specify allow tools for MCP servers Signed-off-by: Donnie Adams --- pkg/mcp/loader.go | 26 +++++++++++++++++++++++--- pkg/tests/runner2_test.go | 12 ++++++------ 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index 0eb713e5..72c0ebf4 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -56,6 +56,7 @@ type ServerConfig struct { BaseURL string `json:"baseURL,omitempty"` Headers []string `json:"headers"` Scope string `json:"scope"` + AllowedTools []string `json:"allowedTools"` } func (s *ServerConfig) GetBaseURL() string { @@ -99,18 +100,31 @@ func (l *Local) Load(ctx context.Context, tool types.Tool) (result []types.Tool, } for server := range maps.Keys(servers.MCPServers) { - session, err := l.loadSession(servers.MCPServers[server]) + tools, err := 
l.LoadTools(ctx, servers.MCPServers[server], tool.Name) if err != nil { return nil, fmt.Errorf("failed to load MCP session for server %s: %w", server, err) } - return l.sessionToTools(ctx, session, tool.Name) + return tools, nil } // This should never happen, but just in case return nil, fmt.Errorf("no MCP server configuration found in tool instructions: %s", configData) } +func (l *Local) LoadTools(ctx context.Context, server ServerConfig, toolName string) ([]types.Tool, error) { + allowedTools := server.AllowedTools + // Reset so we don't start a new MCP server, no reason to if one is already running and the allowed tools change. + server.AllowedTools = nil + + session, err := l.loadSession(server) + if err != nil { + return nil, err + } + + return l.sessionToTools(ctx, session, toolName, allowedTools) +} + func (l *Local) Close() error { if l == nil { return nil @@ -139,7 +153,9 @@ func (l *Local) Close() error { return errors.Join(errs...) } -func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName string) ([]types.Tool, error) { +func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName string, allowedTools []string) ([]types.Tool, error) { + allToolsAllowed := len(allowedTools) == 0 || slices.Contains(allowedTools, "*") + tools, err := session.Client.ListTools(ctx, mcp.ListToolsRequest{}) if err != nil { return nil, fmt.Errorf("failed to list tools: %w", err) @@ -149,6 +165,10 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s var toolNames []string for _, tool := range tools.Tools { + if !allToolsAllowed && !slices.Contains(allowedTools, tool.Name) { + continue + } + var schema openapi3.Schema schemaData, err := json.Marshal(tool.InputSchema) diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index c531c661..75253a7b 100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -313,7 +313,7 @@ name: mcp ], "type": "object" }, - "instructions": 
"#!sys.mcp.invoke.append_insight 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "instructions": "#!sys.mcp.invoke.append_insight c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", "id": "inline:append_insight", "localTools": { "append_insight": "inline:append_insight", @@ -346,7 +346,7 @@ name: mcp ], "type": "object" }, - "instructions": "#!sys.mcp.invoke.create_table 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "instructions": "#!sys.mcp.invoke.create_table c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", "id": "inline:create_table", "localTools": { "append_insight": "inline:append_insight", @@ -379,7 +379,7 @@ name: mcp ], "type": "object" }, - "instructions": "#!sys.mcp.invoke.describe_table 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "instructions": "#!sys.mcp.invoke.describe_table c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", "id": "inline:describe_table", "localTools": { "append_insight": "inline:append_insight", @@ -403,7 +403,7 @@ name: mcp "arguments": { "type": "object" }, - "instructions": "#!sys.mcp.invoke.list_tables 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "instructions": "#!sys.mcp.invoke.list_tables c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", "id": "inline:list_tables", "localTools": { "append_insight": "inline:append_insight", @@ -505,7 +505,7 @@ name: mcp ], "type": "object" }, - "instructions": "#!sys.mcp.invoke.read_query 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "instructions": "#!sys.mcp.invoke.read_query c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", "id": "inline:read_query", "localTools": { "append_insight": "inline:append_insight", @@ -538,7 +538,7 @@ name: mcp ], "type": "object" }, - "instructions": "#!sys.mcp.invoke.write_query 607ca64476abf0288ef49061557243e43735fd4de4bc5fdcd51d93049ffa023e", + "instructions": 
"#!sys.mcp.invoke.write_query c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", "id": "inline:write_query", "localTools": { "append_insight": "inline:append_insight", From 6edde57ce3185964eb711cadddc3bdd61d1137db Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 9 May 2025 16:46:35 -0400 Subject: [PATCH 241/270] enhance: add the ability to close an individual MCP server (#967) Signed-off-by: Donnie Adams --- pkg/engine/engine.go | 3 +++ pkg/mcp/loader.go | 26 ++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index c7867512..e3ff930a 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -350,6 +350,9 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, err error) { } select { case <-ctx.userCancel: + if ret == nil { + ret = new(Return) + } if ret.Result == nil { ret.Result = new(string) } diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index 72c0ebf4..86e2f8d2 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -125,6 +125,32 @@ func (l *Local) LoadTools(ctx context.Context, server ServerConfig, toolName str return l.sessionToTools(ctx, session, toolName, allowedTools) } +func (l *Local) ShutdownServer(server ServerConfig) error { + if l == nil { + return nil + } + + id := hash.Digest(server) + + l.lock.Lock() + + if l.sessionCtx == nil { + l.lock.Unlock() + return nil + } + + session := l.sessions[id] + delete(l.sessions, id) + + l.lock.Unlock() + + if session == nil { + return nil + } + + return session.Client.Close() +} + func (l *Local) Close() error { if l == nil { return nil From b98000c54e2aa6dc224a8da6c0db3932d96e0e91 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 13 May 2025 16:45:28 -0400 Subject: [PATCH 242/270] fix: correct the ordering of the as tool name aliases for MCP servers (#970) Signed-off-by: Donnie Adams --- pkg/mcp/loader.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/mcp/loader.go 
b/pkg/mcp/loader.go index 86e2f8d2..b3441719 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -234,11 +234,12 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s } if tool.Annotations.Title != "" && !slices.Contains(strings.Fields(tool.Annotations.Title), "as") { - toolDef.Name = tool.Annotations.Title + " as " + tool.Name + toolNames = append(toolNames, tool.Name+" as "+tool.Annotations.Title) + } else { + toolNames = append(toolNames, tool.Name) } toolDefs = append(toolDefs, toolDef) - toolNames = append(toolNames, tool.Name) } main := types.Tool{ From 9b2832d2421a1165284182251bd1e60c77705539 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 15 May 2025 12:28:46 -0400 Subject: [PATCH 243/270] chore: switch from gob to JSON encoding for digests (#971) The gob encoder is dependent on the order in which it sees types. So, even encoding a completely new type could change the "digest" of a type with the same values. This is not in the flavor or producing a digest. This change switches to JSON, which will produce a consistent encoding. 
Signed-off-by: Donnie Adams --- pkg/hash/sha256.go | 4 ++-- pkg/tests/runner2_test.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/hash/sha256.go b/pkg/hash/sha256.go index 05209e44..93aee56a 100644 --- a/pkg/hash/sha256.go +++ b/pkg/hash/sha256.go @@ -2,8 +2,8 @@ package hash import ( "crypto/sha256" - "encoding/gob" "encoding/hex" + "encoding/json" ) func ID(parts ...string) string { @@ -26,7 +26,7 @@ func Digest(obj any) string { case string: hash.Write([]byte(v)) default: - if err := gob.NewEncoder(hash).Encode(obj); err != nil { + if err := json.NewEncoder(hash).Encode(obj); err != nil { panic(err) } } diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index 75253a7b..52098cf1 100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -313,7 +313,7 @@ name: mcp ], "type": "object" }, - "instructions": "#!sys.mcp.invoke.append_insight c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", + "instructions": "#!sys.mcp.invoke.append_insight e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:append_insight", "localTools": { "append_insight": "inline:append_insight", @@ -346,7 +346,7 @@ name: mcp ], "type": "object" }, - "instructions": "#!sys.mcp.invoke.create_table c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", + "instructions": "#!sys.mcp.invoke.create_table e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:create_table", "localTools": { "append_insight": "inline:append_insight", @@ -379,7 +379,7 @@ name: mcp ], "type": "object" }, - "instructions": "#!sys.mcp.invoke.describe_table c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", + "instructions": "#!sys.mcp.invoke.describe_table e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:describe_table", "localTools": { "append_insight": "inline:append_insight", @@ -403,7 +403,7 @@ name: mcp "arguments": { 
"type": "object" }, - "instructions": "#!sys.mcp.invoke.list_tables c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", + "instructions": "#!sys.mcp.invoke.list_tables e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:list_tables", "localTools": { "append_insight": "inline:append_insight", @@ -505,7 +505,7 @@ name: mcp ], "type": "object" }, - "instructions": "#!sys.mcp.invoke.read_query c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", + "instructions": "#!sys.mcp.invoke.read_query e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:read_query", "localTools": { "append_insight": "inline:append_insight", @@ -538,7 +538,7 @@ name: mcp ], "type": "object" }, - "instructions": "#!sys.mcp.invoke.write_query c358c2eb93fa9a98631cd9e4f324d7b59f56aee11c7ae32a00984ad5844dc32c", + "instructions": "#!sys.mcp.invoke.write_query e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:write_query", "localTools": { "append_insight": "inline:append_insight", From 20f384dedc5e4ce0fed985d3e5d217e9d8cb251e Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 16 May 2025 10:48:34 -0400 Subject: [PATCH 244/270] enhance: allocate a random token for each daemon tool (#972) Signed-off-by: Grant Linville --- pkg/engine/daemon.go | 50 ++++++++++++++++++++++++++++++++++++-------- pkg/engine/http.go | 11 ++++++++-- 2 files changed, 50 insertions(+), 11 deletions(-) diff --git a/pkg/engine/daemon.go b/pkg/engine/daemon.go index 58de592b..3a0ecba6 100644 --- a/pkg/engine/daemon.go +++ b/pkg/engine/daemon.go @@ -11,6 +11,8 @@ import ( "sync" "time" + cryptorand "crypto/rand" + "github.com/gptscript-ai/gptscript/pkg/system" "github.com/gptscript-ai/gptscript/pkg/types" ) @@ -19,6 +21,7 @@ var ports Ports type Ports struct { daemonPorts map[string]int64 + daemonTokens map[string]string daemonsRunning map[string]func() daemonLock sync.Mutex @@ -119,7 +122,30 @@ func 
getPath(instructions string) (string, string) { return strings.TrimSpace(rest), strings.TrimSpace(value) } -func (e *Engine) startDaemon(tool types.Tool) (string, error) { +func getDaemonToken(toolID string) (string, error) { + token, ok := ports.daemonTokens[toolID] + if !ok { + // Generate a new token. + tokenBytes := make([]byte, 50) + count, err := cryptorand.Read(tokenBytes) + if err != nil { + return "", fmt.Errorf("failed to generate daemon token: %w", err) + } else if count != len(tokenBytes) { + return "", fmt.Errorf("failed to generate daemon token") + } + + token = fmt.Sprintf("%x", tokenBytes) + + if ports.daemonTokens == nil { + ports.daemonTokens = map[string]string{} + } + ports.daemonTokens[toolID] = token + } + + return token, nil +} + +func (e *Engine) startDaemon(tool types.Tool) (string, string, error) { ports.daemonLock.Lock() defer ports.daemonLock.Unlock() @@ -127,10 +153,15 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { instructions, path := getPath(instructions) tool.Instructions = types.CommandPrefix + instructions + token, err := getDaemonToken(tool.ID) + if err != nil { + return "", "", err + } + port, ok := ports.daemonPorts[tool.ID] url := fmt.Sprintf("http://127.0.0.1:%d%s", port, path) if ok && ports.daemonsRunning[url] != nil { - return url, nil + return url, token, nil } if ports.daemonCtx == nil { @@ -149,18 +180,19 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { cmd, stop, err := e.newCommand(ctx, []string{ fmt.Sprintf("PORT=%d", port), fmt.Sprintf("GPTSCRIPT_PORT=%d", port), + fmt.Sprintf("GPTSCRIPT_DAEMON_TOKEN=%s", token), }, tool, "{}", false, ) if err != nil { - return url, err + return url, "", err } r, w, err := os.Pipe() if err != nil { - return "", err + return "", "", err } // Loop back to gptscript to help with process supervision @@ -178,7 +210,7 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { log.Infof("launched [%s][%s] port [%d] %v", tool.Name, tool.ID, 
port, cmd.Args) if err := cmd.Start(); err != nil { stop() - return url, err + return url, "", err } if ports.daemonPorts == nil { @@ -217,20 +249,20 @@ func (e *Engine) startDaemon(tool types.Tool) (string, error) { _, _ = io.ReadAll(resp.Body) _ = resp.Body.Close() }() - return url, nil + return url, token, nil } select { case <-killedCtx.Done(): - return url, fmt.Errorf("daemon failed to start: %w", context.Cause(killedCtx)) + return url, "", fmt.Errorf("daemon failed to start: %w", context.Cause(killedCtx)) case <-time.After(time.Second): } } - return url, fmt.Errorf("timeout waiting for 200 response from GET %s", url) + return url, "", fmt.Errorf("timeout waiting for 200 response from GET %s", url) } func (e *Engine) runDaemon(ctx Context, tool types.Tool, input string) (cmdRet *Return, cmdErr error) { - url, err := e.startDaemon(tool) + url, _, err := e.startDaemon(tool) if err != nil { return nil, err } diff --git a/pkg/engine/http.go b/pkg/engine/http.go index 49738b1a..a9a635e8 100644 --- a/pkg/engine/http.go +++ b/pkg/engine/http.go @@ -39,7 +39,10 @@ func (e *Engine) runHTTP(ctx Context, tool types.Tool, input string) (cmdRet *Re return nil, err } - var requestedEnvVars map[string]struct{} + var ( + requestedEnvVars map[string]struct{} + daemonToken string + ) if strings.HasSuffix(parsed.Hostname(), DaemonURLSuffix) { referencedToolName := strings.TrimSuffix(parsed.Hostname(), DaemonURLSuffix) referencedToolRefs, ok := tool.ToolMapping[referencedToolName] @@ -50,7 +53,7 @@ func (e *Engine) runHTTP(ctx Context, tool types.Tool, input string) (cmdRet *Re if !ok { return nil, fmt.Errorf("failed to find tool [%s] for [%s]", referencedToolName, parsed.Hostname()) } - toolURL, err = e.startDaemon(referencedTool) + toolURL, daemonToken, err = e.startDaemon(referencedTool) if err != nil { return nil, err } @@ -85,6 +88,10 @@ func (e *Engine) runHTTP(ctx Context, tool types.Tool, input string) (cmdRet *Re return nil, err } + if daemonToken != "" { + 
req.Header.Add("X-GPTScript-Daemon-Token", daemonToken) + } + for _, k := range slices.Sorted(maps.Keys(envMap)) { if _, ok := requestedEnvVars[k]; ok || strings.HasPrefix(k, "GPTSCRIPT_WORKSPACE_") { req.Header.Add("X-GPTScript-Env", k+"="+envMap[k]) From 53bf7cdc8f280545ce0f879e43eab94911495b37 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 20 May 2025 15:50:01 -0400 Subject: [PATCH 245/270] enhance: send MCP errors back to the LLM so it can correct if possible (#974) Signed-off-by: Donnie Adams --- pkg/engine/cmd.go | 6 +++--- pkg/engine/engine.go | 6 +++--- pkg/mcp/runner.go | 14 +++++++++++--- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 5fb340c5..e7671436 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -65,7 +65,7 @@ func compressEnv(envs []string) (result []string) { return } -func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCategory ToolCategory) (cmdOut string, cmdErr error) { +func (e *Engine) runCommand(ctx Context, tool types.Tool, input string) (cmdOut string, cmdErr error) { id := counter.Next() var combinedOutput string @@ -128,7 +128,7 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate cmd, stop, err := e.newCommand(commandCtx, extraEnv, tool, input, true) if err != nil { - if toolCategory == NoCategory && ctx.Parent != nil { + if ctx.ToolCategory == NoCategory && ctx.Parent != nil { return fmt.Sprintf("ERROR: got (%v) while parsing command", err), nil } return "", fmt.Errorf("got (%v) while parsing command", err) @@ -167,7 +167,7 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate if err := cmd.Run(); err != nil && (commandCtx.Err() == nil || ctx.Ctx.Err() != nil) { // If the command failed and the context hasn't been canceled, then return the error. 
- if toolCategory == NoCategory && ctx.Parent != nil { + if ctx.ToolCategory == NoCategory && ctx.Parent != nil { // If this is a sub-call, then don't return the error; return the error as a message so that the LLM can retry. return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, stdoutAndErr), nil } diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index e3ff930a..c509af9c 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -45,7 +45,7 @@ type Engine struct { } type MCPRunner interface { - Run(ctx context.Context, progress chan<- types.CompletionStatus, tool types.Tool, input string) (string, error) + Run(ctx Context, progress chan<- types.CompletionStatus, tool types.Tool, input string) (string, error) } type State struct { @@ -313,7 +313,7 @@ func populateMessageParams(ctx Context, completion *types.CompletionRequest, too } func (e *Engine) runMCPInvoke(ctx Context, tool types.Tool, input string) (*Return, error) { - output, err := e.MCPRunner.Run(ctx.Ctx, e.Progress, tool, input) + output, err := e.MCPRunner.Run(ctx, e.Progress, tool, input) if err != nil { return nil, fmt.Errorf("failed to run MCP invoke: %w", err) } @@ -335,7 +335,7 @@ func (e *Engine) runCommandTools(ctx Context, tool types.Tool, input string) (*R } else if tool.IsCall() { return e.runCall(ctx, tool, input) } - s, err := e.runCommand(ctx, tool, input, ctx.ToolCategory) + s, err := e.runCommand(ctx, tool, input) return &Return{ Result: &s, }, err diff --git a/pkg/mcp/runner.go b/pkg/mcp/runner.go index 448d58a7..1a275a0c 100644 --- a/pkg/mcp/runner.go +++ b/pkg/mcp/runner.go @@ -1,16 +1,16 @@ package mcp import ( - "context" "encoding/json" "fmt" "strings" + "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/mark3labs/mcp-go/mcp" ) -func (l *Local) Run(ctx context.Context, _ chan<- types.CompletionStatus, tool types.Tool, input string) (string, error) { +func (l *Local) Run(ctx engine.Context, _ chan<- 
types.CompletionStatus, tool types.Tool, input string) (string, error) { fields := strings.Fields(tool.Instructions) if len(fields) < 2 { return "", fmt.Errorf("invalid mcp call, invalid number of fields in %s", tool.Instructions) @@ -41,8 +41,16 @@ func (l *Local) Run(ctx context.Context, _ chan<- types.CompletionStatus, tool t request.Params.Name = toolName request.Params.Arguments = arguments - result, err := session.Client.CallTool(ctx, request) + result, err := session.Client.CallTool(ctx.Ctx, request) if err != nil { + if ctx.ToolCategory == engine.NoCategory && ctx.Parent != nil { + var output []byte + if result != nil { + output, _ = json.Marshal(result) + } + // If this is a sub-call, then don't return the error; return the error as a message so that the LLM can retry. + return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, string(output)), nil + } return "", fmt.Errorf("failed to call tool %s: %w", toolName, err) } From 559876f843c0ed683160d3fc78ac8559412363e7 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 20 May 2025 16:55:54 -0400 Subject: [PATCH 246/270] chore: switch to huma for OpenAPI v3.1 support (#973) Signed-off-by: Donnie Adams --- go.mod | 32 +-- go.sum | 73 +++---- pkg/loader/openapi.go | 195 +++++++++++++++--- .../testdata/openapi/TestOpenAPIv2.golden | 33 +-- .../openapi/TestOpenAPIv2Revamp.golden | 38 ++-- .../testdata/openapi/TestOpenAPIv3.golden | 62 +++--- .../TestOpenAPIv3NoOperationIDs.golden | 62 +++--- .../TestOpenAPIv3NoOperationIDsRevamp.golden | 38 ++-- .../openapi/TestOpenAPIv3Revamp.golden | 38 ++-- pkg/mcp/loader.go | 13 +- pkg/openai/client.go | 3 +- pkg/parser/parser.go | 16 +- pkg/system/prompt.go | 30 ++- pkg/types/completion.go | 10 +- pkg/types/jsonschema.go | 20 +- pkg/types/tool.go | 58 +++--- 16 files changed, 424 insertions(+), 297 deletions(-) diff --git a/go.mod b/go.mod index fc68968e..96deab9a 100644 --- a/go.mod +++ b/go.mod @@ -7,17 +7,18 @@ require ( github.com/BurntSushi/locker 
v0.0.0-20171006230638-a6e239ea1c69 github.com/adrg/xdg v0.4.0 github.com/chzyer/readline v1.5.1 + github.com/danielgtaylor/huma/v2 v2.32.0 github.com/docker/cli v26.0.0+incompatible github.com/docker/docker-credential-helpers v0.8.1 github.com/fatih/color v1.17.0 - github.com/getkin/kin-openapi v0.124.0 + github.com/getkin/kin-openapi v0.132.0 github.com/go-git/go-git/v5 v5.12.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb - github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61 + github.com/gptscript-ai/go-gptscript v0.9.6-0.20250520154649-f1616a06f1b0 github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 @@ -29,14 +30,14 @@ require ( github.com/rs/cors v1.11.0 github.com/samber/lo v1.38.1 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/tidwall/gjson v1.17.1 github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc - golang.org/x/sync v0.9.0 - golang.org/x/term v0.22.0 + golang.org/x/sync v0.10.0 + golang.org/x/term v0.27.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.5.1 sigs.k8s.io/yaml v1.4.0 @@ -62,7 +63,7 @@ require ( github.com/charmbracelet/x/ansi v0.1.1 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/containerd/console v1.0.4 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dlclark/regexp2 v1.10.0 // indirect 
@@ -70,8 +71,8 @@ require ( github.com/emirpasic/gods v1.18.1 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect - github.com/go-openapi/swag v0.22.8 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/gookit/color v1.5.4 // indirect @@ -82,7 +83,6 @@ require ( github.com/hexops/autogold v1.3.1 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/invopop/yaml v0.2.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect @@ -94,7 +94,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/microcosm-cc/bluemonday v1.0.26 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect @@ -102,6 +102,8 @@ require ( github.com/muesli/termenv v0.15.2 // indirect github.com/nightlyone/lockfile v1.0.0 // indirect github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect + github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect + github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/pierrec/lz4/v4 v4.1.21 
// indirect @@ -128,11 +130,11 @@ require ( github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.25.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.20.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.23.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect mvdan.cc/gofumpt v0.6.0 // indirect diff --git a/go.sum b/go.sum index 7ce2cd38..02c068cc 100644 --- a/go.sum +++ b/go.sum @@ -98,13 +98,15 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/danielgtaylor/huma/v2 v2.32.0 
h1:ytU9ExG/axC434+soXxwNzv0uaxOb3cyCgjj8y3PmBE= +github.com/danielgtaylor/huma/v2 v2.32.0/go.mod h1:9BxJwkeoPPDEJ2Bg4yPwL1mM1rYpAwCAWFKoo723spk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -131,8 +133,8 @@ github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUork github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/getkin/kin-openapi v0.124.0 h1:VSFNMB9C9rTKBnQ/fpyDU8ytMTr4dWI9QovSKj9kz/M= -github.com/getkin/kin-openapi v0.124.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= +github.com/getkin/kin-openapi v0.132.0 h1:3ISeLMsQzcb5v26yeJrBcdTCEQTag36ZjaGk7MIRUwk= +github.com/getkin/kin-openapi v0.132.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -145,10 +147,10 @@ github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZt github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod 
h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= -github.com/go-openapi/swag v0.22.8 h1:/9RjDSQ0vbFR+NyjGMkFTsA1IA0fmhKSThmfGZjicbw= -github.com/go-openapi/swag v0.22.8/go.mod h1:6QT22icPLEqAM/z/TChgb4WAveCHF92+2gF0CNjHpPI= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -201,8 +203,8 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1 github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61 h1:QxLjsLOYlsVLPwuRkP0Q8EcAoZT1s8vU2ZBSX0+R6CI= -github.com/gptscript-ai/go-gptscript v0.9.6-0.20250204133419-744b25b84a61/go.mod h1:/FVuLwhz+sIfsWUgUHWKi32qT0i6+IXlUlzs70KKt/Q= +github.com/gptscript-ai/go-gptscript v0.9.6-0.20250520154649-f1616a06f1b0 h1:UXZRFAUPDWOgeTyjZd4M8YrEEgPc7XOfjgbm81w7x0w= +github.com/gptscript-ai/go-gptscript v0.9.6-0.20250520154649-f1616a06f1b0/go.mod h1:t2TyiEa6rhd4reOcorAMUmd5MledmZuTmYrO7rV3Iy8= github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 h1:wQC8sKyeGA50WnCEG+Jo5FNRIkuX3HX8d3ubyWCCoI8= github.com/gptscript-ai/tui 
v0.0.0-20250419050840-5e79e16786c9/go.mod h1:iwHxuueg2paOak7zIg0ESBWx7A0wIHGopAratbgaPNY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -229,8 +231,6 @@ github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4Dvx github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= -github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 h1:iCHtR9CQyktQ5+f3dMVZfwD2KWJUgm7M0gdL9NGr8KA= github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -252,8 +252,8 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod 
h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -287,8 +287,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q= @@ -305,6 +305,10 @@ github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAm github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= +github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= +github.com/oasdiff/yaml3 
v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77 h1:3bMMZ1f+GPXFQ1uNaYbO/uECWvSfqEA+ZEXn1rFAT88= github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77/go.mod h1:8Hf+pH6thup1sPZPD+NLg7d6vbpsdilu9CPIeikvgMQ= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= @@ -365,8 +369,8 @@ github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e h1:H+jDT github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e/go.mod h1:VsUklG6OQo7Ctunu0gS3AtEOCEc2kMB6r5rKzxAes58= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf h1:pvbZ0lM0XWPBqUKqFU8cmavspvIl9nulOYwdy6IFRRo= @@ -382,8 +386,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 
h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= @@ -392,8 +396,8 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= -github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -433,8 +437,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto 
v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -493,8 +497,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -510,8 +514,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -551,8 +555,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -564,8 +568,8 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= 
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -579,8 +583,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -663,7 +667,6 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= diff --git 
a/pkg/loader/openapi.go b/pkg/loader/openapi.go index 3ab564e5..ef61adfb 100644 --- a/pkg/loader/openapi.go +++ b/pkg/loader/openapi.go @@ -11,6 +11,7 @@ import ( "strings" "time" + humav2 "github.com/danielgtaylor/huma/v2" "github.com/getkin/kin-openapi/openapi3" "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/gptscript-ai/gptscript/pkg/types" @@ -150,10 +151,9 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) Parameters: types.Parameters{ Name: toolName, Description: toolDesc, - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Properties: openapi3.Schemas{}, - Required: []string{}, + Arguments: &humav2.Schema{ + Type: humav2.TypeObject, + Properties: make(map[string]*humav2.Schema), }, }, }, @@ -174,7 +174,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) } // Add the new arg to the tool's arguments - tool.Arguments.Properties[param.Value.Name] = &openapi3.SchemaRef{Value: arg} + tool.Arguments.Properties[param.Value.Name] = openAPI3SchemaToHumaV2Schema(arg) // Check whether it is required if param.Value.Required { @@ -227,7 +227,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) } // Unfortunately, the request body doesn't contain any good descriptor for it, // so we just use "requestBodyContent" as the name of the arg. 
- tool.Arguments.Properties["requestBodyContent"] = &openapi3.SchemaRef{Value: arg} + tool.Arguments.Properties["requestBodyContent"] = openAPI3SchemaToHumaV2Schema(arg) break } @@ -373,6 +373,147 @@ func parseServer(server *openapi3.Server) (string, error) { return s, nil } +// openAPI3SchemaToHumaV2Schema converts an openapi3.Schema to a humav2.Schema +func openAPI3SchemaToHumaV2Schema(schema *openapi3.Schema) *humav2.Schema { + if schema == nil { + return nil + } + + result := &humav2.Schema{ + Title: schema.Title, + Description: schema.Description, + Format: schema.Format, + Nullable: schema.Nullable, + } + + // Convert type + if schema.Type != nil && len(*schema.Type) > 0 { + result.Type = (*schema.Type)[0] + } + + // Convert enum + if schema.Enum != nil { + result.Enum = schema.Enum + } + + // Convert min/max + if schema.Min != nil { + minVal := *schema.Min + result.Minimum = &minVal + + // In OpenAPI 3, ExclusiveMin is a boolean flag that applies to Min + // In OpenAPI 3.1, ExclusiveMinimum is a separate value + if schema.ExclusiveMin { + result.ExclusiveMinimum = &minVal + } + } + if schema.Max != nil { + maxVal := *schema.Max + result.Maximum = &maxVal + + // In OpenAPI 3, ExclusiveMax is a boolean flag that applies to Max + // In OpenAPI 3.1, ExclusiveMaximum is a separate value + if schema.ExclusiveMax { + result.ExclusiveMaximum = &maxVal + } + } + + // Convert minLength/maxLength + if schema.MinLength != 0 { + minLength := int(schema.MinLength) + result.MinLength = &minLength + } + if schema.MaxLength != nil { + maxLength := int(*schema.MaxLength) + result.MaxLength = &maxLength + } + + // Convert pattern + if schema.Pattern != "" { + result.Pattern = schema.Pattern + } + + // Convert minItems/maxItems + if schema.MinItems != 0 { + minItems := int(schema.MinItems) + result.MinItems = &minItems + } + if schema.MaxItems != nil { + maxItems := int(*schema.MaxItems) + result.MaxItems = &maxItems + } + + // Convert uniqueItems + result.UniqueItems = 
schema.UniqueItems + + // Convert minProperties/maxProperties + if schema.MinProps != 0 { + minProps := int(schema.MinProps) + result.MinProperties = &minProps + } + if schema.MaxProps != nil { + maxProps := int(*schema.MaxProps) + result.MaxProperties = &maxProps + } + + // Convert required + if schema.Required != nil { + result.Required = schema.Required + } + + // Convert properties + if schema.Properties != nil { + result.Properties = make(map[string]*humav2.Schema, len(schema.Properties)) + for name, propRef := range schema.Properties { + if propRef != nil && propRef.Value != nil { + result.Properties[name] = openAPI3SchemaToHumaV2Schema(propRef.Value) + } + } + } + + // Convert items + if schema.Items != nil && schema.Items.Value != nil { + result.Items = openAPI3SchemaToHumaV2Schema(schema.Items.Value) + } + + // Convert oneOf + if schema.OneOf != nil { + result.OneOf = make([]*humav2.Schema, len(schema.OneOf)) + for i, oneOfRef := range schema.OneOf { + if oneOfRef != nil && oneOfRef.Value != nil { + result.OneOf[i] = openAPI3SchemaToHumaV2Schema(oneOfRef.Value) + } + } + } + + // Convert anyOf + if schema.AnyOf != nil { + result.AnyOf = make([]*humav2.Schema, len(schema.AnyOf)) + for i, anyOfRef := range schema.AnyOf { + if anyOfRef != nil && anyOfRef.Value != nil { + result.AnyOf[i] = openAPI3SchemaToHumaV2Schema(anyOfRef.Value) + } + } + } + + // Convert allOf + if schema.AllOf != nil { + result.AllOf = make([]*humav2.Schema, len(schema.AllOf)) + for i, allOfRef := range schema.AllOf { + if allOfRef != nil && allOfRef.Value != nil { + result.AllOf[i] = openAPI3SchemaToHumaV2Schema(allOfRef.Value) + } + } + } + + // Convert not + if schema.Not != nil && schema.Not.Value != nil { + result.Not = openAPI3SchemaToHumaV2Schema(schema.Not.Value) + } + + return result +} + func getOpenAPIToolsRevamp(t *openapi3.T, source, targetToolName string) ([]types.Tool, error) { if t == nil { return nil, fmt.Errorf("OpenAPI spec is nil") @@ -402,16 +543,14 @@ func 
getOpenAPIToolsRevamp(t *openapi3.T, source, targetToolName string) ([]type Parameters: types.Parameters{ Name: types.ToolNormalizer("get-schema-" + t.Info.Title), Description: fmt.Sprintf("Get the JSONSchema for the arguments for an operation for %s. You must do this before you run the operation.", t.Info.Title), - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{openapi3.TypeObject}, - Properties: openapi3.Schemas{ + Arguments: &humav2.Schema{ + Type: humav2.TypeObject, + Properties: map[string]*humav2.Schema{ "operation": { - Value: &openapi3.Schema{ - Type: &openapi3.Types{openapi3.TypeString}, - Title: "operation", - Description: "the name of the operation to get the schema for", - Required: []string{"operation"}, - }, + Type: humav2.TypeString, + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{"operation"}, }, }, }, @@ -428,24 +567,20 @@ func getOpenAPIToolsRevamp(t *openapi3.T, source, targetToolName string) ([]type Parameters: types.Parameters{ Name: types.ToolNormalizer("run-operation-" + t.Info.Title), Description: fmt.Sprintf("Run an operation for %s. 
You MUST call %s for the operation before you use this tool.", t.Info.Title, openapi.GetSchemaTool), - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{openapi3.TypeObject}, - Properties: openapi3.Schemas{ + Arguments: &humav2.Schema{ + Type: humav2.TypeObject, + Properties: map[string]*humav2.Schema{ "operation": { - Value: &openapi3.Schema{ - Type: &openapi3.Types{openapi3.TypeString}, - Title: "operation", - Description: "the name of the operation to run", - Required: []string{"operation"}, - }, + Type: humav2.TypeString, + Title: "operation", + Description: "the name of the operation to run", + Required: []string{"operation"}, }, "args": { - Value: &openapi3.Schema{ - Type: &openapi3.Types{openapi3.TypeString}, - Title: "args", - Description: "the JSON string containing arguments; must match the JSONSchema for the operation", - Required: []string{"args"}, - }, + Type: humav2.TypeString, + Title: "args", + Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Required: []string{"args"}, }, }, }, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv2.golden b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden index 39b0b2c1..d64c70ea 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv2.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden @@ -56,16 +56,17 @@ types.ToolSet{ Name: "listPets", Description: "List all pets", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{ - "object", + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{ + "limit": { + Type: "integer", + Description: "How many items to return at one time (max 100)", + Format: "int32", + Properties: map[string]*huma.Schema{}, + AllOf: []*huma.Schema{}, + }, }, - Required: []string{}, - Properties: openapi3.Schemas{"limit": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"integer"}, - Format: "int32", - Description: "How many items to return at one time (max 100)", - 
}}}, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, @@ -86,13 +87,15 @@ types.ToolSet{ Name: "showPetById", Description: "Info for a specific pet", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Required: []string{"petId"}, - Properties: openapi3.Schemas{"petId": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{"petId": { + Type: "string", Description: "The id of the pet to retrieve", - }}}, + Properties: map[string]*huma.Schema{}, + AllOf: []*huma.Schema{}, + }}, + Required: []string{"petId"}, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden index ebe68cc2..d89e976e 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden @@ -36,16 +36,18 @@ types.ToolSet{ Name: "getSchemaSwaggerPetstore", Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. 
You must do this before you run the operation.", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{ - "object", + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{ + "operation": { + Type: "string", + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{ + "operation", + }, + }, }, - Properties: openapi3.Schemas{"operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, - Title: "operation", - Description: "the name of the operation to get the schema for", - Required: []string{"operation"}, - }}}, }, }, Instructions: "#!sys.openapi get-schema ", @@ -84,21 +86,21 @@ types.ToolSet{ Name: "runOperationSwaggerPetstore", Description: "Run an operation for Swagger Petstore. You MUST call get-schema for the operation before you use this tool.", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Properties: openapi3.Schemas{ - "args": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{ + "args": { + Type: "string", Title: "args", Description: "the JSON string containing arguments; must match the JSONSchema for the operation", Required: []string{"args"}, - }}, - "operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, + }, + "operation": { + Type: "string", Title: "operation", Description: "the name of the operation to run", Required: []string{"operation"}, - }}, + }, }, }, }, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden index 37ac2fe2..710440cf 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden @@ -37,30 +37,25 @@ types.ToolSet{ Name: "createPets", Description: "Create a pet", ModelName: "gpt-4o", - Arguments: 
&openapi3.Schema{ - Type: &openapi3.Types{ - "object", - }, - Required: []string{}, - Properties: openapi3.Schemas{"requestBodyContent": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Required: []string{ - "id", - "name", - }, - Properties: openapi3.Schemas{ - "id": &openapi3.SchemaRef{ - Value: &openapi3.Schema{ - Type: &openapi3.Types{ - "integer", - }, + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{ + "requestBodyContent": { + Type: "object", + Properties: map[string]*huma.Schema{ + "id": { + Type: "integer", Format: "int64", }, + "name": {Type: "string"}, + "tag": {Type: "string"}, + }, + Required: []string{ + "id", + "name", }, - "name": &openapi3.SchemaRef{Value: &openapi3.Schema{Type: &openapi3.Types{"string"}}}, - "tag": &openapi3.SchemaRef{Value: &openapi3.Schema{Type: &openapi3.Types{"string"}}}, }, - }}}, + }, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","securityInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, @@ -81,15 +76,14 @@ types.ToolSet{ Name: "listPets", Description: "List all pets", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Required: []string{}, - Properties: openapi3.Schemas{"limit": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"integer"}, - Format: "int32", + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{"limit": { + Type: "integer", Description: "How many items to return at one time (max 100)", - Max: valast.Ptr(float64(100)), - }}}, + Format: "int32", + Maximum: valast.Ptr(float64(100)), + }}, }, }, Instructions: `#!sys.openapi 
'{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, @@ -110,13 +104,13 @@ types.ToolSet{ Name: "showPetById", Description: "Info for a specific pet", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Required: []string{"petId"}, - Properties: openapi3.Schemas{"petId": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{"petId": { + Type: "string", Description: "The id of the pet to retrieve", - }}}, + }}, + Required: []string{"petId"}, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden index e950e19c..c12c7834 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden @@ -37,17 +37,16 @@ types.ToolSet{ Name: "get_pets", Description: "List all pets", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{ - "object", + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{ + "limit": { + Type: "integer", + Description: "How many items to return at one time (max 100)", + Format: "int32", + Maximum: valast.Ptr(float64(100)), + }, }, - Required: []string{}, - Properties: openapi3.Schemas{"limit": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"integer"}, - Format: "int32", - Description: "How 
many items to return at one time (max 100)", - Max: valast.Ptr(float64(100)), - }}}, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, @@ -68,13 +67,13 @@ types.ToolSet{ Name: "get_pets_petId", Description: "Info for a specific pet", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Required: []string{"petId"}, - Properties: openapi3.Schemas{"petId": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{"petId": { + Type: "string", Description: "The id of the pet to retrieve", - }}}, + }}, + Required: []string{"petId"}, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, @@ -95,28 +94,23 @@ types.ToolSet{ Name: "post_pets", Description: "Create a pet", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Required: []string{}, - Properties: openapi3.Schemas{"requestBodyContent": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{"requestBodyContent": { + Type: "object", + Properties: map[string]*huma.Schema{ + "id": { + Type: "integer", + Format: "int64", + }, + "name": {Type: "string"}, + "tag": {Type: "string"}, + }, Required: []string{ "id", "name", }, - Properties: openapi3.Schemas{ - "id": &openapi3.SchemaRef{ - Value: &openapi3.Schema{ - Type: &openapi3.Types{ - "integer", - }, - Format: "int64", - 
}, - }, - "name": &openapi3.SchemaRef{Value: &openapi3.Schema{Type: &openapi3.Types{"string"}}}, - "tag": &openapi3.SchemaRef{Value: &openapi3.Schema{Type: &openapi3.Types{"string"}}}, - }, - }}}, + }}, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","securityInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden index ebe68cc2..d89e976e 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden @@ -36,16 +36,18 @@ types.ToolSet{ Name: "getSchemaSwaggerPetstore", Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. You must do this before you run the operation.", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{ - "object", + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{ + "operation": { + Type: "string", + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{ + "operation", + }, + }, }, - Properties: openapi3.Schemas{"operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, - Title: "operation", - Description: "the name of the operation to get the schema for", - Required: []string{"operation"}, - }}}, }, }, Instructions: "#!sys.openapi get-schema ", @@ -84,21 +86,21 @@ types.ToolSet{ Name: "runOperationSwaggerPetstore", Description: "Run an operation for Swagger Petstore. 
You MUST call get-schema for the operation before you use this tool.", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Properties: openapi3.Schemas{ - "args": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{ + "args": { + Type: "string", Title: "args", Description: "the JSON string containing arguments; must match the JSONSchema for the operation", Required: []string{"args"}, - }}, - "operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, + }, + "operation": { + Type: "string", Title: "operation", Description: "the name of the operation to run", Required: []string{"operation"}, - }}, + }, }, }, }, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden index ebe68cc2..d89e976e 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden @@ -36,16 +36,18 @@ types.ToolSet{ Name: "getSchemaSwaggerPetstore", Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. 
You must do this before you run the operation.", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{ - "object", + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{ + "operation": { + Type: "string", + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{ + "operation", + }, + }, }, - Properties: openapi3.Schemas{"operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, - Title: "operation", - Description: "the name of the operation to get the schema for", - Required: []string{"operation"}, - }}}, }, }, Instructions: "#!sys.openapi get-schema ", @@ -84,21 +86,21 @@ types.ToolSet{ Name: "runOperationSwaggerPetstore", Description: "Run an operation for Swagger Petstore. You MUST call get-schema for the operation before you use this tool.", ModelName: "gpt-4o", - Arguments: &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Properties: openapi3.Schemas{ - "args": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, + Arguments: &huma.Schema{ + Type: "object", + Properties: map[string]*huma.Schema{ + "args": { + Type: "string", Title: "args", Description: "the JSON string containing arguments; must match the JSONSchema for the operation", Required: []string{"args"}, - }}, - "operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ - Type: &openapi3.Types{"string"}, + }, + "operation": { + Type: "string", Title: "operation", Description: "the name of the operation to run", Required: []string{"operation"}, - }}, + }, }, }, }, diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index b3441719..d31c6503 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -10,7 +10,7 @@ import ( "strings" "sync" - "github.com/getkin/kin-openapi/openapi3" + humav2 "github.com/danielgtaylor/huma/v2" "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/mvl" 
"github.com/gptscript-ai/gptscript/pkg/types" @@ -194,19 +194,18 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s if !allToolsAllowed && !slices.Contains(allowedTools, tool.Name) { continue } + if tool.Name == "" { + // I dunno, bad tool? + continue + } - var schema openapi3.Schema + var schema humav2.Schema schemaData, err := json.Marshal(tool.InputSchema) if err != nil { panic(err) } - if tool.Name == "" { - // I dunno, bad tool? - continue - } - if err := json.Unmarshal(schemaData, &schema); err != nil { return nil, fmt.Errorf("failed to unmarshal tool input schema: %w", err) } diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 7715c657..69e6621d 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -11,6 +11,7 @@ import ( "strings" "time" + humav2 "github.com/danielgtaylor/huma/v2" openai "github.com/gptscript-ai/chat-completion-client" "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/counter" @@ -405,7 +406,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques var params any = tool.Function.Parameters if tool.Function.Parameters == nil || len(tool.Function.Parameters.Properties) == 0 { params = map[string]any{ - "type": "object", + "type": humav2.TypeObject, "properties": map[string]any{}, } } diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index b00b1506..e7ec287d 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -10,7 +10,7 @@ import ( "strconv" "strings" - "github.com/getkin/kin-openapi/openapi3" + humav2 "github.com/danielgtaylor/huma/v2" "github.com/gptscript-ai/gptscript/pkg/types" ) @@ -54,9 +54,9 @@ func csv(line string) (result []string) { func addArg(line string, tool *types.Tool) error { if tool.Arguments == nil { - tool.Arguments = &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Properties: openapi3.Schemas{}, + tool.Arguments = &humav2.Schema{ + Type: "object", + Properties: 
make(map[string]*humav2.Schema, 1), } } @@ -65,11 +65,9 @@ func addArg(line string, tool *types.Tool) error { return fmt.Errorf("invalid arg format: %s", line) } - tool.Arguments.Properties[key] = &openapi3.SchemaRef{ - Value: &openapi3.Schema{ - Description: strings.TrimSpace(value), - Type: &openapi3.Types{"string"}, - }, + tool.Arguments.Properties[key] = &humav2.Schema{ + Description: strings.TrimSpace(value), + Type: "string", } return nil diff --git a/pkg/system/prompt.go b/pkg/system/prompt.go index 6b1815fd..a4fe5f26 100644 --- a/pkg/system/prompt.go +++ b/pkg/system/prompt.go @@ -5,7 +5,7 @@ import ( "os" "strings" - "github.com/getkin/kin-openapi/openapi3" + humav2 "github.com/danielgtaylor/huma/v2" ) // Suffix is default suffix of gptscript files @@ -26,26 +26,22 @@ You don't move to the next step until you have a result. // to just send pure text but the interface required JSON (as that is the fundamental interface of tools in OpenAI) var DefaultPromptParameter = "defaultPromptParameter" -var DefaultToolSchema = openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Properties: openapi3.Schemas{ - DefaultPromptParameter: &openapi3.SchemaRef{ - Value: &openapi3.Schema{ - Description: "Prompt to send to the tool. This may be an instruction or question.", - Type: &openapi3.Types{"string"}, - }, +var DefaultToolSchema = humav2.Schema{ + Type: "object", + Properties: map[string]*humav2.Schema{ + DefaultPromptParameter: { + Description: "Prompt to send to the tool. This may be an instruction or question.", + Type: "string", }, }, } -var DefaultChatSchema = openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Properties: openapi3.Schemas{ - DefaultPromptParameter: &openapi3.SchemaRef{ - Value: &openapi3.Schema{ - Description: "Prompt to send to the assistant. 
This may be an instruction or question.", - Type: &openapi3.Types{"string"}, - }, +var DefaultChatSchema = humav2.Schema{ + Type: "object", + Properties: map[string]*humav2.Schema{ + DefaultPromptParameter: { + Description: "Prompt to send to the assistant. This may be an instruction or question.", + Type: "string", }, }, } diff --git a/pkg/types/completion.go b/pkg/types/completion.go index 2362071f..fbd2fb3b 100644 --- a/pkg/types/completion.go +++ b/pkg/types/completion.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/getkin/kin-openapi/openapi3" + humav2 "github.com/danielgtaylor/huma/v2" ) type CompletionRequest struct { @@ -31,10 +31,10 @@ type ChatCompletionTool struct { } type CompletionFunctionDefinition struct { - ToolID string `json:"toolID,omitempty"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - Parameters *openapi3.Schema `json:"parameters"` + ToolID string `json:"toolID,omitempty"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + Parameters *humav2.Schema `json:"parameters"` } // Chat message role defined by the OpenAI API. 
diff --git a/pkg/types/jsonschema.go b/pkg/types/jsonschema.go index 6fd0d4ea..b88e37b6 100644 --- a/pkg/types/jsonschema.go +++ b/pkg/types/jsonschema.go @@ -1,21 +1,17 @@ package types -import ( - "github.com/getkin/kin-openapi/openapi3" -) +import humav2 "github.com/danielgtaylor/huma/v2" -func ObjectSchema(kv ...string) *openapi3.Schema { - s := &openapi3.Schema{ - Type: &openapi3.Types{"object"}, - Properties: openapi3.Schemas{}, +func ObjectSchema(kv ...string) *humav2.Schema { + s := &humav2.Schema{ + Type: humav2.TypeObject, + Properties: make(map[string]*humav2.Schema, len(kv)/2), } for i, v := range kv { if i%2 == 1 { - s.Properties[kv[i-1]] = &openapi3.SchemaRef{ - Value: &openapi3.Schema{ - Description: v, - Type: &openapi3.Types{"string"}, - }, + s.Properties[kv[i-1]] = &humav2.Schema{ + Description: v, + Type: humav2.TypeString, } } } diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 10b47c77..c5346319 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -9,7 +9,7 @@ import ( "sort" "strings" - "github.com/getkin/kin-openapi/openapi3" + humav2 "github.com/danielgtaylor/huma/v2" "github.com/google/shlex" "github.com/gptscript-ai/gptscript/pkg/system" "golang.org/x/exp/maps" @@ -120,33 +120,33 @@ func (p Program) SetBlocking() Program { type BuiltinFunc func(ctx context.Context, env []string, input string, progress chan<- string) (string, error) type Parameters struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - MaxTokens int `json:"maxTokens,omitempty"` - ModelName string `json:"modelName,omitempty"` - ModelProvider bool `json:"modelProvider,omitempty"` - JSONResponse bool `json:"jsonResponse,omitempty"` - Chat bool `json:"chat,omitempty"` - Temperature *float32 `json:"temperature,omitempty"` - Cache *bool `json:"cache,omitempty"` - InternalPrompt *bool `json:"internalPrompt"` - Arguments *openapi3.Schema `json:"arguments,omitempty"` - Tools []string `json:"tools,omitempty"` - GlobalTools 
[]string `json:"globalTools,omitempty"` - GlobalModelName string `json:"globalModelName,omitempty"` - Context []string `json:"context,omitempty"` - ExportContext []string `json:"exportContext,omitempty"` - Export []string `json:"export,omitempty"` - Agents []string `json:"agents,omitempty"` - Credentials []string `json:"credentials,omitempty"` - ExportCredentials []string `json:"exportCredentials,omitempty"` - InputFilters []string `json:"inputFilters,omitempty"` - ExportInputFilters []string `json:"exportInputFilters,omitempty"` - OutputFilters []string `json:"outputFilters,omitempty"` - ExportOutputFilters []string `json:"exportOutputFilters,omitempty"` - Blocking bool `json:"-"` - Stdin bool `json:"stdin,omitempty"` - Type ToolType `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + MaxTokens int `json:"maxTokens,omitempty"` + ModelName string `json:"modelName,omitempty"` + ModelProvider bool `json:"modelProvider,omitempty"` + JSONResponse bool `json:"jsonResponse,omitempty"` + Chat bool `json:"chat,omitempty"` + Temperature *float32 `json:"temperature,omitempty"` + Cache *bool `json:"cache,omitempty"` + InternalPrompt *bool `json:"internalPrompt"` + Arguments *humav2.Schema `json:"arguments,omitempty"` + Tools []string `json:"tools,omitempty"` + GlobalTools []string `json:"globalTools,omitempty"` + GlobalModelName string `json:"globalModelName,omitempty"` + Context []string `json:"context,omitempty"` + ExportContext []string `json:"exportContext,omitempty"` + Export []string `json:"export,omitempty"` + Agents []string `json:"agents,omitempty"` + Credentials []string `json:"credentials,omitempty"` + ExportCredentials []string `json:"exportCredentials,omitempty"` + InputFilters []string `json:"inputFilters,omitempty"` + ExportInputFilters []string `json:"exportInputFilters,omitempty"` + OutputFilters []string `json:"outputFilters,omitempty"` + ExportOutputFilters []string 
`json:"exportOutputFilters,omitempty"` + Blocking bool `json:"-"` + Stdin bool `json:"stdin,omitempty"` + Type ToolType `json:"type,omitempty"` } func (p Parameters) allExports() []string { @@ -486,7 +486,7 @@ func (t ToolDef) Print() string { sort.Strings(keys) for _, key := range keys { prop := t.Arguments.Properties[key] - _, _ = fmt.Fprintf(buf, "Parameter: %s: %s\n", key, prop.Value.Description) + _, _ = fmt.Fprintf(buf, "Parameter: %s: %s\n", key, prop.Description) } } if t.InternalPrompt != nil { From 454dd09597ec78b639ad543602e018be316b3cec Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 27 May 2025 11:13:42 -0400 Subject: [PATCH 247/270] feat: add support for MCP HTTP streaming Signed-off-by: Donnie Adams --- pkg/mcp/loader.go | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index d31c6503..f3dffcf6 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -15,7 +15,8 @@ import ( "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/gptscript-ai/gptscript/pkg/version" - "github.com/mark3labs/mcp-go/client" + mcpclient "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/client/transport" "github.com/mark3labs/mcp-go/mcp" ) @@ -36,7 +37,7 @@ type Local struct { type Session struct { ID string InitResult *mcp.InitializeResult - Client client.MCPClient + Client mcpclient.MCPClient Config ServerConfig } @@ -117,7 +118,7 @@ func (l *Local) LoadTools(ctx context.Context, server ServerConfig, toolName str // Reset so we don't start a new MCP server, no reason to if one is already running and the allowed tools change. 
server.AllowedTools = nil - session, err := l.loadSession(server) + session, err := l.loadSession(server, true) if err != nil { return nil, err } @@ -279,7 +280,7 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s return toolDefs, nil } -func (l *Local) loadSession(server ServerConfig) (*Session, error) { +func (l *Local) loadSession(server ServerConfig, tryHTTPStreaming bool) (*Session, error) { id := hash.Digest(server) l.lock.Lock() existing, ok := l.sessions[id] @@ -294,11 +295,11 @@ func (l *Local) loadSession(server ServerConfig) (*Session, error) { } var ( - c *client.Client + c *mcpclient.Client err error ) if server.Command != "" { - c, err = client.NewStdioMCPClient(server.Command, server.Env, server.Args...) + c, err = mcpclient.NewStdioMCPClient(server.Command, server.Env, server.Args...) if err != nil { return nil, fmt.Errorf("failed to create MCP stdio client: %w", err) } @@ -314,7 +315,11 @@ func (l *Local) loadSession(server ServerConfig) (*Session, error) { headers[k] = v } - c, err = client.NewSSEMCPClient(url, client.WithHeaders(headers)) + if tryHTTPStreaming { + c, err = mcpclient.NewStreamableHttpClient(url, transport.WithHTTPHeaders(headers)) + } else { + c, err = mcpclient.NewSSEMCPClient(url, mcpclient.WithHeaders(headers)) + } if err != nil { return nil, fmt.Errorf("failed to create MCP HTTP client: %w", err) } @@ -333,6 +338,13 @@ func (l *Local) loadSession(server ServerConfig) (*Session, error) { initResult, err := c.Initialize(ctx, initRequest) if err != nil { + if server.Command == "" && tryHTTPStreaming { + // The MCP spec indicates that trying to initialize the client for HTTP streaming and checking for an error + // is the recommended way to determine if the server supports HTTP streaming, falling back to SEE. + // Ideally, we can check for a 400-level error, but our client implementation doesn't expose that information. + // Retrying on any error is harmless. 
+ return l.loadSession(server, false) + } return nil, fmt.Errorf("failed to initialize MCP client: %w", err) } From 977aabb1d9ce0b218374160ca14e384f06614187 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 27 May 2025 11:25:05 -0400 Subject: [PATCH 248/270] chore: bump mcp dependency Signed-off-by: Donnie Adams --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 96deab9a..ea6f8799 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 - github.com/mark3labs/mcp-go v0.25.0 + github.com/mark3labs/mcp-go v0.30.0 github.com/mholt/archives v0.1.0 github.com/pkoukk/tiktoken-go v0.1.7 github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 diff --git a/go.sum b/go.sum index 02c068cc..e0f89743 100644 --- a/go.sum +++ b/go.sum @@ -270,8 +270,8 @@ github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mark3labs/mcp-go v0.25.0 h1:UUpcMT3L5hIhuDy7aifj4Bphw4Pfx1Rf8mzMXDe8RQw= -github.com/mark3labs/mcp-go v0.25.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= +github.com/mark3labs/mcp-go v0.30.0 h1:Taz7fiefkxY/l8jz1nA90V+WdM2eoMtlvwfWforVYbo= +github.com/mark3labs/mcp-go v0.30.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= From ce6afe2a23236cdd60c635049c760c380c79f240 
Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 28 May 2025 08:52:27 -0400 Subject: [PATCH 249/270] enhance: expose MCP server capabilities (#976) Signed-off-by: Donnie Adams --- pkg/mcp/client.go | 106 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 pkg/mcp/client.go diff --git a/pkg/mcp/client.go b/pkg/mcp/client.go new file mode 100644 index 00000000..ff712ef9 --- /dev/null +++ b/pkg/mcp/client.go @@ -0,0 +1,106 @@ +package mcp + +import ( + "context" + + mcpclient "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/mcp" +) + +type Client interface { + mcpclient.MCPClient + Capabilities() mcp.ServerCapabilities +} + +func (l *Local) Client(server ServerConfig) (Client, error) { + session, err := l.loadSession(server, true) + if err != nil { + return nil, err + } + + return &client{session}, nil +} + +type client struct { + *Session +} + +func (c *client) Initialize(ctx context.Context, request mcp.InitializeRequest) (*mcp.InitializeResult, error) { + return c.Client.Initialize(ctx, request) +} + +func (c *client) Ping(ctx context.Context) error { + return c.Client.Ping(ctx) +} + +func (c *client) ListResourcesByPage(ctx context.Context, request mcp.ListResourcesRequest) (*mcp.ListResourcesResult, error) { + return c.Client.ListResourcesByPage(ctx, request) +} + +func (c *client) ListResources(ctx context.Context, request mcp.ListResourcesRequest) (*mcp.ListResourcesResult, error) { + return c.Client.ListResources(ctx, request) +} + +func (c *client) ListResourceTemplatesByPage(ctx context.Context, request mcp.ListResourceTemplatesRequest) (*mcp.ListResourceTemplatesResult, error) { + return c.Client.ListResourceTemplatesByPage(ctx, request) +} + +func (c *client) ListResourceTemplates(ctx context.Context, request mcp.ListResourceTemplatesRequest) (*mcp.ListResourceTemplatesResult, error) { + return c.Client.ListResourceTemplates(ctx, request) +} + +func (c *client) ReadResource(ctx 
context.Context, request mcp.ReadResourceRequest) (*mcp.ReadResourceResult, error) { + return c.Client.ReadResource(ctx, request) +} + +func (c *client) Subscribe(ctx context.Context, request mcp.SubscribeRequest) error { + return c.Client.Subscribe(ctx, request) +} + +func (c *client) Unsubscribe(ctx context.Context, request mcp.UnsubscribeRequest) error { + return c.Client.Unsubscribe(ctx, request) +} + +func (c *client) ListPromptsByPage(ctx context.Context, request mcp.ListPromptsRequest) (*mcp.ListPromptsResult, error) { + return c.Client.ListPromptsByPage(ctx, request) +} + +func (c *client) ListPrompts(ctx context.Context, request mcp.ListPromptsRequest) (*mcp.ListPromptsResult, error) { + return c.Client.ListPrompts(ctx, request) +} + +func (c *client) GetPrompt(ctx context.Context, request mcp.GetPromptRequest) (*mcp.GetPromptResult, error) { + return c.Client.GetPrompt(ctx, request) +} + +func (c *client) ListToolsByPage(ctx context.Context, request mcp.ListToolsRequest) (*mcp.ListToolsResult, error) { + return c.Client.ListToolsByPage(ctx, request) +} + +func (c *client) ListTools(ctx context.Context, request mcp.ListToolsRequest) (*mcp.ListToolsResult, error) { + return c.Client.ListTools(ctx, request) +} + +func (c *client) CallTool(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return c.Client.CallTool(ctx, request) +} + +func (c *client) SetLevel(ctx context.Context, request mcp.SetLevelRequest) error { + return c.Client.SetLevel(ctx, request) +} + +func (c *client) Complete(ctx context.Context, request mcp.CompleteRequest) (*mcp.CompleteResult, error) { + return c.Client.Complete(ctx, request) +} + +func (c *client) Close() error { + return c.Client.Close() +} + +func (c *client) OnNotification(handler func(notification mcp.JSONRPCNotification)) { + c.Client.OnNotification(handler) +} + +func (c *client) Capabilities() mcp.ServerCapabilities { + return c.InitResult.Capabilities +} From 
c810be4bf185a6c377f33c0ff665188ba39c64a3 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 30 May 2025 10:52:44 -0400 Subject: [PATCH 250/270] fix: distinguish between nil and non-nil allowed tools (#977) If allowedTools is nil, then all tools are allowed. If allowed tools is non-nil and has length zero, then no tools are allowed. Signed-off-by: Donnie Adams --- pkg/mcp/loader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index f3dffcf6..c9f1b8a3 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -181,7 +181,7 @@ func (l *Local) Close() error { } func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName string, allowedTools []string) ([]types.Tool, error) { - allToolsAllowed := len(allowedTools) == 0 || slices.Contains(allowedTools, "*") + allToolsAllowed := allowedTools == nil || slices.Contains(allowedTools, "*") tools, err := session.Client.ListTools(ctx, mcp.ListToolsRequest{}) if err != nil { From fe3ace9cd757ad18a5e84e405449f0349250d13d Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 11 Jun 2025 15:22:40 -0400 Subject: [PATCH 251/270] chore: replace mcp-go with nanobot Signed-off-by: Donnie Adams --- .gitignore | 1 + go.mod | 50 ++++---- go.sum | 121 +++++++++--------- pkg/cli/main.go | 24 +++- pkg/gptscript/gptscript.go | 8 +- pkg/mcp/client.go | 102 ++------------- pkg/mcp/loader.go | 130 +++++++------------- pkg/mcp/runner.go | 7 +- pkg/tests/runner2_test.go | 2 +- pkg/tests/runner_test.go | 12 ++ pkg/tests/testdata/TestMCPLoad/step1.golden | 2 +- 11 files changed, 186 insertions(+), 273 deletions(-) diff --git a/.gitignore b/.gitignore index 759e3286..32148f05 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ /bin +/pkg/tests/bin /.idea /static/ui **/node_modules/ diff --git a/go.mod b/go.mod index ea6f8799..72324362 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/gptscript-ai/gptscript -go 1.23.1 +go 1.24.2 + +toolchain go1.24.4 
require ( github.com/AlecAivazis/survey/v2 v2.3.7 @@ -12,32 +14,32 @@ require ( github.com/docker/docker-credential-helpers v0.8.1 github.com/fatih/color v1.17.0 github.com/getkin/kin-openapi v0.132.0 - github.com/go-git/go-git/v5 v5.12.0 + github.com/go-git/go-git/v5 v5.13.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d - github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb + github.com/gptscript-ai/cmd v0.0.0-20250530150401-bc71fddf8070 github.com/gptscript-ai/go-gptscript v0.9.6-0.20250520154649-f1616a06f1b0 github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 - github.com/mark3labs/mcp-go v0.30.0 github.com/mholt/archives v0.1.0 + github.com/nanobot-ai/nanobot v0.0.6-0.20250612211144-0a23cf13a10f github.com/pkoukk/tiktoken-go v0.1.7 github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 github.com/rs/cors v1.11.0 github.com/samber/lo v1.38.1 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 + github.com/spf13/cobra v1.9.1 + github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 github.com/tidwall/gjson v1.17.1 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc - golang.org/x/sync v0.10.0 - golang.org/x/term v0.27.0 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 + golang.org/x/sync v0.14.0 + golang.org/x/term v0.32.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.5.1 sigs.k8s.io/yaml v1.4.0 @@ -49,7 +51,7 @@ require ( atomicgo.dev/schedule v0.1.0 // indirect dario.cat/mergo v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v1.0.0 // indirect + 
github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/STARRY-S/zip v0.2.1 // indirect github.com/alecthomas/chroma/v2 v2.8.0 // indirect github.com/andybalholm/brotli v1.1.1 // indirect @@ -58,23 +60,29 @@ require ( github.com/bodgit/plumbing v1.3.0 // indirect github.com/bodgit/sevenzip v1.6.0 // indirect github.com/bodgit/windows v1.0.1 // indirect + github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect github.com/charmbracelet/glamour v0.7.0 // indirect - github.com/charmbracelet/lipgloss v0.11.0 // indirect - github.com/charmbracelet/x/ansi v0.1.1 // indirect + github.com/charmbracelet/lipgloss v1.1.0 // indirect + github.com/charmbracelet/x/ansi v0.8.0 // indirect + github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/containerd/console v1.0.4 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/cyphar/filepath-securejoin v0.2.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dlclark/regexp2 v1.10.0 // indirect + github.com/dlclark/regexp2 v1.11.4 // indirect + github.com/dop251/goja v0.0.0-20250531102226-cb187b08699c // indirect github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-git/go-billy/v5 v5.6.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/go-cmp v0.6.0 // indirect + 
github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/gookit/color v1.5.4 // indirect github.com/gorilla/css v1.0.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -99,7 +107,7 @@ require ( github.com/microcosm-cc/bluemonday v1.0.26 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/muesli/reflow v0.3.0 // indirect - github.com/muesli/termenv v0.15.2 // indirect + github.com/muesli/termenv v0.16.0 // indirect github.com/nightlyone/lockfile v1.0.0 // indirect github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect @@ -113,10 +121,9 @@ require ( github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect - github.com/skeema/knownhosts v1.2.2 // indirect + github.com/skeema/knownhosts v1.3.0 // indirect github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e // indirect - github.com/spf13/cast v1.7.1 // indirect github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/tidwall/match v1.1.1 // indirect @@ -126,14 +133,13 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect - github.com/yosida95/uritemplate/v3 v3.0.2 // indirect github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.33.0 // indirect - golang.org/x/sys 
v0.28.0 // indirect + golang.org/x/sys v0.33.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.23.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/go.sum b/go.sum index e0f89743..e1f6e2dd 100644 --- a/go.sum +++ b/go.sum @@ -40,13 +40,15 @@ github.com/MarvinJWendt/testza v0.3.0/go.mod h1:eFcL4I0idjtIx8P9C6KkAuLgATNKpX4/ github.com/MarvinJWendt/testza v0.4.2/go.mod h1:mSdhXiKH8sg/gQehJ63bINcCKp7RtYewEjXsvsVUPbE= github.com/MarvinJWendt/testza v0.5.2 h1:53KDo64C1z/h/d/stCYCPY69bt/OSwjq5KpFNwi+zB4= github.com/MarvinJWendt/testza v0.5.2/go.mod h1:xu53QFE5sCdjtMCKk8YMQ2MnymimEctc4n3EjyIYvEY= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= -github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= -github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= +github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg= github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4= github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= @@ -74,14 +76,19 @@ github.com/bodgit/sevenzip v1.6.0 
h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A github.com/bodgit/sevenzip v1.6.0/go.mod h1:zOBh9nJUof7tcrlqJFv1koWRrhz3LbDbUNngkuZxLMc= github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= github.com/charmbracelet/glamour v0.7.0 h1:2BtKGZ4iVJCDfMF229EzbeR1QRKLWztO9dMtjmqZSng= github.com/charmbracelet/glamour v0.7.0/go.mod h1:jUMh5MeihljJPQbJ/wf4ldw2+yBP59+ctV36jASy7ps= -github.com/charmbracelet/lipgloss v0.11.0 h1:UoAcbQ6Qml8hDwSWs0Y1cB5TEQuZkDPH/ZqwWWYTG4g= -github.com/charmbracelet/lipgloss v0.11.0/go.mod h1:1UdRTH9gYgpcdNN5oBtjbu/IzNKtzVtb7sqN1t9LNn8= -github.com/charmbracelet/x/ansi v0.1.1 h1:CGAduulr6egay/YVbGc8Hsu8deMg1xZ/bkaXTPi1JDk= -github.com/charmbracelet/x/ansi v0.1.1/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= +github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/term v0.2.1 
h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= @@ -92,35 +99,36 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= 
-github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= +github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danielgtaylor/huma/v2 v2.32.0 h1:ytU9ExG/axC434+soXxwNzv0uaxOb3cyCgjj8y3PmBE= github.com/danielgtaylor/huma/v2 v2.32.0/go.mod h1:9BxJwkeoPPDEJ2Bg4yPwL1mM1rYpAwCAWFKoo723spk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= -github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo= +github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/cli v26.0.0+incompatible h1:90BKrx1a1HKYpSnnBFR6AgDq/FqkHxwlUyzJVPxD30I= github.com/docker/cli v26.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/dop251/goja v0.0.0-20250531102226-cb187b08699c h1:In87uFQZsuGfjDDNfWnzMVY6JVTwc8XYMl6W2DAmNjk= +github.com/dop251/goja v0.0.0-20250531102226-cb187b08699c/go.mod h1:MxLav0peU43GgvwVgNbLAj1s/bSGboKkhuULvq/7hx4= github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4= github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod 
h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elazarl/goproxy v1.2.1 h1:njjgvO6cRG9rIqN2ebkqy6cQz2Njkx7Fsfv/zIZqgug= +github.com/elazarl/goproxy v1.2.1/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -135,22 +143,24 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/getkin/kin-openapi v0.132.0 h1:3ISeLMsQzcb5v26yeJrBcdTCEQTag36ZjaGk7MIRUwk= github.com/getkin/kin-openapi v0.132.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= -github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= -github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-billy/v5 v5.6.0 h1:w2hPNtoehvJIxR00Vb4xX94qHQi/ApZfX+nBE2Cjio8= +github.com/go-git/go-billy/v5 
v5.6.0/go.mod h1:sFDq7xD3fn3E0GOwUSZqHo9lrkmx8xJhA0ZrfvjBRGM= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= -github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/go-git/go-git/v5 v5.13.0 h1:vLn5wlGIh/X78El6r3Jr+30W16Blk0CTcxTYcYPWi5E= +github.com/go-git/go-git/v5 v5.13.0/go.mod h1:Wjo7/JyVKtQgUNdXYXIepzWfJQkUEIGvkvVkiXRR/zw= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -178,12 +188,14 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -201,8 +213,8 @@ github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 h1:m9yLtI github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d h1:p5uqZufDIMQzAALblZFkr8fwbnZbFXbBCR1ZMAFylXk= github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= -github.com/gptscript-ai/cmd 
v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= -github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= +github.com/gptscript-ai/cmd v0.0.0-20250530150401-bc71fddf8070 h1:xm5ZZFraWFwxyE7TBEncCXArubCDZTwG6s5bpMzqhSY= +github.com/gptscript-ai/cmd v0.0.0-20250530150401-bc71fddf8070/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.9.6-0.20250520154649-f1616a06f1b0 h1:UXZRFAUPDWOgeTyjZd4M8YrEEgPc7XOfjgbm81w7x0w= github.com/gptscript-ai/go-gptscript v0.9.6-0.20250520154649-f1616a06f1b0/go.mod h1:t2TyiEa6rhd4reOcorAMUmd5MledmZuTmYrO7rV3Iy8= github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 h1:wQC8sKyeGA50WnCEG+Jo5FNRIkuX3HX8d3ubyWCCoI8= @@ -270,8 +282,6 @@ github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mark3labs/mcp-go v0.30.0 h1:Taz7fiefkxY/l8jz1nA90V+WdM2eoMtlvwfWforVYbo= -github.com/mark3labs/mcp-go v0.30.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -299,8 +309,10 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= 
-github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= -github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= +github.com/nanobot-ai/nanobot v0.0.6-0.20250612211144-0a23cf13a10f h1:p/YUKTP0n5w/YByPm+UPPSpp5d9m/VJB0dbQnQ5naPo= +github.com/nanobot-ai/nanobot v0.0.6-0.20250612211144-0a23cf13a10f/go.mod h1:XAvQcMgztKKR8Ul7/i28MfepoyC72ZGwG3uzAIH9F6c= github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAmxBiA= github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= @@ -311,8 +323,8 @@ github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletI github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77 h1:3bMMZ1f+GPXFQ1uNaYbO/uECWvSfqEA+ZEXn1rFAT88= github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77/go.mod h1:8Hf+pH6thup1sPZPD+NLg7d6vbpsdilu9CPIeikvgMQ= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= @@ -346,8 +358,8 @@ 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -361,18 +373,16 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= -github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= +github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg= github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e h1:H+jDTUeF+SVd4ApwnSFoew8ZwGNRfgb9EsZc7LcocAg= 
github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e/go.mod h1:VsUklG6OQo7Ctunu0gS3AtEOCEc2kMB6r5rKzxAes58= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf h1:pvbZ0lM0XWPBqUKqFU8cmavspvIl9nulOYwdy6IFRRo= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -414,8 +424,6 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= -github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= -github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.3.7/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.5.4 h1:2uY/xC0roWy8IBEGLgB1ywIoEJFGmRrX21YQcvGZzjU= @@ -434,8 +442,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= @@ -447,8 +453,8 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image 
v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -489,11 +495,9 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= @@ -514,8 +518,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -548,28 +552,25 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod 
h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -580,7 +581,6 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -664,6 +664,7 @@ gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/cli/main.go b/pkg/cli/main.go index b607281b..33048e0e 100644 --- a/pkg/cli/main.go +++ b/pkg/cli/main.go @@ -2,24 +2,36 @@ package cli import ( "context" + "fmt" "os" "os/signal" "github.com/gptscript-ai/cmd" "github.com/gptscript-ai/gptscript/pkg/daemon" "github.com/gptscript-ai/gptscript/pkg/mvl" + "github.com/nanobot-ai/nanobot/pkg/supervise" ) func Main() { - if len(os.Args) > 2 && os.Args[1] == "sys.daemon" { - if os.Getenv("GPTSCRIPT_DEBUG") == "true" { - mvl.SetDebug() + if len(os.Args) > 2 { + if os.Args[1] == "sys.daemon" { + if os.Getenv("GPTSCRIPT_DEBUG") == "true" { + mvl.SetDebug() + } + if err := daemon.SysDaemon(); err != nil { + log.Debugf("failed running daemon: %v", err) + } + os.Exit(0) } - if err := daemon.SysDaemon(); err != nil { - log.Debugf("failed running daemon: %v", err) + if os.Args[1] == "_exec" { + if err := supervise.Daemon(); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed running _exec: %v\n", err) + os.Exit(1) + } + os.Exit(0) } - os.Exit(0) } + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) defer cancel() cmd.MainCtx(ctx, New()) diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index f92f9324..b7facbd1 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -19,6 +19,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/llm" "github.com/gptscript-ai/gptscript/pkg/loader" + 
"github.com/gptscript-ai/gptscript/pkg/mcp" "github.com/gptscript-ai/gptscript/pkg/monitor" "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/openai" @@ -262,7 +263,7 @@ func (g *GPTScript) Run(ctx context.Context, prg types.Program, envs []string, i return g.Runner.Run(ctx, prg, envs, input, opts) } -func (g *GPTScript) Close(closeDaemons bool) { +func (g *GPTScript) Close(closeDaemonsAndMCP bool) { if g.DeleteWorkspaceOnClose && g.WorkspacePath != "" { if err := os.RemoveAll(g.WorkspacePath); err != nil { log.Errorf("failed to delete workspace %s: %s", g.WorkspacePath, err) @@ -271,8 +272,11 @@ func (g *GPTScript) Close(closeDaemons bool) { g.close() - if closeDaemons { + if closeDaemonsAndMCP { engine.CloseDaemons() + if err := mcp.DefaultLoader.Close(); err != nil { + log.Errorf("failed to close MCP loader: %s", err) + } } } diff --git a/pkg/mcp/client.go b/pkg/mcp/client.go index ff712ef9..73f4e0d9 100644 --- a/pkg/mcp/client.go +++ b/pkg/mcp/client.go @@ -1,106 +1,24 @@ package mcp import ( - "context" - - mcpclient "github.com/mark3labs/mcp-go/client" - "github.com/mark3labs/mcp-go/mcp" + nmcp "github.com/nanobot-ai/nanobot/pkg/mcp" ) -type Client interface { - mcpclient.MCPClient - Capabilities() mcp.ServerCapabilities -} - -func (l *Local) Client(server ServerConfig) (Client, error) { - session, err := l.loadSession(server, true) +func (l *Local) Client(server ServerConfig) (*Client, error) { + session, err := l.loadSession(server, "default") if err != nil { return nil, err } - return &client{session}, nil -} - -type client struct { - *Session -} - -func (c *client) Initialize(ctx context.Context, request mcp.InitializeRequest) (*mcp.InitializeResult, error) { - return c.Client.Initialize(ctx, request) -} - -func (c *client) Ping(ctx context.Context) error { - return c.Client.Ping(ctx) -} - -func (c *client) ListResourcesByPage(ctx context.Context, request mcp.ListResourcesRequest) (*mcp.ListResourcesResult, error) { - 
return c.Client.ListResourcesByPage(ctx, request) -} - -func (c *client) ListResources(ctx context.Context, request mcp.ListResourcesRequest) (*mcp.ListResourcesResult, error) { - return c.Client.ListResources(ctx, request) -} - -func (c *client) ListResourceTemplatesByPage(ctx context.Context, request mcp.ListResourceTemplatesRequest) (*mcp.ListResourceTemplatesResult, error) { - return c.Client.ListResourceTemplatesByPage(ctx, request) -} - -func (c *client) ListResourceTemplates(ctx context.Context, request mcp.ListResourceTemplatesRequest) (*mcp.ListResourceTemplatesResult, error) { - return c.Client.ListResourceTemplates(ctx, request) -} - -func (c *client) ReadResource(ctx context.Context, request mcp.ReadResourceRequest) (*mcp.ReadResourceResult, error) { - return c.Client.ReadResource(ctx, request) -} - -func (c *client) Subscribe(ctx context.Context, request mcp.SubscribeRequest) error { - return c.Client.Subscribe(ctx, request) -} - -func (c *client) Unsubscribe(ctx context.Context, request mcp.UnsubscribeRequest) error { - return c.Client.Unsubscribe(ctx, request) -} - -func (c *client) ListPromptsByPage(ctx context.Context, request mcp.ListPromptsRequest) (*mcp.ListPromptsResult, error) { - return c.Client.ListPromptsByPage(ctx, request) -} - -func (c *client) ListPrompts(ctx context.Context, request mcp.ListPromptsRequest) (*mcp.ListPromptsResult, error) { - return c.Client.ListPrompts(ctx, request) -} - -func (c *client) GetPrompt(ctx context.Context, request mcp.GetPromptRequest) (*mcp.GetPromptResult, error) { - return c.Client.GetPrompt(ctx, request) -} - -func (c *client) ListToolsByPage(ctx context.Context, request mcp.ListToolsRequest) (*mcp.ListToolsResult, error) { - return c.Client.ListToolsByPage(ctx, request) -} - -func (c *client) ListTools(ctx context.Context, request mcp.ListToolsRequest) (*mcp.ListToolsResult, error) { - return c.Client.ListTools(ctx, request) -} - -func (c *client) CallTool(ctx context.Context, request 
mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return c.Client.CallTool(ctx, request) -} - -func (c *client) SetLevel(ctx context.Context, request mcp.SetLevelRequest) error { - return c.Client.SetLevel(ctx, request) -} - -func (c *client) Complete(ctx context.Context, request mcp.CompleteRequest) (*mcp.CompleteResult, error) { - return c.Client.Complete(ctx, request) -} - -func (c *client) Close() error { - return c.Client.Close() + return &Client{ + Client: session.Client, + }, nil } -func (c *client) OnNotification(handler func(notification mcp.JSONRPCNotification)) { - c.Client.OnNotification(handler) +type Client struct { + *nmcp.Client } -func (c *client) Capabilities() mcp.ServerCapabilities { - return c.InitResult.Capabilities +func (c *Client) Capabilities() nmcp.ServerCapabilities { + return c.Session.InitializeResult.Capabilities } diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index c9f1b8a3..87e2d0bc 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -14,10 +14,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/types" - "github.com/gptscript-ai/gptscript/pkg/version" - mcpclient "github.com/mark3labs/mcp-go/client" - "github.com/mark3labs/mcp-go/client/transport" - "github.com/mark3labs/mcp-go/mcp" + nmcp "github.com/nanobot-ai/nanobot/pkg/mcp" ) var ( @@ -35,10 +32,9 @@ type Local struct { } type Session struct { - ID string - InitResult *mcp.InitializeResult - Client mcpclient.MCPClient - Config ServerConfig + ID string + Client *nmcp.Client + Config ServerConfig } type Config struct { @@ -101,7 +97,7 @@ func (l *Local) Load(ctx context.Context, tool types.Tool) (result []types.Tool, } for server := range maps.Keys(servers.MCPServers) { - tools, err := l.LoadTools(ctx, servers.MCPServers[server], tool.Name) + tools, err := l.LoadTools(ctx, servers.MCPServers[server], server, tool.Name) if err != nil { return nil, fmt.Errorf("failed to load MCP 
session for server %s: %w", server, err) } @@ -113,12 +109,12 @@ func (l *Local) Load(ctx context.Context, tool types.Tool) (result []types.Tool, return nil, fmt.Errorf("no MCP server configuration found in tool instructions: %s", configData) } -func (l *Local) LoadTools(ctx context.Context, server ServerConfig, toolName string) ([]types.Tool, error) { +func (l *Local) LoadTools(ctx context.Context, server ServerConfig, serverName, toolName string) ([]types.Tool, error) { allowedTools := server.AllowedTools // Reset so we don't start a new MCP server, no reason to if one is already running and the allowed tools change. server.AllowedTools = nil - session, err := l.loadSession(server, true) + session, err := l.loadSession(server, serverName) if err != nil { return nil, err } @@ -145,11 +141,12 @@ func (l *Local) ShutdownServer(server ServerConfig) error { l.lock.Unlock() - if session == nil { - return nil + if session != nil && session.Client != nil { + session.Client.Session.Close() + session.Client.Session.Wait() } - return session.Client.Close() + return nil } func (l *Local) Close() error { @@ -172,9 +169,8 @@ func (l *Local) Close() error { var errs []error for id, session := range l.sessions { logger.Infof("closing MCP session %s", id) - if err := session.Client.Close(); err != nil { - errs = append(errs, fmt.Errorf("failed to close MCP client %s: %w", id, err)) - } + session.Client.Session.Close() + session.Client.Session.Wait() } return errors.Join(errs...) 
@@ -183,7 +179,7 @@ func (l *Local) Close() error { func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName string, allowedTools []string) ([]types.Tool, error) { allToolsAllowed := allowedTools == nil || slices.Contains(allowedTools, "*") - tools, err := session.Client.ListTools(ctx, mcp.ListToolsRequest{}) + tools, err := session.Client.ListTools(ctx) if err != nil { return nil, fmt.Errorf("failed to list tools: %w", err) } @@ -227,13 +223,13 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s }, } - if string(annotations) != "{}" { + if string(annotations) != "{}" && string(annotations) != "null" { toolDef.MetaData = map[string]string{ "mcp-tool-annotations": string(annotations), } } - if tool.Annotations.Title != "" && !slices.Contains(strings.Fields(tool.Annotations.Title), "as") { + if tool.Annotations != nil && tool.Annotations.Title != "" && !slices.Contains(strings.Fields(tool.Annotations.Title), "as") { toolNames = append(toolNames, tool.Name+" as "+tool.Annotations.Title) } else { toolNames = append(toolNames, tool.Name) @@ -246,7 +242,7 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s ToolDef: types.ToolDef{ Parameters: types.Parameters{ Name: toolName, - Description: session.InitResult.ServerInfo.Name, + Description: session.Client.Session.InitializeResult.ServerInfo.Name, Export: toolNames, }, MetaData: map[string]string{ @@ -255,10 +251,10 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s }, } - if session.InitResult.Instructions != "" { + if session.Client.Session.InitializeResult.Instructions != "" { data, _ := json.Marshal(map[string]any{ "tools": toolNames, - "instructions": session.InitResult.Instructions, + "instructions": session.Client.Session.InitializeResult.Instructions, }) toolDefs = append(toolDefs, types.Tool{ ToolDef: types.ToolDef{ @@ -266,7 +262,7 @@ func (l *Local) sessionToTools(ctx context.Context, session 
*Session, toolName s Name: session.ID, Type: "context", }, - Instructions: types.EchoPrefix + "\n" + `# START MCP SERVER INFO: ` + session.InitResult.ServerInfo.Name + "\n" + + Instructions: types.EchoPrefix + "\n" + `# START MCP SERVER INFO: ` + session.Client.Session.InitializeResult.ServerInfo.Name + "\n" + `You have available the following tools from an MCP Server that has provided the following additional instructions` + "\n" + string(data) + "\n" + `# END MCP SERVER INFO` + "\n", @@ -280,91 +276,59 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s return toolDefs, nil } -func (l *Local) loadSession(server ServerConfig, tryHTTPStreaming bool) (*Session, error) { +func (l *Local) loadSession(server ServerConfig, serverName string) (*Session, error) { id := hash.Digest(server) l.lock.Lock() existing, ok := l.sessions[id] if l.sessionCtx == nil { l.sessionCtx, l.cancel = context.WithCancel(context.Background()) } - ctx := l.sessionCtx l.lock.Unlock() if ok { return existing, nil } - var ( - c *mcpclient.Client - err error - ) - if server.Command != "" { - c, err = mcpclient.NewStdioMCPClient(server.Command, server.Env, server.Args...) - if err != nil { - return nil, fmt.Errorf("failed to create MCP stdio client: %w", err) - } - } else { - url := server.URL - if url == "" { - url = server.Server - } - - headers := make(map[string]string, len(server.Headers)) - for _, h := range server.Headers { - k, v, _ := strings.Cut(h, "=") - headers[k] = v - } - - if tryHTTPStreaming { - c, err = mcpclient.NewStreamableHttpClient(url, transport.WithHTTPHeaders(headers)) - } else { - c, err = mcpclient.NewSSEMCPClient(url, mcpclient.WithHeaders(headers)) - } - if err != nil { - return nil, fmt.Errorf("failed to create MCP HTTP client: %w", err) - } - - // We expect the client to outlive this one request. 
- if err = c.Start(ctx); err != nil { - return nil, fmt.Errorf("failed to start MCP client: %w", err) - } - } - - var initRequest mcp.InitializeRequest - initRequest.Params.ClientInfo = mcp.Implementation{ - Name: version.ProgramName, - Version: version.Get().String(), - } - - initResult, err := c.Initialize(ctx, initRequest) + c, err := nmcp.NewClient(l.sessionCtx, serverName, nmcp.Server{ + Unsandboxed: true, + Env: splitIntoMap(server.Env), + Command: server.Command, + Args: server.Args, + BaseURL: server.GetBaseURL(), + Headers: splitIntoMap(server.Headers), + }) if err != nil { - if server.Command == "" && tryHTTPStreaming { - // The MCP spec indicates that trying to initialize the client for HTTP streaming and checking for an error - // is the recommended way to determine if the server supports HTTP streaming, falling back to SEE. - // Ideally, we can check for a 400-level error, but our client implementation doesn't expose that information. - // Retrying on any error is harmless. 
- return l.loadSession(server, false) - } - return nil, fmt.Errorf("failed to initialize MCP client: %w", err) + return nil, fmt.Errorf("failed to create MCP stdio client: %w", err) } result := &Session{ - ID: id, - InitResult: initResult, - Client: c, - Config: server, + ID: id, + Client: c, + Config: server, } l.lock.Lock() defer l.lock.Unlock() if existing, ok = l.sessions[id]; ok { - return existing, c.Close() + c.Session.Close() + return existing, nil } if l.sessions == nil { - l.sessions = make(map[string]*Session) + l.sessions = make(map[string]*Session, 1) } l.sessions[id] = result return result, nil } + +func splitIntoMap(list []string) map[string]string { + result := make(map[string]string, len(list)) + for _, s := range list { + k, v, ok := strings.Cut(s, "=") + if ok { + result[k] = v + } + } + return result +} diff --git a/pkg/mcp/runner.go b/pkg/mcp/runner.go index 1a275a0c..37032392 100644 --- a/pkg/mcp/runner.go +++ b/pkg/mcp/runner.go @@ -7,7 +7,6 @@ import ( "github.com/gptscript-ai/gptscript/pkg/engine" "github.com/gptscript-ai/gptscript/pkg/types" - "github.com/mark3labs/mcp-go/mcp" ) func (l *Local) Run(ctx engine.Context, _ chan<- types.CompletionStatus, tool types.Tool, input string) (string, error) { @@ -37,11 +36,7 @@ func (l *Local) Run(ctx engine.Context, _ chan<- types.CompletionStatus, tool ty return "", fmt.Errorf("session not found for MCP server %s", id) } - request := mcp.CallToolRequest{} - request.Params.Name = toolName - request.Params.Arguments = arguments - - result, err := session.Client.CallTool(ctx.Ctx, request) + result, err := session.Client.Call(ctx.Ctx, toolName, arguments) if err != nil { if ctx.ToolCategory == engine.NoCategory && ctx.Parent != nil { var output []byte diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index 52098cf1..80131245 100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -212,7 +212,7 @@ func TestMCPLoad(t *testing.T) { } r := tester.NewRunner(t) - prg, err 
:= loader.ProgramFromSource(context.Background(), ` + prg, err := loader.ProgramFromSource(t.Context(), ` name: mcp #!mcp diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index ce3cebe6..bb1193ea 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -7,6 +7,7 @@ import ( "encoding/json" "io" "os" + "os/exec" "runtime" "testing" @@ -27,6 +28,17 @@ func toJSONString(t *testing.T, v interface{}) string { return string(x) } +func TestMain(m *testing.M) { + cmd := exec.CommandContext(context.Background(), "go", "build", "-o", "bin/gptscript", "../../main.go") + if err := cmd.Run(); err != nil { + panic(err) + } + + os.Setenv("NANOBOT_BIN", "bin/gptscript") + defer os.Unsetenv("NANOBOT_BIN") + m.Run() +} + func TestAsterick(t *testing.T) { r := tester.NewRunner(t) p, err := r.Load("") diff --git a/pkg/tests/testdata/TestMCPLoad/step1.golden b/pkg/tests/testdata/TestMCPLoad/step1.golden index ae20c8ed..c5961afa 100644 --- a/pkg/tests/testdata/TestMCPLoad/step1.golden +++ b/pkg/tests/testdata/TestMCPLoad/step1.golden @@ -1,6 +1,6 @@ `{ "done": true, - "content": "{\"content\":[{\"type\":\"text\",\"text\":\"[{'1': 1}]\"}]}", + "content": "{\"isError\":false,\"content\":[{\"type\":\"text\",\"text\":\"[{'1': 1}]\"}]}", "toolID": "", "state": null }` From 118e3aece6577669f069f6a4faf41fe4ac90fd25 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 11 Jun 2025 21:46:43 -0400 Subject: [PATCH 252/270] chore: use npm for init-docs and bump vulnerable dependencies Signed-off-by: Donnie Adams --- Makefile | 2 +- docs/package-lock.json | 1235 ++++++++++++++++++++-------------------- 2 files changed, 610 insertions(+), 627 deletions(-) diff --git a/Makefile b/Makefile index b2e8482a..e9f079f6 100644 --- a/Makefile +++ b/Makefile @@ -49,7 +49,7 @@ serve-docs: # This will initialize the node_modules needed to run the docs dev server. 
Run this before running serve-docs init-docs: - docker run --rm --workdir=/docs -v $${PWD}/docs:/docs node:18-buster yarn install + docker run --rm --workdir=/docs -v $${PWD}/docs:/docs node:18-buster npm install # Ensure docs build without errors. Makes sure generated docs are in-sync with CLI. validate-docs: gen-docs diff --git a/docs/package-lock.json b/docs/package-lock.json index 9bbb14b2..43b226ef 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -198,81 +198,19 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.23.5", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", - "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "license": "MIT", "dependencies": { - "@babel/highlight": "^7.23.4", - "chalk": "^2.4.2" + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/code-frame/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - 
}, - "node_modules/@babel/code-frame/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/code-frame/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/code-frame/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/@babel/compat-data": { "version": "7.23.5", "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz", @@ -602,17 +540,19 @@ } }, "node_modules/@babel/helper-string-parser": { - "version": "7.23.4", - "resolved": 
"https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", - "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", - "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "license": "MIT", "engines": { "node": ">=6.9.0" } @@ -639,99 +579,26 @@ } }, "node_modules/@babel/helpers": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.9.tgz", - "integrity": "sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ==", - "dependencies": { - "@babel/template": "^7.23.9", - "@babel/traverse": "^7.23.9", - "@babel/types": "^7.23.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", - "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.6.tgz", + "integrity": 
"sha512-muE8Tt8M22638HU31A3CgfSUciwz1fhATfoVai05aPXGor//CdWDCbnlY1yvBPo07njuVOCNGCSp/GTt12lIug==", + "license": "MIT", "dependencies": { - "@babel/helper-validator-identifier": "^7.22.20", - "chalk": "^2.4.2", - "js-tokens": "^4.0.0" + "@babel/template": "^7.27.2", + "@babel/types": "^7.27.6" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/highlight/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/highlight/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "node_modules/@babel/highlight/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": 
"sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/highlight/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "node_modules/@babel/parser": { + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.5.tgz", + "integrity": "sha512-OsQd175SxWkGlzbny8J3K8TnnDD0N3lrIUtB92xwyRpzaenGZhxDvxN/JgU00U3CDZNj9tPuDJ5H0WS4Nt3vKg==", + "license": "MIT", "dependencies": { - "has-flag": "^3.0.0" + "@babel/types": "^7.27.3" }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/parser": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz", - "integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==", "bin": { "parser": "bin/babel-parser.js" }, @@ -2040,36 +1907,35 @@ "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" }, "node_modules/@babel/runtime": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.9.tgz", - "integrity": "sha512-0CX6F+BI2s9dkUqr08KFrAIZgNFj75rdBU/DjCyYLIaV/quFjkk6T+EJ2LkZHyZTbEV4L5p97mNkUsHl2wLFAw==", - "dependencies": { - "regenerator-runtime": "^0.14.0" - }, + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.6.tgz", + "integrity": 
"sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/runtime-corejs3": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.23.9.tgz", - "integrity": "sha512-oeOFTrYWdWXCvXGB5orvMTJ6gCZ9I6FBjR+M38iKNXCsPxr4xT0RTdg5uz1H7QP8pp74IzPtwritEr+JscqHXQ==", + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.27.6.tgz", + "integrity": "sha512-vDVrlmRAY8z9Ul/HxT+8ceAru95LQgkSKiXkSYZvqtbkPSfhZJgpRp45Cldbh1GJ1kxzQkI70AqyrTI58KpaWQ==", + "license": "MIT", "dependencies": { - "core-js-pure": "^3.30.2", - "regenerator-runtime": "^0.14.0" + "core-js-pure": "^3.30.2" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/template": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.23.9.tgz", - "integrity": "sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==", + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.23.5", - "@babel/parser": "^7.23.9", - "@babel/types": "^7.23.9" + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -2096,13 +1962,13 @@ } }, "node_modules/@babel/types": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz", - "integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==", + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.6.tgz", + "integrity": 
"sha512-ETyHEk2VHHvl9b9jZP5IHPavHYk57EhanlRRuae9XCpb/j5bDCbPPMOBfCWhnl/7EDJz0jEMCi/RhccCE8r1+Q==", + "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.23.4", - "@babel/helper-validator-identifier": "^7.22.20", - "to-fast-properties": "^2.0.0" + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -2834,9 +2700,10 @@ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.22", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.22.tgz", - "integrity": "sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -3359,9 +3226,10 @@ } }, "node_modules/@types/estree": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", - "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" }, "node_modules/@types/estree-jsx": { "version": "1.0.4", @@ -3648,145 +3516,162 @@ "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" }, "node_modules/@webassemblyjs/ast": { - "version": "1.11.6", - "resolved": 
"https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz", - "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", + "license": "MIT", "dependencies": { - "@webassemblyjs/helper-numbers": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" } }, "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", - "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", + "license": "MIT" }, "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", - "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", + "license": "MIT" }, "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz", - "integrity": 
"sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==" + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", + "license": "MIT" }, "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", - "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", + "license": "MIT", "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.11.6", - "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", - "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", + "license": "MIT" }, "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz", - "integrity": 
"sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" } }, "node_modules/@webassemblyjs/ieee754": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", - "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", + "license": "MIT", "dependencies": { "@xtuc/ieee754": "^1.2.0" } }, "node_modules/@webassemblyjs/leb128": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", - "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", + "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", + "license": "Apache-2.0", "dependencies": { "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/utf8": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", - 
"integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", + "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==", + "license": "MIT" }, "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz", - "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", + "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/helper-wasm-section": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6", - "@webassemblyjs/wasm-opt": "1.11.6", - "@webassemblyjs/wasm-parser": "1.11.6", - "@webassemblyjs/wast-printer": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/helper-wasm-section": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-opt": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1", + "@webassemblyjs/wast-printer": "1.14.1" } }, "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz", - "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", + "integrity": 
"sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" } }, "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz", - "integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", + "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6", - "@webassemblyjs/wasm-parser": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1" } }, "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz", - "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", + "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", - 
"@webassemblyjs/helper-api-error": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-api-error": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" } }, "node_modules/@webassemblyjs/wast-printer": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz", - "integrity": "sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", + "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/ast": "1.14.1", "@xtuc/long": "4.2.2" } }, "node_modules/@xtuc/ieee754": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "license": "BSD-3-Clause" }, "node_modules/@xtuc/long": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "license": "Apache-2.0" }, "node_modules/accepts": { "version": "1.3.8", @@ -3820,9 +3705,10 @@ } }, "node_modules/acorn": { - "version": "8.11.3", - "resolved": 
"https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", - "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "license": "MIT", "bin": { "acorn": "bin/acorn" }, @@ -3830,14 +3716,6 @@ "node": ">=0.4.0" } }, - "node_modules/acorn-import-assertions": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", - "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", - "peerDependencies": { - "acorn": "^8" - } - }, "node_modules/acorn-jsx": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", @@ -4198,20 +4076,21 @@ } }, "node_modules/body-parser": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", - "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", "dependencies": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", - "raw-body": "2.5.1", + "qs": "6.13.0", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -4224,6 +4103,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", "integrity": 
"sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -4232,6 +4112,7 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } @@ -4239,7 +4120,8 @@ "node_modules/body-parser/node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" }, "node_modules/bonjour-service": { "version": "1.2.1", @@ -4277,29 +4159,31 @@ } }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": 
"^7.1.1" }, "engines": { "node": ">=8" } }, "node_modules/browserslist": { - "version": "4.23.0", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", - "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", + "version": "4.25.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.25.0.tgz", + "integrity": "sha512-PJ8gYKeS5e/whHBh8xrwYK+dAvEj7JXtz6uTucnMRB8OiGTsKccFekoRrjajPBHV8oOY+2tI4uxeceSimKwMFA==", "funding": [ { "type": "opencollective", @@ -4314,11 +4198,12 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "caniuse-lite": "^1.0.30001587", - "electron-to-chromium": "^1.4.668", - "node-releases": "^2.0.14", - "update-browserslist-db": "^1.0.13" + "caniuse-lite": "^1.0.30001718", + "electron-to-chromium": "^1.5.160", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.3" }, "bin": { "browserslist": "cli.js" @@ -4394,6 +4279,35 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/callsites": { "version": "3.1.0", 
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -4434,9 +4348,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001589", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001589.tgz", - "integrity": "sha512-vNQWS6kI+q6sBlHbh71IIeC+sRwK2N3EDySc/updIGhIee2x5z00J4c1242/5/d6EpEMdOnk/m+6tuk4/tcsqg==", + "version": "1.0.30001722", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001722.tgz", + "integrity": "sha512-DCQHBBZtiK6JVkAGw7drvAMK0Q0POD/xZvEmDp6baiMMP6QXXk9HpD6mNYBZWhOPG6LvIDb82ITqtWjhDckHCA==", "funding": [ { "type": "opencollective", @@ -4450,7 +4364,8 @@ "type": "github", "url": "https://github.com/sponsors/ai" } - ] + ], + "license": "CC-BY-4.0" }, "node_modules/ccount": { "version": "2.0.1", @@ -4736,7 +4651,8 @@ "node_modules/colorette": { "version": "2.0.20", "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", - "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "license": "MIT" }, "node_modules/combine-promises": { "version": "1.2.0", @@ -4879,6 +4795,7 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -4889,9 +4806,10 @@ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" }, "node_modules/cookie": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + 
"integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -5028,9 +4946,10 @@ } }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -5484,6 +5403,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -5500,6 +5420,7 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", "engines": { "node": ">= 0.8", "npm": "1.2.8000 || >= 1.4.16" @@ -5676,6 +5597,20 @@ "node": ">=8" } }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/duplexer": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", @@ -5689,12 +5624,14 @@ "node_modules/ee-first": { "version": "1.1.1", "resolved": 
"https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.4.679", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.679.tgz", - "integrity": "sha512-NhQMsz5k0d6m9z3qAxnsOR/ebal4NAGsrNVRwcDo4Kc/zQ7KdsTKZUxZoygHcVRb0QDW3waEDIcE3isZ79RP6g==" + "version": "1.5.166", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.166.tgz", + "integrity": "sha512-QPWqHL0BglzPYyJJ1zSSmwFFL6MFXhbACOCcsCdUMCkzPdS9/OIBVxg516X/Ado2qwAq8k0nJJ7phQPCqiaFAw==", + "license": "ISC" }, "node_modules/emoji-regex": { "version": "9.2.2", @@ -5724,17 +5661,19 @@ } }, "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/enhanced-resolve": { - "version": "5.15.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.15.0.tgz", - "integrity": "sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==", + "version": "5.18.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz", + "integrity": "sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==", + "license": "MIT", "dependencies": { "graceful-fs": "^4.2.4", "tapable": 
"^2.2.0" @@ -5763,12 +5702,10 @@ } }, "node_modules/es-define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", - "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", - "dependencies": { - "get-intrinsic": "^1.2.4" - }, + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", "engines": { "node": ">= 0.4" } @@ -5786,10 +5723,23 @@ "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.4.1.tgz", "integrity": "sha512-cXLGjP0c4T3flZJKQSuziYoq7MlT+rnvfZjfp7h+I7K9BNX54kP9nyWvdbwjQ4u1iWbOL4u96fgeZLToQlZC7w==" }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/escalade": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", - "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", "engines": { "node": ">=6" } @@ -5923,15 +5873,12 @@ } }, "node_modules/estree-util-value-to-estree": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.0.1.tgz", - "integrity": 
"sha512-b2tdzTurEIbwRh+mKrEcaWfu1wgb8J1hVsgREg7FFiecWwK/PhO8X0kyc+0bIcKNtD4sqxIdNoRy6/p/TvECEA==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.4.0.tgz", + "integrity": "sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ==", + "license": "MIT", "dependencies": { - "@types/estree": "^1.0.0", - "is-plain-obj": "^4.0.0" - }, - "engines": { - "node": ">=16.0.0" + "@types/estree": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/remcohaszing" @@ -5981,6 +5928,7 @@ "version": "1.8.1", "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -6033,36 +5981,37 @@ } }, "node_modules/express": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", - "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", + "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "license": "MIT", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.1", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.7.1", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", - "finalhandler": "1.2.0", + "finalhandler": "1.3.1", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": 
"0.1.12", "proxy-addr": "~2.0.7", - "qs": "6.11.0", + "qs": "6.13.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": "1.16.2", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -6071,6 +6020,10 @@ }, "engines": { "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/express/node_modules/content-disposition": { @@ -6098,9 +6051,10 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" }, "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" }, "node_modules/express/node_modules/range-parser": { "version": "1.2.1", @@ -6151,14 +6105,6 @@ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" }, - "node_modules/fast-url-parser": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", - "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", - "dependencies": { - "punycode": "^1.3.2" - } - }, "node_modules/fastq": { "version": "1.17.1", "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", @@ -6274,9 +6220,10 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": 
"https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -6285,12 +6232,13 @@ } }, "node_modules/finalhandler": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", "dependencies": { "debug": "2.6.9", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "on-finished": "2.4.1", "parseurl": "~1.3.3", @@ -6305,6 +6253,7 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } @@ -6312,7 +6261,8 @@ "node_modules/finalhandler/node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" }, "node_modules/find-cache-dir": { "version": "4.0.0", @@ -6353,15 +6303,16 @@ } }, "node_modules/follow-redirects": { - "version": "1.15.5", - "resolved": 
"https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz", - "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==", + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", "funding": [ { "type": "individual", "url": "https://github.com/sponsors/RubenVerborgh" } ], + "license": "MIT", "engines": { "node": ">=4.0" }, @@ -6531,6 +6482,7 @@ "version": "0.5.2", "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -6588,15 +6540,21 @@ } }, "node_modules/get-intrinsic": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", - "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", - "has-proto": "^1.0.1", - "has-symbols": "^1.0.3", - "hasown": "^2.0.0" + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" }, "engines": { "node": ">= 0.4" @@ -6610,6 +6568,19 @@ "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", "integrity": 
"sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/get-stream": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", @@ -6659,7 +6630,8 @@ "node_modules/glob-to-regexp": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", + "license": "BSD-2-Clause" }, "node_modules/global-dirs": { "version": "3.0.1", @@ -6746,11 +6718,12 @@ } }, "node_modules/gopd": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", - "dependencies": { - "get-intrinsic": "^1.1.3" + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -6868,21 +6841,11 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/has-proto": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", - "integrity": 
"sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -6902,9 +6865,10 @@ } }, "node_modules/hasown": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.1.tgz", - "integrity": "sha512-1/th4MHjnwncwXsIW6QMzlvYL9kG5e/CpVvLRZe4XPa8TOUNbCELqmvhDmnkNsAjwaG4+I8gJJL0JBvTTLO9qA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", "dependencies": { "function-bind": "^1.1.2" }, @@ -7310,6 +7274,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", "dependencies": { "depd": "2.0.0", "inherits": "2.0.4", @@ -7340,9 +7305,10 @@ } }, "node_modules/http-proxy-middleware": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", - "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz", + 
"integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==", + "license": "MIT", "dependencies": { "@types/http-proxy": "^1.17.8", "http-proxy": "^1.18.1", @@ -7397,6 +7363,7 @@ "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", "dependencies": { "safer-buffer": ">= 2.1.2 < 3" }, @@ -7424,9 +7391,10 @@ } }, "node_modules/image-size": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz", - "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.2.1.tgz", + "integrity": "sha512-rH+46sQJ2dlwfjfhCyNx5thzrv+dtmBIhPHk0zgRUukHzZ/kRueTJXoYYsclBaKcSMBWuGbOFXtioLpzTb5euw==", + "license": "MIT", "dependencies": { "queue": "6.0.2" }, @@ -7698,6 +7666,7 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", "engines": { "node": ">=0.12.0" } @@ -8140,6 +8109,15 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/mdast-util-directive": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", @@ -8527,6 +8505,7 @@ "version": "0.3.0", "resolved": 
"https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -8543,9 +8522,13 @@ } }, "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/merge-stream": { "version": "2.0.0", @@ -10240,11 +10223,12 @@ ] }, "node_modules/micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", "dependencies": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" }, "engines": { @@ -10255,6 +10239,7 @@ "version": "1.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", "bin": { "mime": "cli.js" }, @@ -10369,15 +10354,16 @@ } }, "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": 
"sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -10430,9 +10416,10 @@ } }, "node_modules/node-releases": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", - "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==" + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "license": "MIT" }, "node_modules/normalize-path": { "version": "3.0.0", @@ -10497,9 +10484,13 @@ } }, "node_modules/object-inspect": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", - "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -10538,6 +10529,7 @@ "version": "2.4.1", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", "dependencies": { "ee-first": "1.1.1" }, @@ -10827,9 +10819,10 @@ "integrity": 
"sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, "node_modules/path-to-regexp": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz", + "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==", + "license": "MIT", "dependencies": { "isarray": "0.0.1" } @@ -10853,9 +10846,10 @@ } }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" }, "node_modules/picomatch": { "version": "2.3.1", @@ -11570,9 +11564,10 @@ } }, "node_modules/prismjs": { - "version": "1.29.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", - "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "license": "MIT", "engines": { "node": ">=6" } @@ -11638,11 +11633,6 @@ "node": ">= 0.10" } }, - "node_modules/punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": 
"sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" - }, "node_modules/pupa": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", @@ -11658,11 +11648,12 @@ } }, "node_modules/qs": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", - "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", "dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": ">=0.6" @@ -11726,9 +11717,10 @@ } }, "node_modules/raw-body": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", - "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": "MIT", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -11743,6 +11735,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -12095,11 +12088,6 @@ "node": ">=4" } }, - "node_modules/regenerator-runtime": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" - }, "node_modules/regenerator-transform": { "version": 
"0.15.2", "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", @@ -12569,7 +12557,8 @@ "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" }, "node_modules/sax": { "version": "1.3.0", @@ -12585,9 +12574,10 @@ } }, "node_modules/schema-utils": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", - "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.2.tgz", + "integrity": "sha512-Gn/JaSk/Mt9gYubxTtSn/QCV4em9mpAPiR1rqy/Ocu19u/G9J5WWdNoUT4SiV6mFC3y6cxyFcFwdzPM3FgxGAQ==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.9.0", @@ -12595,7 +12585,7 @@ "ajv-keywords": "^5.1.0" }, "engines": { - "node": ">= 12.13.0" + "node": ">= 10.13.0" }, "funding": { "type": "opencollective", @@ -12682,9 +12672,10 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/send": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "license": "MIT", "dependencies": { "debug": "2.6.9", "depd": "2.0.0", @@ -12708,6 +12699,7 @@ "version": "2.6.9", "resolved": 
"https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } @@ -12715,17 +12707,29 @@ "node_modules/send/node_modules/debug/node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } }, "node_modules/send/node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" }, "node_modules/send/node_modules/range-parser": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -12739,24 +12743,25 @@ } }, "node_modules/serve-handler": { - "version": "6.1.5", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", - "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", + "version": "6.1.6", + "resolved": 
"https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.6.tgz", + "integrity": "sha512-x5RL9Y2p5+Sh3D38Fh9i/iQ5ZK+e4xuXRd/pGbM4D13tgo/MGwbttUk8emytcr1YYzBYs+apnUngBDFYfpjPuQ==", + "license": "MIT", "dependencies": { "bytes": "3.0.0", "content-disposition": "0.5.2", - "fast-url-parser": "1.1.3", "mime-types": "2.1.18", "minimatch": "3.1.2", "path-is-inside": "1.0.2", - "path-to-regexp": "2.2.1", + "path-to-regexp": "3.3.0", "range-parser": "1.2.0" } }, "node_modules/serve-handler/node_modules/path-to-regexp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", - "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-3.3.0.tgz", + "integrity": "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==", + "license": "MIT" }, "node_modules/serve-index": { "version": "1.9.1", @@ -12829,14 +12834,15 @@ } }, "node_modules/serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", "dependencies": { - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.18.0" + "send": "0.19.0" }, "engines": { "node": ">= 0.8.0" @@ -12861,7 +12867,8 @@ "node_modules/setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": 
"sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" }, "node_modules/shallow-clone": { "version": "3.0.1", @@ -12923,14 +12930,69 @@ } }, "node_modules/side-channel": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.5.tgz", - "integrity": "sha512-QcgiIWV4WV7qWExbN5llt6frQB/lBven9pqliLXfGPB+K9ZYXxDozp0wLkHS24kWCm+6YXH/f0HhnObZnZOBnQ==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", "dependencies": { - "call-bind": "^1.0.6", "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.4", - "object-inspect": "^1.13.1" + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": 
"^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" }, "engines": { "node": ">= 0.4" @@ -13118,6 +13180,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -13390,12 +13453,13 @@ } }, "node_modules/terser": { - "version": "5.27.2", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.27.2.tgz", - "integrity": "sha512-sHXmLSkImesJ4p5apTeT63DsV4Obe1s37qT8qvwHRmVxKTBH7Rv9Wr26VcAMmLbmk9UliiwK8z+657NyJHHy/w==", + "version": "5.42.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.42.0.tgz", + "integrity": "sha512-UYCvU9YQW2f/Vwl+P0GfhxJxbUGLwd+5QrrGgLajzWAtC/23AX0vcise32kkP7Eu0Wu9VlzzHAXkLObgjQfFlQ==", + "license": "BSD-2-Clause", "dependencies": { "@jridgewell/source-map": "^0.3.3", - "acorn": "^8.8.2", + "acorn": "^8.14.0", "commander": "^2.20.0", "source-map-support": "~0.5.20" }, @@ -13407,15 +13471,16 @@ } }, "node_modules/terser-webpack-plugin": { - "version": "5.3.10", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz", - "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", + "version": "5.3.14", + "resolved": 
"https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.14.tgz", + "integrity": "sha512-vkZjpUjb6OMS7dhV+tILUW6BhpDR7P2L/aQSAv+Uwk+m8KATX9EccViHTJR2qDtACKPIYndLGCyl3FMo+r2LMw==", + "license": "MIT", "dependencies": { - "@jridgewell/trace-mapping": "^0.3.20", + "@jridgewell/trace-mapping": "^0.3.25", "jest-worker": "^27.4.5", - "schema-utils": "^3.1.1", - "serialize-javascript": "^6.0.1", - "terser": "^5.26.0" + "schema-utils": "^4.3.0", + "serialize-javascript": "^6.0.2", + "terser": "^5.31.1" }, "engines": { "node": ">= 10.13.0" @@ -13439,29 +13504,6 @@ } } }, - "node_modules/terser-webpack-plugin/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, "node_modules/terser-webpack-plugin/node_modules/jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", @@ -13475,28 +13517,6 @@ "node": ">= 10.13.0" } }, - "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - 
"node_modules/terser-webpack-plugin/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, "node_modules/terser-webpack-plugin/node_modules/supports-color": { "version": "8.1.1", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", @@ -13536,18 +13556,11 @@ "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", - "engines": { - "node": ">=4" - } - }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", "dependencies": { "is-number": "^7.0.0" }, @@ -13559,6 +13572,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", "engines": { "node": ">=0.6" } @@ -13609,6 +13623,7 @@ "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", "integrity": 
"sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", "dependencies": { "media-typer": "0.3.0", "mime-types": "~2.1.24" @@ -13621,6 +13636,7 @@ "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -13629,6 +13645,7 @@ "version": "2.1.35", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", "dependencies": { "mime-db": "1.52.0" }, @@ -13838,14 +13855,15 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/update-browserslist-db": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", - "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", "funding": [ { "type": "opencollective", @@ -13860,9 +13878,10 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.2.0", + "picocolors": "^1.1.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -14135,9 +14154,10 @@ } }, "node_modules/watchpack": { - "version": "2.4.0", - "resolved": 
"https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", - "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.4.tgz", + "integrity": "sha512-c5EGNOiyxxV5qmTtAB7rbiXxi1ooX1pQKMLX/MIabJjRA0SJBQOjKF+KSVfHkr9U1cADPon0mRiVe/riyaiDUA==", + "license": "MIT", "dependencies": { "glob-to-regexp": "^0.4.1", "graceful-fs": "^4.1.2" @@ -14164,33 +14184,34 @@ } }, "node_modules/webpack": { - "version": "5.90.3", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.90.3.tgz", - "integrity": "sha512-h6uDYlWCctQRuXBs1oYpVe6sFcWedl0dpcVaTf/YF67J9bKvwJajFulMVSYKHrksMB3I/pIagRzDxwxkebuzKA==", - "dependencies": { - "@types/eslint-scope": "^3.7.3", - "@types/estree": "^1.0.5", - "@webassemblyjs/ast": "^1.11.5", - "@webassemblyjs/wasm-edit": "^1.11.5", - "@webassemblyjs/wasm-parser": "^1.11.5", - "acorn": "^8.7.1", - "acorn-import-assertions": "^1.9.0", - "browserslist": "^4.21.10", + "version": "5.99.9", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.99.9.tgz", + "integrity": "sha512-brOPwM3JnmOa+7kd3NsmOUOwbDAj8FT9xDsG3IW0MgbN9yZV7Oi/s/+MNQ/EcSMqw7qfoRyXPoeEWT8zLVdVGg==", + "license": "MIT", + "dependencies": { + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", + "@webassemblyjs/ast": "^1.14.1", + "@webassemblyjs/wasm-edit": "^1.14.1", + "@webassemblyjs/wasm-parser": "^1.14.1", + "acorn": "^8.14.0", + "browserslist": "^4.24.0", "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.15.0", + "enhanced-resolve": "^5.17.1", "es-module-lexer": "^1.2.1", "eslint-scope": "5.1.1", "events": "^3.2.0", "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.2.9", + "graceful-fs": "^4.2.11", "json-parse-even-better-errors": "^2.3.1", "loader-runner": "^4.2.0", "mime-types": "^2.1.27", "neo-async": "^2.6.2", - "schema-utils": "^3.2.0", + "schema-utils": "^4.3.2", "tapable": 
"^2.1.1", - "terser-webpack-plugin": "^5.3.10", - "watchpack": "^2.4.0", + "terser-webpack-plugin": "^5.3.11", + "watchpack": "^2.4.1", "webpack-sources": "^3.2.3" }, "bin": { @@ -14244,9 +14265,10 @@ } }, "node_modules/webpack-dev-middleware": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", - "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", + "license": "MIT", "dependencies": { "colorette": "^2.0.10", "memfs": "^3.4.3", @@ -14269,6 +14291,7 @@ "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -14277,6 +14300,7 @@ "version": "2.1.35", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", "dependencies": { "mime-db": "1.52.0" }, @@ -14288,14 +14312,16 @@ "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/webpack-dev-server": { - "version": "4.15.1", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.1.tgz", - "integrity": "sha512-5hbAst3h3C3L8w6W4P96L5vaV0PxSmJhxZvWKYIdgxOQm8pNZ5dEOmmSLBVpP85ReeyRt6AS1QJNyo/oFFPeVA==", + "version": "4.15.2", + "resolved": 
"https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", + "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", + "license": "MIT", "dependencies": { "@types/bonjour": "^3.5.9", "@types/connect-history-api-fallback": "^1.3.5", @@ -14325,7 +14351,7 @@ "serve-index": "^1.9.1", "sockjs": "^0.3.24", "spdy": "^4.0.2", - "webpack-dev-middleware": "^5.3.1", + "webpack-dev-middleware": "^5.3.4", "ws": "^8.13.0" }, "bin": { @@ -14351,9 +14377,10 @@ } }, "node_modules/webpack-dev-server/node_modules/ws": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", - "integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + "version": "8.18.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.2.tgz", + "integrity": "sha512-DMricUmwGZUVr++AEAe2uiVM7UoO9MAVZMDu05UQOaUII0lp+zOzLLU4Xqh/JvTqklB1T4uELaaPBKyjE1r4fQ==", + "license": "MIT", "engines": { "node": ">=10.0.0" }, @@ -14391,34 +14418,6 @@ "node": ">=10.13.0" } }, - "node_modules/webpack/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/webpack/node_modules/json-schema-traverse": { - "version": "0.4.1", - 
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, "node_modules/webpack/node_modules/mime-db": { "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", @@ -14438,23 +14437,6 @@ "node": ">= 0.6" } }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, "node_modules/webpackbar": { "version": "5.0.2", "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", @@ -14595,9 +14577,10 @@ } }, "node_modules/ws": { - "version": "7.5.9", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", - "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "license": "MIT", "engines": { "node": ">=8.3.0" }, From 6e16e631cf6241c8a5d257fadd20f07f3fa3f96d Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 13 Jun 2025 15:44:27 -0400 Subject: [PATCH 253/270] enhance: provide contexts to credential helpers when listing credentials (#979) Signed-off-by: Grant Linville --- Makefile | 2 +- pkg/credentials/store.go | 5 +- pkg/credentials/toolstore.go | 64 ++++++++++++++- pkg/credentials/toolstore_test.go | 130 ++++++++++++++++++++++++++++++ 4 files 
changed, 195 insertions(+), 6 deletions(-) create mode 100644 pkg/credentials/toolstore_test.go diff --git a/Makefile b/Makefile index e9f079f6..284c94c9 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,7 @@ init-docs: # Ensure docs build without errors. Makes sure generated docs are in-sync with CLI. validate-docs: gen-docs - docker run --rm --workdir=/docs -v $${PWD}/docs:/docs node:18-buster yarn build + docker run --rm --workdir=/docs -v $${PWD}/docs:/docs node:18-buster npm run build if [ -n "$$(git status --porcelain --untracked-files=no)" ]; then \ git status --porcelain --untracked-files=no; \ echo "Encountered dirty repo!"; \ diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index def6ff89..6e5d24ca 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -269,8 +269,9 @@ func (s *Store) recreateCredential(store credentials.Store, serverAddress string func (s *Store) getStore() (credentials.Store, error) { if s.program != nil { return &toolCredentialStore{ - file: credentials.NewFileStore(s.cfg), - program: s.program, + file: credentials.NewFileStore(s.cfg), + program: s.program, + contexts: s.credCtxs, }, nil } return credentials.NewFileStore(s.cfg), nil diff --git a/pkg/credentials/toolstore.go b/pkg/credentials/toolstore.go index 536e2aa1..66d4d38d 100644 --- a/pkg/credentials/toolstore.go +++ b/pkg/credentials/toolstore.go @@ -1,7 +1,10 @@ package credentials import ( + "bytes" + "encoding/json" "errors" + "fmt" "net/url" "regexp" "strings" @@ -13,8 +16,9 @@ import ( ) type toolCredentialStore struct { - file credentials.Store - program client.ProgramFunc + file credentials.Store + program client.ProgramFunc + contexts []string } func (h *toolCredentialStore) Erase(serverAddress string) error { @@ -42,8 +46,21 @@ func (h *toolCredentialStore) Get(serverAddress string) (types.AuthConfig, error }, nil } +// GetAll will list all credentials in the credential store. 
+// It MAY (but is not required to) filter the credentials based on the contexts provided. +// This is only supported by some credential stores, while others will ignore it and return all credentials. +// The caller of this function is still required to filter the output to only include the contexts requested. func (h *toolCredentialStore) GetAll() (map[string]types.AuthConfig, error) { - serverAddresses, err := client.List(h.program) + var ( + serverAddresses map[string]string + err error + ) + if len(h.contexts) == 0 { + serverAddresses, err = client.List(h.program) + } else { + serverAddresses, err = listWithContexts(h.program, h.contexts) + } + if err != nil { return nil, err } @@ -94,3 +111,44 @@ func (h *toolCredentialStore) Store(authConfig types.AuthConfig) error { Secret: authConfig.Password, }) } + +// listWithContexts is almost an exact copy of the List function in Docker's libraries, +// the only difference being that we pass the context through as input to the program. +// This will allow some credential stores, like Postgres, to do an optimized list. 
+func listWithContexts(program client.ProgramFunc, contexts []string) (map[string]string, error) { + cmd := program(credentials2.ActionList) + + contextsJSON, err := json.Marshal(contexts) + if err != nil { + return nil, err + } + + cmd.Input(bytes.NewReader(contextsJSON)) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) + } + + var resp map[string]string + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { + return nil, err + } + + return resp, nil +} + +func isValidCredsMessage(msg string) error { + if credentials2.IsCredentialsMissingServerURLMessage(msg) { + return credentials2.NewErrCredentialsMissingServerURL() + } + if credentials2.IsCredentialsMissingUsernameMessage(msg) { + return credentials2.NewErrCredentialsMissingUsername() + } + return nil +} diff --git a/pkg/credentials/toolstore_test.go b/pkg/credentials/toolstore_test.go new file mode 100644 index 00000000..fdbae25c --- /dev/null +++ b/pkg/credentials/toolstore_test.go @@ -0,0 +1,130 @@ +package credentials + +import ( + "encoding/json" + "fmt" + "io" + "testing" + + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" +) + +type mockProgram struct { + // mode is either "db" or "normal" + // db mode will honor contexts, normal mode will not + mode string + action string + contexts []string +} + +func (m *mockProgram) Input(in io.Reader) { + switch m.action { + case credentials.ActionList: + var contexts []string + if err := json.NewDecoder(in).Decode(&contexts); err == nil && len(contexts) > 0 { + m.contexts = contexts + } + } + // TODO: add other cases here as needed +} + +func (m *mockProgram) Output() ([]byte, error) { + switch m.action { + case 
credentials.ActionList: + switch m.mode { + case "db": + // Return only credentials that are in the list of contexts. + creds := make(map[string]string) + for _, context := range m.contexts { + creds[fmt.Sprintf("https://example///%s", context)] = "username" + } + return json.Marshal(creds) + case "normal": + // Return credentials in the list of contexts, plus some made up extras. + creds := make(map[string]string) + for _, context := range m.contexts { + creds[fmt.Sprintf("https://example///%s", context)] = "username" + } + creds[fmt.Sprintf("https://example///%s", "otherContext1")] = "username" + creds[fmt.Sprintf("https://example///%s", "otherContext2")] = "username" + return json.Marshal(creds) + } + } + return nil, nil +} + +func newMockProgram(t *testing.T, mode string) client.ProgramFunc { + t.Helper() + return func(args ...string) client.Program { + p := &mockProgram{ + mode: mode, + } + if len(args) > 0 { + p.action = args[0] + } + return p + } +} + +func TestGetAll(t *testing.T) { + dbProgram := newMockProgram(t, "db") + normalProgram := newMockProgram(t, "normal") + + tests := []struct { + name string + program client.ProgramFunc + wantErr bool + contexts []string + expected map[string]types.AuthConfig + }{ + {name: "db", program: dbProgram, wantErr: false, contexts: []string{"credctx"}, expected: map[string]types.AuthConfig{ + "https://example///credctx": { + Username: "username", + ServerAddress: "https://example///credctx", + }, + }}, + {name: "normal", program: normalProgram, wantErr: false, contexts: []string{"credctx"}, expected: map[string]types.AuthConfig{ + "https://example///credctx": { + Username: "username", + ServerAddress: "https://example///credctx", + }, + "https://example///otherContext1": { + Username: "username", + ServerAddress: "https://example///otherContext1", + }, + "https://example///otherContext2": { + Username: "username", + ServerAddress: "https://example///otherContext2", + }, + }}, + } + + for _, test := range tests { + 
t.Run(test.name, func(t *testing.T) { + store := &toolCredentialStore{ + program: test.program, + contexts: test.contexts, + } + got, err := store.GetAll() + if (err != nil) != test.wantErr { + t.Errorf("GetAll() error = %v, wantErr %v", err, test.wantErr) + } + if len(got) != len(test.expected) { + t.Errorf("GetAll() got %d credentials, want %d", len(got), len(test.expected)) + } + for name, cred := range got { + if _, ok := test.expected[name]; !ok { + t.Errorf("GetAll() got unexpected credential: %s", name) + } + if got[name].Username != test.expected[name].Username { + t.Errorf("GetAll() got unexpected username for %s", cred.ServerAddress) + } + if got[name].Username != test.expected[name].Username { + t.Errorf("GetAll() got unexpected username for %s", name) + } + } + }) + } +} From 70db87d90468546495a717bc65f49e70a5bcd0e8 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 13 Jun 2025 21:45:19 -0400 Subject: [PATCH 254/270] chore: bump nanobot to pickup HTTP streamable fix (#982) Signed-off-by: Donnie Adams --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 72324362..1956ae29 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 github.com/mholt/archives v0.1.0 - github.com/nanobot-ai/nanobot v0.0.6-0.20250612211144-0a23cf13a10f + github.com/nanobot-ai/nanobot v0.0.6-0.20250614013307-b0dcecdd9510 github.com/pkoukk/tiktoken-go v0.1.7 github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 github.com/rs/cors v1.11.0 diff --git a/go.sum b/go.sum index e1f6e2dd..9ce21ece 100644 --- a/go.sum +++ b/go.sum @@ -311,8 +311,8 @@ github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= 
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= -github.com/nanobot-ai/nanobot v0.0.6-0.20250612211144-0a23cf13a10f h1:p/YUKTP0n5w/YByPm+UPPSpp5d9m/VJB0dbQnQ5naPo= -github.com/nanobot-ai/nanobot v0.0.6-0.20250612211144-0a23cf13a10f/go.mod h1:XAvQcMgztKKR8Ul7/i28MfepoyC72ZGwG3uzAIH9F6c= +github.com/nanobot-ai/nanobot v0.0.6-0.20250614013307-b0dcecdd9510 h1:kBJ38jH3Fhm4BOxAE5nwwOwnjFEzxTnPMsskf2NyCbw= +github.com/nanobot-ai/nanobot v0.0.6-0.20250614013307-b0dcecdd9510/go.mod h1:XAvQcMgztKKR8Ul7/i28MfepoyC72ZGwG3uzAIH9F6c= github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAmxBiA= github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= From a1f3754d4147840959f78d1f2f25164ab37865a4 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 17 Jun 2025 09:23:53 -0400 Subject: [PATCH 255/270] chore: replace huma with a fork that properly unmarshal the Schema type (#983) Signed-off-by: Donnie Adams --- go.mod | 16 +++++++++------- go.sum | 31 ++++++++++++++++--------------- 2 files changed, 25 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index 1956ae29..c32461f5 100644 --- a/go.mod +++ b/go.mod @@ -4,12 +4,14 @@ go 1.24.2 toolchain go1.24.4 +replace github.com/danielgtaylor/huma/v2 => github.com/gptscript-ai/huma v0.0.0-20250617131016-b2081da6c65b + require ( github.com/AlecAivazis/survey/v2 v2.3.7 github.com/BurntSushi/locker v0.0.0-20171006230638-a6e239ea1c69 github.com/adrg/xdg v0.4.0 github.com/chzyer/readline v1.5.1 - github.com/danielgtaylor/huma/v2 v2.32.0 + github.com/danielgtaylor/huma/v2 v2.32.1-0.20250509235652-c7ead6f3c67f github.com/docker/cli v26.0.0+incompatible github.com/docker/docker-credential-helpers v0.8.1 github.com/fatih/color v1.17.0 @@ -20,7 +22,7 @@ require ( github.com/gptscript-ai/broadcaster 
v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d github.com/gptscript-ai/cmd v0.0.0-20250530150401-bc71fddf8070 - github.com/gptscript-ai/go-gptscript v0.9.6-0.20250520154649-f1616a06f1b0 + github.com/gptscript-ai/go-gptscript v0.9.6-0.20250617131750-9129819aea51 github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 @@ -95,12 +97,12 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/lithammer/fuzzysearch v1.1.8 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect @@ -136,11 +138,11 @@ require ( github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.31.0 // indirect + golang.org/x/crypto v0.38.0 // indirect golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.33.0 // indirect + golang.org/x/net v0.40.0 // indirect golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/text v0.25.0 // indirect golang.org/x/tools v0.23.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect mvdan.cc/gofumpt v0.6.0 // indirect diff --git a/go.sum b/go.sum index 9ce21ece..4699026a 100644 --- a/go.sum +++ b/go.sum @@ -111,8 +111,6 @@ github.com/creack/pty v1.1.17 
h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/danielgtaylor/huma/v2 v2.32.0 h1:ytU9ExG/axC434+soXxwNzv0uaxOb3cyCgjj8y3PmBE= -github.com/danielgtaylor/huma/v2 v2.32.0/go.mod h1:9BxJwkeoPPDEJ2Bg4yPwL1mM1rYpAwCAWFKoo723spk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -215,8 +213,10 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1 github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20250530150401-bc71fddf8070 h1:xm5ZZFraWFwxyE7TBEncCXArubCDZTwG6s5bpMzqhSY= github.com/gptscript-ai/cmd v0.0.0-20250530150401-bc71fddf8070/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.6-0.20250520154649-f1616a06f1b0 h1:UXZRFAUPDWOgeTyjZd4M8YrEEgPc7XOfjgbm81w7x0w= -github.com/gptscript-ai/go-gptscript v0.9.6-0.20250520154649-f1616a06f1b0/go.mod h1:t2TyiEa6rhd4reOcorAMUmd5MledmZuTmYrO7rV3Iy8= +github.com/gptscript-ai/go-gptscript v0.9.6-0.20250617131750-9129819aea51 h1:9s53UDNVXF+ujMwhg/7LiZlIMYOpn2Ap8WBc1i4Pi/Y= +github.com/gptscript-ai/go-gptscript v0.9.6-0.20250617131750-9129819aea51/go.mod h1:LQ4E2g+t+L/it13Le5m9Hfgn4huS8bO4hcTawFlUzSY= +github.com/gptscript-ai/huma v0.0.0-20250617131016-b2081da6c65b h1:QReUetqY+ep2sj6g83oqldPHzwH2T2TG1sv0IWE2hL0= +github.com/gptscript-ai/huma v0.0.0-20250617131016-b2081da6c65b/go.mod h1:y2Eq35Y5Xy6+MZRPgn81/bjNBiEHqEQba+vY+fLigjU= 
github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 h1:wQC8sKyeGA50WnCEG+Jo5FNRIkuX3HX8d3ubyWCCoI8= github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9/go.mod h1:iwHxuueg2paOak7zIg0ESBWx7A0wIHGopAratbgaPNY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -257,15 +257,15 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod 
h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -284,8 +284,9 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -443,8 +444,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -501,8 +502,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -583,8 +584,8 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 0494b4a2e662074b0c9ec14450d0d783481ae073 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 19 Jun 2025 13:05:13 -0400 Subject: [PATCH 256/270] enhance: allow passing of MCP client options when creating clients (#984) Signed-off-by: Donnie Adams --- pkg/mcp/client.go | 4 ++-- pkg/mcp/loader.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/mcp/client.go b/pkg/mcp/client.go index 73f4e0d9..f4ecee15 100644 --- a/pkg/mcp/client.go +++ b/pkg/mcp/client.go @@ -4,8 +4,8 @@ import ( nmcp "github.com/nanobot-ai/nanobot/pkg/mcp" ) -func (l *Local) Client(server ServerConfig) (*Client, error) { - session, err := l.loadSession(server, "default") +func (l *Local) Client(server ServerConfig, clientOpts ...nmcp.ClientOption) (*Client, error) { + session, err := l.loadSession(server, "default", clientOpts...) if err != nil { return nil, err } diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index 87e2d0bc..3e28a8d4 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -276,7 +276,7 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s return toolDefs, nil } -func (l *Local) loadSession(server ServerConfig, serverName string) (*Session, error) { +func (l *Local) loadSession(server ServerConfig, serverName string, clientOpts ...nmcp.ClientOption) (*Session, error) { id := hash.Digest(server) l.lock.Lock() existing, ok := l.sessions[id] @@ -296,7 +296,7 @@ func (l *Local) loadSession(server ServerConfig, serverName string) (*Session, e Args: server.Args, BaseURL: server.GetBaseURL(), Headers: splitIntoMap(server.Headers), - }) + }, clientOpts...) 
if err != nil { return nil, fmt.Errorf("failed to create MCP stdio client: %w", err) } From ea419a794e0ecff4e92c659bfedf80c63406d3ef Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 20 Jun 2025 10:04:50 -0400 Subject: [PATCH 257/270] chore: bump nanobot to pickup stdio fix (#985) Signed-off-by: Donnie Adams --- go.mod | 5 +++-- go.sum | 11 ++++++----- pkg/mcp/loader.go | 11 +++++------ 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index c32461f5..2ae5d00e 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 github.com/mholt/archives v0.1.0 - github.com/nanobot-ai/nanobot v0.0.6-0.20250614013307-b0dcecdd9510 + github.com/nanobot-ai/nanobot v0.0.6-0.20250620135741-a1afee774884 github.com/pkoukk/tiktoken-go v0.1.7 github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 github.com/rs/cors v1.11.0 @@ -66,11 +66,12 @@ require ( github.com/charmbracelet/glamour v0.7.0 // indirect github.com/charmbracelet/lipgloss v1.1.0 // indirect github.com/charmbracelet/x/ansi v0.8.0 // indirect - github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect + github.com/charmbracelet/x/cellbuf v0.0.13 // indirect github.com/charmbracelet/x/term v0.2.1 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/containerd/console v1.0.4 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/creack/pty v1.1.24 // indirect github.com/cyphar/filepath-securejoin v0.2.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dlclark/regexp2 v1.11.4 // indirect diff --git a/go.sum b/go.sum index 4699026a..25e7105d 100644 --- a/go.sum +++ b/go.sum @@ -85,8 +85,8 @@ github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoF github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= 
github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= -github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= -github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= +github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -107,8 +107,9 @@ github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6 github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -312,8 +313,8 @@ github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow 
v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= -github.com/nanobot-ai/nanobot v0.0.6-0.20250614013307-b0dcecdd9510 h1:kBJ38jH3Fhm4BOxAE5nwwOwnjFEzxTnPMsskf2NyCbw= -github.com/nanobot-ai/nanobot v0.0.6-0.20250614013307-b0dcecdd9510/go.mod h1:XAvQcMgztKKR8Ul7/i28MfepoyC72ZGwG3uzAIH9F6c= +github.com/nanobot-ai/nanobot v0.0.6-0.20250620135741-a1afee774884 h1:sZhePJP/7Kh5WLeujUI/39Cysn6APii09s0aciRS+ig= +github.com/nanobot-ai/nanobot v0.0.6-0.20250620135741-a1afee774884/go.mod h1:okGlfo6y6kP/mFLN4XpKkRIYzU9EXXjPO2KlcafbwrM= github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAmxBiA= github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index 3e28a8d4..13b9e4fc 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -290,12 +290,11 @@ func (l *Local) loadSession(server ServerConfig, serverName string, clientOpts . } c, err := nmcp.NewClient(l.sessionCtx, serverName, nmcp.Server{ - Unsandboxed: true, - Env: splitIntoMap(server.Env), - Command: server.Command, - Args: server.Args, - BaseURL: server.GetBaseURL(), - Headers: splitIntoMap(server.Headers), + Env: splitIntoMap(server.Env), + Command: server.Command, + Args: server.Args, + BaseURL: server.GetBaseURL(), + Headers: splitIntoMap(server.Headers), }, clientOpts...) 
if err != nil { return nil, fmt.Errorf("failed to create MCP stdio client: %w", err) From 7a49337eef7be3771033c606bf9ff72bdad52e5b Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 23 Jun 2025 13:57:41 -0400 Subject: [PATCH 258/270] chore: bump nanobot to fix empty init message issue (#986) Signed-off-by: Donnie Adams --- go.mod | 2 +- go.sum | 4 ++-- pkg/mcp/loader.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 2ae5d00e..140d511a 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 github.com/mholt/archives v0.1.0 - github.com/nanobot-ai/nanobot v0.0.6-0.20250620135741-a1afee774884 + github.com/nanobot-ai/nanobot v0.0.6-0.20250623174223-c75713af7a09 github.com/pkoukk/tiktoken-go v0.1.7 github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 github.com/rs/cors v1.11.0 diff --git a/go.sum b/go.sum index 25e7105d..9c280bad 100644 --- a/go.sum +++ b/go.sum @@ -313,8 +313,8 @@ github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= -github.com/nanobot-ai/nanobot v0.0.6-0.20250620135741-a1afee774884 h1:sZhePJP/7Kh5WLeujUI/39Cysn6APii09s0aciRS+ig= -github.com/nanobot-ai/nanobot v0.0.6-0.20250620135741-a1afee774884/go.mod h1:okGlfo6y6kP/mFLN4XpKkRIYzU9EXXjPO2KlcafbwrM= +github.com/nanobot-ai/nanobot v0.0.6-0.20250623174223-c75713af7a09 h1:nMo9dQvmdetj+INyOvg37igNG1Q3nWzXCOnNRDDNv7M= +github.com/nanobot-ai/nanobot v0.0.6-0.20250623174223-c75713af7a09/go.mod h1:okGlfo6y6kP/mFLN4XpKkRIYzU9EXXjPO2KlcafbwrM= github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAmxBiA= github.com/nightlyone/lockfile 
v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index 13b9e4fc..086f74bb 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -297,7 +297,7 @@ func (l *Local) loadSession(server ServerConfig, serverName string, clientOpts . Headers: splitIntoMap(server.Headers), }, clientOpts...) if err != nil { - return nil, fmt.Errorf("failed to create MCP stdio client: %w", err) + return nil, fmt.Errorf("failed to create MCP client: %w", err) } result := &Session{ From d845bab986068d40728b0393f03caf226f3c2902 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Mon, 23 Jun 2025 14:54:18 -0400 Subject: [PATCH 259/270] test: remove mistral from smoke tests The smoke tests for `mistral-large-2402` are failing because its responses for certain test cases have changed since the initial golden files were generated. After a bit of investigation, it looks like this model was deprecated in November 2024 and retired in June 2025. Also, from what I can tell, contemporary Mistral models like `mistral-large-2411` and `mistral-medium-2505` don't actually make the expected tool calls required to pass GPTScript's smoke tests, so swapping out the model is not feasible at this time. In light of these findings, remove mistral from the set of smoke tested model providers. 
Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- .github/workflows/smoke.yaml | 31 - .../Bob/mistral-large-2402-expected.json | 643 ------------------ .../mistral-large-2402-expected.json | 633 ----------------- 3 files changed, 1307 deletions(-) delete mode 100644 pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json delete mode 100644 pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index 9f736949..d3a40aa6 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -147,34 +147,3 @@ jobs: echo "Running smoke test for model claude-3-5-sonnet-20240620" export PATH="$(pwd)/bin:${PATH}" make smoke - - mistral-large-2402: - needs: check-label - if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} - runs-on: ubuntu-22.04 - steps: - - name: Checkout base repository - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - name: Checkout PR code if running for a PR - if: ${{ github.event_name == 'pull_request_target' }} - uses: actions/checkout@v4 - with: - fetch-depth: 1 - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.event.pull_request.head.ref }} - - uses: actions/setup-go@v5 - with: - cache: false - go-version: "1.21" - - env: - OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} - GPTSCRIPT_DEFAULT_MODEL: mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider - MISTRAL_API_KEY: ${{ secrets.SMOKE_MISTRAL_API_KEY }} - GPTSCRIPT_CREDENTIAL_OVERRIDE: "github.com/gptscript-ai/mistral-laplateforme-provider/credential:MISTRAL_API_KEY" - name: Run smoke test for mistral-large-2402 - run: | - echo "Running smoke test for model mistral-large-2402" - export PATH="$(pwd)/bin:${PATH}" - make smoke diff --git a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json deleted file mode 100644 index 
ca392c03..00000000 --- a/pkg/tests/smoke/testdata/Bob/mistral-large-2402-expected.json +++ /dev/null @@ -1,643 +0,0 @@ -[ - { - "time": "2024-10-14T18:59:18.199427-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-10-14T18:59:18.19975-04:00", - "callContext": { - "id": "1728946759", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-10-14T18:59:19.063682-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-10-14T18:59:19.063951-04:00", - "callContext": { - "id": "1728946760", - "tool": { - "name": "Mistral La Plateforme Provider", - "description": "Model provider for Mistral models running on La Plateforme", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider", - 
"toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" - } - ] - }, - "localTools": { - "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "aa4353e7d1de7e90e1078bfbc88526266e587a64" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-10-14T18:59:20.078127-04:00", - "callContext": { - "id": "1728946760", - "tool": { - "name": "Mistral La Plateforme Provider", - "description": "Model provider for Mistral models running on La Plateforme", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as 
github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" - } - ] - }, - "localTools": { - "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "aa4353e7d1de7e90e1078bfbc88526266e587a64" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callFinish", - "usage": {}, - "content": 
"http://127.0.0.1:10912" - }, - { - "time": "2024-10-14T18:59:20.078235-04:00", - "type": "runFinish", - "usage": {} - }, - { - "time": "2024-10-14T18:59:20.078285-04:00", - "callContext": { - "id": "1728946759", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728946761", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T18:59:21.857633-04:00", - "callContext": { - "id": "1728946759", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728946761", - "usage": { - "promptTokens": 195, - "completionTokens": 23, - "totalTokens": 218 - }, - "chatResponse": { - "role": "assistant", - 
"content": [ - { - "toolCall": { - "index": 0, - "id": "pIj9ljPqt", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - } - ], - "usage": { - "promptTokens": 195, - "completionTokens": 23, - "totalTokens": 218 - } - } - }, - { - "time": "2024-10-14T18:59:21.858005-04:00", - "callContext": { - "id": "1728946759", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolSubCalls": { - "pIj9ljPqt": { - "toolID": "testdata/Bob/test.gpt:bob", - "input": "{\"question\": \"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": "2024-10-14T18:59:21.858212-04:00", - "callContext": { - "id": "pIj9ljPqt", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728946759" - }, - "type": "callStart", - "usage": {}, - "content": "{\"question\": \"how are you doing\"}" - }, - { - "time": "2024-10-14T18:59:22.381191-04:00", - "callContext": { - "id": "pIj9ljPqt", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728946759" - }, - "type": "callChat", - "chatCompletionId": "1728946762", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T18:59:23.160275-04:00", - "callContext": { - "id": "pIj9ljPqt", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728946759" - }, - "type": "callChat", - "chatCompletionId": "1728946762", - "usage": { - "promptTokens": 163, - "completionTokens": 18, - "totalTokens": 181 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": { - "promptTokens": 163, - "completionTokens": 18, - "totalTokens": 181 - } - } - }, - { - "time": "2024-10-14T18:59:23.160433-04:00", - "callContext": { - "id": "pIj9ljPqt", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728946759" - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-10-14T18:59:23.160522-04:00", - "callContext": { - "id": "1728946759", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-10-14T18:59:23.531261-04:00", - "callContext": { - "id": "1728946759", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728946763", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T18:59:24.303745-04:00", - "callContext": { - "id": "1728946759", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - 
"instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728946763", - "usage": { - "promptTokens": 252, - "completionTokens": 18, - "totalTokens": 270 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" - } - ], - "usage": { - "promptTokens": 252, - "completionTokens": 18, - "totalTokens": 270 - } - } - }, - { - "time": "2024-10-14T18:59:24.303903-04:00", - "callContext": { - "id": "1728946759", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-10-14T18:59:24.303961-04:00", - "type": "runFinish", - "usage": {} - } -] diff --git a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json b/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json deleted file mode 100644 index 4506754b..00000000 --- a/pkg/tests/smoke/testdata/BobAsShell/mistral-large-2402-expected.json +++ /dev/null @@ -1,633 +0,0 @@ -[ - { - "time": "2024-10-14T17:38:47.018065-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-10-14T17:38:47.018394-04:00", - "callContext": { - "id": "1728941928", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-10-14T17:38:47.47198-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-10-14T17:38:47.472449-04:00", - "callContext": { - "id": "1728941929", - "tool": { - "name": "Mistral La Plateforme Provider", - "description": "Model provider for Mistral models running on La Plateforme", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API 
Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" - } - ] - }, - "localTools": { - "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "aa4353e7d1de7e90e1078bfbc88526266e587a64" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-10-14T17:38:50.566081-04:00", - "callContext": { - "id": "1728941929", - "tool": { - "name": "Mistral La Plateforme 
Provider", - "description": "Model provider for Mistral models running on La Plateforme", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/mistral-laplateforme-provider/credential with \"Please enter your Mistral La Plateforme API Key\" as message and token as field and \"MISTRAL_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" - } - ] - }, - "localTools": { - "mistral la plateforme provider": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt:Mistral La Plateforme Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/mistral-laplateforme-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "aa4353e7d1de7e90e1078bfbc88526266e587a64" - } - }, - "workingDir": 
"https://raw.githubusercontent.com/gptscript-ai/mistral-laplateforme-provider/aa4353e7d1de7e90e1078bfbc88526266e587a64" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callFinish", - "usage": {}, - "content": "http://127.0.0.1:11133" - }, - { - "time": "2024-10-14T17:38:50.56681-04:00", - "type": "runFinish", - "usage": {} - }, - { - "time": "2024-10-14T17:38:50.567218-04:00", - "callContext": { - "id": "1728941928", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728941930", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T17:38:51.51096-04:00", - "callContext": { - "id": "1728941928", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" 
- }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728941930", - "usage": { - "promptTokens": 195, - "completionTokens": 23, - "totalTokens": 218 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "KLMoUpwIL", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - } - ], - "usage": { - "promptTokens": 195, - "completionTokens": 23, - "totalTokens": 218 - } - } - }, - { - "time": "2024-10-14T17:38:51.511569-04:00", - "callContext": { - "id": "1728941928", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolSubCalls": { - "KLMoUpwIL": { - "toolID": "testdata/BobAsShell/test.gpt:bob", - "input": "{\"question\": \"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": "2024-10-14T17:38:51.511777-04:00", - "callContext": { - "id": "KLMoUpwIL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The 
question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728941928", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callStart", - "usage": {}, - "content": "{\"question\": \"how are you doing\"}" - }, - { - "time": "2024-10-14T17:38:51.513152-04:00", - "callContext": { - "id": "KLMoUpwIL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728941928", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1728941931", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T17:38:51.528154-04:00", - "callContext": { - "id": "KLMoUpwIL", - "tool": { - "name": "bob", - "description": "I'm Bob, a 
friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728941928", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1728941931", - "usage": {}, - "chatResponse": { - "usage": {} - } - }, - { - "time": "2024-10-14T17:38:51.528298-04:00", - "callContext": { - "id": "KLMoUpwIL", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728941928", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callFinish", - "usage": {}, - "content": 
"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - }, - { - "time": "2024-10-14T17:38:51.528421-04:00", - "callContext": { - "id": "1728941928", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-10-14T17:38:51.894619-04:00", - "callContext": { - "id": "1728941928", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728941932", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T17:38:52.586731-04:00", - "callContext": { - "id": 
"1728941928", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728941932", - "usage": { - "promptTokens": 254, - "completionTokens": 18, - "totalTokens": 272 - }, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": { - "promptTokens": 254, - "completionTokens": 18, - "totalTokens": 272 - } - } - }, - { - "time": "2024-10-14T17:38:52.587128-04:00", - "callContext": { - "id": "1728941928", - "tool": { - "modelName": "mistral-large-2402 from github.com/gptscript-ai/mistral-laplateforme-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-10-14T17:38:52.587221-04:00", - "type": "runFinish", - "usage": {} - } -] From 30c8c32e5d5dbb19db2dd7cc414304aa3298a231 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 15 Jul 2025 15:35:35 -0400 Subject: [PATCH 260/270] chore: switch from huma to go-sdk JSON Schema (#990) * chore: switch from huma to go-sdk JSON Schema Signed-off-by: Donnie Adams --- go.mod | 18 +++-- go.sum | 40 +++++------ pkg/loader/loader_test.go | 8 +-- pkg/loader/openapi.go | 65 +++++++++-------- .../testdata/openapi/TestOpenAPIv2.golden | 28 ++++---- .../openapi/TestOpenAPIv2Revamp.golden | 14 ++-- .../testdata/openapi/TestOpenAPIv3.golden | 42 ++++++----- .../TestOpenAPIv3NoOperationIDs.golden | 46 ++++++------ .../TestOpenAPIv3NoOperationIDsRevamp.golden | 14 ++-- .../openapi/TestOpenAPIv3Revamp.golden | 14 ++-- pkg/mcp/loader.go | 4 +- pkg/openai/client.go | 3 +- pkg/parser/parser.go | 8 +-- pkg/system/prompt.go | 10 +-- pkg/tests/runner2_test.go | 70 +++++++++---------- pkg/tests/runner_test.go | 16 ++--- pkg/tests/testdata/TestAgentOnly/call1.golden | 8 +-- pkg/tests/testdata/TestAgentOnly/call2.golden | 16 ++--- pkg/tests/testdata/TestAgentOnly/step1.golden | 24 +++---- pkg/tests/testdata/TestAgents/call1.golden | 16 ++--- pkg/tests/testdata/TestAgents/call2.golden | 8 +-- pkg/tests/testdata/TestAgents/call3.golden | 16 ++--- pkg/tests/testdata/TestAgents/step1.golden | 40 +++++------ pkg/tests/testdata/TestAsterick/call1.golden | 16 ++--- pkg/tests/testdata/TestCase/call1.golden | 8 +-- pkg/tests/testdata/TestCase2/call1.golden | 8 +-- .../testdata/TestContextSubChat/call1.golden | 8 +-- .../testdata/TestContextSubChat/call2.golden | 8 +-- .../testdata/TestDualSubChat/call1.golden | 16 ++--- .../testdata/TestDualSubChat/call2.golden | 8 +-- .../testdata/TestDualSubChat/call3.golden | 8 +-- .../testdata/TestDualSubChat/call4.golden | 8 +-- .../testdata/TestDualSubChat/call5.golden | 8 +-- .../testdata/TestDualSubChat/call6.golden | 8 +-- 
.../testdata/TestDualSubChat/call7.golden | 16 ++--- .../testdata/TestDualSubChat/step1.golden | 32 ++++----- .../testdata/TestDualSubChat/step2.golden | 24 +++---- .../testdata/TestDualSubChat/step3.golden | 24 +++---- pkg/tests/testdata/TestExport/call1.golden | 24 +++---- pkg/tests/testdata/TestExport/call3.golden | 24 +++---- .../testdata/TestExportContext/call1.golden | 16 ++--- pkg/tests/testdata/TestSubChat/call1.golden | 8 +-- .../testdata/TestSysContext/call1.golden | 8 +-- .../testdata/TestSysContext/step1.golden | 8 +-- pkg/tests/testdata/TestToolAs/call1.golden | 16 ++--- .../testdata/TestToolRefAll/call1.golden | 24 +++---- .../testdata/TestToolsChange/call1.golden | 28 ++++---- .../testdata/TestToolsChange/call2.golden | 20 +++--- .../testdata/TestToolsChange/step1.golden | 28 ++++---- .../testdata/TestToolsChange/step2.golden | 20 +++--- pkg/types/completion.go | 10 +-- pkg/types/jsonschema.go | 14 ++-- pkg/types/tool.go | 56 +++++++-------- 53 files changed, 522 insertions(+), 510 deletions(-) diff --git a/go.mod b/go.mod index 140d511a..770c14e1 100644 --- a/go.mod +++ b/go.mod @@ -4,14 +4,11 @@ go 1.24.2 toolchain go1.24.4 -replace github.com/danielgtaylor/huma/v2 => github.com/gptscript-ai/huma v0.0.0-20250617131016-b2081da6c65b - require ( github.com/AlecAivazis/survey/v2 v2.3.7 github.com/BurntSushi/locker v0.0.0-20171006230638-a6e239ea1c69 github.com/adrg/xdg v0.4.0 github.com/chzyer/readline v1.5.1 - github.com/danielgtaylor/huma/v2 v2.32.1-0.20250509235652-c7ead6f3c67f github.com/docker/cli v26.0.0+incompatible github.com/docker/docker-credential-helpers v0.8.1 github.com/fatih/color v1.17.0 @@ -22,12 +19,13 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d github.com/gptscript-ai/cmd v0.0.0-20250530150401-bc71fddf8070 - github.com/gptscript-ai/go-gptscript v0.9.6-0.20250617131750-9129819aea51 + 
github.com/gptscript-ai/go-gptscript v0.9.6-0.20250714170123-17ad44ae8c54 github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 github.com/mholt/archives v0.1.0 + github.com/modelcontextprotocol/go-sdk v0.2.0 github.com/nanobot-ai/nanobot v0.0.6-0.20250623174223-c75713af7a09 github.com/pkoukk/tiktoken-go v0.1.7 github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 @@ -40,7 +38,7 @@ require ( github.com/tidwall/gjson v1.17.1 github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sync v0.14.0 + golang.org/x/sync v0.15.0 golang.org/x/term v0.32.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.5.1 @@ -139,12 +137,12 @@ require ( github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.38.0 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.40.0 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/mod v0.25.0 // indirect + golang.org/x/net v0.41.0 // indirect golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.25.0 // indirect - golang.org/x/tools v0.23.0 // indirect + golang.org/x/text v0.26.0 // indirect + golang.org/x/tools v0.34.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect mvdan.cc/gofumpt v0.6.0 // indirect ) diff --git a/go.sum b/go.sum index 9c280bad..e33f7f84 100644 --- a/go.sum +++ b/go.sum @@ -214,10 +214,8 @@ github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1 github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= github.com/gptscript-ai/cmd v0.0.0-20250530150401-bc71fddf8070 h1:xm5ZZFraWFwxyE7TBEncCXArubCDZTwG6s5bpMzqhSY= github.com/gptscript-ai/cmd 
v0.0.0-20250530150401-bc71fddf8070/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.6-0.20250617131750-9129819aea51 h1:9s53UDNVXF+ujMwhg/7LiZlIMYOpn2Ap8WBc1i4Pi/Y= -github.com/gptscript-ai/go-gptscript v0.9.6-0.20250617131750-9129819aea51/go.mod h1:LQ4E2g+t+L/it13Le5m9Hfgn4huS8bO4hcTawFlUzSY= -github.com/gptscript-ai/huma v0.0.0-20250617131016-b2081da6c65b h1:QReUetqY+ep2sj6g83oqldPHzwH2T2TG1sv0IWE2hL0= -github.com/gptscript-ai/huma v0.0.0-20250617131016-b2081da6c65b/go.mod h1:y2Eq35Y5Xy6+MZRPgn81/bjNBiEHqEQba+vY+fLigjU= +github.com/gptscript-ai/go-gptscript v0.9.6-0.20250714170123-17ad44ae8c54 h1:9OAiDBdOQUHVL89wmb38+/XOuewboMhgnk6NqoJiC00= +github.com/gptscript-ai/go-gptscript v0.9.6-0.20250714170123-17ad44ae8c54/go.mod h1:HLPvKBhDtsEkyyUWefJVhPpl98R3tZG6ps7+mQ+EKVI= github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 h1:wQC8sKyeGA50WnCEG+Jo5FNRIkuX3HX8d3ubyWCCoI8= github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9/go.mod h1:iwHxuueg2paOak7zIg0ESBWx7A0wIHGopAratbgaPNY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -265,8 +263,8 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= +github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip 
v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -307,6 +305,8 @@ github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q= github.com/mholt/archives v0.1.0/go.mod h1:j/Ire/jm42GN7h90F5kzj6hf6ZFzEH66de+hmjEKu+I= github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= +github.com/modelcontextprotocol/go-sdk v0.2.0 h1:PESNYOmyM1c369tRkzXLY5hHrazj8x9CY1Xu0fLCryM= +github.com/modelcontextprotocol/go-sdk v0.2.0/go.mod h1:0sL9zUKKs2FTTkeCCVnKqbLJTw5TScefPAzojjU459E= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= @@ -408,8 +408,8 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= -github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -445,8 +445,8 
@@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -479,8 +479,8 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
@@ -503,8 +503,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -520,8 +520,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -585,8 +585,8 
@@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -618,8 +618,8 @@ golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/loader/loader_test.go 
b/pkg/loader/loader_test.go index 7c480034..a39c2df9 100644 --- a/pkg/loader/loader_test.go +++ b/pkg/loader/loader_test.go @@ -131,13 +131,13 @@ func TestHelloWorld(t *testing.T) { "modelName": "gpt-4o", "internalPrompt": null, "arguments": { + "type": "object", "properties": { "input": { - "description": "Any string", - "type": "string" + "type": "string", + "description": "Any string" } - }, - "type": "object" + } }, "instructions": "echo \"${input}\"", "id": "https://get.gptscript.ai/echo.gpt:", diff --git a/pkg/loader/openapi.go b/pkg/loader/openapi.go index ef61adfb..ce8c5dc6 100644 --- a/pkg/loader/openapi.go +++ b/pkg/loader/openapi.go @@ -11,10 +11,10 @@ import ( "strings" "time" - humav2 "github.com/danielgtaylor/huma/v2" "github.com/getkin/kin-openapi/openapi3" "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/gptscript-ai/gptscript/pkg/types" + "github.com/modelcontextprotocol/go-sdk/jsonschema" ) var toolNameRegex = regexp.MustCompile(`[^a-zA-Z0-9_-]+`) @@ -151,9 +151,9 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) Parameters: types.Parameters{ Name: toolName, Description: toolDesc, - Arguments: &humav2.Schema{ - Type: humav2.TypeObject, - Properties: make(map[string]*humav2.Schema), + Arguments: &jsonschema.Schema{ + Type: "object", + Properties: make(map[string]*jsonschema.Schema), }, }, }, @@ -174,7 +174,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) } // Add the new arg to the tool's arguments - tool.Arguments.Properties[param.Value.Name] = openAPI3SchemaToHumaV2Schema(arg) + tool.Arguments.Properties[param.Value.Name] = openAPI3SchemaToJSONSchema(arg) // Check whether it is required if param.Value.Required { @@ -227,7 +227,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) } // Unfortunately, the request body doesn't contain any good descriptor for it, // so we just use "requestBodyContent" as the name of the arg. 
- tool.Arguments.Properties["requestBodyContent"] = openAPI3SchemaToHumaV2Schema(arg) + tool.Arguments.Properties["requestBodyContent"] = openAPI3SchemaToJSONSchema(arg) break } @@ -373,22 +373,27 @@ func parseServer(server *openapi3.Server) (string, error) { return s, nil } -// openAPI3SchemaToHumaV2Schema converts an openapi3.Schema to a humav2.Schema -func openAPI3SchemaToHumaV2Schema(schema *openapi3.Schema) *humav2.Schema { +// openAPI3SchemaToJSONSchema converts an openapi3.Schema to a jsonschema.Schema +func openAPI3SchemaToJSONSchema(schema *openapi3.Schema) *jsonschema.Schema { if schema == nil { return nil } - result := &humav2.Schema{ + result := &jsonschema.Schema{ Title: schema.Title, Description: schema.Description, Format: schema.Format, - Nullable: schema.Nullable, } // Convert type if schema.Type != nil && len(*schema.Type) > 0 { - result.Type = (*schema.Type)[0] + result.Types = *schema.Type + } + + // In OpenAPI v3.0, there is a nullable field. + // In OpenAPI v3.1, nullable is specified by providing a separate type. 
+ if schema.Nullable && !slices.Contains(result.Types, "null") { + result.Types = append(result.Types, "null") } // Convert enum @@ -463,52 +468,52 @@ func openAPI3SchemaToHumaV2Schema(schema *openapi3.Schema) *humav2.Schema { // Convert properties if schema.Properties != nil { - result.Properties = make(map[string]*humav2.Schema, len(schema.Properties)) + result.Properties = make(map[string]*jsonschema.Schema, len(schema.Properties)) for name, propRef := range schema.Properties { if propRef != nil && propRef.Value != nil { - result.Properties[name] = openAPI3SchemaToHumaV2Schema(propRef.Value) + result.Properties[name] = openAPI3SchemaToJSONSchema(propRef.Value) } } } // Convert items if schema.Items != nil && schema.Items.Value != nil { - result.Items = openAPI3SchemaToHumaV2Schema(schema.Items.Value) + result.Items = openAPI3SchemaToJSONSchema(schema.Items.Value) } // Convert oneOf if schema.OneOf != nil { - result.OneOf = make([]*humav2.Schema, len(schema.OneOf)) + result.OneOf = make([]*jsonschema.Schema, len(schema.OneOf)) for i, oneOfRef := range schema.OneOf { if oneOfRef != nil && oneOfRef.Value != nil { - result.OneOf[i] = openAPI3SchemaToHumaV2Schema(oneOfRef.Value) + result.OneOf[i] = openAPI3SchemaToJSONSchema(oneOfRef.Value) } } } // Convert anyOf if schema.AnyOf != nil { - result.AnyOf = make([]*humav2.Schema, len(schema.AnyOf)) + result.AnyOf = make([]*jsonschema.Schema, len(schema.AnyOf)) for i, anyOfRef := range schema.AnyOf { if anyOfRef != nil && anyOfRef.Value != nil { - result.AnyOf[i] = openAPI3SchemaToHumaV2Schema(anyOfRef.Value) + result.AnyOf[i] = openAPI3SchemaToJSONSchema(anyOfRef.Value) } } } // Convert allOf if schema.AllOf != nil { - result.AllOf = make([]*humav2.Schema, len(schema.AllOf)) + result.AllOf = make([]*jsonschema.Schema, len(schema.AllOf)) for i, allOfRef := range schema.AllOf { if allOfRef != nil && allOfRef.Value != nil { - result.AllOf[i] = openAPI3SchemaToHumaV2Schema(allOfRef.Value) + result.AllOf[i] = 
openAPI3SchemaToJSONSchema(allOfRef.Value) } } } // Convert not if schema.Not != nil && schema.Not.Value != nil { - result.Not = openAPI3SchemaToHumaV2Schema(schema.Not.Value) + result.Not = openAPI3SchemaToJSONSchema(schema.Not.Value) } return result @@ -543,11 +548,11 @@ func getOpenAPIToolsRevamp(t *openapi3.T, source, targetToolName string) ([]type Parameters: types.Parameters{ Name: types.ToolNormalizer("get-schema-" + t.Info.Title), Description: fmt.Sprintf("Get the JSONSchema for the arguments for an operation for %s. You must do this before you run the operation.", t.Info.Title), - Arguments: &humav2.Schema{ - Type: humav2.TypeObject, - Properties: map[string]*humav2.Schema{ + Arguments: &jsonschema.Schema{ + Type: "object", + Properties: map[string]*jsonschema.Schema{ "operation": { - Type: humav2.TypeString, + Type: "string", Title: "operation", Description: "the name of the operation to get the schema for", Required: []string{"operation"}, @@ -567,17 +572,17 @@ func getOpenAPIToolsRevamp(t *openapi3.T, source, targetToolName string) ([]type Parameters: types.Parameters{ Name: types.ToolNormalizer("run-operation-" + t.Info.Title), Description: fmt.Sprintf("Run an operation for %s. 
You MUST call %s for the operation before you use this tool.", t.Info.Title, openapi.GetSchemaTool), - Arguments: &humav2.Schema{ - Type: humav2.TypeObject, - Properties: map[string]*humav2.Schema{ + Arguments: &jsonschema.Schema{ + Type: "object", + Properties: map[string]*jsonschema.Schema{ "operation": { - Type: humav2.TypeString, + Type: "string", Title: "operation", Description: "the name of the operation to run", Required: []string{"operation"}, }, "args": { - Type: humav2.TypeString, + Type: "string", Title: "args", Description: "the JSON string containing arguments; must match the JSONSchema for the operation", Required: []string{"args"}, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv2.golden b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden index d64c70ea..ebf29cb4 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv2.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden @@ -56,15 +56,17 @@ types.ToolSet{ Name: "listPets", Description: "List all pets", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{ + Properties: map[string]*jsonschema.Schema{ "limit": { - Type: "integer", Description: "How many items to return at one time (max 100)", - Format: "int32", - Properties: map[string]*huma.Schema{}, - AllOf: []*huma.Schema{}, + Types: []string{ + "integer", + }, + Properties: map[string]*jsonschema.Schema{}, + AllOf: []*jsonschema.Schema{}, + Format: "int32", }, }, }, @@ -87,15 +89,15 @@ types.ToolSet{ Name: "showPetById", Description: "Info for a specific pet", ModelName: "gpt-4o", - Arguments: &huma.Schema{ - Type: "object", - Properties: map[string]*huma.Schema{"petId": { - Type: "string", + Arguments: &jsonschema.Schema{ + Type: "object", + Required: []string{"petId"}, + Properties: map[string]*jsonschema.Schema{"petId": { Description: "The id of the pet to retrieve", - Properties: map[string]*huma.Schema{}, - AllOf: []*huma.Schema{}, + Types: 
[]string{"string"}, + Properties: map[string]*jsonschema.Schema{}, + AllOf: []*jsonschema.Schema{}, }}, - Required: []string{"petId"}, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden index d89e976e..ac32cc58 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden @@ -36,13 +36,13 @@ types.ToolSet{ Name: "getSchemaSwaggerPetstore", Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. You must do this before you run the operation.", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{ + Properties: map[string]*jsonschema.Schema{ "operation": { - Type: "string", Title: "operation", Description: "the name of the operation to get the schema for", + Type: "string", Required: []string{ "operation", }, @@ -86,19 +86,19 @@ types.ToolSet{ Name: "runOperationSwaggerPetstore", Description: "Run an operation for Swagger Petstore. 
You MUST call get-schema for the operation before you use this tool.", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{ + Properties: map[string]*jsonschema.Schema{ "args": { - Type: "string", Title: "args", Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Type: "string", Required: []string{"args"}, }, "operation": { - Type: "string", Title: "operation", Description: "the name of the operation to run", + Type: "string", Required: []string{"operation"}, }, }, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden index 710440cf..7e4a7993 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden @@ -37,23 +37,27 @@ types.ToolSet{ Name: "createPets", Description: "Create a pet", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{ + Properties: map[string]*jsonschema.Schema{ "requestBodyContent": { - Type: "object", - Properties: map[string]*huma.Schema{ - "id": { - Type: "integer", - Format: "int64", - }, - "name": {Type: "string"}, - "tag": {Type: "string"}, + Types: []string{ + "object", }, Required: []string{ "id", "name", }, + Properties: map[string]*jsonschema.Schema{ + "id": { + Types: []string{ + "integer", + }, + Format: "int64", + }, + "name": {Types: []string{"string"}}, + "tag": {Types: []string{"string"}}, + }, }, }, }, @@ -76,13 +80,13 @@ types.ToolSet{ Name: "listPets", Description: "List all pets", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{"limit": { - Type: "integer", + Properties: map[string]*jsonschema.Schema{"limit": { Description: "How many items to return at one time (max 100)", - Format: "int32", + Types: 
[]string{"integer"}, Maximum: valast.Ptr(float64(100)), + Format: "int32", }}, }, }, @@ -104,13 +108,13 @@ types.ToolSet{ Name: "showPetById", Description: "Info for a specific pet", ModelName: "gpt-4o", - Arguments: &huma.Schema{ - Type: "object", - Properties: map[string]*huma.Schema{"petId": { - Type: "string", + Arguments: &jsonschema.Schema{ + Type: "object", + Required: []string{"petId"}, + Properties: map[string]*jsonschema.Schema{"petId": { Description: "The id of the pet to retrieve", + Types: []string{"string"}, }}, - Required: []string{"petId"}, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden index c12c7834..5ebd8aa9 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden @@ -37,14 +37,16 @@ types.ToolSet{ Name: "get_pets", Description: "List all pets", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{ + Properties: map[string]*jsonschema.Schema{ "limit": { - Type: "integer", Description: "How many items to return at one time (max 100)", - Format: "int32", - Maximum: valast.Ptr(float64(100)), + Types: []string{ + "integer", + }, + Maximum: valast.Ptr(float64(100)), + Format: "int32", }, }, }, @@ -67,13 +69,13 @@ types.ToolSet{ Name: "get_pets_petId", Description: "Info for a specific pet", ModelName: "gpt-4o", - Arguments: &huma.Schema{ - Type: "object", - Properties: map[string]*huma.Schema{"petId": { - Type: "string", + Arguments: &jsonschema.Schema{ + Type: "object", + Required: []string{"petId"}, + 
Properties: map[string]*jsonschema.Schema{"petId": { Description: "The id of the pet to retrieve", + Types: []string{"string"}, }}, - Required: []string{"petId"}, }, }, Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, @@ -94,22 +96,24 @@ types.ToolSet{ Name: "post_pets", Description: "Create a pet", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{"requestBodyContent": { - Type: "object", - Properties: map[string]*huma.Schema{ - "id": { - Type: "integer", - Format: "int64", - }, - "name": {Type: "string"}, - "tag": {Type: "string"}, - }, + Properties: map[string]*jsonschema.Schema{"requestBodyContent": { + Types: []string{"object"}, Required: []string{ "id", "name", }, + Properties: map[string]*jsonschema.Schema{ + "id": { + Types: []string{ + "integer", + }, + Format: "int64", + }, + "name": {Types: []string{"string"}}, + "tag": {Types: []string{"string"}}, + }, }}, }, }, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden index d89e976e..ac32cc58 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden @@ -36,13 +36,13 @@ types.ToolSet{ Name: "getSchemaSwaggerPetstore", Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. 
You must do this before you run the operation.", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{ + Properties: map[string]*jsonschema.Schema{ "operation": { - Type: "string", Title: "operation", Description: "the name of the operation to get the schema for", + Type: "string", Required: []string{ "operation", }, @@ -86,19 +86,19 @@ types.ToolSet{ Name: "runOperationSwaggerPetstore", Description: "Run an operation for Swagger Petstore. You MUST call get-schema for the operation before you use this tool.", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{ + Properties: map[string]*jsonschema.Schema{ "args": { - Type: "string", Title: "args", Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Type: "string", Required: []string{"args"}, }, "operation": { - Type: "string", Title: "operation", Description: "the name of the operation to run", + Type: "string", Required: []string{"operation"}, }, }, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden index d89e976e..ac32cc58 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden @@ -36,13 +36,13 @@ types.ToolSet{ Name: "getSchemaSwaggerPetstore", Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. 
You must do this before you run the operation.", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{ + Properties: map[string]*jsonschema.Schema{ "operation": { - Type: "string", Title: "operation", Description: "the name of the operation to get the schema for", + Type: "string", Required: []string{ "operation", }, @@ -86,19 +86,19 @@ types.ToolSet{ Name: "runOperationSwaggerPetstore", Description: "Run an operation for Swagger Petstore. You MUST call get-schema for the operation before you use this tool.", ModelName: "gpt-4o", - Arguments: &huma.Schema{ + Arguments: &jsonschema.Schema{ Type: "object", - Properties: map[string]*huma.Schema{ + Properties: map[string]*jsonschema.Schema{ "args": { - Type: "string", Title: "args", Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Type: "string", Required: []string{"args"}, }, "operation": { - Type: "string", Title: "operation", Description: "the name of the operation to run", + Type: "string", Required: []string{"operation"}, }, }, diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index 086f74bb..9469db7a 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -10,10 +10,10 @@ import ( "strings" "sync" - humav2 "github.com/danielgtaylor/huma/v2" "github.com/gptscript-ai/gptscript/pkg/hash" "github.com/gptscript-ai/gptscript/pkg/mvl" "github.com/gptscript-ai/gptscript/pkg/types" + "github.com/modelcontextprotocol/go-sdk/jsonschema" nmcp "github.com/nanobot-ai/nanobot/pkg/mcp" ) @@ -196,7 +196,7 @@ func (l *Local) sessionToTools(ctx context.Context, session *Session, toolName s continue } - var schema humav2.Schema + var schema jsonschema.Schema schemaData, err := json.Marshal(tool.InputSchema) if err != nil { diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 69e6621d..7715c657 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -11,7 +11,6 @@ import ( "strings" 
"time" - humav2 "github.com/danielgtaylor/huma/v2" openai "github.com/gptscript-ai/chat-completion-client" "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/counter" @@ -406,7 +405,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques var params any = tool.Function.Parameters if tool.Function.Parameters == nil || len(tool.Function.Parameters.Properties) == 0 { params = map[string]any{ - "type": humav2.TypeObject, + "type": "object", "properties": map[string]any{}, } } diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index e7ec287d..3d26d9cc 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -10,8 +10,8 @@ import ( "strconv" "strings" - humav2 "github.com/danielgtaylor/huma/v2" "github.com/gptscript-ai/gptscript/pkg/types" + "github.com/modelcontextprotocol/go-sdk/jsonschema" ) var ( @@ -54,9 +54,9 @@ func csv(line string) (result []string) { func addArg(line string, tool *types.Tool) error { if tool.Arguments == nil { - tool.Arguments = &humav2.Schema{ + tool.Arguments = &jsonschema.Schema{ Type: "object", - Properties: make(map[string]*humav2.Schema, 1), + Properties: make(map[string]*jsonschema.Schema, 1), } } @@ -65,7 +65,7 @@ func addArg(line string, tool *types.Tool) error { return fmt.Errorf("invalid arg format: %s", line) } - tool.Arguments.Properties[key] = &humav2.Schema{ + tool.Arguments.Properties[key] = &jsonschema.Schema{ Description: strings.TrimSpace(value), Type: "string", } diff --git a/pkg/system/prompt.go b/pkg/system/prompt.go index a4fe5f26..04497854 100644 --- a/pkg/system/prompt.go +++ b/pkg/system/prompt.go @@ -5,7 +5,7 @@ import ( "os" "strings" - humav2 "github.com/danielgtaylor/huma/v2" + "github.com/modelcontextprotocol/go-sdk/jsonschema" ) // Suffix is default suffix of gptscript files @@ -26,9 +26,9 @@ You don't move to the next step until you have a result. 
// to just send pure text but the interface required JSON (as that is the fundamental interface of tools in OpenAI) var DefaultPromptParameter = "defaultPromptParameter" -var DefaultToolSchema = humav2.Schema{ +var DefaultToolSchema = jsonschema.Schema{ Type: "object", - Properties: map[string]*humav2.Schema{ + Properties: map[string]*jsonschema.Schema{ DefaultPromptParameter: { Description: "Prompt to send to the tool. This may be an instruction or question.", Type: "string", @@ -36,9 +36,9 @@ var DefaultToolSchema = humav2.Schema{ }, } -var DefaultChatSchema = humav2.Schema{ +var DefaultChatSchema = jsonschema.Schema{ Type: "object", - Properties: map[string]*humav2.Schema{ + Properties: map[string]*jsonschema.Schema{ DefaultPromptParameter: { Description: "Prompt to send to the assistant. This may be an instruction or question.", Type: "string", diff --git a/pkg/tests/runner2_test.go b/pkg/tests/runner2_test.go index 80131245..9668a98a 100644 --- a/pkg/tests/runner2_test.go +++ b/pkg/tests/runner2_test.go @@ -302,16 +302,16 @@ name: mcp "modelName": "gpt-4o", "internalPrompt": null, "arguments": { - "properties": { - "insight": { - "description": "Business insight discovered from data analysis", - "type": "string" - } - }, + "type": "object", "required": [ "insight" ], - "type": "object" + "properties": { + "insight": { + "type": "string", + "description": "Business insight discovered from data analysis" + } + } }, "instructions": "#!sys.mcp.invoke.append_insight e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:append_insight", @@ -335,16 +335,16 @@ name: mcp "modelName": "gpt-4o", "internalPrompt": null, "arguments": { - "properties": { - "query": { - "description": "CREATE TABLE SQL statement", - "type": "string" - } - }, + "type": "object", "required": [ "query" ], - "type": "object" + "properties": { + "query": { + "type": "string", + "description": "CREATE TABLE SQL statement" + } + } }, "instructions": 
"#!sys.mcp.invoke.create_table e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:create_table", @@ -368,16 +368,16 @@ name: mcp "modelName": "gpt-4o", "internalPrompt": null, "arguments": { - "properties": { - "table_name": { - "description": "Name of the table to describe", - "type": "string" - } - }, + "type": "object", "required": [ "table_name" ], - "type": "object" + "properties": { + "table_name": { + "type": "string", + "description": "Name of the table to describe" + } + } }, "instructions": "#!sys.mcp.invoke.describe_table e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:describe_table", @@ -494,16 +494,16 @@ name: mcp "modelName": "gpt-4o", "internalPrompt": null, "arguments": { - "properties": { - "query": { - "description": "SELECT SQL query to execute", - "type": "string" - } - }, + "type": "object", "required": [ "query" ], - "type": "object" + "properties": { + "query": { + "type": "string", + "description": "SELECT SQL query to execute" + } + } }, "instructions": "#!sys.mcp.invoke.read_query e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:read_query", @@ -527,16 +527,16 @@ name: mcp "modelName": "gpt-4o", "internalPrompt": null, "arguments": { - "properties": { - "query": { - "description": "SQL query to execute", - "type": "string" - } - }, + "type": "object", "required": [ "query" ], - "type": "object" + "properties": { + "query": { + "type": "string", + "description": "SQL query to execute" + } + } }, "instructions": "#!sys.mcp.invoke.write_query e592cc0c9483290685611ba70bd8595829cc794f7eae0419eabb3388bf0d3529", "id": "inline:write_query", diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index bb1193ea..bda3a5b6 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -264,13 +264,13 @@ func TestSubChat(t *testing.T) { "toolID": "testdata/TestSubChat/test.gpt:chatbot", "name": "chatbot", "parameters": { + "type": 
"object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } @@ -389,13 +389,13 @@ func TestSubChat(t *testing.T) { "toolID": "testdata/TestSubChat/test.gpt:chatbot", "name": "chatbot", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestAgentOnly/call1.golden b/pkg/tests/testdata/TestAgentOnly/call1.golden index b63c6fd3..6fadf4ed 100644 --- a/pkg/tests/testdata/TestAgentOnly/call1.golden +++ b/pkg/tests/testdata/TestAgentOnly/call1.golden @@ -7,13 +7,13 @@ "toolID": "testdata/TestAgentOnly/test.gpt:agent2", "name": "agent2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestAgentOnly/call2.golden b/pkg/tests/testdata/TestAgentOnly/call2.golden index 7f6b155b..909faf53 100644 --- a/pkg/tests/testdata/TestAgentOnly/call2.golden +++ b/pkg/tests/testdata/TestAgentOnly/call2.golden @@ -7,13 +7,13 @@ "toolID": "testdata/TestAgentOnly/test.gpt:agent3", "name": "agent3", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. 
This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -22,13 +22,13 @@ "toolID": "testdata/TestAgentOnly/test.gpt:agent1", "name": "agent1", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestAgentOnly/step1.golden b/pkg/tests/testdata/TestAgentOnly/step1.golden index 2cda2025..9bf8e319 100644 --- a/pkg/tests/testdata/TestAgentOnly/step1.golden +++ b/pkg/tests/testdata/TestAgentOnly/step1.golden @@ -15,13 +15,13 @@ "toolID": "testdata/TestAgentOnly/test.gpt:agent2", "name": "agent2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } @@ -99,13 +99,13 @@ "toolID": "testdata/TestAgentOnly/test.gpt:agent3", "name": "agent3", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -114,13 +114,13 @@ "toolID": "testdata/TestAgentOnly/test.gpt:agent1", "name": "agent1", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. 
This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestAgents/call1.golden b/pkg/tests/testdata/TestAgents/call1.golden index d3c4a86d..be85da0e 100644 --- a/pkg/tests/testdata/TestAgents/call1.golden +++ b/pkg/tests/testdata/TestAgents/call1.golden @@ -7,13 +7,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -22,13 +22,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestAgents/call2.golden b/pkg/tests/testdata/TestAgents/call2.golden index 950ad2ea..b0c301f8 100644 --- a/pkg/tests/testdata/TestAgents/call2.golden +++ b/pkg/tests/testdata/TestAgents/call2.golden @@ -7,13 +7,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." 
} - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestAgents/call3.golden b/pkg/tests/testdata/TestAgents/call3.golden index 5b1638e0..237c6c84 100644 --- a/pkg/tests/testdata/TestAgents/call3.golden +++ b/pkg/tests/testdata/TestAgents/call3.golden @@ -7,13 +7,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent3", "name": "agent3", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -22,13 +22,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestAgents/step1.golden b/pkg/tests/testdata/TestAgents/step1.golden index 72e01114..d97b6495 100644 --- a/pkg/tests/testdata/TestAgents/step1.golden +++ b/pkg/tests/testdata/TestAgents/step1.golden @@ -15,13 +15,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -30,13 +30,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. 
This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } @@ -110,13 +110,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent2", "name": "agent2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } @@ -181,13 +181,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent3", "name": "agent3", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -196,13 +196,13 @@ "toolID": "testdata/TestAgents/test.gpt:agent1", "name": "agent1", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestAsterick/call1.golden b/pkg/tests/testdata/TestAsterick/call1.golden index 3f2fa0b1..d741da5d 100644 --- a/pkg/tests/testdata/TestAsterick/call1.golden +++ b/pkg/tests/testdata/TestAsterick/call1.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestAsterick/other.gpt:afoo", "name": "afoo", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. 
This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -21,13 +21,13 @@ "toolID": "testdata/TestAsterick/other.gpt:a", "name": "a", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestCase/call1.golden b/pkg/tests/testdata/TestCase/call1.golden index 1e6d76a5..c9bbcbb2 100644 --- a/pkg/tests/testdata/TestCase/call1.golden +++ b/pkg/tests/testdata/TestCase/call1.golden @@ -7,13 +7,13 @@ "name": "Bob", "description": "I'm Bob, a friendly guy.", "parameters": { + "type": "object", "properties": { "question": { - "description": "The question to ask Bob.", - "type": "string" + "type": "string", + "description": "The question to ask Bob." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestCase2/call1.golden b/pkg/tests/testdata/TestCase2/call1.golden index d9b446d0..fc7a5728 100644 --- a/pkg/tests/testdata/TestCase2/call1.golden +++ b/pkg/tests/testdata/TestCase2/call1.golden @@ -7,13 +7,13 @@ "name": "bob", "description": "I'm Bob, a friendly guy.", "parameters": { + "type": "object", "properties": { "question": { - "description": "The question to ask Bob.", - "type": "string" + "type": "string", + "description": "The question to ask Bob." 
} - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestContextSubChat/call1.golden b/pkg/tests/testdata/TestContextSubChat/call1.golden index 225401db..fd641ecc 100644 --- a/pkg/tests/testdata/TestContextSubChat/call1.golden +++ b/pkg/tests/testdata/TestContextSubChat/call1.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestContextSubChat/test.gpt:chatbot", "name": "chatbot", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestContextSubChat/call2.golden b/pkg/tests/testdata/TestContextSubChat/call2.golden index a6cf25c6..b5f30991 100644 --- a/pkg/tests/testdata/TestContextSubChat/call2.golden +++ b/pkg/tests/testdata/TestContextSubChat/call2.golden @@ -8,13 +8,13 @@ "name": "chatFinish", "description": "Concludes the conversation. This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/call1.golden b/pkg/tests/testdata/TestDualSubChat/call1.golden index 2baf798a..52ab033a 100644 --- a/pkg/tests/testdata/TestDualSubChat/call1.golden +++ b/pkg/tests/testdata/TestDualSubChat/call1.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. 
This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -21,13 +21,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/call2.golden b/pkg/tests/testdata/TestDualSubChat/call2.golden index a6cf25c6..b5f30991 100644 --- a/pkg/tests/testdata/TestDualSubChat/call2.golden +++ b/pkg/tests/testdata/TestDualSubChat/call2.golden @@ -8,13 +8,13 @@ "name": "chatFinish", "description": "Concludes the conversation. This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/call3.golden b/pkg/tests/testdata/TestDualSubChat/call3.golden index ddcc81c9..ca431fc8 100644 --- a/pkg/tests/testdata/TestDualSubChat/call3.golden +++ b/pkg/tests/testdata/TestDualSubChat/call3.golden @@ -8,13 +8,13 @@ "name": "chatFinish", "description": "Concludes the conversation. 
This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/call4.golden b/pkg/tests/testdata/TestDualSubChat/call4.golden index 600e3ba5..580fdb86 100644 --- a/pkg/tests/testdata/TestDualSubChat/call4.golden +++ b/pkg/tests/testdata/TestDualSubChat/call4.golden @@ -8,13 +8,13 @@ "name": "chatFinish", "description": "Concludes the conversation. This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/call5.golden b/pkg/tests/testdata/TestDualSubChat/call5.golden index 54823934..de6a4a77 100644 --- a/pkg/tests/testdata/TestDualSubChat/call5.golden +++ b/pkg/tests/testdata/TestDualSubChat/call5.golden @@ -8,13 +8,13 @@ "name": "chatFinish", "description": "Concludes the conversation. 
This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/call6.golden b/pkg/tests/testdata/TestDualSubChat/call6.golden index a9a8a1c4..d3bbb31d 100644 --- a/pkg/tests/testdata/TestDualSubChat/call6.golden +++ b/pkg/tests/testdata/TestDualSubChat/call6.golden @@ -8,13 +8,13 @@ "name": "chatFinish", "description": "Concludes the conversation. This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/call7.golden b/pkg/tests/testdata/TestDualSubChat/call7.golden index ff19a0e8..de734587 100644 --- a/pkg/tests/testdata/TestDualSubChat/call7.golden +++ b/pkg/tests/testdata/TestDualSubChat/call7.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." 
} - }, - "type": "object" + } } } }, @@ -21,13 +21,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/step1.golden b/pkg/tests/testdata/TestDualSubChat/step1.golden index f29dfd60..b866f976 100644 --- a/pkg/tests/testdata/TestDualSubChat/step1.golden +++ b/pkg/tests/testdata/TestDualSubChat/step1.golden @@ -14,13 +14,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -29,13 +29,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } @@ -135,13 +135,13 @@ "name": "chatFinish", "description": "Concludes the conversation. 
This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } @@ -200,13 +200,13 @@ "name": "chatFinish", "description": "Concludes the conversation. This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/step2.golden b/pkg/tests/testdata/TestDualSubChat/step2.golden index 830a8e7c..8876c290 100644 --- a/pkg/tests/testdata/TestDualSubChat/step2.golden +++ b/pkg/tests/testdata/TestDualSubChat/step2.golden @@ -14,13 +14,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -29,13 +29,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." 
} - }, - "type": "object" + } } } } @@ -142,13 +142,13 @@ "name": "chatFinish", "description": "Concludes the conversation. This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestDualSubChat/step3.golden b/pkg/tests/testdata/TestDualSubChat/step3.golden index 4f3b415a..1876df5b 100644 --- a/pkg/tests/testdata/TestDualSubChat/step3.golden +++ b/pkg/tests/testdata/TestDualSubChat/step3.golden @@ -14,13 +14,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot", "name": "chatbot", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -29,13 +29,13 @@ "toolID": "testdata/TestDualSubChat/test.gpt:chatbot2", "name": "chatbot2", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." } - }, - "type": "object" + } } } } @@ -142,13 +142,13 @@ "name": "chatFinish", "description": "Concludes the conversation. 
This can not be used to ask a question.", "parameters": { + "type": "object", "properties": { "return": { - "description": "The instructed value to return or a summary of the dialog if no value is instructed", - "type": "string" + "type": "string", + "description": "The instructed value to return or a summary of the dialog if no value is instructed" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestExport/call1.golden b/pkg/tests/testdata/TestExport/call1.golden index b700ee55..8b663ff3 100644 --- a/pkg/tests/testdata/TestExport/call1.golden +++ b/pkg/tests/testdata/TestExport/call1.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestExport/parent.gpt:frommain", "name": "frommain", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -21,13 +21,13 @@ "toolID": "testdata/TestExport/sub/child.gpt:transient", "name": "transient", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -36,13 +36,13 @@ "toolID": "testdata/TestExport/parent.gpt:parent-local", "name": "parentLocal", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." 
} - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestExport/call3.golden b/pkg/tests/testdata/TestExport/call3.golden index d2abca0c..181980aa 100644 --- a/pkg/tests/testdata/TestExport/call3.golden +++ b/pkg/tests/testdata/TestExport/call3.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestExport/parent.gpt:frommain", "name": "frommain", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -21,13 +21,13 @@ "toolID": "testdata/TestExport/sub/child.gpt:transient", "name": "transient", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -36,13 +36,13 @@ "toolID": "testdata/TestExport/parent.gpt:parent-local", "name": "parentLocal", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." 
} - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestExportContext/call1.golden b/pkg/tests/testdata/TestExportContext/call1.golden index 0ee8f9fe..7ba20676 100644 --- a/pkg/tests/testdata/TestExportContext/call1.golden +++ b/pkg/tests/testdata/TestExportContext/call1.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestExportContext/test.gpt:subtool", "name": "subtool", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -22,13 +22,13 @@ "name": "sampletool", "description": "sample", "parameters": { + "type": "object", "properties": { "foo": { - "description": "foo description", - "type": "string" + "type": "string", + "description": "foo description" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestSubChat/call1.golden b/pkg/tests/testdata/TestSubChat/call1.golden index 0d906395..f0e0b491 100644 --- a/pkg/tests/testdata/TestSubChat/call1.golden +++ b/pkg/tests/testdata/TestSubChat/call1.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestSubChat/test.gpt:chatbot", "name": "chatbot", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the assistant. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the assistant. This may be an instruction or question." 
} - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestSysContext/call1.golden b/pkg/tests/testdata/TestSysContext/call1.golden index 4c9c51d0..f1926c33 100644 --- a/pkg/tests/testdata/TestSysContext/call1.golden +++ b/pkg/tests/testdata/TestSysContext/call1.golden @@ -7,13 +7,13 @@ "toolID": "testdata/TestSysContext/file.gpt:I am Superman Agent", "name": "iAmSuperman", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestSysContext/step1.golden b/pkg/tests/testdata/TestSysContext/step1.golden index 426e5991..2755652a 100644 --- a/pkg/tests/testdata/TestSysContext/step1.golden +++ b/pkg/tests/testdata/TestSysContext/step1.golden @@ -15,13 +15,13 @@ "toolID": "testdata/TestSysContext/file.gpt:I am Superman Agent", "name": "iAmSuperman", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestToolAs/call1.golden b/pkg/tests/testdata/TestToolAs/call1.golden index 55796fea..758f9cca 100644 --- a/pkg/tests/testdata/TestToolAs/call1.golden +++ b/pkg/tests/testdata/TestToolAs/call1.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestToolAs/test.gpt:infile", "name": "local", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. 
This may be an instruction or question." } - }, - "type": "object" + } } } }, @@ -21,13 +21,13 @@ "toolID": "testdata/TestToolAs/other.gpt:", "name": "remote", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestToolRefAll/call1.golden b/pkg/tests/testdata/TestToolRefAll/call1.golden index 9289affa..2d7a7b08 100644 --- a/pkg/tests/testdata/TestToolRefAll/call1.golden +++ b/pkg/tests/testdata/TestToolRefAll/call1.golden @@ -6,13 +6,13 @@ "toolID": "testdata/TestToolRefAll/test.gpt:tool", "name": "tool", "parameters": { + "type": "object", "properties": { "toolArg": { - "description": "stuff", - "type": "string" + "type": "string", + "description": "stuff" } - }, - "type": "object" + } } } }, @@ -21,13 +21,13 @@ "toolID": "testdata/TestToolRefAll/test.gpt:agentAssistant", "name": "agentAssistant", "parameters": { + "type": "object", "properties": { "defaultPromptParameter": { - "description": "Prompt to send to the tool. This may be an instruction or question.", - "type": "string" + "type": "string", + "description": "Prompt to send to the tool. This may be an instruction or question." 
} - }, - "type": "object" + } } } }, @@ -36,13 +36,13 @@ "toolID": "testdata/TestToolRefAll/test.gpt:none", "name": "none", "parameters": { + "type": "object", "properties": { "noneArg": { - "description": "stuff", - "type": "string" + "type": "string", + "description": "stuff" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestToolsChange/call1.golden b/pkg/tests/testdata/TestToolsChange/call1.golden index 69ab3d03..8bbc8ed5 100644 --- a/pkg/tests/testdata/TestToolsChange/call1.golden +++ b/pkg/tests/testdata/TestToolsChange/call1.golden @@ -8,13 +8,13 @@ "name": "ls", "description": "Lists the contents of a directory", "parameters": { + "type": "object", "properties": { "dir": { - "description": "The directory to list", - "type": "string" + "type": "string", + "description": "The directory to list" } - }, - "type": "object" + } } } }, @@ -24,13 +24,13 @@ "name": "read", "description": "Reads the contents of a file. Can only read plain text files, not binary files", "parameters": { + "type": "object", "properties": { "filename": { - "description": "The name of the file to read", - "type": "string" + "type": "string", + "description": "The name of the file to read" } - }, - "type": "object" + } } } }, @@ -40,17 +40,17 @@ "name": "write", "description": "Write the contents to a file", "parameters": { + "type": "object", "properties": { "content": { - "description": "The content to write", - "type": "string" + "type": "string", + "description": "The content to write" }, "filename": { - "description": "The name of the file to write to", - "type": "string" + "type": "string", + "description": "The name of the file to write to" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestToolsChange/call2.golden b/pkg/tests/testdata/TestToolsChange/call2.golden index ad86b7ce..4abdfddf 100644 --- a/pkg/tests/testdata/TestToolsChange/call2.golden +++ b/pkg/tests/testdata/TestToolsChange/call2.golden @@ -8,13 +8,13 @@ "name": "ls", 
"description": "Lists the contents of a directory", "parameters": { + "type": "object", "properties": { "dir": { - "description": "The directory to list", - "type": "string" + "type": "string", + "description": "The directory to list" } - }, - "type": "object" + } } } }, @@ -24,17 +24,17 @@ "name": "write", "description": "Write the contents to a file", "parameters": { + "type": "object", "properties": { "content": { - "description": "The content to write", - "type": "string" + "type": "string", + "description": "The content to write" }, "filename": { - "description": "The name of the file to write to", - "type": "string" + "type": "string", + "description": "The name of the file to write to" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestToolsChange/step1.golden b/pkg/tests/testdata/TestToolsChange/step1.golden index e26862ae..2274f8b0 100644 --- a/pkg/tests/testdata/TestToolsChange/step1.golden +++ b/pkg/tests/testdata/TestToolsChange/step1.golden @@ -16,13 +16,13 @@ "name": "ls", "description": "Lists the contents of a directory", "parameters": { + "type": "object", "properties": { "dir": { - "description": "The directory to list", - "type": "string" + "type": "string", + "description": "The directory to list" } - }, - "type": "object" + } } } }, @@ -32,13 +32,13 @@ "name": "read", "description": "Reads the contents of a file. 
Can only read plain text files, not binary files", "parameters": { + "type": "object", "properties": { "filename": { - "description": "The name of the file to read", - "type": "string" + "type": "string", + "description": "The name of the file to read" } - }, - "type": "object" + } } } }, @@ -48,17 +48,17 @@ "name": "write", "description": "Write the contents to a file", "parameters": { + "type": "object", "properties": { "content": { - "description": "The content to write", - "type": "string" + "type": "string", + "description": "The content to write" }, "filename": { - "description": "The name of the file to write to", - "type": "string" + "type": "string", + "description": "The name of the file to write to" } - }, - "type": "object" + } } } } diff --git a/pkg/tests/testdata/TestToolsChange/step2.golden b/pkg/tests/testdata/TestToolsChange/step2.golden index 9c9dbad7..ef99f90e 100644 --- a/pkg/tests/testdata/TestToolsChange/step2.golden +++ b/pkg/tests/testdata/TestToolsChange/step2.golden @@ -16,13 +16,13 @@ "name": "ls", "description": "Lists the contents of a directory", "parameters": { + "type": "object", "properties": { "dir": { - "description": "The directory to list", - "type": "string" + "type": "string", + "description": "The directory to list" } - }, - "type": "object" + } } } }, @@ -32,17 +32,17 @@ "name": "write", "description": "Write the contents to a file", "parameters": { + "type": "object", "properties": { "content": { - "description": "The content to write", - "type": "string" + "type": "string", + "description": "The content to write" }, "filename": { - "description": "The name of the file to write to", - "type": "string" + "type": "string", + "description": "The name of the file to write to" } - }, - "type": "object" + } } } } diff --git a/pkg/types/completion.go b/pkg/types/completion.go index fbd2fb3b..fa7781e5 100644 --- a/pkg/types/completion.go +++ b/pkg/types/completion.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - humav2 
"github.com/danielgtaylor/huma/v2" + "github.com/modelcontextprotocol/go-sdk/jsonschema" ) type CompletionRequest struct { @@ -31,10 +31,10 @@ type ChatCompletionTool struct { } type CompletionFunctionDefinition struct { - ToolID string `json:"toolID,omitempty"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - Parameters *humav2.Schema `json:"parameters"` + ToolID string `json:"toolID,omitempty"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + Parameters *jsonschema.Schema `json:"parameters"` } // Chat message role defined by the OpenAI API. diff --git a/pkg/types/jsonschema.go b/pkg/types/jsonschema.go index b88e37b6..38b0e3df 100644 --- a/pkg/types/jsonschema.go +++ b/pkg/types/jsonschema.go @@ -1,17 +1,17 @@ package types -import humav2 "github.com/danielgtaylor/huma/v2" +import "github.com/modelcontextprotocol/go-sdk/jsonschema" -func ObjectSchema(kv ...string) *humav2.Schema { - s := &humav2.Schema{ - Type: humav2.TypeObject, - Properties: make(map[string]*humav2.Schema, len(kv)/2), +func ObjectSchema(kv ...string) *jsonschema.Schema { + s := &jsonschema.Schema{ + Type: "object", + Properties: make(map[string]*jsonschema.Schema, len(kv)/2), } for i, v := range kv { if i%2 == 1 { - s.Properties[kv[i-1]] = &humav2.Schema{ + s.Properties[kv[i-1]] = &jsonschema.Schema{ Description: v, - Type: humav2.TypeString, + Type: "string", } } } diff --git a/pkg/types/tool.go b/pkg/types/tool.go index c5346319..2edeefd6 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -9,9 +9,9 @@ import ( "sort" "strings" - humav2 "github.com/danielgtaylor/huma/v2" "github.com/google/shlex" "github.com/gptscript-ai/gptscript/pkg/system" + "github.com/modelcontextprotocol/go-sdk/jsonschema" "golang.org/x/exp/maps" ) @@ -120,33 +120,33 @@ func (p Program) SetBlocking() Program { type BuiltinFunc func(ctx context.Context, env []string, input string, progress chan<- string) (string, error) type Parameters struct { - 
Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - MaxTokens int `json:"maxTokens,omitempty"` - ModelName string `json:"modelName,omitempty"` - ModelProvider bool `json:"modelProvider,omitempty"` - JSONResponse bool `json:"jsonResponse,omitempty"` - Chat bool `json:"chat,omitempty"` - Temperature *float32 `json:"temperature,omitempty"` - Cache *bool `json:"cache,omitempty"` - InternalPrompt *bool `json:"internalPrompt"` - Arguments *humav2.Schema `json:"arguments,omitempty"` - Tools []string `json:"tools,omitempty"` - GlobalTools []string `json:"globalTools,omitempty"` - GlobalModelName string `json:"globalModelName,omitempty"` - Context []string `json:"context,omitempty"` - ExportContext []string `json:"exportContext,omitempty"` - Export []string `json:"export,omitempty"` - Agents []string `json:"agents,omitempty"` - Credentials []string `json:"credentials,omitempty"` - ExportCredentials []string `json:"exportCredentials,omitempty"` - InputFilters []string `json:"inputFilters,omitempty"` - ExportInputFilters []string `json:"exportInputFilters,omitempty"` - OutputFilters []string `json:"outputFilters,omitempty"` - ExportOutputFilters []string `json:"exportOutputFilters,omitempty"` - Blocking bool `json:"-"` - Stdin bool `json:"stdin,omitempty"` - Type ToolType `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + MaxTokens int `json:"maxTokens,omitempty"` + ModelName string `json:"modelName,omitempty"` + ModelProvider bool `json:"modelProvider,omitempty"` + JSONResponse bool `json:"jsonResponse,omitempty"` + Chat bool `json:"chat,omitempty"` + Temperature *float32 `json:"temperature,omitempty"` + Cache *bool `json:"cache,omitempty"` + InternalPrompt *bool `json:"internalPrompt"` + Arguments *jsonschema.Schema `json:"arguments,omitempty"` + Tools []string `json:"tools,omitempty"` + GlobalTools []string `json:"globalTools,omitempty"` + GlobalModelName string 
`json:"globalModelName,omitempty"` + Context []string `json:"context,omitempty"` + ExportContext []string `json:"exportContext,omitempty"` + Export []string `json:"export,omitempty"` + Agents []string `json:"agents,omitempty"` + Credentials []string `json:"credentials,omitempty"` + ExportCredentials []string `json:"exportCredentials,omitempty"` + InputFilters []string `json:"inputFilters,omitempty"` + ExportInputFilters []string `json:"exportInputFilters,omitempty"` + OutputFilters []string `json:"outputFilters,omitempty"` + ExportOutputFilters []string `json:"exportOutputFilters,omitempty"` + Blocking bool `json:"-"` + Stdin bool `json:"stdin,omitempty"` + Type ToolType `json:"type,omitempty"` } func (p Parameters) allExports() []string { From de7bebec1bf30f2328a2a7f0f71fe5133302547c Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 21 Jul 2025 12:56:59 -0400 Subject: [PATCH 261/270] chore: expose client session ID (#991) Signed-off-by: Donnie Adams --- pkg/mcp/client.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/mcp/client.go b/pkg/mcp/client.go index f4ecee15..a6563bd2 100644 --- a/pkg/mcp/client.go +++ b/pkg/mcp/client.go @@ -12,11 +12,13 @@ func (l *Local) Client(server ServerConfig, clientOpts ...nmcp.ClientOption) (*C return &Client{ Client: session.Client, + ID: session.ID, }, nil } type Client struct { *nmcp.Client + ID string } func (c *Client) Capabilities() nmcp.ServerCapabilities { From b609820bd5f5ef84a8e26fa2c98936e55474873b Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Wed, 6 Aug 2025 16:11:36 -0400 Subject: [PATCH 262/270] chore: bump default max tokens to 1M (#992) Signed-off-by: Grant Linville --- pkg/openai/count.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/openai/count.go b/pkg/openai/count.go index d8f2ca36..c37bdcfd 100644 --- a/pkg/openai/count.go +++ b/pkg/openai/count.go @@ -13,7 +13,7 @@ func init() { tiktoken.SetBpeLoader(tiktoken_loader.NewOfflineLoader()) } -const DefaultMaxTokens = 
128_000 +const DefaultMaxTokens = 1_000_000 // This is the limit for GPT-4.1 func decreaseTenPercent(maxTokens int) int { maxTokens = getBudget(maxTokens) From f83181d19b69eb354d1d699184b45d30289bc42a Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 7 Aug 2025 15:21:40 -0400 Subject: [PATCH 263/270] chore: lower default max tokens to 400k (#993) Signed-off-by: Grant Linville --- pkg/openai/count.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/openai/count.go b/pkg/openai/count.go index c37bdcfd..3c3de1a9 100644 --- a/pkg/openai/count.go +++ b/pkg/openai/count.go @@ -13,7 +13,7 @@ func init() { tiktoken.SetBpeLoader(tiktoken_loader.NewOfflineLoader()) } -const DefaultMaxTokens = 1_000_000 // This is the limit for GPT-4.1 +const DefaultMaxTokens = 400_000 // This is the limit for GPT-5 func decreaseTenPercent(maxTokens int) int { maxTokens = getBudget(maxTokens) From 3898d6c241ac0dac39ca770cffdb36fb26579701 Mon Sep 17 00:00:00 2001 From: Daishan Peng Date: Tue, 19 Aug 2025 13:40:44 -0700 Subject: [PATCH 264/270] Fix: add placeholder message when chat is aborted (#994) Signed-off-by: Daishan Peng --- pkg/openai/client.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 7715c657..3101e633 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -666,11 +666,17 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, }, }), nil } - stream, err := c.c.CreateChatCompletionStream(ctx, request, headers, retryOpts...) 
if err != nil { if errors.Is(err, context.Canceled) { - err = nil + return types.CompletionMessage{ + Content: []types.ContentPart{ + { + Text: "User aborted the chat before model could respond", + }, + }, + Role: types.CompletionMessageRoleTypeAssistant, + }, nil } return types.CompletionMessage{}, err } @@ -683,6 +689,11 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, for { response, err := stream.Recv() if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { + if len(partialMessage.Content) > 0 && partialMessage.Content[0].Text == "" { + // Place a text holder if LLM doesn't respond or user cancel the stream before it can produce any response. + // In anthropic models it will yield an error about non-empty message for assistant message + partialMessage.Content[0].Text = "User aborted the chat or chat finished before LLM can respond" + } // If the stream is finished, either because we got an EOF or the context was canceled, // then we're done. The cache won't save the response if the context was canceled. 
return partialMessage, c.cache.Store(ctx, c.cacheKey(request), partialMessage) From 9d5e31590cb6f37430bc2bb04c3bcfdf4d13dd74 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Mon, 25 Aug 2025 10:48:53 -0400 Subject: [PATCH 265/270] chore: bump nanobot to pickup session closing changes (#995) Signed-off-by: Donnie Adams --- go.mod | 25 ++++++++------- go.sum | 77 +++++++++++++++++------------------------------ pkg/mcp/loader.go | 6 ++-- 3 files changed, 43 insertions(+), 65 deletions(-) diff --git a/go.mod b/go.mod index 770c14e1..81fed391 100644 --- a/go.mod +++ b/go.mod @@ -2,16 +2,14 @@ module github.com/gptscript-ai/gptscript go 1.24.2 -toolchain go1.24.4 - require ( github.com/AlecAivazis/survey/v2 v2.3.7 github.com/BurntSushi/locker v0.0.0-20171006230638-a6e239ea1c69 - github.com/adrg/xdg v0.4.0 + github.com/adrg/xdg v0.5.3 github.com/chzyer/readline v1.5.1 github.com/docker/cli v26.0.0+incompatible github.com/docker/docker-credential-helpers v0.8.1 - github.com/fatih/color v1.17.0 + github.com/fatih/color v1.18.0 github.com/getkin/kin-openapi v0.132.0 github.com/go-git/go-git/v5 v5.13.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 @@ -21,12 +19,12 @@ require ( github.com/gptscript-ai/cmd v0.0.0-20250530150401-bc71fddf8070 github.com/gptscript-ai/go-gptscript v0.9.6-0.20250714170123-17ad44ae8c54 github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 - github.com/hexops/autogold/v2 v2.2.1 - github.com/hexops/valast v1.4.4 + github.com/hexops/autogold/v2 v2.3.0 + github.com/hexops/valast v1.5.0 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 github.com/mholt/archives v0.1.0 github.com/modelcontextprotocol/go-sdk v0.2.0 - github.com/nanobot-ai/nanobot v0.0.6-0.20250623174223-c75713af7a09 + github.com/nanobot-ai/nanobot v0.0.6-0.20250825141756-f61b8b0f41f8 github.com/pkoukk/tiktoken-go v0.1.7 github.com/pkoukk/tiktoken-go-loader v0.0.2-0.20240522064338-c17e8bc0f699 github.com/rs/cors v1.11.0 @@ -38,8 +36,8 @@ require ( 
github.com/tidwall/gjson v1.17.1 github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sync v0.15.0 - golang.org/x/term v0.32.0 + golang.org/x/sync v0.16.0 + golang.org/x/term v0.33.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.5.1 sigs.k8s.io/yaml v1.4.0 @@ -137,12 +135,13 @@ require ( github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.39.0 // indirect + golang.org/x/crypto v0.40.0 // indirect golang.org/x/mod v0.25.0 // indirect golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sys v0.34.0 // indirect + golang.org/x/text v0.27.0 // indirect golang.org/x/tools v0.34.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - mvdan.cc/gofumpt v0.6.0 // indirect + mvdan.cc/gofumpt v0.8.0 // indirect ) diff --git a/go.sum b/go.sum index e33f7f84..4a5eff71 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,8 @@ github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXx github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg= github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4= -github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= -github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= +github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= +github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ= github.com/alecthomas/assert/v2 v2.2.1 h1:XivOgYcduV98QCahG8T5XTezV5bylXe+lBxLG2K2ink= github.com/alecthomas/assert/v2 v2.2.1/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/chroma/v2 v2.8.0 
h1:w9WJUjFFmHHB2e8mRpL9jjy3alYDlU0QLDezj1xE264= @@ -133,13 +133,9 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/getkin/kin-openapi v0.132.0 h1:3ISeLMsQzcb5v26yeJrBcdTCEQTag36ZjaGk7MIRUwk= github.com/getkin/kin-openapi v0.132.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= @@ -158,6 +154,8 @@ github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1 github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= 
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= @@ -230,13 +228,13 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/hexops/autogold v0.8.1/go.mod h1:97HLDXyG23akzAoRYJh/2OBs3kd80eHyKPvZw0S5ZBY= github.com/hexops/autogold v1.3.1 h1:YgxF9OHWbEIUjhDbpnLhgVsjUDsiHDTyDfy2lrfdlzo= github.com/hexops/autogold v1.3.1/go.mod h1:sQO+mQUCVfxOKPht+ipDSkJ2SCJ7BNJVHZexsXqWMx4= -github.com/hexops/autogold/v2 v2.2.1 h1:JPUXuZQGkcQMv7eeDXuNMovjfoRYaa0yVcm+F3voaGY= -github.com/hexops/autogold/v2 v2.2.1/go.mod h1:IJwxtUfj1BGLm0YsR/k+dIxYi6xbeLjqGke2bzcOTMI= +github.com/hexops/autogold/v2 v2.3.0 h1:tObVFzC7WDIF2tT80Bo9p42mXlkqcyLKmIMghcjoTWE= +github.com/hexops/autogold/v2 v2.3.0/go.mod h1:e77HQw5vjubldctJpHjjDHr7KHUmrFc5KrWKFFieO7Q= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hexops/valast v1.4.3/go.mod h1:Iqx2kLj3Jn47wuXpj3wX40xn6F93QNFBHuiKBerkTGA= -github.com/hexops/valast v1.4.4 h1:rETyycw+/L2ZVJHHNxEBgh8KUn+87WugH9MxcEv9PGs= -github.com/hexops/valast v1.4.4/go.mod h1:Jcy1pNH7LNraVaAZDLyv21hHg2WBv9Nf9FL6fGxU7o4= +github.com/hexops/valast v1.5.0 h1:FBTuvVi0wjTngtXJRZXMbkN/Dn6DgsUsBwch2DUJU8Y= +github.com/hexops/valast v1.5.0/go.mod h1:Jcy1pNH7LNraVaAZDLyv21hHg2WBv9Nf9FL6fGxU7o4= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -290,8 +288,6 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= @@ -313,8 +309,8 @@ github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= -github.com/nanobot-ai/nanobot v0.0.6-0.20250623174223-c75713af7a09 h1:nMo9dQvmdetj+INyOvg37igNG1Q3nWzXCOnNRDDNv7M= -github.com/nanobot-ai/nanobot v0.0.6-0.20250623174223-c75713af7a09/go.mod h1:okGlfo6y6kP/mFLN4XpKkRIYzU9EXXjPO2KlcafbwrM= +github.com/nanobot-ai/nanobot v0.0.6-0.20250825141756-f61b8b0f41f8 h1:SZsity7OCSBRVnqfPMpmaSnaIFlMUm3z8sGED5C31XU= +github.com/nanobot-ai/nanobot v0.0.6-0.20250825141756-f61b8b0f41f8/go.mod h1:vKoxU5Fro4DuvHq2AsxjhNYF3/KRlAuHLFT+NZ9ns5w= github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAmxBiA= github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 
h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= @@ -359,9 +355,8 @@ github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -444,9 +439,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -477,8 +471,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -500,9 +492,6 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -510,6 +499,8 @@ golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -519,9 +510,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -545,7 
+535,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -557,22 +546,16 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term 
v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -584,9 +567,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -616,8 +598,6 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= -golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -680,9 +660,8 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= -mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= -mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo= -mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= +mvdan.cc/gofumpt v0.8.0 h1:nZUCeC2ViFaerTcYKstMmfysj6uhQrA2vJe+2vwGU6k= +mvdan.cc/gofumpt v0.8.0/go.mod h1:vEYnSzyGPmjvFkqJWtXkh79UwPWP9/HMxQdGEXZHjpg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/mcp/loader.go b/pkg/mcp/loader.go index 9469db7a..7ec6fb5c 100644 --- a/pkg/mcp/loader.go +++ b/pkg/mcp/loader.go @@ -142,7 +142,7 @@ func (l *Local) ShutdownServer(server ServerConfig) error { l.lock.Unlock() if session != nil && session.Client != nil { - session.Client.Session.Close() + session.Client.Session.Close(true) session.Client.Session.Wait() } @@ -169,7 +169,7 @@ func (l *Local) Close() error { var errs []error for id, session := range l.sessions { logger.Infof("closing MCP session %s", id) - session.Client.Session.Close() + session.Client.Session.Close(false) session.Client.Session.Wait() } @@ -310,7 +310,7 @@ func (l *Local) loadSession(server ServerConfig, serverName string, clientOpts . defer l.lock.Unlock() if existing, ok = l.sessions[id]; ok { - c.Session.Close() + c.Session.Close(true) return existing, nil } From f962ce4ca88425cdf8502e0fbfe10bba040efe99 Mon Sep 17 00:00:00 2001 From: Daishan Peng Date: Mon, 25 Aug 2025 23:14:04 -0700 Subject: [PATCH 266/270] Fix: only append abort message when error is cancelled (#996) Signed-off-by: Daishan Peng --- pkg/openai/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 3101e633..1a2f0968 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -689,7 +689,7 @@ func (c *Client) call(ctx context.Context, request openai.ChatCompletionRequest, for { response, err := stream.Recv() if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { - if len(partialMessage.Content) > 0 && partialMessage.Content[0].Text == "" { + if len(partialMessage.Content) > 0 && partialMessage.Content[0].Text == "" && errors.Is(err, context.Canceled) { // Place a text holder if LLM doesn't respond or user cancel the stream before it can produce any response. 
// In anthropic models it will yield an error about non-empty message for assistant message partialMessage.Content[0].Text = "User aborted the chat or chat finished before LLM can respond" From 7ec07e576f488d773c75d4e682262a4a6331f7fb Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 22 Oct 2025 09:20:04 -0400 Subject: [PATCH 267/270] chore: stop using default mux in SDK server (#997) This is essentially a hack. We need to be able to run the SDK server twice in Obot to fix some short-term dependency cycles. We are unable to do this if both use the default server mux. This change will use a separate mux for each. Signed-off-by: Donnie Adams --- pkg/sdkserver/server.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index 52e9ec1c..7b0e4c1a 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -126,10 +126,11 @@ func run(ctx context.Context, listener net.Listener, opts Options) error { } defer s.close() - s.addRoutes(http.DefaultServeMux) + mux := http.NewServeMux() + s.addRoutes(mux) httpServer := &http.Server{ - Handler: apply(http.DefaultServeMux, + Handler: apply(mux, contentType("application/json"), addRequestID, addLogger, From 7bd3fd64af7fd9a566afdebb15cec1e1f4e843b7 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 4 Nov 2025 12:09:04 -0500 Subject: [PATCH 268/270] chore: bump dependencies to pickup CVE fixes (#998) Signed-off-by: Donnie Adams --- Makefile | 4 +- go.mod | 40 +++++++++--------- go.sum | 79 ++++++++++++++++++------------------ pkg/types/args.go | 1 + pkg/types/completion.go | 1 + pkg/types/credential_test.go | 1 + pkg/types/jsonschema.go | 1 + pkg/types/log.go | 1 + pkg/types/prompt.go | 1 + pkg/types/prompt_test.go | 1 + pkg/types/set.go | 1 + pkg/types/tool.go | 1 + pkg/types/tool_test.go | 1 + pkg/types/toolname.go | 1 + pkg/types/toolname_test.go | 1 + pkg/types/toolstring.go | 1 + 16 files changed, 75 insertions(+), 61 deletions(-) diff --git 
a/Makefile b/Makefile index 284c94c9..80ed0356 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ smoke: build smoke: go test -v -tags='smoke' ./pkg/tests/smoke/... -GOLANGCI_LINT_VERSION ?= v2.1.2 +GOLANGCI_LINT_VERSION ?= v2.6.1 lint: if ! command -v golangci-lint &> /dev/null; then \ echo "Could not find golangci-lint, installing version $(GOLANGCI_LINT_VERSION)."; \ @@ -62,4 +62,4 @@ validate-docs: gen-docs ;fi gen-docs: - go run tools/gendocs/main.go \ No newline at end of file + go run tools/gendocs/main.go diff --git a/go.mod b/go.mod index 81fed391..e4369567 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/gptscript-ai/gptscript -go 1.24.2 +go 1.25.3 require ( github.com/AlecAivazis/survey/v2 v2.3.7 @@ -22,7 +22,7 @@ require ( github.com/hexops/autogold/v2 v2.3.0 github.com/hexops/valast v1.5.0 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 - github.com/mholt/archives v0.1.0 + github.com/mholt/archives v0.1.5 github.com/modelcontextprotocol/go-sdk v0.2.0 github.com/nanobot-ai/nanobot v0.0.6-0.20250825141756-f61b8b0f41f8 github.com/pkoukk/tiktoken-go v0.1.7 @@ -36,8 +36,8 @@ require ( github.com/tidwall/gjson v1.17.1 github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sync v0.16.0 - golang.org/x/term v0.33.0 + golang.org/x/sync v0.17.0 + golang.org/x/term v0.34.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.5.1 sigs.k8s.io/yaml v1.4.0 @@ -50,13 +50,13 @@ require ( dario.cat/mergo v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ProtonMail/go-crypto v1.1.3 // indirect - github.com/STARRY-S/zip v0.2.1 // indirect + github.com/STARRY-S/zip v0.2.3 // indirect github.com/alecthomas/chroma/v2 v2.8.0 // indirect - github.com/andybalholm/brotli v1.1.1 // indirect + github.com/andybalholm/brotli v1.2.0 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/bodgit/plumbing 
v1.3.0 // indirect - github.com/bodgit/sevenzip v1.6.0 // indirect + github.com/bodgit/sevenzip v1.6.1 // indirect github.com/bodgit/windows v1.0.1 // indirect github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect github.com/charmbracelet/glamour v0.7.0 // indirect @@ -84,8 +84,6 @@ require ( github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/gookit/color v1.5.4 // indirect github.com/gorilla/css v1.0.0 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hexops/autogold v1.3.1 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect @@ -104,16 +102,18 @@ require ( github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/microcosm-cc/bluemonday v1.0.26 // indirect + github.com/mikelolasagasti/xz v1.0.1 // indirect + github.com/minio/minlz v1.0.1 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/muesli/reflow v0.3.0 // indirect github.com/muesli/termenv v0.16.0 // indirect github.com/nightlyone/lockfile v1.0.0 // indirect - github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect + github.com/nwaples/rardecode/v2 v2.2.0 // indirect github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect github.com/olekukonko/tablewriter v0.0.6-0.20230925090304-df64c4bbad77 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pterm/pterm v0.12.79 // indirect @@ -121,13 +121,13 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect 
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/skeema/knownhosts v1.3.0 // indirect - github.com/sorairolake/lzip-go v0.3.5 // indirect + github.com/sorairolake/lzip-go v0.3.8 // indirect github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e // indirect + github.com/spf13/afero v1.15.0 // indirect github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect - github.com/therootcompany/xz v1.0.1 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect - github.com/ulikunitz/xz v0.5.12 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect @@ -135,13 +135,13 @@ require ( github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.40.0 // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/net v0.41.0 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sys v0.34.0 // indirect - golang.org/x/text v0.27.0 // indirect - golang.org/x/tools v0.34.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect + golang.org/x/tools v0.36.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect mvdan.cc/gofumpt v0.8.0 // indirect ) diff --git a/go.sum b/go.sum index 4a5eff71..33a5e3c1 100644 --- a/go.sum +++ b/go.sum @@ -49,8 +49,8 @@ github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63n github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/ProtonMail/go-crypto v1.1.3 
h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= -github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg= -github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4= +github.com/STARRY-S/zip v0.2.3 h1:luE4dMvRPDOWQdeDdUxUoZkzUIpTccdKdhHHsQJ1fm4= +github.com/STARRY-S/zip v0.2.3/go.mod h1:lqJ9JdeRipyOQJrYSOtpNAiaesFO6zVDsE8GIGFaoSk= github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ= github.com/alecthomas/assert/v2 v2.2.1 h1:XivOgYcduV98QCahG8T5XTezV5bylXe+lBxLG2K2ink= @@ -59,8 +59,8 @@ github.com/alecthomas/chroma/v2 v2.8.0 h1:w9WJUjFFmHHB2e8mRpL9jjy3alYDlU0QLDezj1 github.com/alecthomas/chroma/v2 v2.8.0/go.mod h1:yrkMI9807G1ROx13fhe1v6PN2DDeaR73L3d+1nmYQtw= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= -github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= +github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= @@ -72,8 +72,8 @@ github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuP github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/bodgit/plumbing 
v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU= github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs= -github.com/bodgit/sevenzip v1.6.0 h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A= -github.com/bodgit/sevenzip v1.6.0/go.mod h1:zOBh9nJUof7tcrlqJFv1koWRrhz3LbDbUNngkuZxLMc= +github.com/bodgit/sevenzip v1.6.1 h1:kikg2pUMYC9ljU7W9SaqHXhym5HyKm8/M/jd31fYan4= +github.com/bodgit/sevenzip v1.6.1/go.mod h1:GVoYQbEVbOGT8n2pfqCIMRUaRjQ8F9oSqoBEqZh5fQ8= github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -216,11 +216,6 @@ github.com/gptscript-ai/go-gptscript v0.9.6-0.20250714170123-17ad44ae8c54 h1:9OA github.com/gptscript-ai/go-gptscript v0.9.6-0.20250714170123-17ad44ae8c54/go.mod h1:HLPvKBhDtsEkyyUWefJVhPpl98R3tZG6ps7+mQ+EKVI= github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9 h1:wQC8sKyeGA50WnCEG+Jo5FNRIkuX3HX8d3ubyWCCoI8= github.com/gptscript-ai/tui v0.0.0-20250419050840-5e79e16786c9/go.mod h1:iwHxuueg2paOak7zIg0ESBWx7A0wIHGopAratbgaPNY= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -297,10 +292,14 @@ 
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q= -github.com/mholt/archives v0.1.0/go.mod h1:j/Ire/jm42GN7h90F5kzj6hf6ZFzEH66de+hmjEKu+I= +github.com/mholt/archives v0.1.5 h1:Fh2hl1j7VEhc6DZs2DLMgiBNChUux154a1G+2esNvzQ= +github.com/mholt/archives v0.1.5/go.mod h1:3TPMmBLPsgszL+1As5zECTuKwKvIfj6YcwWPpeTAXF4= github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= +github.com/mikelolasagasti/xz v1.0.1 h1:Q2F2jX0RYJUG3+WsM+FJknv+6eVjsjXNDV0KJXZzkD0= +github.com/mikelolasagasti/xz v1.0.1/go.mod h1:muAirjiOUxPRXwm9HdDtB3uoRPrGnL85XHtokL9Hcgc= +github.com/minio/minlz v1.0.1 h1:OUZUzXcib8diiX+JYxyRLIdomyZYzHct6EShOKtQY2A= +github.com/minio/minlz v1.0.1/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= github.com/modelcontextprotocol/go-sdk v0.2.0 h1:PESNYOmyM1c369tRkzXLY5hHrazj8x9CY1Xu0fLCryM= github.com/modelcontextprotocol/go-sdk v0.2.0/go.mod h1:0sL9zUKKs2FTTkeCCVnKqbLJTw5TScefPAzojjU459E= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= @@ -313,8 +312,8 @@ github.com/nanobot-ai/nanobot v0.0.6-0.20250825141756-f61b8b0f41f8 h1:SZsity7OCS github.com/nanobot-ai/nanobot v0.0.6-0.20250825141756-f61b8b0f41f8/go.mod h1:vKoxU5Fro4DuvHq2AsxjhNYF3/KRlAuHLFT+NZ9ns5w= github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAmxBiA= github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= 
-github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= -github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= +github.com/nwaples/rardecode/v2 v2.2.0 h1:4ufPGHiNe1rYJxYfehALLjup4Ls3ck42CWwjKiOqu0A= +github.com/nwaples/rardecode/v2 v2.2.0/go.mod h1:7uz379lSxPe6j9nvzxUZ+n7mnJNgjsRNb6IbvGVHRmw= github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= @@ -325,8 +324,8 @@ github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -372,10 +371,12 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skeema/knownhosts v1.3.0 
h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= -github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg= -github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk= +github.com/sorairolake/lzip-go v0.3.8 h1:j5Q2313INdTA80ureWYRhX+1K78mUXfMoPZCw/ivWik= +github.com/sorairolake/lzip-go v0.3.8/go.mod h1:JcBqGMV0frlxwrsE9sMWXDjqn3EeVf0/54YPsw66qkU= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e h1:H+jDTUeF+SVd4ApwnSFoew8ZwGNRfgb9EsZc7LcocAg= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e/go.mod h1:VsUklG6OQo7Ctunu0gS3AtEOCEc2kMB6r5rKzxAes58= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -385,6 +386,8 @@ github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02n github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -395,8 +398,6 @@ 
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= -github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -406,8 +407,8 @@ github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhso github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= -github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= @@ -439,8 +440,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= -golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -471,8 +472,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -492,8 +493,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.3.0/go.mod 
h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -510,8 +511,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -546,16 +547,16 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -567,8 +568,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -598,8 +599,8 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/types/args.go b/pkg/types/args.go index fa9c82b2..62933de1 100644 --- a/pkg/types/args.go +++ b/pkg/types/args.go @@ -1,3 +1,4 @@ +//nolint:revive package types import ( diff --git a/pkg/types/completion.go b/pkg/types/completion.go index fa7781e5..bacdaab8 100644 
--- a/pkg/types/completion.go +++ b/pkg/types/completion.go @@ -1,3 +1,4 @@ +//nolint:revive package types import ( diff --git a/pkg/types/credential_test.go b/pkg/types/credential_test.go index 530b23f8..7f03e2d9 100644 --- a/pkg/types/credential_test.go +++ b/pkg/types/credential_test.go @@ -1,3 +1,4 @@ +//nolint:revive package types import ( diff --git a/pkg/types/jsonschema.go b/pkg/types/jsonschema.go index 38b0e3df..17df0692 100644 --- a/pkg/types/jsonschema.go +++ b/pkg/types/jsonschema.go @@ -1,3 +1,4 @@ +//nolint:revive package types import "github.com/modelcontextprotocol/go-sdk/jsonschema" diff --git a/pkg/types/log.go b/pkg/types/log.go index ba3ff8c5..de23ece8 100644 --- a/pkg/types/log.go +++ b/pkg/types/log.go @@ -1,3 +1,4 @@ +//nolint:revive package types import "github.com/gptscript-ai/gptscript/pkg/mvl" diff --git a/pkg/types/prompt.go b/pkg/types/prompt.go index f36ea566..42e6eaa5 100644 --- a/pkg/types/prompt.go +++ b/pkg/types/prompt.go @@ -1,3 +1,4 @@ +//nolint:revive package types import ( diff --git a/pkg/types/prompt_test.go b/pkg/types/prompt_test.go index f2d911ef..c081011e 100644 --- a/pkg/types/prompt_test.go +++ b/pkg/types/prompt_test.go @@ -1,3 +1,4 @@ +//nolint:revive package types import ( diff --git a/pkg/types/set.go b/pkg/types/set.go index 65b73d22..654fbd5f 100644 --- a/pkg/types/set.go +++ b/pkg/types/set.go @@ -1,3 +1,4 @@ +//nolint:revive package types type toolRefKey struct { diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 2edeefd6..42b08f62 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -1,3 +1,4 @@ +//nolint:revive package types import ( diff --git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index 1160b4f8..ca5172cd 100644 --- a/pkg/types/tool_test.go +++ b/pkg/types/tool_test.go @@ -1,3 +1,4 @@ +//nolint:revive package types import ( diff --git a/pkg/types/toolname.go b/pkg/types/toolname.go index 622e9bb5..c43c2e80 100644 --- a/pkg/types/toolname.go +++ b/pkg/types/toolname.go @@ -1,3 +1,4 @@ 
+//nolint:revive package types import ( diff --git a/pkg/types/toolname_test.go b/pkg/types/toolname_test.go index fc7a66ab..0233baab 100644 --- a/pkg/types/toolname_test.go +++ b/pkg/types/toolname_test.go @@ -1,3 +1,4 @@ +//nolint:revive package types import ( diff --git a/pkg/types/toolstring.go b/pkg/types/toolstring.go index 8d379f14..f9a69584 100644 --- a/pkg/types/toolstring.go +++ b/pkg/types/toolstring.go @@ -1,3 +1,4 @@ +//nolint:revive package types import ( From ea8f116e64944ffd607be76f802d0893feb9ca0f Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 4 Nov 2025 14:12:51 -0500 Subject: [PATCH 269/270] chore: change to Sonnet 3.7 for smoke tests (#999) The model we were using is no longer available. Signed-off-by: Donnie Adams --- .github/workflows/smoke.yaml | 8 +- .../claude-3-5-sonnet-20240620-expected.json | 619 ------------------ .../claude-3-7-sonnet-20250219-expected.json | 603 +++++++++++++++++ .../claude-3-5-sonnet-20240620-expected.json | 617 ----------------- .../claude-3-7-sonnet-20250219-expected.json | 601 +++++++++++++++++ 5 files changed, 1208 insertions(+), 1240 deletions(-) delete mode 100644 pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json create mode 100644 pkg/tests/smoke/testdata/Bob/claude-3-7-sonnet-20250219-expected.json delete mode 100644 pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json create mode 100644 pkg/tests/smoke/testdata/BobAsShell/claude-3-7-sonnet-20250219-expected.json diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index d3a40aa6..1a392a12 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -117,7 +117,7 @@ jobs: export PATH="$(pwd)/bin:${PATH}" make smoke - claude-3-5-sonnet-20240620: + claude-3-7-sonnet-20250219: needs: check-label if: ${{ needs.check-label.outputs.run_smoke_tests == 'true' }} runs-on: ubuntu-22.04 @@ -139,11 +139,11 @@ jobs: go-version: "1.21" - env: OPENAI_API_KEY: ${{ 
secrets.SMOKE_OPENAI_API_KEY }} - GPTSCRIPT_DEFAULT_MODEL: claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider + GPTSCRIPT_DEFAULT_MODEL: claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider ANTHROPIC_API_KEY: ${{ secrets.SMOKE_ANTHROPIC_API_KEY }} GPTSCRIPT_CREDENTIAL_OVERRIDE: "github.com/gptscript-ai/claude3-anthropic-provider/credential:ANTHROPIC_API_KEY" - name: Run smoke test for claude-3-5-sonnet-20240620 + name: Run smoke test for claude-3-7-sonnet-20250219 run: | - echo "Running smoke test for model claude-3-5-sonnet-20240620" + echo "Running smoke test for model claude-3-7-sonnet-20250219" export PATH="$(pwd)/bin:${PATH}" make smoke diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json deleted file mode 100644 index 52d975c5..00000000 --- a/pkg/tests/smoke/testdata/Bob/claude-3-5-sonnet-20240620-expected.json +++ /dev/null @@ -1,619 +0,0 @@ -[ - { - "time": "2024-10-14T18:59:12.228692-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-10-14T18:59:12.229038-04:00", - "callContext": { - "id": "1728946753", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-10-14T18:59:13.520962-04:00", - "type": 
"runStart", - "usage": {} - }, - { - "time": "2024-10-14T18:59:13.521331-04:00", - "callContext": { - "id": "1728946754", - "tool": { - "name": "Anthropic Claude3 Model Provider", - "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" - } - ] - }, - "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - 
"Path": "/", - "Name": "tool.gpt", - "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-10-14T18:59:14.541348-04:00", - "callContext": { - "id": "1728946754", - "tool": { - "name": "Anthropic Claude3 Model Provider", - "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" - } - ] - }, - "localTools": { - "anthropic claude3 model provider": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callFinish", - "usage": {}, - "content": "http://127.0.0.1:10258" - }, - { - "time": "2024-10-14T18:59:14.541518-04:00", - "type": "runFinish", - "usage": {} - }, - { - "time": "2024-10-14T18:59:14.541566-04:00", - "callContext": { - "id": "1728946753", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728946755", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T18:59:17.304351-04:00", - "callContext": { - "id": "1728946753", - "tool": { - 
"modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728946755", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - } - ], - "usage": {} - } - }, - { - "time": "2024-10-14T18:59:17.304441-04:00", - "callContext": { - "id": "1728946753", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolSubCalls": { - "toolu_01KtYnAwnQ2cyRieDu98Jopb": { - "toolID": "testdata/Bob/test.gpt:bob", - "input": "{\"question\": \"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": 
"2024-10-14T18:59:17.304485-04:00", - "callContext": { - "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728946753" - }, - "type": "callStart", - "usage": {}, - "content": "{\"question\": \"how are you doing\"}" - }, - { - "time": "2024-10-14T18:59:17.394841-04:00", - "callContext": { - "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728946753" - }, - "type": "callChat", - "chatCompletionId": "1728946756", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T18:59:18.202926-04:00", - "callContext": { - "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728946753" - }, - "type": "callChat", - "chatCompletionId": "1728946756", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": {} - } - }, - { - "time": "2024-10-14T18:59:18.202988-04:00", - "callContext": { - "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", - "id": "testdata/Bob/test.gpt:bob", - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728946753" - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-10-14T18:59:18.203022-04:00", - "callContext": { - "id": "1728946753", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-10-14T18:59:18.295164-04:00", - "callContext": { - "id": "1728946753", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728946757", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T18:59:19.737028-04:00", - "callContext": { - "id": "1728946753", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ 
- "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728946757", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" - } - ], - "usage": {} - } - }, - { - "time": "2024-10-14T18:59:19.737045-04:00", - "callContext": { - "id": "1728946753", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/Bob/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/Bob/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/Bob/test.gpt:", - "bob": "testdata/Bob/test.gpt:bob" - }, - "source": { - "location": "testdata/Bob/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/Bob" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
- }, - { - "time": "2024-10-14T18:59:19.737061-04:00", - "type": "runFinish", - "usage": {} - } -] diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-7-sonnet-20250219-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-7-sonnet-20250219-expected.json new file mode 100644 index 00000000..618c84d5 --- /dev/null +++ b/pkg/tests/smoke/testdata/Bob/claude-3-7-sonnet-20250219-expected.json @@ -0,0 +1,603 @@ +[ + { + "time": "2024-10-14T18:59:12.228692-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-10-14T18:59:12.229038-04:00", + "callContext": { + "id": "1728946753", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-10-14T18:59:13.520962-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-10-14T18:59:13.521331-04:00", + "callContext": { + "id": "1728946754", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" 
as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-10-14T18:59:14.541348-04:00", + "callContext": { + "id": "1728946754", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + 
"modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" + }, + "currentAgent": 
{}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:10258" + }, + { + "time": "2024-10-14T18:59:14.541518-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-10-14T18:59:14.541566-04:00", + "callContext": { + "id": "1728946753", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728946755", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T18:59:17.304351-04:00", + "callContext": { + "id": "1728946753", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": 
"1728946755", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + } + ], + "usage": {} + } + }, + { + "time": "2024-10-14T18:59:17.304441-04:00", + "callContext": { + "id": "1728946753", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolSubCalls": { + "toolu_01KtYnAwnQ2cyRieDu98Jopb": { + "toolID": "testdata/Bob/test.gpt:bob", + "input": "{\"question\": \"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-10-14T18:59:17.304485-04:00", + "callContext": { + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728946753" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"how are you doing\"}" + }, + { + "time": "2024-10-14T18:59:17.394841-04:00", + "callContext": { + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! 
I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728946753" + }, + "type": "callChat", + "chatCompletionId": "1728946756", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T18:59:18.202926-04:00", + "callContext": { + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728946753" + }, + "type": "callChat", + "chatCompletionId": "1728946756", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": {} + } + }, + { + "time": "2024-10-14T18:59:18.202988-04:00", + "callContext": { + "id": "toolu_01KtYnAwnQ2cyRieDu98Jopb", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "When asked how I am doing, respond with the following exactly: \"Thanks for asking ${QUESTION}! I'm doing great fellow friendly AI tool!\" with ${QUESTION} replaced with the question text as given.", + "id": "testdata/Bob/test.gpt:bob", + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728946753" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-10-14T18:59:18.203022-04:00", + "callContext": { + "id": "1728946753", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-10-14T18:59:18.295164-04:00", + "callContext": { + "id": "1728946753", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728946757", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T18:59:19.737028-04:00", + "callContext": { + "id": "1728946753", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + 
"instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728946757", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" + } + ], + "usage": {} + } + }, + { + "time": "2024-10-14T18:59:19.737045-04:00", + "callContext": { + "id": "1728946753", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/Bob/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/Bob/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/Bob/test.gpt:", + "bob": "testdata/Bob/test.gpt:bob" + }, + "source": { + "location": "testdata/Bob/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/Bob" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing! I'm doing great fellow friendly AI tool!" 
+ }, + { + "time": "2024-10-14T18:59:19.737061-04:00", + "type": "runFinish", + "usage": {} + } +] diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json deleted file mode 100644 index 22fe9514..00000000 --- a/pkg/tests/smoke/testdata/BobAsShell/claude-3-5-sonnet-20240620-expected.json +++ /dev/null @@ -1,617 +0,0 @@ -[ - { - "time": "2024-10-14T17:38:39.518668-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-10-14T17:38:39.519079-04:00", - "callContext": { - "id": "1728941920", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-10-14T17:38:40.155982-04:00", - "type": "runStart", - "usage": {} - }, - { - "time": "2024-10-14T17:38:40.156405-04:00", - "callContext": { - "id": "1728941921", - "tool": { - "name": "Anthropic Claude3 Model Provider", - "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your 
Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" - } - ] - }, - "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" - } - }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callStart", - "usage": {} - }, - { - "time": "2024-10-14T17:38:41.173004-04:00", - "callContext": { - "id": "1728941921", - "tool": { - "name": "Anthropic Claude3 Model Provider", - 
"description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "modelProvider": true, - "internalPrompt": null, - "credentials": [ - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" - ], - "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", - "toolMapping": { - "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ - { - "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" - } - ] - }, - "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" - }, - "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", - "lineNo": 1, - "repo": { - "VCS": "git", - "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", - "Path": "/", - "Name": "tool.gpt", - "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" - } - }, - "workingDir": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" - }, - "currentAgent": {}, - "inputContext": null, - "toolCategory": "provider", - "displayText": "Running sys.daemon" - }, - "type": "callFinish", - "usage": {}, - "content": "http://127.0.0.1:10787" - }, - { - "time": "2024-10-14T17:38:41.173175-04:00", - "type": "runFinish", - "usage": {} - }, - { - "time": "2024-10-14T17:38:41.173247-04:00", - "callContext": { - "id": "1728941920", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728941922", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T17:38:43.937061-04:00", - "callContext": { - "id": "1728941920", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": 
"testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728941922", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "toolCall": { - "index": 0, - "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", - "function": { - "name": "bob", - "arguments": "{\"question\": \"how are you doing\"}" - } - } - } - ], - "usage": {} - } - }, - { - "time": "2024-10-14T17:38:43.937155-04:00", - "callContext": { - "id": "1728941920", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolSubCalls": { - "toolu_01PQYSGxbwRLw8XuUUkgKvbe": { - "toolID": "testdata/BobAsShell/test.gpt:bob", - "input": "{\"question\": \"how are you doing\"}" - } - }, - "type": "callSubCalls", - "usage": {} - }, - { - "time": "2024-10-14T17:38:43.937193-04:00", - "callContext": { - "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": 
"string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728941920", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callStart", - "usage": {}, - "content": "{\"question\": \"how are you doing\"}" - }, - { - "time": "2024-10-14T17:38:43.938264-04:00", - "callContext": { - "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728941920", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1728941923", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": "2024-10-14T17:38:43.943625-04:00", - "callContext": { - "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", - "tool": { - "name": "bob", - "description": 
"I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728941920", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": "callChat", - "chatCompletionId": "1728941923", - "usage": {}, - "chatResponse": { - "usage": {} - } - }, - { - "time": "2024-10-14T17:38:43.943703-04:00", - "callContext": { - "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", - "tool": { - "name": "bob", - "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "arguments": { - "properties": { - "question": { - "description": "The question to ask Bob.", - "type": "string" - } - }, - "type": "object" - }, - "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", - "id": "testdata/BobAsShell/test.gpt:bob", - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 6 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null, - "toolName": "bob", - "parentID": "1728941920", - "displayText": "Running bob from testdata/BobAsShell/test.gpt" - }, - "type": 
"callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" - }, - { - "time": "2024-10-14T17:38:43.943766-04:00", - "callContext": { - "id": "1728941920", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "toolResults": 1, - "type": "callContinue", - "usage": {} - }, - { - "time": "2024-10-14T17:38:44.494388-04:00", - "callContext": { - "id": "1728941920", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728941924", - "usage": {}, - "chatRequest": { - "model": "", - "messages": null - } - }, - { - "time": 
"2024-10-14T17:38:45.659797-04:00", - "callContext": { - "id": "1728941920", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callChat", - "chatCompletionId": "1728941924", - "usage": {}, - "chatResponse": { - "role": "assistant", - "content": [ - { - "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
- } - ], - "usage": {} - } - }, - { - "time": "2024-10-14T17:38:45.659891-04:00", - "callContext": { - "id": "1728941920", - "tool": { - "modelName": "claude-3-5-sonnet-20240620 from github.com/gptscript-ai/claude3-anthropic-provider", - "internalPrompt": null, - "tools": [ - "bob" - ], - "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", - "id": "testdata/BobAsShell/test.gpt:", - "toolMapping": { - "bob": [ - { - "reference": "bob", - "toolID": "testdata/BobAsShell/test.gpt:bob" - } - ] - }, - "localTools": { - "": "testdata/BobAsShell/test.gpt:", - "bob": "testdata/BobAsShell/test.gpt:bob" - }, - "source": { - "location": "testdata/BobAsShell/test.gpt", - "lineNo": 1 - }, - "workingDir": "testdata/BobAsShell" - }, - "currentAgent": {}, - "inputContext": null - }, - "type": "callFinish", - "usage": {}, - "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" - }, - { - "time": "2024-10-14T17:38:45.659921-04:00", - "type": "runFinish", - "usage": {} - } -] diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-7-sonnet-20250219-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-7-sonnet-20250219-expected.json new file mode 100644 index 00000000..15d59a1f --- /dev/null +++ b/pkg/tests/smoke/testdata/BobAsShell/claude-3-7-sonnet-20250219-expected.json @@ -0,0 +1,601 @@ +[ + { + "time": "2024-10-14T17:38:39.518668-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-10-14T17:38:39.519079-04:00", + "callContext": { + "id": "1728941920", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": 
"testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-10-14T17:38:40.155982-04:00", + "type": "runStart", + "usage": {} + }, + { + "time": "2024-10-14T17:38:40.156405-04:00", + "callContext": { + "id": "1728941921", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callStart", + "usage": {} + }, + { + "time": "2024-10-14T17:38:41.173004-04:00", + "callContext": { + "id": "1728941921", + "tool": { + "name": "Anthropic Claude3 Model Provider", + "description": "Model provider for Anthropic hosted Claude3 models", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "modelProvider": true, + "internalPrompt": null, + "credentials": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" + ], + "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider", + "toolMapping": { + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ + { + "reference": "github.com/gptscript-ai/credential as 
github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/de2fada1c51a1dbb5c3e9ef268ea6740d1b52f80/tool.gpt:token" + } + ] + }, + "localTools": { + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt:Anthropic Claude3 Model Provider" + }, + "source": { + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344/tool.gpt", + "lineNo": 1, + "repo": { + "VCS": "git", + "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", + "Path": "/", + "Name": "tool.gpt", + "Revision": "ee5c02a9aeca5a1cbffcf569751e37432bfe0344" + } + }, + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/ee5c02a9aeca5a1cbffcf569751e37432bfe0344" + }, + "currentAgent": {}, + "inputContext": null, + "toolCategory": "provider", + "displayText": "Running sys.daemon" + }, + "type": "callFinish", + "usage": {}, + "content": "http://127.0.0.1:10787" + }, + { + "time": "2024-10-14T17:38:41.173175-04:00", + "type": "runFinish", + "usage": {} + }, + { + "time": "2024-10-14T17:38:41.173247-04:00", + "callContext": { + "id": "1728941920", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + 
"location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728941922", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T17:38:43.937061-04:00", + "callContext": { + "id": "1728941920", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728941922", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "toolCall": { + "index": 0, + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", + "function": { + "name": "bob", + "arguments": "{\"question\": \"how are you doing\"}" + } + } + } + ], + "usage": {} + } + }, + { + "time": "2024-10-14T17:38:43.937155-04:00", + "callContext": { + "id": "1728941920", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": 
"testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolSubCalls": { + "toolu_01PQYSGxbwRLw8XuUUkgKvbe": { + "toolID": "testdata/BobAsShell/test.gpt:bob", + "input": "{\"question\": \"how are you doing\"}" + } + }, + "type": "callSubCalls", + "usage": {} + }, + { + "time": "2024-10-14T17:38:43.937193-04:00", + "callContext": { + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728941920", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callStart", + "usage": {}, + "content": "{\"question\": \"how are you doing\"}" + }, + { + "time": "2024-10-14T17:38:43.938264-04:00", + "callContext": { + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": 
"string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728941920", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1728941923", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T17:38:43.943625-04:00", + "callContext": { + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", + "tool": { + "name": "bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728941920", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callChat", + "chatCompletionId": "1728941923", + "usage": {}, + "chatResponse": { + "usage": {} + } + }, + { + "time": "2024-10-14T17:38:43.943703-04:00", + "callContext": { + "id": "toolu_01PQYSGxbwRLw8XuUUkgKvbe", + "tool": { + "name": 
"bob", + "description": "I'm Bob, a friendly guy.", + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "arguments": { + "properties": { + "question": { + "description": "The question to ask Bob.", + "type": "string" + } + }, + "type": "object" + }, + "instructions": "#!/bin/bash\n\necho \"Thanks for asking ${QUESTION}, I'm doing great fellow friendly AI tool!\"", + "id": "testdata/BobAsShell/test.gpt:bob", + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 6 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null, + "toolName": "bob", + "parentID": "1728941920", + "displayText": "Running bob from testdata/BobAsShell/test.gpt" + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" + }, + { + "time": "2024-10-14T17:38:43.943766-04:00", + "callContext": { + "id": "1728941920", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "toolResults": 1, + "type": "callContinue", + "usage": {} + }, + { + "time": "2024-10-14T17:38:44.494388-04:00", + "callContext": { + "id": "1728941920", + "tool": { 
+ "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728941924", + "usage": {}, + "chatRequest": { + "model": "", + "messages": null + } + }, + { + "time": "2024-10-14T17:38:45.659797-04:00", + "callContext": { + "id": "1728941920", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callChat", + "chatCompletionId": "1728941924", + "usage": {}, + "chatResponse": { + "role": "assistant", + "content": [ + { + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
+ } + ], + "usage": {} + } + }, + { + "time": "2024-10-14T17:38:45.659891-04:00", + "callContext": { + "id": "1728941920", + "tool": { + "modelName": "claude-3-7-sonnet-20250219 from github.com/gptscript-ai/claude3-anthropic-provider", + "internalPrompt": null, + "tools": ["bob"], + "instructions": "Ask Bob \"how are you doing\" and repeat the response text exactly as given without saying anything else.", + "id": "testdata/BobAsShell/test.gpt:", + "toolMapping": { + "bob": [ + { + "reference": "bob", + "toolID": "testdata/BobAsShell/test.gpt:bob" + } + ] + }, + "localTools": { + "": "testdata/BobAsShell/test.gpt:", + "bob": "testdata/BobAsShell/test.gpt:bob" + }, + "source": { + "location": "testdata/BobAsShell/test.gpt", + "lineNo": 1 + }, + "workingDir": "testdata/BobAsShell" + }, + "currentAgent": {}, + "inputContext": null + }, + "type": "callFinish", + "usage": {}, + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" + }, + { + "time": "2024-10-14T17:38:45.659921-04:00", + "type": "runFinish", + "usage": {} + } +] From 2a07e0ea22ac1310bd9f0084be3fd789edd3c2d1 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 5 Nov 2025 12:02:57 -0500 Subject: [PATCH 270/270] chore: bump go-get to pickup fix for low CVE (#1000) Signed-off-by: Donnie Adams --- go.mod | 18 +++++++++--------- go.sum | 40 ++++++++++++++++++++-------------------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index e4369567..a0e0ec0a 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/docker/docker-credential-helpers v0.8.1 github.com/fatih/color v1.18.0 github.com/getkin/kin-openapi v0.132.0 - github.com/go-git/go-git/v5 v5.13.0 + github.com/go-git/go-git/v5 v5.16.3 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 @@ -48,8 +48,8 @@ require ( atomicgo.dev/keyboard v0.2.9 // indirect 
atomicgo.dev/schedule v0.1.0 // indirect dario.cat/mergo v1.0.0 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v1.1.3 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/STARRY-S/zip v0.2.3 // indirect github.com/alecthomas/chroma/v2 v2.8.0 // indirect github.com/andybalholm/brotli v1.2.0 // indirect @@ -64,22 +64,22 @@ require ( github.com/charmbracelet/x/ansi v0.8.0 // indirect github.com/charmbracelet/x/cellbuf v0.0.13 // indirect github.com/charmbracelet/x/term v0.2.1 // indirect - github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudflare/circl v1.6.1 // indirect github.com/containerd/console v1.0.4 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/creack/pty v1.1.24 // indirect - github.com/cyphar/filepath-securejoin v0.2.5 // indirect + github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dlclark/regexp2 v1.11.4 // indirect github.com/dop251/goja v0.0.0-20250531102226-cb187b08699c // indirect github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.6.0 // indirect + github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/gookit/color v1.5.4 // indirect @@ -114,13 +114,13 @@ require ( github.com/olekukonko/tablewriter 
v0.0.6-0.20230925090304-df64c4bbad77 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pterm/pterm v0.12.79 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect - github.com/skeema/knownhosts v1.3.0 // indirect + github.com/skeema/knownhosts v1.3.1 // indirect github.com/sorairolake/lzip-go v0.3.8 // indirect github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e // indirect github.com/spf13/afero v1.15.0 // indirect diff --git a/go.sum b/go.sum index 33a5e3c1..6b6dc8e1 100644 --- a/go.sum +++ b/go.sum @@ -43,12 +43,12 @@ github.com/MarvinJWendt/testza v0.5.2/go.mod h1:xu53QFE5sCdjtMCKk8YMQ2MnymimEctc github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= -github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= -github.com/ProtonMail/go-crypto v1.1.3/go.mod 
h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= +github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/STARRY-S/zip v0.2.3 h1:luE4dMvRPDOWQdeDdUxUoZkzUIpTccdKdhHHsQJ1fm4= github.com/STARRY-S/zip v0.2.3/go.mod h1:lqJ9JdeRipyOQJrYSOtpNAiaesFO6zVDsE8GIGFaoSk= github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= @@ -99,8 +99,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= @@ -110,8 +110,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= -github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= -github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= 
+github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= +github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -126,8 +126,8 @@ github.com/dop251/goja v0.0.0-20250531102226-cb187b08699c/go.mod h1:MxLav0peU43G github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4= github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= -github.com/elazarl/goproxy v1.2.1 h1:njjgvO6cRG9rIqN2ebkqy6cQz2Njkx7Fsfv/zIZqgug= -github.com/elazarl/goproxy v1.2.1/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= +github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= +github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -142,12 +142,12 @@ github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.0 
h1:w2hPNtoehvJIxR00Vb4xX94qHQi/ApZfX+nBE2Cjio8= -github.com/go-git/go-billy/v5 v5.6.0/go.mod h1:sFDq7xD3fn3E0GOwUSZqHo9lrkmx8xJhA0ZrfvjBRGM= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.13.0 h1:vLn5wlGIh/X78El6r3Jr+30W16Blk0CTcxTYcYPWi5E= -github.com/go-git/go-git/v5 v5.13.0/go.mod h1:Wjo7/JyVKtQgUNdXYXIepzWfJQkUEIGvkvVkiXRR/zw= +github.com/go-git/go-git/v5 v5.16.3 h1:Z8BtvxZ09bYm/yYNgPKCzgWtaRqDTgIKRgIRHBfU6Z8= +github.com/go-git/go-git/v5 v5.16.3/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= @@ -164,8 +164,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache 
v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -326,8 +326,8 @@ github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -369,8 +369,8 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= -github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= +github.com/skeema/knownhosts 
v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/sorairolake/lzip-go v0.3.8 h1:j5Q2313INdTA80ureWYRhX+1K78mUXfMoPZCw/ivWik= github.com/sorairolake/lzip-go v0.3.8/go.mod h1:JcBqGMV0frlxwrsE9sMWXDjqn3EeVf0/54YPsw66qkU= github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e h1:H+jDTUeF+SVd4ApwnSFoew8ZwGNRfgb9EsZc7LcocAg=