From bb7775a4b0e07f0b70b672d441015d4d6f9cafea Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Tue, 25 Jun 2024 21:28:22 -0400 Subject: [PATCH 01/24] chore: unfix the UI after v0.8.5 release Signed-off-by: Donnie Adams --- pkg/cli/gptscript.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 353967cd..90af42d7 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -330,7 +330,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // If the user is trying to launch the chat-builder UI, then set up the tool and options here. if r.UI { - args = append([]string{env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui@v0.8.5")}, args...) + args = append([]string{env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui")}, args...) // If args has more than one element, then the user has provided a file. if len(args) > 1 { From 638f64506b324065b55998c616633ad1d5888326 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 26 Jun 2024 09:23:38 -0400 Subject: [PATCH 02/24] feat: build credential helpers just in time Instead of building the credential helpers on startup, this change builds them when they are needed. This speeds up start time for the SDK. Signed-off-by: Donnie Adams --- pkg/cli/credential.go | 16 +++++++++++--- pkg/cli/credential_delete.go | 19 ++++++++++++---- pkg/cli/credential_show.go | 19 ++++++++++++---- pkg/cli/eval.go | 2 +- pkg/cli/gptscript.go | 2 +- pkg/credentials/noop.go | 10 +++++---- pkg/credentials/store.go | 43 ++++++++++++++++++++++-------------- pkg/gptscript/gptscript.go | 13 +++++------ pkg/openai/client.go | 4 ++-- pkg/prompt/credential.go | 4 ++-- pkg/remote/remote.go | 4 ++-- pkg/runner/runner.go | 6 ++--- pkg/sdkserver/run.go | 2 +- pkg/sdkserver/server.go | 2 +- 14 files changed, 94 insertions(+), 52 deletions(-) diff --git a/pkg/cli/credential.go b/pkg/cli/credential.go index 14f832eb..b0c4a30a 100644 --- a/pkg/cli/credential.go +++ b/pkg/cli/credential.go @@ -12,6 +12,8 @@ import ( "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/credentials" + "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" + "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/spf13/cobra" ) @@ -35,7 +37,7 @@ func (c *Credential) Customize(cmd *cobra.Command) { cmd.AddCommand(cmd2.Command(&Show{root: c.root})) } -func (c *Credential) Run(_ *cobra.Command, _ []string) error { +func (c *Credential) Run(cmd *cobra.Command, _ []string) error { cfg, err := config.ReadCLIConfig(c.root.ConfigFile) if err != nil { return fmt.Errorf("failed to read CLI config: %w", err) @@ -51,14 +53,22 @@ func (c *Credential) Run(_ *cobra.Command, _ []string) error { return err } opts.Cache = cache.Complete(opts.Cache) + opts.Runner = runner.Complete(opts.Runner) + if opts.Runner.RuntimeManager == nil { + opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) + } + + if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg, opts.Env); err != nil { + return err + } // Initialize the credential store and get all the credentials. 
- store, err := credentials.NewStore(cfg, ctx, opts.Cache.CacheDir) + store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, ctx, opts.Cache.CacheDir) if err != nil { return fmt.Errorf("failed to get credentials store: %w", err) } - creds, err := store.List() + creds, err := store.List(cmd.Context()) if err != nil { return fmt.Errorf("failed to list credentials: %w", err) } diff --git a/pkg/cli/credential_delete.go b/pkg/cli/credential_delete.go index db1f97b8..9c986c54 100644 --- a/pkg/cli/credential_delete.go +++ b/pkg/cli/credential_delete.go @@ -6,6 +6,8 @@ import ( "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/credentials" + "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" + "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/spf13/cobra" ) @@ -21,24 +23,33 @@ func (c *Delete) Customize(cmd *cobra.Command) { cmd.Args = cobra.ExactArgs(1) } -func (c *Delete) Run(_ *cobra.Command, args []string) error { +func (c *Delete) Run(cmd *cobra.Command, args []string) error { opts, err := c.root.NewGPTScriptOpts() if err != nil { return err } - opts.Cache = cache.Complete(opts.Cache) cfg, err := config.ReadCLIConfig(c.root.ConfigFile) if err != nil { return fmt.Errorf("failed to read CLI config: %w", err) } - store, err := credentials.NewStore(cfg, c.root.CredentialContext, opts.Cache.CacheDir) + opts.Cache = cache.Complete(opts.Cache) + opts.Runner = runner.Complete(opts.Runner) + if opts.Runner.RuntimeManager == nil { + opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) + } + + if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg, opts.Env); err != nil { + return err + } + + store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, c.root.CredentialContext, opts.Cache.CacheDir) if err != nil { return fmt.Errorf("failed to get credentials store: %w", err) } - if err = store.Remove(args[0]); err != nil { + if err = store.Remove(cmd.Context(), args[0]); err != nil { return fmt.Errorf("failed to remove credential: %w", err) } return nil diff --git a/pkg/cli/credential_show.go b/pkg/cli/credential_show.go index 92911dde..ccfe3675 100644 --- a/pkg/cli/credential_show.go +++ b/pkg/cli/credential_show.go @@ -8,6 +8,8 @@ import ( "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/config" "github.com/gptscript-ai/gptscript/pkg/credentials" + "github.com/gptscript-ai/gptscript/pkg/repos/runtimes" + "github.com/gptscript-ai/gptscript/pkg/runner" "github.com/spf13/cobra" ) @@ -23,24 +25,33 @@ func (c *Show) Customize(cmd *cobra.Command) { cmd.Args = cobra.ExactArgs(1) } -func (c *Show) Run(_ *cobra.Command, args []string) error { +func (c *Show) Run(cmd *cobra.Command, args []string) error { opts, err := c.root.NewGPTScriptOpts() if err != nil { return err } - opts.Cache = cache.Complete(opts.Cache) cfg, err := config.ReadCLIConfig(c.root.ConfigFile) if err != nil { return fmt.Errorf("failed to read CLI config: %w", err) } - store, err := credentials.NewStore(cfg, c.root.CredentialContext, opts.Cache.CacheDir) + opts.Cache = cache.Complete(opts.Cache) + opts.Runner = runner.Complete(opts.Runner) + if opts.Runner.RuntimeManager == nil { + opts.Runner.RuntimeManager = runtimes.Default(opts.Cache.CacheDir) + } + + if err = opts.Runner.RuntimeManager.SetUpCredentialHelpers(cmd.Context(), cfg, opts.Env); err != nil { + return err + } + + store, err := credentials.NewStore(cfg, opts.Runner.RuntimeManager, 
c.root.CredentialContext, opts.Cache.CacheDir) if err != nil { return fmt.Errorf("failed to get credentials store: %w", err) } - cred, exists, err := store.Get(args[0]) + cred, exists, err := store.Get(cmd.Context(), args[0]) if err != nil { return fmt.Errorf("failed to get credential: %w", err) } diff --git a/pkg/cli/eval.go b/pkg/cli/eval.go index 7beaa8a0..2cd4b1b5 100644 --- a/pkg/cli/eval.go +++ b/pkg/cli/eval.go @@ -56,7 +56,7 @@ func (e *Eval) Run(cmd *cobra.Command, args []string) error { return err } - runner, err := gptscript.New(opts) + runner, err := gptscript.New(cmd.Context(), opts) if err != nil { return err } diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 90af42d7..a8561abf 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -380,7 +380,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { ctx := cmd.Context() - gptScript, err := gptscript.New(gptOpt) + gptScript, err := gptscript.New(ctx, gptOpt) if err != nil { return err } diff --git a/pkg/credentials/noop.go b/pkg/credentials/noop.go index 1dfa61e8..5f3cc5ad 100644 --- a/pkg/credentials/noop.go +++ b/pkg/credentials/noop.go @@ -1,19 +1,21 @@ package credentials +import "context" + type NoopStore struct{} -func (s NoopStore) Get(_ string) (*Credential, bool, error) { +func (s NoopStore) Get(context.Context, string) (*Credential, bool, error) { return nil, false, nil } -func (s NoopStore) Add(_ Credential) error { +func (s NoopStore) Add(context.Context, Credential) error { return nil } -func (s NoopStore) Remove(_ string) error { +func (s NoopStore) Remove(context.Context, string) error { return nil } -func (s NoopStore) List() ([]Credential, error) { +func (s NoopStore) List(context.Context) ([]Credential, error) { return nil, nil } diff --git a/pkg/credentials/store.go b/pkg/credentials/store.go index 287c1aa6..3940184b 100644 --- a/pkg/credentials/store.go +++ b/pkg/credentials/store.go @@ -1,6 +1,7 @@ package credentials import ( + "context" "fmt" "path/filepath" "regexp" @@ -10,32 +11,38 @@ import ( "github.com/gptscript-ai/gptscript/pkg/config" ) +type CredentialBuilder interface { + EnsureCredentialHelpers(ctx context.Context) error +} + type CredentialStore interface { - Get(toolName string) (*Credential, bool, error) - Add(cred Credential) error - Remove(toolName string) error - List() ([]Credential, error) + Get(ctx context.Context, toolName string) (*Credential, bool, error) + Add(ctx context.Context, cred Credential) error + Remove(ctx context.Context, toolName string) error + List(ctx context.Context) ([]Credential, error) } type Store struct { credCtx string + credBuilder CredentialBuilder credHelperDirs CredentialHelperDirs cfg *config.CLIConfig } -func NewStore(cfg *config.CLIConfig, credCtx, cacheDir string) (CredentialStore, error) { +func NewStore(cfg *config.CLIConfig, credentialBuilder CredentialBuilder, credCtx, cacheDir string) (CredentialStore, error) { if err := validateCredentialCtx(credCtx); err != nil { return nil, err } return Store{ credCtx: credCtx, + credBuilder: credentialBuilder, credHelperDirs: GetCredentialHelperDirs(cacheDir), cfg: cfg, }, nil } -func (s Store) Get(toolName string) (*Credential, bool, error) { - store, err := s.getStore() +func (s Store) Get(ctx context.Context, toolName string) (*Credential, bool, error) { + store, err := s.getStore(ctx) if err != nil { return nil, false, err } @@ -57,9 +64,9 @@ func (s Store) Get(toolName string) (*Credential, bool, error) { return &cred, true, nil } -func (s Store) Add(cred 
Credential) error { +func (s Store) Add(ctx context.Context, cred Credential) error { cred.Context = s.credCtx - store, err := s.getStore() + store, err := s.getStore(ctx) if err != nil { return err } @@ -70,16 +77,16 @@ func (s Store) Add(cred Credential) error { return store.Store(auth) } -func (s Store) Remove(toolName string) error { - store, err := s.getStore() +func (s Store) Remove(ctx context.Context, toolName string) error { + store, err := s.getStore(ctx) if err != nil { return err } return store.Erase(toolNameWithCtx(toolName, s.credCtx)) } -func (s Store) List() ([]Credential, error) { - store, err := s.getStore() +func (s Store) List(ctx context.Context) ([]Credential, error) { + store, err := s.getStore(ctx) if err != nil { return nil, err } @@ -106,17 +113,21 @@ func (s Store) List() ([]Credential, error) { return creds, nil } -func (s *Store) getStore() (credentials.Store, error) { - return s.getStoreByHelper(config.GPTScriptHelperPrefix + s.cfg.CredentialsStore) +func (s *Store) getStore(ctx context.Context) (credentials.Store, error) { + return s.getStoreByHelper(ctx, config.GPTScriptHelperPrefix+s.cfg.CredentialsStore) } -func (s *Store) getStoreByHelper(helper string) (credentials.Store, error) { +func (s *Store) getStoreByHelper(ctx context.Context, helper string) (credentials.Store, error) { if helper == "" || helper == config.GPTScriptHelperPrefix+"file" { return credentials.NewFileStore(s.cfg), nil } // If the helper is referencing one of the credential helper programs, then reference the full path. if strings.HasPrefix(helper, "gptscript-credential-") { + if err := s.credBuilder.EnsureCredentialHelpers(ctx); err != nil { + return nil, err + } + helper = filepath.Join(s.credHelperDirs.BinDir, helper) } diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 63cce7ba..f8264a44 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -77,7 +77,7 @@ func complete(opts ...Options) Options { return result } -func New(o ...Options) (*GPTScript, error) { +func New(ctx context.Context, o ...Options) (*GPTScript, error) { opts := complete(o...) 
registry := llm.NewRegistry() @@ -91,11 +91,6 @@ func New(o ...Options) (*GPTScript, error) { return nil, err } - credStore, err := credentials.NewStore(cliCfg, opts.CredentialContext, cacheClient.CacheDir()) - if err != nil { - return nil, err - } - if opts.Runner.RuntimeManager == nil { opts.Runner.RuntimeManager = runtimes.Default(cacheClient.CacheDir()) } @@ -103,11 +98,13 @@ func New(o ...Options) (*GPTScript, error) { if err := opts.Runner.RuntimeManager.SetUpCredentialHelpers(context.Background(), cliCfg, opts.Env); err != nil { return nil, err } - if err := opts.Runner.RuntimeManager.EnsureCredentialHelpers(context.Background()); err != nil { + + credStore, err := credentials.NewStore(cliCfg, opts.Runner.RuntimeManager, opts.CredentialContext, cacheClient.CacheDir()) + if err != nil { return nil, err } - oaiClient, err := openai.NewClient(credStore, opts.OpenAI, openai.Options{ + oaiClient, err := openai.NewClient(ctx, credStore, opts.OpenAI, openai.Options{ Cache: cacheClient, SetSeed: true, }) diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 42ff83bc..6e3c9c60 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -93,7 +93,7 @@ func complete(opts ...Options) (Options, error) { return result, err } -func NewClient(credStore credentials.CredentialStore, opts ...Options) (*Client, error) { +func NewClient(ctx context.Context, credStore credentials.CredentialStore, opts ...Options) (*Client, error) { opt, err := complete(opts...) if err != nil { return nil, err @@ -101,7 +101,7 @@ func NewClient(credStore credentials.CredentialStore, opts ...Options) (*Client, // If the API key is not set, try to get it from the cred store if opt.APIKey == "" && opt.BaseURL == "" { - cred, exists, err := credStore.Get(BuiltinCredName) + cred, exists, err := credStore.Get(ctx, BuiltinCredName) if err != nil { return nil, err } diff --git a/pkg/prompt/credential.go b/pkg/prompt/credential.go index 9202ed49..a47a9168 100644 --- a/pkg/prompt/credential.go +++ b/pkg/prompt/credential.go @@ -9,7 +9,7 @@ import ( ) func GetModelProviderCredential(ctx context.Context, credStore credentials.CredentialStore, credName, env, message string, envs []string) (string, error) { - cred, exists, err := credStore.Get(credName) + cred, exists, err := credStore.Get(ctx, credName) if err != nil { return "", err } @@ -25,7 +25,7 @@ func GetModelProviderCredential(ctx context.Context, credStore credentials.Crede } k = gjson.Get(result, "key").String() - if err := credStore.Add(credentials.Credential{ + if err := credStore.Add(ctx, credentials.Credential{ ToolName: credName, Type: credentials.CredentialTypeModelProvider, Env: map[string]string{ diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 3837879a..7d7cff6e 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -116,7 +116,7 @@ func (c *Client) clientFromURL(ctx context.Context, apiURL string) (*openai.Clie } } - return openai.NewClient(c.credStore, openai.Options{ + return openai.NewClient(ctx, c.credStore, openai.Options{ BaseURL: apiURL, Cache: c.cache, APIKey: key, @@ -163,7 +163,7 @@ func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, err url += "/v1" } - client, err = openai.NewClient(c.credStore, openai.Options{ + client, err = openai.NewClient(ctx, c.credStore, openai.Options{ BaseURL: url, Cache: c.cache, CacheKey: prg.EntryToolID, diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 466eddb7..cecc7afd 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -873,12 +873,12 
@@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env // Only try to look up the cred if the tool is on GitHub or has an alias. // If it is a GitHub tool and has an alias, the alias overrides the tool name, so we use it as the credential name. if isGitHubTool(toolName) && credentialAlias == "" { - c, exists, err = r.credStore.Get(toolName) + c, exists, err = r.credStore.Get(callCtx.Ctx, toolName) if err != nil { return nil, fmt.Errorf("failed to get credentials for tool %s: %w", toolName, err) } } else if credentialAlias != "" { - c, exists, err = r.credStore.Get(credentialAlias) + c, exists, err = r.credStore.Get(callCtx.Ctx, credentialAlias) if err != nil { return nil, fmt.Errorf("failed to get credentials for tool %s: %w", credentialAlias, err) } @@ -942,7 +942,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env if (isGitHubTool(toolName) && callCtx.Program.ToolSet[credToolRefs[0].ToolID].Source.Repo != nil) || credentialAlias != "" { if isEmpty { log.Warnf("Not saving empty credential for tool %s", toolName) - } else if err := r.credStore.Add(*c); err != nil { + } else if err := r.credStore.Add(callCtx.Ctx, *c); err != nil { return nil, fmt.Errorf("failed to add credential for tool %s: %w", toolName, err) } } else { diff --git a/pkg/sdkserver/run.go b/pkg/sdkserver/run.go index 321e76c1..dc155557 100644 --- a/pkg/sdkserver/run.go +++ b/pkg/sdkserver/run.go @@ -17,7 +17,7 @@ import ( type loaderFunc func(context.Context, string, string, ...loader.Options) (types.Program, error) func (s *server) execAndStream(ctx context.Context, programLoader loaderFunc, logger mvl.Logger, w http.ResponseWriter, opts gptscript.Options, chatState, input, subTool string, toolDef fmt.Stringer) { - g, err := gptscript.New(s.gptscriptOpts, opts) + g, err := gptscript.New(ctx, s.gptscriptOpts, opts) if err != nil { writeError(logger, w, http.StatusInternalServerError, fmt.Errorf("failed to initialize gptscript: %w", err)) return diff --git a/pkg/sdkserver/server.go b/pkg/sdkserver/server.go index c51e21e4..4556f69e 100644 --- a/pkg/sdkserver/server.go +++ b/pkg/sdkserver/server.go @@ -53,7 +53,7 @@ func Start(ctx context.Context, opts Options) error { // prompt server because it is only used for fmt, parse, etc. opts.Env = append(opts.Env, fmt.Sprintf("%s=%s", types.PromptTokenEnvVar, token)) - g, err := gptscript.New(opts.Options) + g, err := gptscript.New(ctx, opts.Options) if err != nil { return err } From 13604359cac06db9f4c1885fac773844a737ccf0 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Wed, 26 Jun 2024 12:11:45 -0400 Subject: [PATCH 03/24] fix: pass default model configuration to the UI Signed-off-by: Donnie Adams --- pkg/cli/gptscript.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index a8561abf..5a44c5fb 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -361,6 +361,11 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { gptOpt.Env = append(gptOpt.Env, system.BinEnvVar+"="+system.Bin()) } + // If the DefaultModel is set, then pass the correct environment variable. 
+ if r.DefaultModel != "" { + gptOpt.Env = append(gptOpt.Env, "GPTSCRIPT_SDKSERVER_DEFAULT_MODEL="+r.DefaultModel) + } + args = append([]string{args[0]}, "--file="+file) if len(args) > 2 { From 51c49d9b4cfdf1d53830a2beabefd6479b2d3b86 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Wed, 26 Jun 2024 12:03:09 -0700 Subject: [PATCH 04/24] chore: remove dispatch workflow Signed-off-by: Taylor Price --- .github/workflows/release.yaml | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index e29f6b9d..f864d26f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -43,36 +43,3 @@ jobs: run: | $url = "${{ github.server_url }}/${{ github.repository }}/releases/download/${{ github.ref_name }}/gptscript-${{ github.ref_name }}-windows-amd64.zip" ./wingetcreate.exe update --submit --token "${{ secrets.WINGET_GH_TOKEN }}" --urls $url --version "${{ github.ref_name }}" gptscript-ai.gptscript - node-release: - needs: release-tag - runs-on: ubuntu-latest - steps: - - name: trigger dispatch - uses: peter-evans/repository-dispatch@v3 - with: - token: ${{ secrets.DISPATCH_PAT }} - repository: gptscript-ai/node-gptscript - event-type: release - client-payload: '{"tag": "${{ github.ref_name }}"}' - python-release: - needs: release-tag - runs-on: ubuntu-latest - steps: - - name: trigger dispatch - uses: peter-evans/repository-dispatch@v3 - with: - token: ${{ secrets.DISPATCH_PAT }} - repository: gptscript-ai/py-gptscript - event-type: release - client-payload: '{"tag": "${{ github.ref_name }}"}' - go-release: - needs: release-tag - runs-on: ubuntu-latest - steps: - - name: trigger dispatch - uses: peter-evans/repository-dispatch@v3 - with: - token: ${{ secrets.DISPATCH_PAT }} - repository: gptscript-ai/go-gptscript - event-type: release - client-payload: '{"tag": "${{ github.ref_name }}"}' From f33bec8952b827af3734c8f5f7e20aac2696d87d Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Wed, 26 Jun 2024 12:11:01 -0700 Subject: [PATCH 05/24] chore: dispatch to tag ui repo, update actions Signed-off-by: Taylor Price --- .github/workflows/main.yaml | 4 ++-- .github/workflows/push-quickstart.yaml | 2 +- .github/workflows/release.yaml | 11 +++++++++-- .github/workflows/validate-docs.yaml | 2 +- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index d5fd7c4c..6b469c39 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -19,11 +19,11 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: cache: false go-version: "1.22" diff --git a/.github/workflows/push-quickstart.yaml b/.github/workflows/push-quickstart.yaml index 50bae387..ae6f11e4 100644 --- a/.github/workflows/push-quickstart.yaml +++ b/.github/workflows/push-quickstart.yaml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 1 - name: Copy files to S3 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f864d26f..d998b521 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,12 +12,19 @@ jobs: release-tag: runs-on: ubuntu-22.04 steps: + - name: trigger ui repo tag workflow + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ 
secrets.DISPATCH_PAT }} + repository: gptscript-ai/ui + event-type: release + client-payload: '{"tag": "${{ github.ref_name }}"}' - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: cache: false go-version: "1.22" diff --git a/.github/workflows/validate-docs.yaml b/.github/workflows/validate-docs.yaml index b017af94..18368355 100644 --- a/.github/workflows/validate-docs.yaml +++ b/.github/workflows/validate-docs.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: cache: false go-version: "1.22" From f1179b207c079954ce276dc918b4541112eb1383 Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Wed, 26 Jun 2024 15:33:43 -0400 Subject: [PATCH 06/24] chore: pull ui tool with matching release tag Use the matching tagged version of the UI tool for release builds when the `--ui` option is given. Use main for all other builds of gptscript. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/cli/gptscript.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 5a44c5fb..fab9b2a2 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -330,7 +330,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // If the user is trying to launch the chat-builder UI, then set up the tool and options here. if r.UI { - args = append([]string{env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", "github.com/gptscript-ai/ui")}, args...) + args = append([]string{uiTool()}, args...) // If args has more than one element, then the user has provided a file. if len(args) > 1 { @@ -493,3 +493,15 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { return r.PrintOutput(toolInput, s) } + +// uiTool returns the versioned UI tool reference for the current GPTScript version. +// For release versions, a reference with a matching release tag is returned. +// For all other versions, a reference to main is returned. +func uiTool() string { + ref := "github.com/gptscript-ai/ui" + if tag := version.Tag; !strings.Contains(tag, "v0.0.0-dev") { + ref = fmt.Sprintf("%s@%s", ref, tag) + } + + return env.VarOrDefault("GPTSCRIPT_CHAT_UI_TOOL", ref) +} From f1c0282965559234d0030fc31232ae20d132c03a Mon Sep 17 00:00:00 2001 From: Bill Maxwell Date: Tue, 25 Jun 2024 13:51:13 -0700 Subject: [PATCH 07/24] enhance: add support for gptscript.env file in workspace To add environment variables to a sys.exec call, the user can populate a gptscript.env file with a list of environment variables per line in the format: ``` FOO=bar ``` This file can be precreated or modified by gptscripts. Signed-off-by: Bill Maxwell --- pkg/builtin/builtin.go | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index 3e05f05c..56630369 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -336,6 +336,11 @@ func SysExec(_ context.Context, env []string, input string, progress chan<- stri } combined = io.MultiWriter(&out, &pw) ) + + if envvars, err := getWorkspaceEnvFileContents(env); err == nil { + env = append(env, envvars...) 
+ } + cmd.Env = env cmd.Dir = params.Directory cmd.Stdout = combined @@ -355,6 +360,43 @@ func (pw *progressWriter) Write(p []byte) (n int, err error) { return len(p), nil } +func getWorkspaceEnvFileContents(envs []string) ([]string, error) { + dir, err := getWorkspaceDir(envs) + if err != nil { + return nil, err + } + + file := filepath.Join(dir, "gptscript.env") + + // Lock the file to prevent concurrent writes from other tool calls. + locker.RLock(file) + defer locker.RUnlock(file) + + // This is optional, so no errors are returned if the file does not exist. + log.Debugf("Reading file %s", file) + data, err := os.ReadFile(file) + if errors.Is(err, fs.ErrNotExist) { + log.Debugf("The file %s does not exist", file) + return []string{}, nil + } else if err != nil { + log.Debugf("Failed to read file %s: %v", file, err.Error()) + return []string{}, nil + } + + lines := strings.Split(string(data), "\n") + var envContents []string + + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.Contains(line, "=") { + envContents = append(envContents, line) + } + } + + return envContents, nil + +} + func getWorkspaceDir(envs []string) (string, error) { for _, env := range envs { dir, ok := strings.CutPrefix(env, "GPTSCRIPT_WORKSPACE_DIR=") From e0064e02f2433fdf5be6bdb496cf65171523e680 Mon Sep 17 00:00:00 2001 From: Taylor Price Date: Wed, 26 Jun 2024 14:03:40 -0700 Subject: [PATCH 08/24] chore: add back dispatch jobs Signed-off-by: Taylor Price --- .github/workflows/release.yaml | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index d998b521..18871150 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -50,3 +50,36 @@ jobs: run: | $url = "${{ github.server_url }}/${{ github.repository }}/releases/download/${{ github.ref_name }}/gptscript-${{ github.ref_name }}-windows-amd64.zip" ./wingetcreate.exe update --submit --token "${{ secrets.WINGET_GH_TOKEN }}" --urls $url --version "${{ github.ref_name }}" gptscript-ai.gptscript + node-release: + needs: release-tag + runs-on: ubuntu-latest + steps: + - name: trigger dispatch + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.DISPATCH_PAT }} + repository: gptscript-ai/node-gptscript + event-type: release + client-payload: '{"tag": "${{ github.ref_name }}"}' + python-release: + needs: release-tag + runs-on: ubuntu-latest + steps: + - name: trigger dispatch + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.DISPATCH_PAT }} + repository: gptscript-ai/py-gptscript + event-type: release + client-payload: '{"tag": "${{ github.ref_name }}"}' + go-release: + needs: release-tag + runs-on: ubuntu-latest + steps: + - name: trigger dispatch + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.DISPATCH_PAT }} + repository: gptscript-ai/go-gptscript + event-type: release + client-payload: '{"tag": "${{ github.ref_name }}"}' \ No newline at end of file From 22c5d9b597a835aef6aa5c2bdb8bd82cedeca45b Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 24 Jun 2024 16:01:35 -0700 Subject: [PATCH 09/24] chore: add sys.chat.current --- go.mod | 3 +- go.sum | 6 +- pkg/builtin/builtin.go | 32 +++++++++++ pkg/cli/gptscript.go | 2 +- pkg/engine/engine.go | 37 +++++++++--- pkg/openai/client.go | 5 ++ pkg/openai/count.go | 57 +++++++++++++++++++ pkg/runner/output.go | 1 - pkg/runner/runner.go | 2 + pkg/tests/runner_test.go | 18 +++--- 
pkg/tests/testdata/TestAgents/call1.golden | 3 +- pkg/tests/testdata/TestAgents/call2.golden | 3 +- pkg/tests/testdata/TestAgents/call3.golden | 3 +- pkg/tests/testdata/TestAgents/call4.golden | 3 +- pkg/tests/testdata/TestAgents/step1.golden | 12 ++-- pkg/tests/testdata/TestChat/call1.golden | 3 +- pkg/tests/testdata/TestChat/call2.golden | 3 +- .../testdata/TestChatRunNoError/call1.golden | 3 +- .../testdata/TestContextSubChat/call10.golden | 3 +- .../testdata/TestContextSubChat/call2.golden | 3 +- .../testdata/TestContextSubChat/call3.golden | 3 +- .../testdata/TestContextSubChat/call5.golden | 3 +- .../testdata/TestContextSubChat/call7.golden | 3 +- .../testdata/TestContextSubChat/call8.golden | 3 +- .../testdata/TestContextSubChat/step1.golden | 3 +- .../testdata/TestContextSubChat/step2.golden | 3 +- .../testdata/TestContextSubChat/step3.golden | 6 +- .../testdata/TestContextSubChat/step4.golden | 3 +- .../testdata/TestDualSubChat/call2.golden | 3 +- .../testdata/TestDualSubChat/call3.golden | 3 +- .../testdata/TestDualSubChat/call4.golden | 3 +- .../testdata/TestDualSubChat/call5.golden | 3 +- .../testdata/TestDualSubChat/call6.golden | 3 +- .../testdata/TestDualSubChat/step1.golden | 6 +- .../testdata/TestDualSubChat/step2.golden | 3 +- .../testdata/TestDualSubChat/step3.golden | 3 +- pkg/tests/testdata/TestInput/call1.golden | 3 +- pkg/tests/testdata/TestInput/call2.golden | 3 +- pkg/tests/testdata/TestInput/step1.golden | 3 +- pkg/tests/testdata/TestInput/step2.golden | 3 +- pkg/tests/testdata/TestOutput/call1.golden | 3 +- pkg/tests/testdata/TestOutput/call2.golden | 3 +- pkg/tests/testdata/TestOutput/call3.golden | 3 +- pkg/tests/testdata/TestOutput/step1.golden | 7 ++- pkg/tests/testdata/TestOutput/step2.golden | 7 ++- pkg/tests/testdata/TestOutput/step3.golden | 2 +- pkg/tests/testdata/TestOutput/test.gpt | 1 - pkg/tests/testdata/TestSubChat/call2.golden | 3 +- pkg/tests/testdata/TestSubChat/call3.golden | 3 +- .../testdata/TestSysContext/call1.golden | 5 +- .../testdata/TestSysContext/context.json | 2 +- .../testdata/TestSysContext/step1.golden | 5 +- pkg/types/completion.go | 1 + pkg/types/toolstring.go | 2 +- 54 files changed, 240 insertions(+), 75 deletions(-) create mode 100644 pkg/openai/count.go diff --git a/go.mod b/go.mod index c5eba627..1609ae4c 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d - github.com/gptscript-ai/tui v0.0.0-20240625175717-1e6eca7a66c1 + github.com/gptscript-ai/tui v0.0.0-20240627001757-8b452fa47eb5 github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 @@ -94,6 +94,7 @@ require ( github.com/pterm/pterm v0.12.79 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e // indirect github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/tidwall/match v1.1.1 // indirect diff --git a/go.sum b/go.sum index c61794f1..c13d872a 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6A github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod 
h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.0.0-20240625134437-4b83849794cc h1:ABV7VAK65YBkqL7VlNp5ryVXnRqkKQ+U/NZfUO3ypqA= github.com/gptscript-ai/go-gptscript v0.0.0-20240625134437-4b83849794cc/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240625175717-1e6eca7a66c1 h1:sx/dJ0IRh3P9Ehr1g1TQ/jEw83KISmQyjrssVgPGUbE= -github.com/gptscript-ai/tui v0.0.0-20240625175717-1e6eca7a66c1/go.mod h1:R33cfOnNaqsEn9es5jLKR39wvDyHvsIVgeTMNqtzCb8= +github.com/gptscript-ai/tui v0.0.0-20240627001757-8b452fa47eb5 h1:knDhTTJNqaZB1XMudXJuVVnTqj9USrXzNfsl1nTqKXA= +github.com/gptscript-ai/tui v0.0.0-20240627001757-8b452fa47eb5/go.mod h1:NwFdBDmGQvjLFFDnSRBRakkhw0MIO1sSdRnWNk4cCQ0= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -308,6 +308,8 @@ github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e h1:H+jDTUeF+SVd4ApwnSFoew8ZwGNRfgb9EsZc7LcocAg= +github.com/sourcegraph/go-diff-patch v0.0.0-20240223163233-798fd1e94a8e/go.mod h1:VsUklG6OQo7Ctunu0gS3AtEOCEc2kMB6r5rKzxAes58= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= diff --git a/pkg/builtin/builtin.go b/pkg/builtin/builtin.go index 56630369..f6811549 100644 --- a/pkg/builtin/builtin.go +++ b/pkg/builtin/builtin.go @@ -29,6 +29,7 @@ var SafeTools = map[string]struct{}{ "sys.abort": {}, "sys.chat.finish": {}, "sys.chat.history": {}, + "sys.chat.current": {}, "sys.echo": {}, "sys.prompt": {}, "sys.time.now": {}, @@ -229,6 +230,15 @@ var tools = map[string]types.Tool{ BuiltinFunc: SysChatHistory, }, }, + "sys.chat.current": { + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Description: "Retrieves the current chat dialog", + Arguments: types.ObjectSchema(), + }, + BuiltinFunc: SysChatCurrent, + }, + }, "sys.context": { ToolDef: types.ToolDef{ Parameters: types.Parameters{ @@ -715,6 +725,28 @@ func writeHistory(ctx *engine.Context) (result []engine.ChatHistoryCall) { return } +func SysChatCurrent(ctx context.Context, _ []string, _ string, _ chan<- string) (string, error) { + engineContext, _ := engine.FromContext(ctx) + + var call any + if engineContext != nil && engineContext.CurrentReturn != nil && engineContext.CurrentReturn.State != nil { + call = engine.ChatHistoryCall{ + ID: engineContext.ID, + Tool: engineContext.Tool, + Completion: engineContext.CurrentReturn.State.Completion, + } + } else { + call = map[string]any{} + } + + data, err := json.Marshal(call) + if err != nil { + return invalidArgument("", err), nil + } + + return string(data), nil +} + func SysChatFinish(_ context.Context, _ []string, input string, _ chan<- string) (string, error) { var params struct { Message string `json:"return,omitempty"` diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 5a44c5fb..9faa7109 100644 --- 
a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -467,7 +467,7 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { DefaultModel: r.DefaultModel, TrustedRepoPrefixes: []string{"github.com/gptscript-ai"}, DisableCache: r.DisableCache, - Input: strings.Join(args[1:], " "), + Input: toolInput, CacheDir: r.CacheDir, SubTool: r.SubTool, Workspace: r.Workspace, diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index 5cbc87a8..b0a6e4eb 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -59,6 +59,7 @@ type CallResult struct { type commonContext struct { ID string `json:"id"` Tool types.Tool `json:"tool"` + CurrentAgent types.ToolReference `json:"currentAgent,omitempty"` AgentGroup []types.ToolReference `json:"agentGroup,omitempty"` InputContext []InputContext `json:"inputContext"` ToolCategory ToolCategory `json:"toolCategory,omitempty"` @@ -73,10 +74,11 @@ type CallContext struct { type Context struct { commonContext - Ctx context.Context - Parent *Context - LastReturn *Return - Program *types.Program + Ctx context.Context + Parent *Context + LastReturn *Return + CurrentReturn *Return + Program *types.Program // Input is saved only so that we can render display text, don't use otherwise Input string } @@ -129,6 +131,18 @@ func (c *Context) ParentID() string { return c.Parent.ID } +func (c *Context) CurrentAgent() types.ToolReference { + for _, ref := range c.AgentGroup { + if ref.ToolID == c.Tool.ID { + return ref + } + } + if c.Parent != nil { + return c.Parent.CurrentAgent() + } + return types.ToolReference{} +} + func (c *Context) GetCallContext() *CallContext { var toolName string if c.Parent != nil { @@ -143,12 +157,15 @@ func (c *Context) GetCallContext() *CallContext { } } - return &CallContext{ + result := &CallContext{ commonContext: c.commonContext, ParentID: c.ParentID(), ToolName: toolName, DisplayText: types.ToDisplayText(c.Tool, c.Input), } + + result.CurrentAgent = c.CurrentAgent() + return result } func (c *Context) UnmarshalJSON([]byte) error { @@ -215,10 +232,11 @@ func (c *Context) SubCallContext(ctx context.Context, input, toolID, callID stri AgentGroup: agentGroup, ToolCategory: toolCategory, }, - Ctx: ctx, - Parent: c, - Program: c.Program, - Input: input, + Ctx: ctx, + Parent: c, + Program: c.Program, + CurrentReturn: c.CurrentReturn, + Input: input, }, nil } @@ -270,6 +288,7 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) { MaxTokens: tool.Parameters.MaxTokens, JSONResponse: tool.Parameters.JSONResponse, Cache: tool.Parameters.Cache, + Chat: tool.Parameters.Chat, Temperature: tool.Parameters.Temperature, InternalSystemPrompt: tool.Parameters.InternalPrompt, } diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 6e3c9c60..897fb880 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -300,11 +300,16 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques if messageRequest.Model == "" { messageRequest.Model = c.defaultModel } + msgs, err := toMessages(messageRequest, !c.setSeed) if err != nil { return nil, err } + if messageRequest.Chat { + msgs = dropMessagesOverCount(messageRequest.MaxTokens, msgs) + } + if len(msgs) == 0 { log.Errorf("invalid request, no messages to send to LLM") return &types.CompletionMessage{ diff --git a/pkg/openai/count.go b/pkg/openai/count.go new file mode 100644 index 00000000..47c5c9bd --- /dev/null +++ b/pkg/openai/count.go @@ -0,0 +1,57 @@ +package openai + +import openai "github.com/gptscript-ai/chat-completion-client" 
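+// dropMessagesOverCount keeps any leading system messages, then walks the remaining messages from newest to oldest and keeps as many recent messages as fit within a rough size budget derived from maxTokens (a default budget is used when maxTokens is 0), dropping the older non-system messages; if only the newest non-system message would fit, all messages are returned unchanged and the request is left to fail.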
+ +func dropMessagesOverCount(maxTokens int, msgs []openai.ChatCompletionMessage) (result []openai.ChatCompletionMessage) { + var ( + lastSystem int + withinBudget int + budget = maxTokens + ) + + if maxTokens == 0 { + budget = 300_000 + } else { + budget *= 3 + } + + for i, msg := range msgs { + if msg.Role == openai.ChatMessageRoleSystem { + budget -= countMessage(msg) + lastSystem = i + result = append(result, msg) + } else { + break + } + } + + for i := len(msgs) - 1; i > lastSystem; i-- { + withinBudget = i + budget -= countMessage(msgs[i]) + if budget <= 0 { + break + } + } + + if withinBudget == len(msgs)-1 { + // We are going to drop all non system messages, which seems useless, so just return them + // all and let it fail + return msgs + } + + return append(result, msgs[withinBudget:]...) +} + +func countMessage(msg openai.ChatCompletionMessage) (count int) { + count += len(msg.Role) + count += len(msg.Content) + for _, content := range msg.MultiContent { + count += len(content.Text) + } + for _, tool := range msg.ToolCalls { + count += len(tool.Function.Name) + count += len(tool.Function.Arguments) + } + count += len(msg.ToolCallID) + return count / 3 +} diff --git a/pkg/runner/output.go b/pkg/runner/output.go index 858d106c..d4cb4b9b 100644 --- a/pkg/runner/output.go +++ b/pkg/runner/output.go @@ -41,7 +41,6 @@ func (r *Runner) handleOutput(callCtx engine.Context, monitor Monitor, env []str for _, outputToolRef := range outputToolRefs { inputData, err := json.Marshal(map[string]any{ "output": output, - "chatFinish": chatFinish, "continuation": continuation, "chat": callCtx.Tool.Chat, }) diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index cecc7afd..41e4f058 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -559,6 +559,8 @@ func (r *Runner) resume(callCtx engine.Context, monitor Monitor, env []string, s } for { + callCtx.CurrentReturn = state.Continuation + if state.Continuation.Result != nil && len(state.Continuation.Calls) == 0 && state.SubCallID == "" && state.ResumeInput == nil { progressClose() monitor.Event(Event{ diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index db185d75..deced81c 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -413,7 +413,8 @@ func TestSubChat(t *testing.T) { ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant 1" @@ -555,7 +556,8 @@ func TestSubChat(t *testing.T) { ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant 2" @@ -622,7 +624,8 @@ func TestChat(t *testing.T) { ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant 1" @@ -691,7 +694,8 @@ func TestChat(t *testing.T) { ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant 2" @@ -866,7 +870,7 @@ func TestOutput(t *testing.T) { require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) - autogold.Expect(`CHAT: true CONTENT: Response 1 CONTINUATION: true FINISH: false suffix + autogold.Expect(`CHAT: true CONTENT: Response 1 CONTINUATION: true suffix `).Equal(t, resp.Content) autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step1")) @@ -877,7 +881,7 @@ func TestOutput(t *testing.T) { require.NoError(t, err) r.AssertResponded(t) assert.False(t, resp.Done) - autogold.Expect(`CHAT: true CONTENT: Response 2 CONTINUATION: true FINISH: false suffix + autogold.Expect(`CHAT: true CONTENT: Response 2 CONTINUATION: true suffix `).Equal(t, resp.Content) autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step2")) @@ -890,7 +894,7 @@ func 
TestOutput(t *testing.T) { require.NoError(t, err) r.AssertResponded(t) assert.True(t, resp.Done) - autogold.Expect(`CHAT FINISH: CHAT: true CONTENT: Chat Done CONTINUATION: false FINISH: true suffix + autogold.Expect(`CHAT FINISH: CHAT: true CONTENT: Chat Done CONTINUATION: false suffix `).Equal(t, resp.Content) autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step3")) } diff --git a/pkg/tests/testdata/TestAgents/call1.golden b/pkg/tests/testdata/TestAgents/call1.golden index c0465ac7..a1a0dcaa 100644 --- a/pkg/tests/testdata/TestAgents/call1.golden +++ b/pkg/tests/testdata/TestAgents/call1.golden @@ -52,5 +52,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestAgents/call2.golden b/pkg/tests/testdata/TestAgents/call2.golden index a4b53537..6b59830a 100644 --- a/pkg/tests/testdata/TestAgents/call2.golden +++ b/pkg/tests/testdata/TestAgents/call2.golden @@ -28,5 +28,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestAgents/call3.golden b/pkg/tests/testdata/TestAgents/call3.golden index 4a001215..29d5462e 100644 --- a/pkg/tests/testdata/TestAgents/call3.golden +++ b/pkg/tests/testdata/TestAgents/call3.golden @@ -43,5 +43,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestAgents/call4.golden b/pkg/tests/testdata/TestAgents/call4.golden index 038fa68c..d43474ca 100644 --- a/pkg/tests/testdata/TestAgents/call4.golden +++ b/pkg/tests/testdata/TestAgents/call4.golden @@ -11,5 +11,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestAgents/step1.golden b/pkg/tests/testdata/TestAgents/step1.golden index 423069b5..a259bf0f 100644 --- a/pkg/tests/testdata/TestAgents/step1.golden +++ b/pkg/tests/testdata/TestAgents/step1.golden @@ -75,7 +75,8 @@ ], "usage": {} } - ] + ], + "chat": true }, "pending": { "call_1": { @@ -145,7 +146,8 @@ ], "usage": {} } - ] + ], + "chat": true }, "pending": { "call_2": { @@ -230,7 +232,8 @@ ], "usage": {} } - ] + ], + "chat": true }, "pending": { "call_3": { @@ -277,7 +280,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "TEST RESULT CALL: 4" diff --git a/pkg/tests/testdata/TestChat/call1.golden b/pkg/tests/testdata/TestChat/call1.golden index 0fef0adb..34bb12cf 100644 --- a/pkg/tests/testdata/TestChat/call1.golden +++ b/pkg/tests/testdata/TestChat/call1.golden @@ -20,5 +20,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestChat/call2.golden b/pkg/tests/testdata/TestChat/call2.golden index ff513bb2..e67862cb 100644 --- a/pkg/tests/testdata/TestChat/call2.golden +++ b/pkg/tests/testdata/TestChat/call2.golden @@ -38,5 +38,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestChatRunNoError/call1.golden b/pkg/tests/testdata/TestChatRunNoError/call1.golden index 5fdf4ff8..ac6d5325 100644 --- a/pkg/tests/testdata/TestChatRunNoError/call1.golden +++ b/pkg/tests/testdata/TestChatRunNoError/call1.golden @@ -11,5 +11,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestContextSubChat/call10.golden b/pkg/tests/testdata/TestContextSubChat/call10.golden index b4433d52..c8c98651 100644 --- a/pkg/tests/testdata/TestContextSubChat/call10.golden +++ b/pkg/tests/testdata/TestContextSubChat/call10.golden @@ -38,5 +38,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestContextSubChat/call2.golden b/pkg/tests/testdata/TestContextSubChat/call2.golden index c3843cb9..a6cf25c6 100644 --- 
a/pkg/tests/testdata/TestContextSubChat/call2.golden +++ b/pkg/tests/testdata/TestContextSubChat/call2.golden @@ -38,5 +38,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestContextSubChat/call3.golden b/pkg/tests/testdata/TestContextSubChat/call3.golden index da02aace..55ad402f 100644 --- a/pkg/tests/testdata/TestContextSubChat/call3.golden +++ b/pkg/tests/testdata/TestContextSubChat/call3.golden @@ -56,5 +56,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestContextSubChat/call5.golden b/pkg/tests/testdata/TestContextSubChat/call5.golden index 25249ec2..2b8cf41e 100644 --- a/pkg/tests/testdata/TestContextSubChat/call5.golden +++ b/pkg/tests/testdata/TestContextSubChat/call5.golden @@ -20,5 +20,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestContextSubChat/call7.golden b/pkg/tests/testdata/TestContextSubChat/call7.golden index 7c1c9a19..b0ef4e39 100644 --- a/pkg/tests/testdata/TestContextSubChat/call7.golden +++ b/pkg/tests/testdata/TestContextSubChat/call7.golden @@ -38,5 +38,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestContextSubChat/call8.golden b/pkg/tests/testdata/TestContextSubChat/call8.golden index e1350cdc..3d0db61b 100644 --- a/pkg/tests/testdata/TestContextSubChat/call8.golden +++ b/pkg/tests/testdata/TestContextSubChat/call8.golden @@ -56,5 +56,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestContextSubChat/step1.golden b/pkg/tests/testdata/TestContextSubChat/step1.golden index 464efb36..5f9a3c6f 100644 --- a/pkg/tests/testdata/TestContextSubChat/step1.golden +++ b/pkg/tests/testdata/TestContextSubChat/step1.golden @@ -128,7 +128,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant Response 1 - from chatbot1" diff --git a/pkg/tests/testdata/TestContextSubChat/step2.golden b/pkg/tests/testdata/TestContextSubChat/step2.golden index d9631971..dfcb2b96 100644 --- a/pkg/tests/testdata/TestContextSubChat/step2.golden +++ b/pkg/tests/testdata/TestContextSubChat/step2.golden @@ -37,7 +37,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant Response 3 - from main chat tool" diff --git a/pkg/tests/testdata/TestContextSubChat/step3.golden b/pkg/tests/testdata/TestContextSubChat/step3.golden index 3c365c54..e50b04a0 100644 --- a/pkg/tests/testdata/TestContextSubChat/step3.golden +++ b/pkg/tests/testdata/TestContextSubChat/step3.golden @@ -37,7 +37,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant Response 3 - from main chat tool" @@ -169,7 +170,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant Response 4 - from chatbot1" diff --git a/pkg/tests/testdata/TestContextSubChat/step4.golden b/pkg/tests/testdata/TestContextSubChat/step4.golden index 40798cf6..5e95d626 100644 --- a/pkg/tests/testdata/TestContextSubChat/step4.golden +++ b/pkg/tests/testdata/TestContextSubChat/step4.golden @@ -55,7 +55,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant Response 6 - from main chat tool resume" diff --git a/pkg/tests/testdata/TestDualSubChat/call2.golden b/pkg/tests/testdata/TestDualSubChat/call2.golden index c3843cb9..a6cf25c6 100644 --- a/pkg/tests/testdata/TestDualSubChat/call2.golden +++ b/pkg/tests/testdata/TestDualSubChat/call2.golden @@ -38,5 +38,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestDualSubChat/call3.golden b/pkg/tests/testdata/TestDualSubChat/call3.golden index 
755dda31..ddcc81c9 100644 --- a/pkg/tests/testdata/TestDualSubChat/call3.golden +++ b/pkg/tests/testdata/TestDualSubChat/call3.golden @@ -38,5 +38,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestDualSubChat/call4.golden b/pkg/tests/testdata/TestDualSubChat/call4.golden index 45b6648b..600e3ba5 100644 --- a/pkg/tests/testdata/TestDualSubChat/call4.golden +++ b/pkg/tests/testdata/TestDualSubChat/call4.golden @@ -56,5 +56,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestDualSubChat/call5.golden b/pkg/tests/testdata/TestDualSubChat/call5.golden index 2aa21e94..54823934 100644 --- a/pkg/tests/testdata/TestDualSubChat/call5.golden +++ b/pkg/tests/testdata/TestDualSubChat/call5.golden @@ -56,5 +56,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestDualSubChat/call6.golden b/pkg/tests/testdata/TestDualSubChat/call6.golden index 6194daed..a9a8a1c4 100644 --- a/pkg/tests/testdata/TestDualSubChat/call6.golden +++ b/pkg/tests/testdata/TestDualSubChat/call6.golden @@ -74,5 +74,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestDualSubChat/step1.golden b/pkg/tests/testdata/TestDualSubChat/step1.golden index 46932939..6a04614b 100644 --- a/pkg/tests/testdata/TestDualSubChat/step1.golden +++ b/pkg/tests/testdata/TestDualSubChat/step1.golden @@ -174,7 +174,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant Response 1 - from chatbot1" @@ -238,7 +239,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistent Response 2 - from chatbot2" diff --git a/pkg/tests/testdata/TestDualSubChat/step2.golden b/pkg/tests/testdata/TestDualSubChat/step2.golden index 2e1511c7..da3e8403 100644 --- a/pkg/tests/testdata/TestDualSubChat/step2.golden +++ b/pkg/tests/testdata/TestDualSubChat/step2.golden @@ -181,7 +181,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistent Response 2 - from chatbot2" diff --git a/pkg/tests/testdata/TestDualSubChat/step3.golden b/pkg/tests/testdata/TestDualSubChat/step3.golden index d21249c3..028f9cd9 100644 --- a/pkg/tests/testdata/TestDualSubChat/step3.golden +++ b/pkg/tests/testdata/TestDualSubChat/step3.golden @@ -199,7 +199,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "Assistant 3" diff --git a/pkg/tests/testdata/TestInput/call1.golden b/pkg/tests/testdata/TestInput/call1.golden index 8f9b629c..a3b78c42 100644 --- a/pkg/tests/testdata/TestInput/call1.golden +++ b/pkg/tests/testdata/TestInput/call1.golden @@ -20,5 +20,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestInput/call2.golden b/pkg/tests/testdata/TestInput/call2.golden index 8da96da8..63c469f4 100644 --- a/pkg/tests/testdata/TestInput/call2.golden +++ b/pkg/tests/testdata/TestInput/call2.golden @@ -38,5 +38,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestInput/step1.golden b/pkg/tests/testdata/TestInput/step1.golden index a617d41a..72e36ecb 100644 --- a/pkg/tests/testdata/TestInput/step1.golden +++ b/pkg/tests/testdata/TestInput/step1.golden @@ -37,7 +37,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "TEST RESULT CALL: 1" diff --git a/pkg/tests/testdata/TestInput/step2.golden b/pkg/tests/testdata/TestInput/step2.golden index e085edb2..73b89d76 100644 --- a/pkg/tests/testdata/TestInput/step2.golden +++ b/pkg/tests/testdata/TestInput/step2.golden @@ -55,7 +55,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "TEST RESULT CALL: 2" diff 
--git a/pkg/tests/testdata/TestOutput/call1.golden b/pkg/tests/testdata/TestOutput/call1.golden index 9430afee..6b1e1362 100644 --- a/pkg/tests/testdata/TestOutput/call1.golden +++ b/pkg/tests/testdata/TestOutput/call1.golden @@ -20,5 +20,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestOutput/call2.golden b/pkg/tests/testdata/TestOutput/call2.golden index 32bb7039..e7e17359 100644 --- a/pkg/tests/testdata/TestOutput/call2.golden +++ b/pkg/tests/testdata/TestOutput/call2.golden @@ -38,5 +38,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestOutput/call3.golden b/pkg/tests/testdata/TestOutput/call3.golden index 01aed5eb..b81d2d26 100644 --- a/pkg/tests/testdata/TestOutput/call3.golden +++ b/pkg/tests/testdata/TestOutput/call3.golden @@ -56,5 +56,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestOutput/step1.golden b/pkg/tests/testdata/TestOutput/step1.golden index 46f1b8e8..3d77ecec 100644 --- a/pkg/tests/testdata/TestOutput/step1.golden +++ b/pkg/tests/testdata/TestOutput/step1.golden @@ -1,6 +1,6 @@ `{ "done": false, - "content": "CHAT: true CONTENT: Response 1 CONTINUATION: true FINISH: false suffix\n", + "content": "CHAT: true CONTENT: Response 1 CONTINUATION: true suffix\n", "toolID": "testdata/TestOutput/test.gpt:", "state": { "continuation": { @@ -37,10 +37,11 @@ ], "usage": {} } - ] + ], + "chat": true } }, - "result": "CHAT: true CONTENT: Response 1 CONTINUATION: true FINISH: false suffix\n" + "result": "CHAT: true CONTENT: Response 1 CONTINUATION: true suffix\n" }, "continuationToolID": "testdata/TestOutput/test.gpt:" } diff --git a/pkg/tests/testdata/TestOutput/step2.golden b/pkg/tests/testdata/TestOutput/step2.golden index d5fd89a0..80baf7e4 100644 --- a/pkg/tests/testdata/TestOutput/step2.golden +++ b/pkg/tests/testdata/TestOutput/step2.golden @@ -1,6 +1,6 @@ `{ "done": false, - "content": "CHAT: true CONTENT: Response 2 CONTINUATION: true FINISH: false suffix\n", + "content": "CHAT: true CONTENT: Response 2 CONTINUATION: true suffix\n", "toolID": "testdata/TestOutput/test.gpt:", "state": { "continuation": { @@ -55,10 +55,11 @@ ], "usage": {} } - ] + ], + "chat": true } }, - "result": "CHAT: true CONTENT: Response 2 CONTINUATION: true FINISH: false suffix\n" + "result": "CHAT: true CONTENT: Response 2 CONTINUATION: true suffix\n" }, "continuationToolID": "testdata/TestOutput/test.gpt:" } diff --git a/pkg/tests/testdata/TestOutput/step3.golden b/pkg/tests/testdata/TestOutput/step3.golden index c4e63adc..b357ebb8 100644 --- a/pkg/tests/testdata/TestOutput/step3.golden +++ b/pkg/tests/testdata/TestOutput/step3.golden @@ -1,6 +1,6 @@ `{ "done": true, - "content": "CHAT FINISH: CHAT: true CONTENT: Chat Done CONTINUATION: false FINISH: true suffix\n", + "content": "CHAT FINISH: CHAT: true CONTENT: Chat Done CONTINUATION: false suffix\n", "toolID": "", "state": null }` diff --git a/pkg/tests/testdata/TestOutput/test.gpt b/pkg/tests/testdata/TestOutput/test.gpt index cc35faa0..9bccc2db 100644 --- a/pkg/tests/testdata/TestOutput/test.gpt +++ b/pkg/tests/testdata/TestOutput/test.gpt @@ -20,7 +20,6 @@ args: chatFinish: chat finish message echo CHAT: ${CHAT} echo CONTENT: ${OUTPUT} echo CONTINUATION: ${CONTINUATION} -echo FINISH: ${CHATFINISH} --- name: suffix diff --git a/pkg/tests/testdata/TestSubChat/call2.golden b/pkg/tests/testdata/TestSubChat/call2.golden index 5fdf4ff8..ac6d5325 100644 --- a/pkg/tests/testdata/TestSubChat/call2.golden +++ 
b/pkg/tests/testdata/TestSubChat/call2.golden @@ -11,5 +11,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestSubChat/call3.golden b/pkg/tests/testdata/TestSubChat/call3.golden index b1b4b3a9..0bf5dbab 100644 --- a/pkg/tests/testdata/TestSubChat/call3.golden +++ b/pkg/tests/testdata/TestSubChat/call3.golden @@ -29,5 +29,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestSysContext/call1.golden b/pkg/tests/testdata/TestSysContext/call1.golden index c315d381..c92b704f 100644 --- a/pkg/tests/testdata/TestSysContext/call1.golden +++ b/pkg/tests/testdata/TestSysContext/call1.golden @@ -23,7 +23,7 @@ "role": "system", "content": [ { - "text": "{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"agentGroup\":[{\"named\":\"iAmSuperman\",\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/file.gpt:I am Superman Agent\":{\"name\":\"I am Superman Agent\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm super\",\"id\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\",\"localTools\":{\"i am superman agent\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"},\"source\":{\"location\":\"testdata/TestSysContext/file.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"./file.gpt\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"./file.gpt\":[{\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho \\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e ${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" + "text": 
"{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"currentAgent\":{},\"agentGroup\":[{\"named\":\"iAmSuperman\",\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/file.gpt:I am Superman Agent\":{\"name\":\"I am Superman Agent\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm super\",\"id\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\",\"localTools\":{\"i am superman agent\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"},\"source\":{\"location\":\"testdata/TestSysContext/file.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"./file.gpt\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"./file.gpt\":[{\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho \\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e ${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" } ], "usage": {} @@ -37,5 +37,6 @@ ], "usage": {} } - ] + ], + "chat": true }` diff --git a/pkg/tests/testdata/TestSysContext/context.json b/pkg/tests/testdata/TestSysContext/context.json index c5608ec4..b6d62218 100644 --- a/pkg/tests/testdata/TestSysContext/context.json +++ b/pkg/tests/testdata/TestSysContext/context.json @@ -1 +1 @@ -{"call":{"id":"","tool":{"name":"sys.context","description":"Retrieves the current internal GPTScript tool call context information","modelName":"gpt-4o","internalPrompt":null,"arguments":{"type":"object"},"instructions":"#!sys.context","id":"sys.context","source":{}},"agentGroup":[{"named":"iAmSuperman","reference":"./file.gpt","toolID":"testdata/TestSysContext/file.gpt:I am Superman 
Agent"}],"inputContext":null,"toolCategory":"context","toolName":"sys.context"},"program":{"name":"testdata/TestSysContext/test.gpt","entryToolId":"testdata/TestSysContext/test.gpt:","toolSet":{"sys.context":{"name":"sys.context","description":"Retrieves the current internal GPTScript tool call context information","modelName":"gpt-4o","internalPrompt":null,"arguments":{"type":"object"},"instructions":"#!sys.context","id":"sys.context","source":{}},"testdata/TestSysContext/file.gpt:I am Superman Agent":{"name":"I am Superman Agent","modelName":"gpt-4o","internalPrompt":null,"instructions":"I'm super","id":"testdata/TestSysContext/file.gpt:I am Superman Agent","localTools":{"i am superman agent":"testdata/TestSysContext/file.gpt:I am Superman Agent"},"source":{"location":"testdata/TestSysContext/file.gpt","lineNo":1},"workingDir":"testdata/TestSysContext"},"testdata/TestSysContext/test.gpt:":{"modelName":"gpt-4o","chat":true,"internalPrompt":null,"context":["agents"],"agents":["./file.gpt"],"instructions":"Tool body","id":"testdata/TestSysContext/test.gpt:","toolMapping":{"./file.gpt":[{"reference":"./file.gpt","toolID":"testdata/TestSysContext/file.gpt:I am Superman Agent"}],"agents":[{"reference":"agents","toolID":"testdata/TestSysContext/test.gpt:agents"}]},"localTools":{"":"testdata/TestSysContext/test.gpt:","agents":"testdata/TestSysContext/test.gpt:agents"},"source":{"location":"testdata/TestSysContext/test.gpt","lineNo":1},"workingDir":"testdata/TestSysContext"},"testdata/TestSysContext/test.gpt:agents":{"name":"agents","modelName":"gpt-4o","internalPrompt":null,"context":["sys.context"],"instructions":"#!/bin/bash\n\necho \"${GPTSCRIPT_CONTEXT}\"\necho \"${GPTSCRIPT_CONTEXT}\" \u003e ${GPTSCRIPT_TOOL_DIR}/context.json","id":"testdata/TestSysContext/test.gpt:agents","toolMapping":{"sys.context":[{"reference":"sys.context","toolID":"sys.context"}]},"localTools":{"":"testdata/TestSysContext/test.gpt:","agents":"testdata/TestSysContext/test.gpt:agents"},"source":{"location":"testdata/TestSysContext/test.gpt","lineNo":8},"workingDir":"testdata/TestSysContext"}}}} +{"call":{"id":"","tool":{"name":"sys.context","description":"Retrieves the current internal GPTScript tool call context information","modelName":"gpt-4o","internalPrompt":null,"arguments":{"type":"object"},"instructions":"#!sys.context","id":"sys.context","source":{}},"currentAgent":{},"agentGroup":[{"named":"iAmSuperman","reference":"./file.gpt","toolID":"testdata/TestSysContext/file.gpt:I am Superman Agent"}],"inputContext":null,"toolCategory":"context","toolName":"sys.context"},"program":{"name":"testdata/TestSysContext/test.gpt","entryToolId":"testdata/TestSysContext/test.gpt:","toolSet":{"sys.context":{"name":"sys.context","description":"Retrieves the current internal GPTScript tool call context information","modelName":"gpt-4o","internalPrompt":null,"arguments":{"type":"object"},"instructions":"#!sys.context","id":"sys.context","source":{}},"testdata/TestSysContext/file.gpt:I am Superman Agent":{"name":"I am Superman Agent","modelName":"gpt-4o","internalPrompt":null,"instructions":"I'm super","id":"testdata/TestSysContext/file.gpt:I am Superman Agent","localTools":{"i am superman agent":"testdata/TestSysContext/file.gpt:I am Superman Agent"},"source":{"location":"testdata/TestSysContext/file.gpt","lineNo":1},"workingDir":"testdata/TestSysContext"},"testdata/TestSysContext/test.gpt:":{"modelName":"gpt-4o","chat":true,"internalPrompt":null,"context":["agents"],"agents":["./file.gpt"],"instructions":"Tool 
body","id":"testdata/TestSysContext/test.gpt:","toolMapping":{"./file.gpt":[{"reference":"./file.gpt","toolID":"testdata/TestSysContext/file.gpt:I am Superman Agent"}],"agents":[{"reference":"agents","toolID":"testdata/TestSysContext/test.gpt:agents"}]},"localTools":{"":"testdata/TestSysContext/test.gpt:","agents":"testdata/TestSysContext/test.gpt:agents"},"source":{"location":"testdata/TestSysContext/test.gpt","lineNo":1},"workingDir":"testdata/TestSysContext"},"testdata/TestSysContext/test.gpt:agents":{"name":"agents","modelName":"gpt-4o","internalPrompt":null,"context":["sys.context"],"instructions":"#!/bin/bash\n\necho \"${GPTSCRIPT_CONTEXT}\"\necho \"${GPTSCRIPT_CONTEXT}\" \u003e ${GPTSCRIPT_TOOL_DIR}/context.json","id":"testdata/TestSysContext/test.gpt:agents","toolMapping":{"sys.context":[{"reference":"sys.context","toolID":"sys.context"}]},"localTools":{"":"testdata/TestSysContext/test.gpt:","agents":"testdata/TestSysContext/test.gpt:agents"},"source":{"location":"testdata/TestSysContext/test.gpt","lineNo":8},"workingDir":"testdata/TestSysContext"}}}} diff --git a/pkg/tests/testdata/TestSysContext/step1.golden b/pkg/tests/testdata/TestSysContext/step1.golden index 26b75508..fa773fda 100644 --- a/pkg/tests/testdata/TestSysContext/step1.golden +++ b/pkg/tests/testdata/TestSysContext/step1.golden @@ -31,7 +31,7 @@ "role": "system", "content": [ { - "text": "{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"agentGroup\":[{\"named\":\"iAmSuperman\",\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/file.gpt:I am Superman Agent\":{\"name\":\"I am Superman Agent\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm super\",\"id\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\",\"localTools\":{\"i am superman agent\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"},\"source\":{\"location\":\"testdata/TestSysContext/file.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"./file.gpt\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"./file.gpt\":[{\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman 
Agent\"}],\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho \\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e ${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" + "text": "{\"call\":{\"id\":\"\",\"tool\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"currentAgent\":{},\"agentGroup\":[{\"named\":\"iAmSuperman\",\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"inputContext\":null,\"toolCategory\":\"context\",\"toolName\":\"sys.context\"},\"program\":{\"name\":\"testdata/TestSysContext/test.gpt\",\"entryToolId\":\"testdata/TestSysContext/test.gpt:\",\"toolSet\":{\"sys.context\":{\"name\":\"sys.context\",\"description\":\"Retrieves the current internal GPTScript tool call context information\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"arguments\":{\"type\":\"object\"},\"instructions\":\"#!sys.context\",\"id\":\"sys.context\",\"source\":{}},\"testdata/TestSysContext/file.gpt:I am Superman Agent\":{\"name\":\"I am Superman Agent\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"instructions\":\"I'm super\",\"id\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\",\"localTools\":{\"i am superman agent\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"},\"source\":{\"location\":\"testdata/TestSysContext/file.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:\":{\"modelName\":\"gpt-4o\",\"chat\":true,\"internalPrompt\":null,\"context\":[\"agents\"],\"agents\":[\"./file.gpt\"],\"instructions\":\"Tool body\",\"id\":\"testdata/TestSysContext/test.gpt:\",\"toolMapping\":{\"./file.gpt\":[{\"reference\":\"./file.gpt\",\"toolID\":\"testdata/TestSysContext/file.gpt:I am Superman Agent\"}],\"agents\":[{\"reference\":\"agents\",\"toolID\":\"testdata/TestSysContext/test.gpt:agents\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":1},\"workingDir\":\"testdata/TestSysContext\"},\"testdata/TestSysContext/test.gpt:agents\":{\"name\":\"agents\",\"modelName\":\"gpt-4o\",\"internalPrompt\":null,\"context\":[\"sys.context\"],\"instructions\":\"#!/bin/bash\\n\\necho \\\"${GPTSCRIPT_CONTEXT}\\\"\\necho \\\"${GPTSCRIPT_CONTEXT}\\\" \\u003e 
${GPTSCRIPT_TOOL_DIR}/context.json\",\"id\":\"testdata/TestSysContext/test.gpt:agents\",\"toolMapping\":{\"sys.context\":[{\"reference\":\"sys.context\",\"toolID\":\"sys.context\"}]},\"localTools\":{\"\":\"testdata/TestSysContext/test.gpt:\",\"agents\":\"testdata/TestSysContext/test.gpt:agents\"},\"source\":{\"location\":\"testdata/TestSysContext/test.gpt\",\"lineNo\":8},\"workingDir\":\"testdata/TestSysContext\"}}}}\n\nTool body" } ], "usage": {} @@ -54,7 +54,8 @@ ], "usage": {} } - ] + ], + "chat": true } }, "result": "TEST RESULT CALL: 1" diff --git a/pkg/types/completion.go b/pkg/types/completion.go index 7665a51f..dd70ad50 100644 --- a/pkg/types/completion.go +++ b/pkg/types/completion.go @@ -14,6 +14,7 @@ type CompletionRequest struct { Tools []CompletionTool `json:"tools,omitempty"` Messages []CompletionMessage `json:"messages,omitempty"` MaxTokens int `json:"maxTokens,omitempty"` + Chat bool `json:"chat,omitempty"` Temperature *float32 `json:"temperature,omitempty"` JSONResponse bool `json:"jsonResponse,omitempty"` Cache *bool `json:"cache,omitempty"` diff --git a/pkg/types/toolstring.go b/pkg/types/toolstring.go index ede3401e..64f53638 100644 --- a/pkg/types/toolstring.go +++ b/pkg/types/toolstring.go @@ -74,7 +74,7 @@ func ToSysDisplayString(id string, args map[string]string) (string, error) { return fmt.Sprintf("Removing `%s`", args["location"]), nil case "sys.write": return fmt.Sprintf("Writing `%s`", args["filename"]), nil - case "sys.context", "sys.stat", "sys.getenv", "sys.abort", "sys.chat.finish", "sys.chat.history", "sys.echo", "sys.prompt", "sys.time.now": + case "sys.context", "sys.stat", "sys.getenv", "sys.abort", "sys.chat.current", "sys.chat.finish", "sys.chat.history", "sys.echo", "sys.prompt", "sys.time.now": return "", nil default: return "", fmt.Errorf("unknown tool for display string: %s", id) From 1c1888dce9141ec9940cce30bf473a502fad3c1e Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Wed, 26 Jun 2024 20:38:27 -0400 Subject: [PATCH 10/24] enhance: plumb credential context into run/eval requests Expose the `credentialOverride` field in the request payload for the run/eval sdkserver endpoints. This will enable credential override support to be implemented by the SDKs. Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/sdkserver/routes.go | 3 ++- pkg/sdkserver/types.go | 17 +++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 0380cff4..176552b1 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -200,7 +200,8 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { CredentialContext: reqObject.CredentialContext, Runner: runner.Options{ // Set the monitor factory so that we can get events from the server. 
- MonitorFactory: NewSessionFactory(s.events), + MonitorFactory: NewSessionFactory(s.events), + CredentialOverride: reqObject.CredentialOverride, }, } diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 0025539c..e6325956 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -52,14 +52,15 @@ type toolOrFileRequest struct { cacheOptions `json:",inline"` openAIOptions `json:",inline"` - ToolDefs toolDefs `json:"toolDefs,inline"` - SubTool string `json:"subTool"` - Input string `json:"input"` - ChatState string `json:"chatState"` - Workspace string `json:"workspace"` - Env []string `json:"env"` - CredentialContext string `json:"credentialContext"` - Confirm bool `json:"confirm"` + ToolDefs toolDefs `json:"toolDefs,inline"` + SubTool string `json:"subTool"` + Input string `json:"input"` + ChatState string `json:"chatState"` + Workspace string `json:"workspace"` + Env []string `json:"env"` + CredentialContext string `json:"credentialContext"` + CredentialOverride string `json:"credentialOverride"` + Confirm bool `json:"confirm"` } type content struct { From 7d2aeb3abdd086505d0a51962295c937cba48606 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 26 Jun 2024 19:48:40 -0700 Subject: [PATCH 11/24] bug: go back to old behavior of dropping {} for tools This is probably the wrong thing to do. We need to differentiate between empty args and no args. If a tool has no params defined, then {} should be dropped. Right now this logic is in the wrong spot to do that, so we just go back to the old behavior of always dropping {}, which will cause a problem for tools that expect an empty object. --- go.mod | 2 +- go.sum | 4 ++-- pkg/openai/client.go | 3 +++ pkg/system/prompt.go | 2 +- pkg/tests/runner_test.go | 4 ++-- pkg/tests/testdata/TestAgents/call1.golden | 4 ++-- pkg/tests/testdata/TestAgents/call2.golden | 2 +- pkg/tests/testdata/TestAgents/call3.golden | 4 ++-- pkg/tests/testdata/TestAgents/step1.golden | 10 +++++----- pkg/tests/testdata/TestAsterick/call1.golden | 4 ++-- pkg/tests/testdata/TestContextSubChat/call1.golden | 2 +- pkg/tests/testdata/TestContextSubChat/call4.golden | 2 +- pkg/tests/testdata/TestContextSubChat/call6.golden | 2 +- pkg/tests/testdata/TestContextSubChat/call9.golden | 2 +- pkg/tests/testdata/TestContextSubChat/step1.golden | 2 +- pkg/tests/testdata/TestContextSubChat/step3.golden | 2 +- pkg/tests/testdata/TestDualSubChat/call1.golden | 4 ++-- pkg/tests/testdata/TestDualSubChat/call7.golden | 4 ++-- pkg/tests/testdata/TestDualSubChat/step1.golden | 4 ++-- pkg/tests/testdata/TestDualSubChat/step2.golden | 4 ++-- pkg/tests/testdata/TestDualSubChat/step3.golden | 4 ++-- pkg/tests/testdata/TestExport/call1.golden | 6 +++--- pkg/tests/testdata/TestExport/call3.golden | 6 +++--- pkg/tests/testdata/TestExportContext/call1.golden | 2 +- pkg/tests/testdata/TestSubChat/call1.golden | 2 +- pkg/tests/testdata/TestSysContext/call1.golden | 2 +- pkg/tests/testdata/TestSysContext/step1.golden | 2 +- pkg/tests/testdata/TestToolAs/call1.golden | 4 ++-- 28 files changed, 49 insertions(+), 46 deletions(-) diff --git a/go.mod b/go.mod index 1609ae4c..868dc060 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d - github.com/gptscript-ai/tui v0.0.0-20240627001757-8b452fa47eb5 + github.com/gptscript-ai/tui
v0.0.0-20240627044440-d416df63c10d github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 diff --git a/go.sum b/go.sum index c13d872a..8a6d9dc8 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6A github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= github.com/gptscript-ai/go-gptscript v0.0.0-20240625134437-4b83849794cc h1:ABV7VAK65YBkqL7VlNp5ryVXnRqkKQ+U/NZfUO3ypqA= github.com/gptscript-ai/go-gptscript v0.0.0-20240625134437-4b83849794cc/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240627001757-8b452fa47eb5 h1:knDhTTJNqaZB1XMudXJuVVnTqj9USrXzNfsl1nTqKXA= -github.com/gptscript-ai/tui v0.0.0-20240627001757-8b452fa47eb5/go.mod h1:NwFdBDmGQvjLFFDnSRBRakkhw0MIO1sSdRnWNk4cCQ0= +github.com/gptscript-ai/tui v0.0.0-20240627044440-d416df63c10d h1:hbJ5rkwMDDntqbvHMbsEoP8Nsa5nqTOzF+ktkw3uDQQ= +github.com/gptscript-ai/tui v0.0.0-20240627044440-d416df63c10d/go.mod h1:NwFdBDmGQvjLFFDnSRBRakkhw0MIO1sSdRnWNk4cCQ0= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= diff --git a/pkg/openai/client.go b/pkg/openai/client.go index 897fb880..4ade654a 100644 --- a/pkg/openai/client.go +++ b/pkg/openai/client.go @@ -276,6 +276,9 @@ func toMessages(request types.CompletionRequest, compat bool) (result []openai.C } if len(chatMessage.MultiContent) == 1 && chatMessage.MultiContent[0].Type == openai.ChatMessagePartTypeText { + if !request.Chat && strings.TrimSpace(chatMessage.MultiContent[0].Text) == "{}" { + continue + } chatMessage.Content = chatMessage.MultiContent[0].Text chatMessage.MultiContent = nil diff --git a/pkg/system/prompt.go b/pkg/system/prompt.go index 98b2535a..6b1815fd 100644 --- a/pkg/system/prompt.go +++ b/pkg/system/prompt.go @@ -24,7 +24,7 @@ You don't move to the next step until you have a result. // DefaultPromptParameter is used as the key in a json map to indication that we really wanted // to just send pure text but the interface required JSON (as that is the fundamental interface of tools in OpenAI) -var DefaultPromptParameter = "prompt" +var DefaultPromptParameter = "defaultPromptParameter" var DefaultToolSchema = openapi3.Schema{ Type: &openapi3.Types{"object"}, diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index deced81c..6efe4a11 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -322,7 +322,7 @@ func TestSubChat(t *testing.T) { "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -447,7 +447,7 @@ func TestSubChat(t *testing.T) { "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. 
This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestAgents/call1.golden b/pkg/tests/testdata/TestAgents/call1.golden index a1a0dcaa..d3c4a86d 100644 --- a/pkg/tests/testdata/TestAgents/call1.golden +++ b/pkg/tests/testdata/TestAgents/call1.golden @@ -8,7 +8,7 @@ "name": "agent1", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -23,7 +23,7 @@ "name": "agent2", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestAgents/call2.golden b/pkg/tests/testdata/TestAgents/call2.golden index 6b59830a..950ad2ea 100644 --- a/pkg/tests/testdata/TestAgents/call2.golden +++ b/pkg/tests/testdata/TestAgents/call2.golden @@ -8,7 +8,7 @@ "name": "agent2", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestAgents/call3.golden b/pkg/tests/testdata/TestAgents/call3.golden index 29d5462e..f9b45a1b 100644 --- a/pkg/tests/testdata/TestAgents/call3.golden +++ b/pkg/tests/testdata/TestAgents/call3.golden @@ -8,7 +8,7 @@ "name": "agent1", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -23,7 +23,7 @@ "name": "agent3", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestAgents/step1.golden b/pkg/tests/testdata/TestAgents/step1.golden index a259bf0f..3047e695 100644 --- a/pkg/tests/testdata/TestAgents/step1.golden +++ b/pkg/tests/testdata/TestAgents/step1.golden @@ -16,7 +16,7 @@ "name": "agent1", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -31,7 +31,7 @@ "name": "agent2", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -111,7 +111,7 @@ "name": "agent2", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -182,7 +182,7 @@ "name": "agent1", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -197,7 +197,7 @@ "name": "agent3", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. 
This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestAsterick/call1.golden b/pkg/tests/testdata/TestAsterick/call1.golden index d6d56df9..3f2fa0b1 100644 --- a/pkg/tests/testdata/TestAsterick/call1.golden +++ b/pkg/tests/testdata/TestAsterick/call1.golden @@ -7,7 +7,7 @@ "name": "afoo", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } @@ -22,7 +22,7 @@ "name": "a", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestContextSubChat/call1.golden b/pkg/tests/testdata/TestContextSubChat/call1.golden index 09da7a95..225401db 100644 --- a/pkg/tests/testdata/TestContextSubChat/call1.golden +++ b/pkg/tests/testdata/TestContextSubChat/call1.golden @@ -7,7 +7,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestContextSubChat/call4.golden b/pkg/tests/testdata/TestContextSubChat/call4.golden index abe24599..e1fb91ea 100644 --- a/pkg/tests/testdata/TestContextSubChat/call4.golden +++ b/pkg/tests/testdata/TestContextSubChat/call4.golden @@ -7,7 +7,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestContextSubChat/call6.golden b/pkg/tests/testdata/TestContextSubChat/call6.golden index 09da7a95..225401db 100644 --- a/pkg/tests/testdata/TestContextSubChat/call6.golden +++ b/pkg/tests/testdata/TestContextSubChat/call6.golden @@ -7,7 +7,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestContextSubChat/call9.golden b/pkg/tests/testdata/TestContextSubChat/call9.golden index 37f67e56..33768f26 100644 --- a/pkg/tests/testdata/TestContextSubChat/call9.golden +++ b/pkg/tests/testdata/TestContextSubChat/call9.golden @@ -7,7 +7,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestContextSubChat/step1.golden b/pkg/tests/testdata/TestContextSubChat/step1.golden index 5f9a3c6f..2ffb138e 100644 --- a/pkg/tests/testdata/TestContextSubChat/step1.golden +++ b/pkg/tests/testdata/TestContextSubChat/step1.golden @@ -15,7 +15,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. 
This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestContextSubChat/step3.golden b/pkg/tests/testdata/TestContextSubChat/step3.golden index e50b04a0..0ccb188b 100644 --- a/pkg/tests/testdata/TestContextSubChat/step3.golden +++ b/pkg/tests/testdata/TestContextSubChat/step3.golden @@ -57,7 +57,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestDualSubChat/call1.golden b/pkg/tests/testdata/TestDualSubChat/call1.golden index 267690fe..2baf798a 100644 --- a/pkg/tests/testdata/TestDualSubChat/call1.golden +++ b/pkg/tests/testdata/TestDualSubChat/call1.golden @@ -7,7 +7,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -22,7 +22,7 @@ "name": "chatbot2", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestDualSubChat/call7.golden b/pkg/tests/testdata/TestDualSubChat/call7.golden index 5f1d227a..ff19a0e8 100644 --- a/pkg/tests/testdata/TestDualSubChat/call7.golden +++ b/pkg/tests/testdata/TestDualSubChat/call7.golden @@ -7,7 +7,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -22,7 +22,7 @@ "name": "chatbot2", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestDualSubChat/step1.golden b/pkg/tests/testdata/TestDualSubChat/step1.golden index 6a04614b..f29dfd60 100644 --- a/pkg/tests/testdata/TestDualSubChat/step1.golden +++ b/pkg/tests/testdata/TestDualSubChat/step1.golden @@ -15,7 +15,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -30,7 +30,7 @@ "name": "chatbot2", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestDualSubChat/step2.golden b/pkg/tests/testdata/TestDualSubChat/step2.golden index da3e8403..830a8e7c 100644 --- a/pkg/tests/testdata/TestDualSubChat/step2.golden +++ b/pkg/tests/testdata/TestDualSubChat/step2.golden @@ -15,7 +15,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -30,7 +30,7 @@ "name": "chatbot2", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. 
This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestDualSubChat/step3.golden b/pkg/tests/testdata/TestDualSubChat/step3.golden index 028f9cd9..4f3b415a 100644 --- a/pkg/tests/testdata/TestDualSubChat/step3.golden +++ b/pkg/tests/testdata/TestDualSubChat/step3.golden @@ -15,7 +15,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } @@ -30,7 +30,7 @@ "name": "chatbot2", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestExport/call1.golden b/pkg/tests/testdata/TestExport/call1.golden index 8b360d8a..9f8b650d 100644 --- a/pkg/tests/testdata/TestExport/call1.golden +++ b/pkg/tests/testdata/TestExport/call1.golden @@ -7,7 +7,7 @@ "name": "frommain", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } @@ -22,7 +22,7 @@ "name": "parentLocal", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } @@ -37,7 +37,7 @@ "name": "transient", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestExport/call3.golden b/pkg/tests/testdata/TestExport/call3.golden index a653821a..ccf7e980 100644 --- a/pkg/tests/testdata/TestExport/call3.golden +++ b/pkg/tests/testdata/TestExport/call3.golden @@ -7,7 +7,7 @@ "name": "frommain", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } @@ -22,7 +22,7 @@ "name": "parentLocal", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } @@ -37,7 +37,7 @@ "name": "transient", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestExportContext/call1.golden b/pkg/tests/testdata/TestExportContext/call1.golden index 2e1d2421..bec15478 100644 --- a/pkg/tests/testdata/TestExportContext/call1.golden +++ b/pkg/tests/testdata/TestExportContext/call1.golden @@ -7,7 +7,7 @@ "name": "subtool", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestSubChat/call1.golden b/pkg/tests/testdata/TestSubChat/call1.golden index 53104a4e..0d906395 100644 --- a/pkg/tests/testdata/TestSubChat/call1.golden +++ b/pkg/tests/testdata/TestSubChat/call1.golden @@ -7,7 +7,7 @@ "name": "chatbot", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the assistant. 
This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestSysContext/call1.golden b/pkg/tests/testdata/TestSysContext/call1.golden index c92b704f..4c9c51d0 100644 --- a/pkg/tests/testdata/TestSysContext/call1.golden +++ b/pkg/tests/testdata/TestSysContext/call1.golden @@ -8,7 +8,7 @@ "name": "iAmSuperman", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestSysContext/step1.golden b/pkg/tests/testdata/TestSysContext/step1.golden index fa773fda..426e5991 100644 --- a/pkg/tests/testdata/TestSysContext/step1.golden +++ b/pkg/tests/testdata/TestSysContext/step1.golden @@ -16,7 +16,7 @@ "name": "iAmSuperman", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } diff --git a/pkg/tests/testdata/TestToolAs/call1.golden b/pkg/tests/testdata/TestToolAs/call1.golden index 519f07b8..55796fea 100644 --- a/pkg/tests/testdata/TestToolAs/call1.golden +++ b/pkg/tests/testdata/TestToolAs/call1.golden @@ -7,7 +7,7 @@ "name": "local", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } @@ -22,7 +22,7 @@ "name": "remote", "parameters": { "properties": { - "prompt": { + "defaultPromptParameter": { "description": "Prompt to send to the tool. This may be an instruction or question.", "type": "string" } From bcd9f33a3b4bdea0f4863db3ab355b92fda1061b Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 27 Jun 2024 10:12:21 -0400 Subject: [PATCH 12/24] fix: don't prompt for creds for local models (#567) Signed-off-by: Grant Linville --- pkg/credentials/credential.go | 14 ++++++++------ pkg/credentials/helper.go | 14 ++++++++++++-- pkg/remote/remote.go | 7 ++++++- pkg/types/tool.go | 9 +++++++++ 4 files changed, 35 insertions(+), 9 deletions(-) diff --git a/pkg/credentials/credential.go b/pkg/credentials/credential.go index fc247b38..c81adf2b 100644 --- a/pkg/credentials/credential.go +++ b/pkg/credentials/credential.go @@ -49,13 +49,15 @@ func (c Credential) toDockerAuthConfig() (types.AuthConfig, error) { func credentialFromDockerAuthConfig(authCfg types.AuthConfig) (Credential, error) { var cred Credential - if err := json.Unmarshal([]byte(authCfg.Password), &cred); err != nil || len(cred.Env) == 0 { - // Legacy: try unmarshalling into just an env map - var env map[string]string - if err := json.Unmarshal([]byte(authCfg.Password), &env); err != nil { - return Credential{}, err + if authCfg.Password != "" { + if err := json.Unmarshal([]byte(authCfg.Password), &cred); err != nil || len(cred.Env) == 0 { + // Legacy: try unmarshalling into just an env map + var env map[string]string + if err := json.Unmarshal([]byte(authCfg.Password), &env); err != nil { + return Credential{}, err + } + cred.Env = env } - cred.Env = env } // We used to hardcode the username as "gptscript" before CredentialType was introduced, so diff --git a/pkg/credentials/helper.go b/pkg/credentials/helper.go index 9776d8b9..e5cd34f6 100644 --- a/pkg/credentials/helper.go +++ b/pkg/credentials/helper.go @@ -2,6 +2,7 @@ package credentials import ( "errors" + "net/url" "regexp" "strings" @@ -71,9 +72,18 @@ func (h *HelperStore) GetAll() (map[string]types.AuthConfig, error) { contextPieces := 
strings.Split(ctx, ":") if len(contextPieces) > 1 { possiblePortNumber := contextPieces[len(contextPieces)-1] - if regexp.MustCompile(`\d+$`).MatchString(possiblePortNumber) { + if regexp.MustCompile(`^\d+$`).MatchString(possiblePortNumber) { // port number confirmed - toolName = toolName + ":" + possiblePortNumber + toolURL, err := url.Parse(toolName) + if err != nil { + return nil, err + } + + // Save the path so we can put it back after removing it. + path := toolURL.Path + toolURL.Path = "" + + toolName = toolURL.String() + ":" + possiblePortNumber + path ctx = strings.TrimSuffix(ctx, ":"+possiblePortNumber) } } diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 7d7cff6e..6cb3644e 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -108,7 +108,7 @@ func (c *Client) clientFromURL(ctx context.Context, apiURL string) (*openai.Clie env := "GPTSCRIPT_PROVIDER_" + env2.ToEnvLike(parsed.Hostname()) + "_API_KEY" key := os.Getenv(env) - if key == "" { + if key == "" && !isLocalhost(apiURL) { var err error key, err = c.retrieveAPIKey(ctx, env, apiURL) if err != nil { @@ -179,3 +179,8 @@ func (c *Client) load(ctx context.Context, toolName string) (*openai.Client, err func (c *Client) retrieveAPIKey(ctx context.Context, env, url string) (string, error) { return prompt.GetModelProviderCredential(ctx, c.credStore, url, env, fmt.Sprintf("Please provide your API key for %s", url), append(gcontext.GetEnv(ctx), c.envs...)) } + +func isLocalhost(url string) bool { + return strings.HasPrefix(url, "http://localhost") || strings.HasPrefix(url, "http://127.0.0.1") || + strings.HasPrefix(url, "https://localhost") || strings.HasPrefix(url, "https://127.0.0.1") +} diff --git a/pkg/types/tool.go b/pkg/types/tool.go index b0af5183..e7af756f 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "path/filepath" + "regexp" "slices" "sort" "strings" @@ -283,6 +284,10 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str fields = fields[2:] } + if alias != "" && !isAlphaNumeric(alias) { + return "", "", nil, fmt.Errorf("credential alias must be alphanumeric") + } + if len(fields) == 0 { // Nothing left, so just return return originalName, alias, nil, nil } @@ -780,3 +785,7 @@ func FirstSet[T comparable](in ...T) (result T) { } return } + +func isAlphaNumeric(s string) bool { + return regexp.MustCompile(`^[a-zA-Z0-9_.]+$`).MatchString(s) +} From 51fd2b323c27ed5425edd32accd0d366214db0dd Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 27 Jun 2024 10:54:31 -0400 Subject: [PATCH 13/24] fix: only check cred helper repo once per 24h (#572) Signed-off-by: Grant Linville Co-authored-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- pkg/credentials/util.go | 9 +++++---- pkg/repos/get.go | 20 ++++++++++++++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/pkg/credentials/util.go b/pkg/credentials/util.go index cf3291d7..f6165308 100644 --- a/pkg/credentials/util.go +++ b/pkg/credentials/util.go @@ -5,13 +5,14 @@ import ( ) type CredentialHelperDirs struct { - RevisionFile, BinDir, RepoDir string + RevisionFile, LastCheckedFile, BinDir, RepoDir string } func GetCredentialHelperDirs(cacheDir string) CredentialHelperDirs { return CredentialHelperDirs{ - RevisionFile: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "revision"), - BinDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "bin"), - RepoDir: filepath.Join(cacheDir, "repos", 
"gptscript-credential-helpers", "repo"), + RevisionFile: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "revision"), + LastCheckedFile: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "last-checked"), + BinDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "bin"), + RepoDir: filepath.Join(cacheDir, "repos", "gptscript-credential-helpers", "repo"), } } diff --git a/pkg/repos/get.go b/pkg/repos/get.go index 68b2e427..b3d5f609 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -10,6 +10,7 @@ import ( "path/filepath" "strings" "sync" + "time" "github.com/BurntSushi/locker" "github.com/gptscript-ai/gptscript/pkg/config" @@ -112,11 +113,30 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co locker.Lock("gptscript-credential-helpers") defer locker.Unlock("gptscript-credential-helpers") + // Load the last-checked file to make sure we haven't checked the repo in the last 24 hours. + now := time.Now() + lastChecked, err := os.ReadFile(m.credHelperDirs.LastCheckedFile) + if err == nil { + if t, err := time.Parse(time.RFC3339, strings.TrimSpace(string(lastChecked))); err == nil && now.Sub(t) < 24*time.Hour { + // Make sure the binary still exists, and if it does, return. + if _, err := os.Stat(filepath.Join(m.credHelperDirs.BinDir, "gptscript-credential-"+helperName+suffix)); err == nil { + log.Debugf("Credential helper %s up-to-date as of %v, checking for updates after %v", helperName, t, t.Add(24*time.Hour)) + return nil + } + } + } + + // Load the credential helpers repo information. _, repo, _, err := github.Load(ctx, nil, credentialHelpersRepo) if err != nil { return err } + // Update the last-checked file. + if err := os.WriteFile(m.credHelperDirs.LastCheckedFile, []byte(now.Format(time.RFC3339)), 0644); err != nil { + return err + } + var needsBuild bool // Check the last revision shasum and see if it is different from the current one. From 9e5f8f458366536ee4d1e8ec4bcbd0b5aa7cb9ca Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Thu, 27 Jun 2024 15:21:16 -0400 Subject: [PATCH 14/24] fix: make the WorkspacePath take absolute precedence for setting env var Before this change, the environment variables would take precedence for setting the GPTSCRIPT_WORKSPACE_DIR. This is incorrect because gptscript should be solely responsible for setting these environment variables. Putting these environment variables at the end of the list ensures that they take precedence. 
Co-authored-by: tylerslaton Signed-off-by: Donnie Adams --- pkg/gptscript/gptscript.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index f8264a44..7a11e7dd 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -175,10 +175,10 @@ func (g *GPTScript) getEnv(env []string) ([]string, error) { if err := os.MkdirAll(g.WorkspacePath, 0700); err != nil { return nil, err } - return slices.Concat(g.ExtraEnv, []string{ + return slices.Concat(g.ExtraEnv, env, []string{ fmt.Sprintf("GPTSCRIPT_WORKSPACE_DIR=%s", g.WorkspacePath), fmt.Sprintf("GPTSCRIPT_WORKSPACE_ID=%s", hash.ID(g.WorkspacePath)), - }, env), nil + }), nil } func (g *GPTScript) Chat(ctx context.Context, prevState runner.ChatState, prg types.Program, envs []string, input string) (runner.ChatResponse, error) { From 549082a6cfed5783e1d4091b608550e21f6ce00e Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Thu, 27 Jun 2024 17:32:08 -0400 Subject: [PATCH 15/24] fix: remove requirement that cred aliases be alphanumeric (#580) Signed-off-by: Grant Linville --- pkg/types/tool.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/pkg/types/tool.go b/pkg/types/tool.go index e7af756f..b0af5183 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "path/filepath" - "regexp" "slices" "sort" "strings" @@ -284,10 +283,6 @@ func ParseCredentialArgs(toolName string, input string) (string, string, map[str fields = fields[2:] } - if alias != "" && !isAlphaNumeric(alias) { - return "", "", nil, fmt.Errorf("credential alias must be alphanumeric") - } - if len(fields) == 0 { // Nothing left, so just return return originalName, alias, nil, nil } @@ -785,7 +780,3 @@ func FirstSet[T comparable](in ...T) (result T) { } return } - -func isAlphaNumeric(s string) bool { - return regexp.MustCompile(`^[a-zA-Z0-9_.]+$`).MatchString(s) -} From 8a14610e289ee7a3fe62daf80d6757c12882d850 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 27 Jun 2024 14:56:46 -0700 Subject: [PATCH 16/24] bug: incorrect nil check on internal.FS --- pkg/embedded/embed.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/embedded/embed.go b/pkg/embedded/embed.go index 7ea7edb9..2a96e776 100644 --- a/pkg/embedded/embed.go +++ b/pkg/embedded/embed.go @@ -15,7 +15,7 @@ type Options struct { func Run(opts ...Options) bool { for _, opt := range opts { - if opt.FS == nil { + if opt.FS != nil { internal.FS = opt.FS } } From 94d0f2453856d1ec13e8e9129d93c2a47a14b892 Mon Sep 17 00:00:00 2001 From: Donnie Adams Date: Fri, 28 Jun 2024 10:25:22 -0400 Subject: [PATCH 17/24] fix: create the cache directory before writing last-checked file If the cache directory is gone, then, currently, writing the last-checked file fails. This change ensures that the directory exists before trying to write the file. Co-authored-by: Nick Hale <4175918+njhale@users.noreply.github.com> Signed-off-by: Donnie Adams --- pkg/repos/get.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/repos/get.go b/pkg/repos/get.go index b3d5f609..a0d7b0c3 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -7,6 +7,7 @@ import ( "fmt" "io/fs" "os" + "path" "path/filepath" "strings" "sync" @@ -132,6 +133,10 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co return err } + if err := os.MkdirAll(path.Dir(m.credHelperDirs.LastCheckedFile), 0755); err != nil { + return err + } + // Update the last-checked file. 
if err := os.WriteFile(m.credHelperDirs.LastCheckedFile, []byte(now.Format(time.RFC3339)), 0644); err != nil { return err From 9175120098073dcb1ecaa27b84dac1b0e0450463 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Fri, 28 Jun 2024 10:58:38 -0400 Subject: [PATCH 18/24] chore: remove third credential override syntax (#585) Signed-off-by: Grant Linville --- docs/docs/03-tools/04-credential-tools.md | 43 ++------ pkg/runner/credentials.go | 14 +-- pkg/runner/credentials_test.go | 128 ++++++++++++++++++++++ 3 files changed, 142 insertions(+), 43 deletions(-) create mode 100644 pkg/runner/credentials_test.go diff --git a/docs/docs/03-tools/04-credential-tools.md b/docs/docs/03-tools/04-credential-tools.md index 7178542d..981a708c 100644 --- a/docs/docs/03-tools/04-credential-tools.md +++ b/docs/docs/03-tools/04-credential-tools.md @@ -148,10 +148,6 @@ This is useful when working with credential overrides. ## Credential Overrides (Advanced) -:::note -The syntax for this will change at some point in the future. -::: - You can bypass credential tools and stored credentials by setting the `--credential-override` argument (or the `GPTSCRIPT_CREDENTIAL_OVERRIDE` environment variable) when running GPTScript. To set up a credential override, you need to be aware of which environment variables the credential tool sets. You can find this out by running the @@ -166,41 +162,24 @@ This can be overridden with a credential alias, i.e. `credential: my-cred-tool.g If the credential has an alias, use it instead of the tool name when you specify an override. ::: -The `--credential-override` argument must be formatted in one of the following three ways: +The `--credential-override` argument must be formatted in one of the following two ways: #### 1. Key-Value Pairs -`toolA:ENV_VAR_1=value1,ENV_VAR_2=value2;toolB:ENV_VAR_1=value3,ENV_VAR_2=value4` +`toolA:ENV_VAR_1=value1,ENV_VAR_2=value2` + +In this example, both `toolA` provides the variables `ENV_VAR_1` and `ENV_VAR_2`. +This will set the environment variables `ENV_VAR_1` and `ENV_VAR_2` to the specific values `value1` and `value2`. -In this example, both `toolA` and `toolB` provide the variables `ENV_VAR_1` and `ENV_VAR_2`. -This will set the environment variables `ENV_VAR_1` and `ENV_VAR_2` to the specific values provided for each tool. +:::info +To override more than one credential, use `;` as a separator. For example, `toolA:ENV_VAR_1=value1;toolB:ENV_VAR_2=value2`. +::: #### 2. Environment Variables -`toolA:ENV_VAR_1,ENV_VAR_2;toolB:ENV_VAR_3,ENV_VAR_4` +`toolA:ENV_VAR_1,ENV_VAR_2` -In this example, `toolA` provides the variables `ENV_VAR_1` and `ENV_VAR_2`, and `toolB` provides the variables `ENV_VAR_3` and `ENV_VAR_4`. -This will read the values of `ENV_VAR_1` through `ENV_VAR_4` from the current environment and set them for each tool. +In this example, `toolA` provides the variables `ENV_VAR_1` and `ENV_VAR_2`, +This will read the values of `ENV_VAR_1` through `ENV_VAR_4` from the current environment and set them for the credential. This is a direct mapping of environment variable names. **This is not recommended when overriding credentials for multiple tools that use the same environment variable names.** - -#### 3. Environment Variable Mapping - -`toolA:ENV_VAR_1->TOOL_A_ENV_VAR_1,ENV_VAR_2->TOOL_A_ENV_VAR_2;toolB:ENV_VAR_1->TOOL_B_ENV_VAR_1,ENV_VAR_2->TOOL_B_ENV_VAR_2` - -In this example, `toolA` and `toolB` both provide the variables `ENV_VAR_1` and `ENV_VAR_2`. 
-This will set the environment variables `ENV_VAR_1` and `ENV_VAR_2` to the values of `TOOL_A_ENV_VAR_1` and -`TOOL_A_ENV_VAR_2` from the current environment for `toolA`. The same applies for `toolB`, but with the values of -`TOOL_B_ENV_VAR_1` and `TOOL_B_ENV_VAR_2`. This is a mapping of one environment variable name to another. - -### Real-World Example - -Here is an example of how you can use a credential override to skip running the credential tool for the Brave Search tool: - -```bash -gptscript --credential-override "github.com/gptscript-ai/search/brave-credential:GPTSCRIPT_BRAVE_SEARCH_TOKEN->MY_BRAVE_SEARCH_TOKEN" github.com/gptscript-ai/search/brave '{"q": "cute cats"}' -``` - -If you run this command, rather than being prompted by the credential tool for your token, GPTScript will read the contents -of the environment variable `MY_BRAVE_SEARCH_TOKEN` and set that as the variable `GPTSCRIPT_BRAVE_SEARCH_TOKEN` when it runs -the script. diff --git a/pkg/runner/credentials.go b/pkg/runner/credentials.go index c547e832..f1c5d0dd 100644 --- a/pkg/runner/credentials.go +++ b/pkg/runner/credentials.go @@ -7,10 +7,9 @@ import ( ) // parseCredentialOverrides parses a string of credential overrides that the user provided as a command line arg. -// The format of credential overrides can be one of three things: +// The format of credential overrides can be one of two things: // cred1:ENV1,ENV2;cred2:ENV1,ENV2 (direct mapping of environment variables) // cred1:ENV1=VALUE1,ENV2=VALUE2;cred2:ENV1=VALUE1,ENV2=VALUE2 (key-value pairs) -// cred1:ENV1->OTHER_ENV1,ENV2->OTHER_ENV2;cred2:ENV1->OTHER_ENV1,ENV2->OTHER_ENV2 (mapping to other environment variables) // // This function turns it into a map[string]map[string]string like this: // @@ -36,15 +35,8 @@ func parseCredentialOverrides(override string) (map[string]map[string]string, er for _, env := range strings.Split(envs, ",") { key, value, found := strings.Cut(env, "=") if !found { - var envVar string - key, envVar, found = strings.Cut(env, "->") - if found { - // User did a mapping of key -> other env var, so look up the value. - value = os.Getenv(envVar) - } else { - // User just passed an env var name as the key, so look up the value. - value = os.Getenv(key) - } + // User just passed an env var name as the key, so look up the value. 
+ value = os.Getenv(key) } envMap[key] = value } diff --git a/pkg/runner/credentials_test.go b/pkg/runner/credentials_test.go new file mode 100644 index 00000000..548bc3b5 --- /dev/null +++ b/pkg/runner/credentials_test.go @@ -0,0 +1,128 @@ +package runner + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseCredentialOverrides(t *testing.T) { + cases := []struct { + name string + envs map[string]string + in string + out map[string]map[string]string + expectErr bool + }{ + { + name: "empty", + in: "", + expectErr: true, + }, + { + name: "single cred, single env", + envs: map[string]string{ + "ENV1": "VALUE1", + }, + in: "cred1:ENV1", + out: map[string]map[string]string{ + "cred1": { + "ENV1": "VALUE1", + }, + }, + }, + { + name: "single cred, multiple envs", + envs: map[string]string{ + "ENV1": "VALUE1", + "ENV2": "VALUE2", + }, + in: "cred1:ENV1,ENV2", + out: map[string]map[string]string{ + "cred1": { + "ENV1": "VALUE1", + "ENV2": "VALUE2", + }, + }, + }, + { + name: "single cred, key value pairs", + envs: map[string]string{ + "ENV1": "VALUE1", + "ENV2": "VALUE2", + }, + in: "cred1:ENV1=OTHERVALUE1,ENV2=OTHERVALUE2", + out: map[string]map[string]string{ + "cred1": { + "ENV1": "OTHERVALUE1", + "ENV2": "OTHERVALUE2", + }, + }, + }, + { + name: "multiple creds, multiple envs", + envs: map[string]string{ + "ENV1": "VALUE1", + "ENV2": "VALUE2", + }, + in: "cred1:ENV1,ENV2;cred2:ENV1,ENV2", + out: map[string]map[string]string{ + "cred1": { + "ENV1": "VALUE1", + "ENV2": "VALUE2", + }, + "cred2": { + "ENV1": "VALUE1", + "ENV2": "VALUE2", + }, + }, + }, + { + name: "multiple creds, key value pairs", + envs: map[string]string{ + "ENV1": "VALUE1", + "ENV2": "VALUE2", + }, + in: "cred1:ENV1=OTHERVALUE1,ENV2=OTHERVALUE2;cred2:ENV1=OTHERVALUE3,ENV2=OTHERVALUE4", + out: map[string]map[string]string{ + "cred1": { + "ENV1": "OTHERVALUE1", + "ENV2": "OTHERVALUE2", + }, + "cred2": { + "ENV1": "OTHERVALUE3", + "ENV2": "OTHERVALUE4", + }, + }, + }, + { + name: "invalid format", + in: "cred1=ENV1,ENV2", + expectErr: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + envs := tc.envs + if envs == nil { + envs = map[string]string{} + } + + for k, v := range envs { + _ = os.Setenv(k, v) + } + + out, err := parseCredentialOverrides(tc.in) + if tc.expectErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + require.Equal(t, len(tc.out), len(out), "expected %d creds, but got %d", len(tc.out), len(out)) + require.Equal(t, tc.out, out, "expected output %v, but got %v", tc.out, out) + }) + } +} From 337febb28c10bc7b080833e31002121625a06195 Mon Sep 17 00:00:00 2001 From: Bill Maxwell Date: Fri, 28 Jun 2024 10:25:31 -0700 Subject: [PATCH 19/24] fix: join the server and url with url joinpath. When joining url paths, you sometimes end up with // or /./ in them. When interacting with some apis you get redirects, like a 308, that are not handled by the default client. This just cleans it up in an effort to avoid. 
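
A minimal standalone sketch (not part of this patch) of the behavior the fix relies on: plain string concatenation keeps a duplicate slash when the server URL ends with `/` and the path starts with `/`, while `url.JoinPath` (Go 1.19+) joins and cleans the path. The `api.example.com` server and `/pets` path below are made-up values for illustration only.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical values: a server with a trailing slash plus a path with a
	// leading slash, the combination that produces "//" when concatenated.
	server := "https://api.example.com/v1/"
	path := "/pets"

	// Plain concatenation keeps the duplicate slash, which some APIs answer
	// with a redirect (e.g. a 308) instead of serving directly.
	fmt.Println(server + path) // https://api.example.com/v1//pets

	// url.JoinPath joins the pieces and cleans the resulting path.
	joined, err := url.JoinPath(server, path)
	if err != nil {
		panic(err)
	}
	fmt.Println(joined) // https://api.example.com/v1/pets
}
```

Cleaning the URL at join time avoids depending on every server to tolerate, or correctly redirect, the uncleaned form.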
Signed-off-by: Bill Maxwell --- pkg/engine/openapi.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/engine/openapi.go b/pkg/engine/openapi.go index 9af28772..2e338ca4 100644 --- a/pkg/engine/openapi.go +++ b/pkg/engine/openapi.go @@ -112,7 +112,12 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { instructions.Path = handlePathParameters(instructions.Path, instructions.PathParameters, input) // Parse the URL - u, err := url.Parse(instructions.Server + instructions.Path) + path, err := url.JoinPath(instructions.Server, instructions.Path) + if err != nil { + return nil, fmt.Errorf("failed to join server and path: %w", err) + } + + u, err := url.Parse(path) if err != nil { return nil, fmt.Errorf("failed to parse server URL %s: %w", instructions.Server+instructions.Path, err) } From 8a38e0e4076f4c9d3283b152d89366edf92281da Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 1 Jul 2024 09:33:53 -0400 Subject: [PATCH 20/24] docs: update OpenAPI docs to explain credential tool generation (#592) Signed-off-by: Grant Linville --- docs/docs/03-tools/03-openapi.md | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/docs/docs/03-tools/03-openapi.md b/docs/docs/03-tools/03-openapi.md index 4beac26e..2069b331 100644 --- a/docs/docs/03-tools/03-openapi.md +++ b/docs/docs/03-tools/03-openapi.md @@ -54,12 +54,8 @@ OAuth and OIDC schemes will be ignored. GPTScript will look at the `security` defined on the operation (or defined globally, if it is not defined on the operation) before it makes the request. It will set the necessary headers, cookies, or query parameters based on the corresponding security scheme. -Environment variables must be set for each security scheme that will be used by the operation. -`` is the hostname of the server, but all caps, and with dashes (`-`) and dots (`.`) replaced with underscores (`_`). -`` is the name of the security scheme, but all caps, and with dashes (`-`) and dots (`.`) replaced with underscores (`_`). - -- For `apiKey`-type and `http`-type with `bearer` scheme, the environment variable is `GPTSCRIPT__` -- For `http`-type with `basic` scheme, the environment variables are `GPTSCRIPT___USERNAME` and `GPTSCRIPT___PASSWORD` +When internally generating the tool for the operation with a supported security scheme, GPTScript will include a credential tool. +This tool will prompt the user to enter their credentials. This will make the key available to GPTScript during the tool's execution. #### Example @@ -85,10 +81,10 @@ security: ``` In this example, we have two security schemes, and both are defined as the defaults on the global level. -They are separate entries in the global `security` array, so they are treated as a logical OR, and GPTScript will only -need the environment variable for one or the other to make the request. +They are separate entries in the global `security` array, so they are treated as a logical OR, and GPTScript will prompt +the user to enter the credential for the first one (basic auth). -When put into the same entry, they would be a logical AND, and the environment variables for both would be required. +When put into the same entry, they would be a logical AND, and both would be required. 
It would look like this: ```yaml @@ -97,10 +93,7 @@ security: MyAPIKey: [] ``` -The environment variable names are as follows: - -- `GPTSCRIPT_API_EXAMPLE_COM_MYBASIC_USERNAME` and `GPTSCRIPT_API_EXAMPLE_COM_MYBASIC_PASSWORD` for basic auth -- `GPTSCRIPT_API_EXAMPLE_COM_MYAPIKEY` for the API key +In this case, GPTScript will prompt the user for both the basic auth credentials and the API key. ### 2. Bearer token for server From a42c9e3170fcdfb74b71a9b1e49979f7dda9ce1c Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 1 Jul 2024 09:35:02 -0400 Subject: [PATCH 21/24] fix: use GH token for VCS lookups (#578) Signed-off-by: Grant Linville --- .github/workflows/smoke.yaml | 3 +- pkg/loader/github/github.go | 10 +- pkg/loader/github/github_test.go | 6 +- pkg/loader/url.go | 18 +- pkg/repos/get.go | 2 +- .../Bob/claude-3-opus-20240229-expected.json | 236 ++++++++++-------- .../claude-3-opus-20240229-expected.json | 225 +++++++++-------- 7 files changed, 273 insertions(+), 227 deletions(-) diff --git a/.github/workflows/smoke.yaml b/.github/workflows/smoke.yaml index abea2159..5c26ef8d 100644 --- a/.github/workflows/smoke.yaml +++ b/.github/workflows/smoke.yaml @@ -139,8 +139,9 @@ jobs: go-version: "1.21" - env: OPENAI_API_KEY: ${{ secrets.SMOKE_OPENAI_API_KEY }} - GPTSCRIPT_DEFAULT_MODEL: claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta + GPTSCRIPT_DEFAULT_MODEL: claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider ANTHROPIC_API_KEY: ${{ secrets.SMOKE_ANTHROPIC_API_KEY }} + GPTSCRIPT_CREDENTIAL_OVERRIDE: "github.com/gptscript-ai/claude3-anthropic-provider/credential:ANTHROPIC_API_KEY" name: Run smoke test for claude-3-opus-20240229 run: | echo "Running smoke test for model claude-3-opus-20240229" diff --git a/pkg/loader/github/github.go b/pkg/loader/github/github.go index 1ca2f6c4..2fb01c3d 100644 --- a/pkg/loader/github/github.go +++ b/pkg/loader/github/github.go @@ -88,9 +88,9 @@ func getCommit(ctx context.Context, account, repo, ref string) (string, error) { return commit.SHA, nil } -func Load(ctx context.Context, _ *cache.Client, urlName string) (string, *types.Repo, bool, error) { +func Load(ctx context.Context, _ *cache.Client, urlName string) (string, string, *types.Repo, bool, error) { if !strings.HasPrefix(urlName, GithubPrefix) { - return "", nil, false, nil + return "", "", nil, false, nil } url, ref, _ := strings.Cut(urlName, "@") @@ -101,7 +101,7 @@ func Load(ctx context.Context, _ *cache.Client, urlName string) (string, *types. parts := strings.Split(url, "/") // Must be at least 3 parts github.com/ACCOUNT/REPO[/FILE] if len(parts) < 3 { - return "", nil, false, nil + return "", "", nil, false, nil } account, repo := parts[1], parts[2] @@ -109,7 +109,7 @@ func Load(ctx context.Context, _ *cache.Client, urlName string) (string, *types. ref, err := getCommit(ctx, account, repo, ref) if err != nil { - return "", nil, false, err + return "", "", nil, false, err } downloadURL := fmt.Sprintf(githubDownloadURL, account, repo, ref, path) @@ -141,7 +141,7 @@ func Load(ctx context.Context, _ *cache.Client, urlName string) (string, *types. 
path = testPath } - return downloadURL, &types.Repo{ + return downloadURL, githubAuthToken, &types.Repo{ VCS: "git", Root: fmt.Sprintf(githubRepoURL, account, repo), Path: gpath.Dir(path), diff --git a/pkg/loader/github/github_test.go b/pkg/loader/github/github_test.go index 169f2e7e..d627ee5e 100644 --- a/pkg/loader/github/github_test.go +++ b/pkg/loader/github/github_test.go @@ -11,7 +11,7 @@ import ( ) func TestLoad(t *testing.T) { - url, repo, ok, err := Load(context.Background(), nil, "github.com/gptscript-ai/gptscript/pkg/loader/testdata/tool@172dfb0") + url, _, repo, ok, err := Load(context.Background(), nil, "github.com/gptscript-ai/gptscript/pkg/loader/testdata/tool@172dfb0") require.NoError(t, err) assert.True(t, ok) autogold.Expect("https://raw.githubusercontent.com/gptscript-ai/gptscript/172dfb00b48c6adbbaa7e99270933f95887d1b91/pkg/loader/testdata/tool/tool.gpt").Equal(t, url) @@ -22,7 +22,7 @@ func TestLoad(t *testing.T) { Revision: "172dfb00b48c6adbbaa7e99270933f95887d1b91", }).Equal(t, repo) - url, repo, ok, err = Load(context.Background(), nil, "github.com/gptscript-ai/gptscript/pkg/loader/testdata/agent@172dfb0") + url, _, repo, ok, err = Load(context.Background(), nil, "github.com/gptscript-ai/gptscript/pkg/loader/testdata/agent@172dfb0") require.NoError(t, err) assert.True(t, ok) autogold.Expect("https://raw.githubusercontent.com/gptscript-ai/gptscript/172dfb00b48c6adbbaa7e99270933f95887d1b91/pkg/loader/testdata/agent/agent.gpt").Equal(t, url) @@ -33,7 +33,7 @@ func TestLoad(t *testing.T) { Revision: "172dfb00b48c6adbbaa7e99270933f95887d1b91", }).Equal(t, repo) - url, repo, ok, err = Load(context.Background(), nil, "github.com/gptscript-ai/gptscript/pkg/loader/testdata/bothtoolagent@172dfb0") + url, _, repo, ok, err = Load(context.Background(), nil, "github.com/gptscript-ai/gptscript/pkg/loader/testdata/bothtoolagent@172dfb0") require.NoError(t, err) assert.True(t, ok) autogold.Expect("https://raw.githubusercontent.com/gptscript-ai/gptscript/172dfb00b48c6adbbaa7e99270933f95887d1b91/pkg/loader/testdata/bothtoolagent/agent.gpt").Equal(t, url) diff --git a/pkg/loader/url.go b/pkg/loader/url.go index c050edba..bc4d5c9f 100644 --- a/pkg/loader/url.go +++ b/pkg/loader/url.go @@ -14,7 +14,7 @@ import ( "github.com/gptscript-ai/gptscript/pkg/types" ) -type VCSLookup func(context.Context, *cache.Client, string) (string, *types.Repo, bool, error) +type VCSLookup func(context.Context, *cache.Client, string) (string, string, *types.Repo, bool, error) var vcsLookups []VCSLookup @@ -35,10 +35,11 @@ type cacheValue struct { func loadURL(ctx context.Context, cache *cache.Client, base *source, name string) (*source, bool, error) { var ( - repo *types.Repo - url = name - relative = strings.HasPrefix(name, ".") || !strings.Contains(name, "/") - cachedKey = cacheKey{ + repo *types.Repo + url = name + bearerToken = "" + relative = strings.HasPrefix(name, ".") || !strings.Contains(name, "/") + cachedKey = cacheKey{ Name: name, Path: base.Path, Repo: base.Repo, @@ -67,12 +68,13 @@ func loadURL(ctx context.Context, cache *cache.Client, base *source, name string if repo == nil || !relative { for _, vcs := range vcsLookups { - newURL, newRepo, ok, err := vcs(ctx, cache, name) + newURL, newBearer, newRepo, ok, err := vcs(ctx, cache, name) if err != nil { return nil, false, err } else if ok { repo = newRepo url = newURL + bearerToken = newBearer break } } @@ -105,6 +107,10 @@ func loadURL(ctx context.Context, cache *cache.Client, base *source, name string return nil, false, err } + if bearerToken 
!= "" { + req.Header.Set("Authorization", "Bearer "+bearerToken) + } + data, err := getWithDefaults(req) if err != nil { return nil, false, fmt.Errorf("error loading %s: %v", url, err) diff --git a/pkg/repos/get.go b/pkg/repos/get.go index a0d7b0c3..be8d8594 100644 --- a/pkg/repos/get.go +++ b/pkg/repos/get.go @@ -128,7 +128,7 @@ func (m *Manager) deferredSetUpCredentialHelpers(ctx context.Context, cliCfg *co } // Load the credential helpers repo information. - _, repo, _, err := github.Load(ctx, nil, credentialHelpersRepo) + _, _, repo, _, err := github.Load(ctx, nil, credentialHelpersRepo) if err != nil { return err } diff --git a/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json index 92fc786a..1e924928 100644 --- a/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json +++ b/pkg/tests/smoke/testdata/Bob/claude-3-opus-20240229-expected.json @@ -1,15 +1,15 @@ [ { - "time": "2024-06-20T17:10:23.193337-04:00", + "time": "2024-06-28T10:48:16.036954-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-20T17:10:23.19359-04:00", + "time": "2024-06-28T10:48:16.037294-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -34,55 +34,57 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callStart", "usage": {} }, { - "time": "2024-06-20T17:10:24.059514-04:00", + "time": "2024-06-28T10:48:17.325263-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-20T17:10:24.059807-04:00", + "time": "2024-06-28T10:48:17.325584-04:00", "callContext": { - "id": "1718917825", + "id": "1719586098", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "modelProvider": true, "internalPrompt": null, "credentials": [ - "github.com/gptscript-ai/claude3-anthropic-provider/credential" + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider", "toolMapping": { - "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ { - "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", - "toolID": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" } ] }, "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + "Revision": "dfd21adc6be9fbda34b79a71e661ac0cfb725548" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548" }, + "currentAgent": {}, "inputContext": null, "toolCategory": "provider", "displayText": "Running sys.daemon" @@ -91,63 +93,64 @@ "usage": {} }, { - "time": "2024-06-20T17:10:25.074481-04:00", + "time": "2024-06-28T10:48:18.342475-04:00", "callContext": { - "id": "1718917825", + "id": "1719586098", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "modelProvider": true, "internalPrompt": null, "credentials": [ - "github.com/gptscript-ai/claude3-anthropic-provider/credential" + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider", "toolMapping": { - "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ { - "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", - "toolID": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" } ] }, "localTools": { - "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + "Revision": "dfd21adc6be9fbda34b79a71e661ac0cfb725548" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548" }, + "currentAgent": {}, "inputContext": null, "toolCategory": "provider", "displayText": "Running sys.daemon" }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:11060" + "content": "http://127.0.0.1:10961" }, { - "time": "2024-06-20T17:10:25.074606-04:00", + "time": "2024-06-28T10:48:18.342649-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-06-20T17:10:25.074685-04:00", + "time": "2024-06-28T10:48:18.342718-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -172,10 +175,11 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917826", + "chatCompletionId": "1719586099", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", @@ -207,11 +211,11 @@ } }, { - "time": "2024-06-20T17:10:25.075088-04:00", + "time": "2024-06-28T10:48:18.343115-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -236,19 +240,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917826", + "chatCompletionId": "1719586099", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-20T17:10:33.389627-04:00", + "time": "2024-06-28T10:48:21.291497-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -273,19 +278,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917826", + "chatCompletionId": "1719586099", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" }, { - "time": "2024-06-20T17:10:33.389848-04:00", + "time": "2024-06-28T10:48:21.291864-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -310,10 +316,11 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917826", + "chatCompletionId": "1719586099", "usage": {}, "chatResponse": { "role": "assistant", @@ -321,7 +328,7 @@ { "toolCall": { "index": 0, - "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "id": "bob", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -333,11 +340,11 @@ } }, { - "time": "2024-06-20T17:10:33.389967-04:00", + "time": "2024-06-28T10:48:21.291966-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -362,10 +369,11 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "toolSubCalls": { - "toolu_01EEcv7qDLHDmGAzm15vobxM": { + "bob": { "toolID": "testdata/Bob/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -374,13 +382,13 @@ "usage": {} }, { - "time": "2024-06-20T17:10:33.389997-04:00", + "time": "2024-06-28T10:48:21.291997-04:00", "callContext": { - "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -403,22 +411,23 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917824" + "parentID": "1719586097" }, "type": "callStart", "usage": {}, "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-06-20T17:10:33.584228-04:00", + "time": "2024-06-28T10:48:21.50264-04:00", "callContext": { - "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ 
-441,12 +450,13 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917824" + "parentID": "1719586097" }, "type": "callChat", - "chatCompletionId": "1718917827", + "chatCompletionId": "1719586100", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", @@ -464,13 +474,13 @@ } }, { - "time": "2024-06-20T17:10:33.584507-04:00", + "time": "2024-06-28T10:48:21.503008-04:00", "callContext": { - "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -493,23 +503,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917824" + "parentID": "1719586097" }, "type": "callProgress", - "chatCompletionId": "1718917827", + "chatCompletionId": "1719586100", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-20T17:10:35.540664-04:00", + "time": "2024-06-28T10:48:22.875617-04:00", "callContext": { - "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -532,23 +543,24 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917824" + "parentID": "1719586097" }, "type": "callProgress", - "chatCompletionId": "1718917827", + "chatCompletionId": "1719586100", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-20T17:10:35.540967-04:00", + "time": "2024-06-28T10:48:22.875964-04:00", "callContext": { - "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -571,12 +583,13 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917824" + "parentID": "1719586097" }, "type": "callChat", - "chatCompletionId": "1718917827", + "chatCompletionId": "1719586100", "usage": {}, "chatResponse": { "role": "assistant", @@ -589,13 +602,13 @@ } }, { - "time": "2024-06-20T17:10:35.541005-04:00", + "time": "2024-06-28T10:48:22.876034-04:00", "callContext": { - "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -618,20 +631,21 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917824" + "parentID": "1719586097" }, "type": "callFinish", "usage": {}, "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-20T17:10:35.541033-04:00", + "time": "2024-06-28T10:48:22.876064-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -656,6 +670,7 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "toolResults": 1, @@ -663,11 +678,11 @@ "usage": {} }, { - "time": "2024-06-20T17:10:35.71784-04:00", + "time": "2024-06-28T10:48:23.086894-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -692,10 +707,11 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917828", + "chatCompletionId": "1719586101", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", @@ -709,7 +725,7 @@ "content": "", "tool_calls": [ { - "id": "toolu_01EEcv7qDLHDmGAzm15vobxM", + "id": "bob", "type": "function", "function": { "name": "bob", @@ -722,7 +738,7 @@ "role": "tool", "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!", "name": "bob", - "tool_call_id": "toolu_01EEcv7qDLHDmGAzm15vobxM" + "tool_call_id": "bob" } ], "temperature": 0, @@ -747,11 +763,11 @@ } }, { - "time": "2024-06-20T17:10:35.718216-04:00", + "time": "2024-06-28T10:48:23.087303-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from 
github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -776,19 +792,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917828", + "chatCompletionId": "1719586101", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-20T17:10:39.50448-04:00", + "time": "2024-06-28T10:48:25.243408-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -813,19 +830,20 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917828", + "chatCompletionId": "1719586101", "usage": {}, - "content": "Bob replied: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-20T17:10:39.504769-04:00", + "time": "2024-06-28T10:48:25.243765-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -850,27 +868,28 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917828", + "chatCompletionId": "1719586101", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob replied: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" + "text": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" } ], "usage": {} } }, { - "time": "2024-06-20T17:10:39.504807-04:00", + "time": "2024-06-28T10:48:25.243793-04:00", "callContext": { - "id": "1718917824", + "id": "1719586097", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -895,14 +914,15 @@ }, "workingDir": "testdata/Bob" }, + "currentAgent": {}, "inputContext": null }, "type": "callFinish", "usage": {}, - "content": "Bob replied: \"Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking \"how are you doing\", I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-20T17:10:39.504821-04:00", + "time": "2024-06-28T10:48:25.243836-04:00", "type": "runFinish", "usage": {} } diff --git a/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json b/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json index 7b0477fc..88077e74 100644 --- a/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json +++ b/pkg/tests/smoke/testdata/BobAsShell/claude-3-opus-20240229-expected.json @@ -1,15 +1,15 @@ [ { - "time": "2024-06-20T17:10:39.522006-04:00", + "time": "2024-06-28T10:48:25.264658-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-20T17:10:39.522174-04:00", + "time": "2024-06-28T10:48:25.264853-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -34,55 +34,57 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callStart", "usage": {} }, { - "time": "2024-06-20T17:10:39.975716-04:00", + "time": "2024-06-28T10:48:26.063945-04:00", "type": "runStart", "usage": {} }, { - "time": "2024-06-20T17:10:39.975965-04:00", + "time": "2024-06-28T10:48:26.064323-04:00", "callContext": { - "id": "1718917841", + "id": "1719586107", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "modelProvider": true, "internalPrompt": null, "credentials": [ - "github.com/gptscript-ai/claude3-anthropic-provider/credential" + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider", "toolMapping": { - "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ { - "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" } ] }, "localTools": { - "anthropic claude3 model provider": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + "Revision": "dfd21adc6be9fbda34b79a71e661ac0cfb725548" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548" }, + "currentAgent": {}, "inputContext": null, "toolCategory": "provider", "displayText": "Running sys.daemon" @@ -91,63 +93,64 @@ "usage": {} }, { - "time": "2024-06-20T17:10:40.990696-04:00", + "time": "2024-06-28T10:48:27.081163-04:00", "callContext": { - "id": "1718917841", + "id": "1719586107", "tool": { "name": "Anthropic Claude3 Model Provider", "description": "Model provider for Anthropic hosted Claude3 models", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "modelProvider": true, "internalPrompt": null, "credentials": [ - "github.com/gptscript-ai/claude3-anthropic-provider/credential" + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env" ], "instructions": "#!sys.daemon /usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/main.py", - "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider", + "id": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider", "toolMapping": { - "github.com/gptscript-ai/claude3-anthropic-provider/credential": [ + "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env": [ { - "reference": "github.com/gptscript-ai/claude3-anthropic-provider/credential", - "toolID": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/3b9b9365d469c4c702291f1764537ab5c226c2e0/credential/tool.gpt:claude3-anthropic-credential" + "reference": "github.com/gptscript-ai/credential as github.com/gptscript-ai/claude3-anthropic-provider/credential with \"Please enter your Anthropic API Key\" as message and token as field and \"ANTHROPIC_API_KEY\" as env", + "toolID": "https://raw.githubusercontent.com/gptscript-ai/credential/651dfad6e7cf3a385ef408afa93ce522c10f8508/tool.gpt:token" } ] }, "localTools": { - "anthropic claude3 model provider": 
"https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt:Anthropic Claude3 Model Provider" + "anthropic claude3 model provider": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt:Anthropic Claude3 Model Provider" }, "source": { - "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad/tool.gpt", + "location": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548/tool.gpt", "lineNo": 1, "repo": { "VCS": "git", "Root": "https://github.com/gptscript-ai/claude3-anthropic-provider.git", "Path": "/", "Name": "tool.gpt", - "Revision": "d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + "Revision": "dfd21adc6be9fbda34b79a71e661ac0cfb725548" } }, - "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/d47b232371d18e0254cf7cc5e15a813e7e24a8ad" + "workingDir": "https://raw.githubusercontent.com/gptscript-ai/claude3-anthropic-provider/dfd21adc6be9fbda34b79a71e661ac0cfb725548" }, + "currentAgent": {}, "inputContext": null, "toolCategory": "provider", "displayText": "Running sys.daemon" }, "type": "callFinish", "usage": {}, - "content": "http://127.0.0.1:11124" + "content": "http://127.0.0.1:10315" }, { - "time": "2024-06-20T17:10:40.990787-04:00", + "time": "2024-06-28T10:48:27.081351-04:00", "type": "runFinish", "usage": {} }, { - "time": "2024-06-20T17:10:40.990853-04:00", + "time": "2024-06-28T10:48:27.081414-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -172,10 +175,11 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917842", + "chatCompletionId": "1719586108", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", @@ -207,11 +211,11 @@ } }, { - "time": "2024-06-20T17:10:40.991247-04:00", + "time": "2024-06-28T10:48:27.081844-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -236,19 +240,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1719586108", "usage": {}, "content": "Waiting for model response..." 
}, { - "time": "2024-06-20T17:10:49.116225-04:00", + "time": "2024-06-28T10:48:30.452648-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -273,19 +278,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917842", + "chatCompletionId": "1719586108", "usage": {}, "content": "\u003ctool call\u003e bob -\u003e {\"question\": \"how are you doing\"}" }, { - "time": "2024-06-20T17:10:49.116466-04:00", + "time": "2024-06-28T10:48:30.452871-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -310,10 +316,11 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917842", + "chatCompletionId": "1719586108", "usage": {}, "chatResponse": { "role": "assistant", @@ -321,7 +328,7 @@ { "toolCall": { "index": 0, - "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "id": "bob", "function": { "name": "bob", "arguments": "{\"question\": \"how are you doing\"}" @@ -333,11 +340,11 @@ } }, { - "time": "2024-06-20T17:10:49.116557-04:00", + "time": "2024-06-28T10:48:30.452992-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -362,10 +369,11 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "toolSubCalls": { - "toolu_01BzEjSj1HhTi52c1N9mS1jK": { + "bob": { "toolID": "testdata/BobAsShell/test.gpt:bob", "input": "{\"question\": \"how are you doing\"}" } @@ -374,13 +382,13 @@ "usage": {} }, { - "time": "2024-06-20T17:10:49.116583-04:00", + "time": "2024-06-28T10:48:30.453034-04:00", "callContext": { - "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -403,9 +411,10 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840", + "parentID": "1719586106", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callStart", @@ -413,13 +422,13 @@ "content": "{\"question\": \"how are you doing\"}" }, { - "time": "2024-06-20T17:10:49.116924-04:00", + "time": "2024-06-28T10:48:30.453498-04:00", "callContext": { - "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from 
github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -442,13 +451,14 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840", + "parentID": "1719586106", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1718917843", + "chatCompletionId": "1719586109", "usage": {}, "chatRequest": { "model": "", @@ -456,13 +466,13 @@ } }, { - "time": "2024-06-20T17:10:49.119266-04:00", + "time": "2024-06-28T10:48:30.455907-04:00", "callContext": { - "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -485,24 +495,25 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840", + "parentID": "1719586106", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callProgress", - "chatCompletionId": "1718917843", + "chatCompletionId": "1719586109", "usage": {}, "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-06-20T17:10:49.119489-04:00", + "time": "2024-06-28T10:48:30.45611-04:00", "callContext": { - "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -525,26 +536,27 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840", + "parentID": "1719586106", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callChat", - "chatCompletionId": "1718917843", + "chatCompletionId": "1719586109", "usage": {}, "chatResponse": { "usage": {} } }, { - "time": "2024-06-20T17:10:49.119539-04:00", + "time": "2024-06-28T10:48:30.456161-04:00", "callContext": { - "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "id": "bob", "tool": { "name": "bob", "description": "I'm Bob, a friendly guy.", - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "arguments": { "properties": { @@ -567,9 +579,10 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null, "toolName": "bob", - "parentID": "1718917840", + "parentID": "1719586106", "displayText": "Running bob from testdata/BobAsShell/test.gpt" }, "type": "callFinish", @@ -577,11 +590,11 @@ "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n" }, { - "time": "2024-06-20T17:10:49.119572-04:00", + "time": "2024-06-28T10:48:30.456194-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from 
github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -606,6 +619,7 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "toolResults": 1, @@ -613,11 +627,11 @@ "usage": {} }, { - "time": "2024-06-20T17:10:49.298305-04:00", + "time": "2024-06-28T10:48:30.652688-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -642,10 +656,11 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917844", + "chatCompletionId": "1719586110", "usage": {}, "chatRequest": { "model": "claude-3-opus-20240229", @@ -659,7 +674,7 @@ "content": "", "tool_calls": [ { - "id": "toolu_01BzEjSj1HhTi52c1N9mS1jK", + "id": "bob", "type": "function", "function": { "name": "bob", @@ -672,7 +687,7 @@ "role": "tool", "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\n", "name": "bob", - "tool_call_id": "toolu_01BzEjSj1HhTi52c1N9mS1jK" + "tool_call_id": "bob" } ], "temperature": 0, @@ -697,11 +712,11 @@ } }, { - "time": "2024-06-20T17:10:49.298759-04:00", + "time": "2024-06-28T10:48:30.652933-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -726,19 +741,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917844", + "chatCompletionId": "1719586110", "usage": {}, "content": "Waiting for model response..." }, { - "time": "2024-06-20T17:10:51.580939-04:00", + "time": "2024-06-28T10:48:33.127339-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -763,19 +779,20 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callProgress", - "chatCompletionId": "1718917844", + "chatCompletionId": "1719586110", "usage": {}, - "content": "Bob replied: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" 
}, { - "time": "2024-06-20T17:10:51.581258-04:00", + "time": "2024-06-28T10:48:33.127696-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -800,27 +817,28 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callChat", - "chatCompletionId": "1718917844", + "chatCompletionId": "1719586110", "usage": {}, "chatResponse": { "role": "assistant", "content": [ { - "text": "Bob replied: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "text": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" } ], "usage": {} } }, { - "time": "2024-06-20T17:10:51.581281-04:00", + "time": "2024-06-28T10:48:33.127717-04:00", "callContext": { - "id": "1718917840", + "id": "1719586106", "tool": { - "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider@tool-beta", + "modelName": "claude-3-opus-20240229 from github.com/gptscript-ai/claude3-anthropic-provider", "internalPrompt": null, "tools": [ "bob" @@ -845,14 +863,15 @@ }, "workingDir": "testdata/BobAsShell" }, + "currentAgent": {}, "inputContext": null }, "type": "callFinish", "usage": {}, - "content": "Bob replied: \"Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!\"" + "content": "Thanks for asking how are you doing, I'm doing great fellow friendly AI tool!" }, { - "time": "2024-06-20T17:10:51.581291-04:00", + "time": "2024-06-28T10:48:33.127758-04:00", "type": "runFinish", "usage": {} } From 361ca9f5bccfea53d5219081ef5062bfca519341 Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 1 Jul 2024 12:46:18 -0400 Subject: [PATCH 22/24] fix: resolve ~ in workspace path (#594) Signed-off-by: Grant Linville --- pkg/gptscript/gptscript.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 7a11e7dd..d25915b1 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -4,8 +4,10 @@ import ( "context" "fmt" "os" + "os/user" "path/filepath" "slices" + "strings" "github.com/gptscript-ai/gptscript/pkg/builtin" "github.com/gptscript-ai/gptscript/pkg/cache" @@ -167,7 +169,7 @@ func (g *GPTScript) getEnv(env []string) ([]string, error) { } } else if !filepath.IsAbs(g.WorkspacePath) { var err error - g.WorkspacePath, err = filepath.Abs(g.WorkspacePath) + g.WorkspacePath, err = makeAbsolute(g.WorkspacePath) if err != nil { return nil, err } @@ -181,6 +183,18 @@ func (g *GPTScript) getEnv(env []string) ([]string, error) { }), nil } +func makeAbsolute(path string) (string, error) { + if strings.HasPrefix(path, "~"+string(filepath.Separator)) { + usr, err := user.Current() + if err != nil { + return "", err + } + + return filepath.Join(usr.HomeDir, path[2:]), nil + } + return filepath.Abs(path) +} + func (g *GPTScript) Chat(ctx context.Context, prevState runner.ChatState, prg types.Program, envs []string, input string) (runner.ChatResponse, error) { envs, err := g.getEnv(envs) if err != nil { From cb7fa1dedc9e38fdafaf32c24b80ebfc376eb60f Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 1 Jul 2024 15:12:35 -0400 Subject: [PATCH 23/24] enhance: credential overrides: support multiple arguments (#593) Signed-off-by: Grant 
Linville Co-authored-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- docs/docs/03-tools/04-credential-tools.md | 6 +-- .../04-command-line-reference/gptscript.md | 2 +- .../gptscript_eval.md | 42 +++++++++---------- .../gptscript_fmt.md | 42 +++++++++---------- .../gptscript_parse.md | 42 +++++++++---------- pkg/cli/gptscript.go | 40 +++++++++--------- pkg/runner/credentials.go | 14 +++---- pkg/runner/credentials_test.go | 21 ++++++---- pkg/runner/runner.go | 24 ++++++----- pkg/sdkserver/routes.go | 4 +- pkg/sdkserver/types.go | 18 ++++---- 11 files changed, 128 insertions(+), 127 deletions(-) diff --git a/docs/docs/03-tools/04-credential-tools.md b/docs/docs/03-tools/04-credential-tools.md index 981a708c..9aaa7601 100644 --- a/docs/docs/03-tools/04-credential-tools.md +++ b/docs/docs/03-tools/04-credential-tools.md @@ -153,6 +153,8 @@ You can bypass credential tools and stored credentials by setting the `--credent need to be aware of which environment variables the credential tool sets. You can find this out by running the `gptscript credential --show-env-vars` command. +To override multiple credentials, specify the `--credential-override` argument multiple times. + ### Format :::info @@ -171,10 +173,6 @@ The `--credential-override` argument must be formatted in one of the following t In this example, both `toolA` provides the variables `ENV_VAR_1` and `ENV_VAR_2`. This will set the environment variables `ENV_VAR_1` and `ENV_VAR_2` to the specific values `value1` and `value2`. -:::info -To override more than one credential, use `;` as a separator. For example, `toolA:ENV_VAR_1=value1;toolB:ENV_VAR_2=value2`. -::: - #### 2. Environment Variables `toolA:ENV_VAR_1,ENV_VAR_2` diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md index 4da342fb..0c485603 100644 --- a/docs/docs/04-command-line-reference/gptscript.md +++ b/docs/docs/04-command-line-reference/gptscript.md @@ -19,7 +19,7 @@ gptscript [flags] PROGRAM_FILE [INPUT...] 
--config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override string Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) --debug Enable debug logging ($GPTSCRIPT_DEBUG) --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") diff --git a/docs/docs/04-command-line-reference/gptscript_eval.md b/docs/docs/04-command-line-reference/gptscript_eval.md index 4c485c7c..0fdd0249 100644 --- a/docs/docs/04-command-line-reference/gptscript_eval.md +++ b/docs/docs/04-command-line-reference/gptscript_eval.md @@ -25,27 +25,27 @@ gptscript eval [flags] ### Options inherited from parent commands ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override string Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_fmt.md b/docs/docs/04-command-line-reference/gptscript_fmt.md index 511132d9..5780c838 100644 --- a/docs/docs/04-command-line-reference/gptscript_fmt.md +++ b/docs/docs/04-command-line-reference/gptscript_fmt.md @@ -19,27 +19,27 @@ gptscript fmt [flags] ### Options inherited from parent commands ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override string Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_parse.md b/docs/docs/04-command-line-reference/gptscript_parse.md index 3dde0073..680aebf6 100644 --- a/docs/docs/04-command-line-reference/gptscript_parse.md +++ b/docs/docs/04-command-line-reference/gptscript_parse.md @@ -19,27 +19,27 @@ gptscript parse [flags] ### Options inherited from parent commands ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override string Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 3ba5e8c7..735fd8d3 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -53,24 +53,24 @@ type GPTScript struct { Output string `usage:"Save output to a file, or - for stdout" short:"o"` EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\\\.\\pipe\\my-pipe)" name:"events-stream-to"` // Input should not be using GPTSCRIPT_INPUT env var because that is the same value that is set in tool executions - Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"` - SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` - Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` - ListModels bool `usage:"List the models available and exit" local:"true"` - ListTools bool `usage:"List built-in tools and exit" local:"true"` - ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"` - Chdir string `usage:"Change current working directory" short:"C"` - Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"` - Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"` - CredentialContext string `usage:"Context name in which to store credentials" default:"default"` - CredentialOverride string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"` - ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"` - ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"` - ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"` - Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"` - UI bool `usage:"Launch the UI" local:"true" name:"ui"` - DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"` - SaveChatStateFile string `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"` + Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"` + SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` + Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` + ListModels bool `usage:"List the models available and exit" local:"true"` + ListTools bool `usage:"List built-in tools and exit" local:"true"` + ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"` + Chdir string `usage:"Change current working directory" short:"C"` + Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"` + Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"` + CredentialContext string `usage:"Context name in which to store credentials" default:"default"` + CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"` + ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"` + ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"` + ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"` + Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"` + UI bool `usage:"Launch the UI" local:"true" name:"ui"` + DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"` + SaveChatStateFile string 
`usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"` readData []byte } @@ -130,8 +130,8 @@ func (r *GPTScript) NewGPTScriptOpts() (gptscript.Options, error) { OpenAI: openai.Options(r.OpenAIOptions), Monitor: monitor.Options(r.DisplayOptions), Runner: runner.Options{ - CredentialOverride: r.CredentialOverride, - Sequential: r.ForceSequential, + CredentialOverrides: r.CredentialOverride, + Sequential: r.ForceSequential, }, Quiet: r.Quiet, Env: os.Environ(), diff --git a/pkg/runner/credentials.go b/pkg/runner/credentials.go index f1c5d0dd..d2fbb00e 100644 --- a/pkg/runner/credentials.go +++ b/pkg/runner/credentials.go @@ -8,8 +8,8 @@ import ( // parseCredentialOverrides parses a string of credential overrides that the user provided as a command line arg. // The format of credential overrides can be one of two things: -// cred1:ENV1,ENV2;cred2:ENV1,ENV2 (direct mapping of environment variables) -// cred1:ENV1=VALUE1,ENV2=VALUE2;cred2:ENV1=VALUE1,ENV2=VALUE2 (key-value pairs) +// cred1:ENV1,ENV2 (direct mapping of environment variables) +// cred1:ENV1=VALUE1,ENV2=VALUE2 (key-value pairs) // // This function turns it into a map[string]map[string]string like this: // @@ -17,16 +17,12 @@ import ( // "cred1": { // "ENV1": "VALUE1", // "ENV2": "VALUE2", -// }, -// "cred2": { -// "ENV1": "VALUE1", -// "ENV2": "VALUE2", -// }, +// } // } -func parseCredentialOverrides(override string) (map[string]map[string]string, error) { +func parseCredentialOverrides(overrides []string) (map[string]map[string]string, error) { credentialOverrides := make(map[string]map[string]string) - for _, o := range strings.Split(override, ";") { + for _, o := range overrides { credName, envs, found := strings.Cut(o, ":") if !found { return nil, fmt.Errorf("invalid credential override: %s", o) diff --git a/pkg/runner/credentials_test.go b/pkg/runner/credentials_test.go index 548bc3b5..c568d6be 100644 --- a/pkg/runner/credentials_test.go +++ b/pkg/runner/credentials_test.go @@ -11,13 +11,18 @@ func TestParseCredentialOverrides(t *testing.T) { cases := []struct { name string envs map[string]string - in string + in []string out map[string]map[string]string expectErr bool }{ + { + name: "nil", + in: nil, + out: map[string]map[string]string{}, + }, { name: "empty", - in: "", + in: []string{""}, expectErr: true, }, { @@ -25,7 +30,7 @@ func TestParseCredentialOverrides(t *testing.T) { envs: map[string]string{ "ENV1": "VALUE1", }, - in: "cred1:ENV1", + in: []string{"cred1:ENV1"}, out: map[string]map[string]string{ "cred1": { "ENV1": "VALUE1", @@ -38,7 +43,7 @@ func TestParseCredentialOverrides(t *testing.T) { "ENV1": "VALUE1", "ENV2": "VALUE2", }, - in: "cred1:ENV1,ENV2", + in: []string{"cred1:ENV1,ENV2"}, out: map[string]map[string]string{ "cred1": { "ENV1": "VALUE1", @@ -52,7 +57,7 @@ func TestParseCredentialOverrides(t *testing.T) { "ENV1": "VALUE1", "ENV2": "VALUE2", }, - in: "cred1:ENV1=OTHERVALUE1,ENV2=OTHERVALUE2", + in: []string{"cred1:ENV1=OTHERVALUE1,ENV2=OTHERVALUE2"}, out: map[string]map[string]string{ "cred1": { "ENV1": "OTHERVALUE1", @@ -66,7 +71,7 @@ func TestParseCredentialOverrides(t *testing.T) { "ENV1": "VALUE1", "ENV2": "VALUE2", }, - in: "cred1:ENV1,ENV2;cred2:ENV1,ENV2", + in: []string{"cred1:ENV1,ENV2", "cred2:ENV1,ENV2"}, out: map[string]map[string]string{ "cred1": { "ENV1": "VALUE1", @@ -84,7 +89,7 @@ func TestParseCredentialOverrides(t *testing.T) { "ENV1": "VALUE1", "ENV2": "VALUE2", }, - in: 
"cred1:ENV1=OTHERVALUE1,ENV2=OTHERVALUE2;cred2:ENV1=OTHERVALUE3,ENV2=OTHERVALUE4", + in: []string{"cred1:ENV1=OTHERVALUE1,ENV2=OTHERVALUE2", "cred2:ENV1=OTHERVALUE3,ENV2=OTHERVALUE4"}, out: map[string]map[string]string{ "cred1": { "ENV1": "OTHERVALUE1", @@ -98,7 +103,7 @@ func TestParseCredentialOverrides(t *testing.T) { }, { name: "invalid format", - in: "cred1=ENV1,ENV2", + in: []string{"cred1=ENV1,ENV2"}, expectErr: true, }, } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 41e4f058..cc5a3927 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -30,13 +30,13 @@ type Monitor interface { } type Options struct { - MonitorFactory MonitorFactory `usage:"-"` - RuntimeManager engine.RuntimeManager `usage:"-"` - StartPort int64 `usage:"-"` - EndPort int64 `usage:"-"` - CredentialOverride string `usage:"-"` - Sequential bool `usage:"-"` - Authorizer AuthorizerFunc `usage:"-"` + MonitorFactory MonitorFactory `usage:"-"` + RuntimeManager engine.RuntimeManager `usage:"-"` + StartPort int64 `usage:"-"` + EndPort int64 `usage:"-"` + CredentialOverrides []string `usage:"-"` + Sequential bool `usage:"-"` + Authorizer AuthorizerFunc `usage:"-"` } type AuthorizerResponse struct { @@ -58,11 +58,13 @@ func Complete(opts ...Options) (result Options) { result.RuntimeManager = types.FirstSet(opt.RuntimeManager, result.RuntimeManager) result.StartPort = types.FirstSet(opt.StartPort, result.StartPort) result.EndPort = types.FirstSet(opt.EndPort, result.EndPort) - result.CredentialOverride = types.FirstSet(opt.CredentialOverride, result.CredentialOverride) result.Sequential = types.FirstSet(opt.Sequential, result.Sequential) if opt.Authorizer != nil { result.Authorizer = opt.Authorizer } + if opt.CredentialOverrides != nil { + result.CredentialOverrides = append(result.CredentialOverrides, opt.CredentialOverrides...) + } } return } @@ -90,7 +92,7 @@ type Runner struct { factory MonitorFactory runtimeManager engine.RuntimeManager credMutex sync.Mutex - credOverrides string + credOverrides []string credStore credentials.CredentialStore sequential bool } @@ -103,7 +105,7 @@ func New(client engine.Model, credStore credentials.CredentialStore, opts ...Opt factory: opt.MonitorFactory, runtimeManager: opt.RuntimeManager, credMutex: sync.Mutex{}, - credOverrides: opt.CredentialOverride, + credOverrides: opt.CredentialOverrides, credStore: credStore, sequential: opt.Sequential, auth: opt.Authorizer, @@ -836,7 +838,7 @@ func (r *Runner) handleCredentials(callCtx engine.Context, monitor Monitor, env credOverrides map[string]map[string]string err error ) - if r.credOverrides != "" { + if r.credOverrides != nil { credOverrides, err = parseCredentialOverrides(r.credOverrides) if err != nil { return nil, fmt.Errorf("failed to parse credential overrides: %w", err) diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index 176552b1..c16b4429 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -200,8 +200,8 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { CredentialContext: reqObject.CredentialContext, Runner: runner.Options{ // Set the monitor factory so that we can get events from the server. 
- MonitorFactory: NewSessionFactory(s.events), - CredentialOverride: reqObject.CredentialOverride, + MonitorFactory: NewSessionFactory(s.events), + CredentialOverrides: reqObject.CredentialOverrides, }, } diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index e6325956..6f940c8b 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -52,15 +52,15 @@ type toolOrFileRequest struct { cacheOptions `json:",inline"` openAIOptions `json:",inline"` - ToolDefs toolDefs `json:"toolDefs,inline"` - SubTool string `json:"subTool"` - Input string `json:"input"` - ChatState string `json:"chatState"` - Workspace string `json:"workspace"` - Env []string `json:"env"` - CredentialContext string `json:"credentialContext"` - CredentialOverride string `json:"credentialOverride"` - Confirm bool `json:"confirm"` + ToolDefs toolDefs `json:"toolDefs,inline"` + SubTool string `json:"subTool"` + Input string `json:"input"` + ChatState string `json:"chatState"` + Workspace string `json:"workspace"` + Env []string `json:"env"` + CredentialContext string `json:"credentialContext"` + CredentialOverrides []string `json:"credentialOverrides"` + Confirm bool `json:"confirm"` } type content struct { From 37be71d160270f765f92e777b95684ba3660868e Mon Sep 17 00:00:00 2001 From: Grant Linville Date: Mon, 1 Jul 2024 16:30:06 -0400 Subject: [PATCH 24/24] fix: copy flag value when overriding flags to hide them (#596) Signed-off-by: Grant Linville --- pkg/cli/gptscript.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index 735fd8d3..19a6b2f2 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -95,6 +95,7 @@ func New() *cobra.Command { newFlag := pflag.Flag{ Name: f.Name, Usage: f.Usage, + Value: f.Value, } if f.Name != "credential-context" { // We want to keep credential-context