diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index b34c2fad..4c1c22bf 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -40,16 +40,6 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} TAP_GITHUB_TOKEN: ${{ secrets.TAP_GITHUB_TOKEN }} GORELEASER_CURRENT_TAG: ${{ github.ref_name }} - homebrew-release: - needs: release-tag - if: "! contains(github.ref_name, '-rc')" - runs-on: ubuntu-latest - steps: - - name: Update Homebrew formula - uses: dawidd6/action-homebrew-bump-formula@v3 - with: - token: ${{secrets.BREW_GH_TOKEN}} - formula: gptscript winget-release: needs: release-tag if: "! contains(github.ref_name, '-rc')" diff --git a/docs/docs/02-examples/01-cli.md b/docs/docs/02-examples/01-cli.md index 7a59f592..4c8f3bab 100644 --- a/docs/docs/02-examples/01-cli.md +++ b/docs/docs/02-examples/01-cli.md @@ -1,110 +1,59 @@ # Chat with a Local CLI -GPTScript makes it easy to write AI integrations with CLIs and other executable available on your local workstation. This is powerful because it allows you to work AI to solve complex problems using your available CLIs. You can describe complex requests in plain English and GPTScript will figure out the best CLI commands to make that happen. This guide will show you how to build a GPTScript that integrates with two CLIs: - -- [gh](https://cli.github.com/) - the GitHub CLI -- [kubectl](https://kubernetes.io/docs/reference/kubectl/) - the Kubernetes CLI +GPTScript makes it easy to write AI integrations with CLIs and other executables available on your local workstation. +You can describe complex requests in plain English and GPTScript will figure out the best CLI commands to make that happen. +This guide will show you how to build a GPTScript that integrates with the `gh` CLI for GitHub. :::warning -This script **does not install** or configure gh or kubectl. We assume you've done that already. - -- For gh, you must be logged in via `gh auth login`. 
[See here for more details](https://docs.github.com/en/github-cli/github-cli/quickstart) -- For kubectl, you must have a proper `kubeconfig`. [See here for more details](https://kubernetes.io/docs/tasks/tools/) - +This script **does not install** or configure `gh`. We assume you've done that already. +You must be logged in via `gh auth login`. [See here for more details](https://docs.github.com/en/github-cli/github-cli/quickstart) ::: -## Too Long; Didn't Read - -Want to start using this script now? Just run: - -``` -gptscript github.com/gptscript-ai/cli-demo -``` - -Or if you want to skip ahead and just grab the full script so that you can start hacking on it, jump to the [Putting it all together section](cli#putting-it-all-together). +You should have basic familiarity with [tools](../03-tools/01-using.md) before starting this guide. ## Getting Started -The rest of this guide will walk you through building a script that can serve as an assistant for GitHub and Kubernetes tasks. We'll be explaining the how, what, and why along the way. - -First, open up a new gptscript file in your favorite editor. We'll call the file cli-demo.gpt +First, open up a new file in your favorite editor. We'll call the file `cli-demo.gpt`. ``` vim cli-demo.gpt ``` -All edits below are assumed to be in this file. At the end, we'll share the entire script as one cohesive file, but along the way we'll just be adding tools one-by-one. - -## Create the Kubernetes Agent - -Let's start by adding the Kubernetes agent. In our script, add the following: - -``` ---- -Name: k8s-agent -Description: An agent that can help you with your Kubernetes cluster by executing kubectl commands -Context: shared-context -Tools: sys.exec -Parameter: task: The kubectl related task to accomplish -Chat: true - -You have the kubectl cli available to you. Use it to accomplish the tasks that the user asks of you. - -``` - -Now, let's walk through this tool line-by-line. - -**---** is a block separator. 
It's how we delineate tools in a script. - -**Name and Description** help the LLM understand the purpose of this tool. You should always have meaningful names and descriptions. - -**Tools: sys.exec** makes the built-in `sys.exec` tool available to this agent. This gives the agent the ability to execute arbitrary commands. Based on our prompt, it will be used for kubectl commands. GPTScript's authorization system will prompt for approval whenever it's going to run a `sys.exec` command. - -**Parameter: task:** defines a parameter named "task" for this tool. This will be important later on when other tools need to hand-off to this tool - they'll pass the task to it as this parameter. As with the name and description fields, it's important to provide a good description so that the LLM knows how to use this parameter. - -**Chat: true** turns this tool into a "chat-able" tool, which we also call an "agent". This is important for open-ended tasks that might take some iteration. - -Finally, we have the **tool body**, which in this case is a prompt: - -``` -You have the kubectl cli available to you. Use it to accomplish the tasks that the user asks of you. -``` - -This is what the tool will actually do. Tool bodies can be prompts or raw code like python, javascript, or the [world's best programming language](https://x.com/ibuildthecloud/status/1796227491943637125) - bash. For chat-able tools, your tool body should always be a prompt. - -That's all there is to the Kubernetes agent. You can try it out now. One nice thing about GPTScript is that tools are composable. So, you can get this tool working well and then move onto the next tool without affecting this one. To launch this tool, run: - -``` -gptscript --sub-tool k8s-agent cli-demo.gpt -``` - -Once you're chatting, try asking it do something like list all the pods in your cluster or even to launch an new deployment in the cluster. +All edits below are assumed to be in this file. 
-## Create the GitHub Agent +## Create the entrypoint tool -Now let's add the GitHub Agent. Drop the following into the file below the tool we just added. +Let's start by adding the main tool to the file: ``` ---- -Name: github-agent -Description: An agent to help you with GitHub related tasks using the gh cli Context: learn-gh -Tools: sys.exec -Parameter: task: The GitHub task to accomplish +Context: github.com/gptscript-ai/context/cli Chat: true You have the gh cli available to you. Use it to accomplish the tasks that the user asks of you. ``` -This tool is very similar to the Kubernetes agent. There are just a few key differences: +Let's walk through this tool line by line. + +Each `Context` line references a context tool that will be run before the tool itself runs. +Context tools provide helpful output for the LLM to understand its capabilities and what it is supposed to do. +The first, `learn-gh`, we will define later in this file. +The second, `github.com/gptscript-ai/context/cli`, provides information to the LLM about the operating system that GPTScript is running on, +and gives it access to the `sys.exec` built-in tool, which is used to run commands. -1. Names and descriptions have been changed to reference GitHub and gh as appropriate. -2. We've introduced the `learn-gh` context. We'll explore this next. +`Chat: true` turns this tool into a "chat-able" tool, which we also call an "agent". +This causes the tool to run as an interactive chatbot, asking for user input and providing output. +If `Chat` is set to `false` (or not specified at all), the tool will run once without user interaction and exit. +This is useful for automated tasks, but right now we are working on an agent, so we set it to `true`. + +Lastly, there is the **tool body**, which in this case is a simple prompt, letting the LLM know that it should use the `gh` command and follow the user's instructions. +The tool body specifies what the tool should actually do. 
It can be a prompt or raw code like Python, JavaScript, or bash. +For chat-able tools, the tool body must be a prompt. ### The learn-gh context tool -Add this for the learn-gh context tool: +Next, add this to the file for the `learn-gh` context tool: ``` --- @@ -112,7 +61,7 @@ Name: learn-gh #!/usr/bin/env bash -echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicate --sort flag." +echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicated --sort flag." gh --help gh repo --help gh issue --help @@ -128,159 +77,34 @@ gh release --help gh release create --help ``` -As we saw, this tool is used as the context for the github-agent. Why did we add this and what does it do? +The `---` at the top of this tool is a block separator. It's how we delineate tools within a script file. -To answer that, let's first understand what the Context stanza does. Any tools referenced in the Context stanza will be called and their output will be added to the chat context. As the name suggests, this gives the LLM additional context for subsequent messages. Sometimes, an LLM needs extra instructions or context in order to achieve the desired results. There's no hard or fast rule here for when you should include context; it's best discovered through trial-and-error. +This tool has a `Name` field. We named this tool `learn-gh` so that it matches the `Context: learn-gh` line from the entrypoint tool. - We didn't need extra context for the Kubernetes tool because we found our default LLM knows kubectl (and Kubernetes) quite well. However, our same testing showed that our default LLM doesn't know the gh cli as well. 
Specifically, the LLM would sometimes hallucinate invalid combinations of flags and parameters. Without this context, the LLM often takes several tries to get the gh command correct. +The body of this tool is a bash script, rather than a prompt. +This context tool will be run by GPTScript automatically at the start of execution, and its output will be provided to the LLM. +We're running a bunch of `--help` commands in the `gh` CLI so that the LLM can understand how to use it. +GPTScript knows that this tool body is a script rather than a prompt because it begins with `#!`. -:::tip -Did you catch that "takes several tries to get the command correct" part? One useful feature of GPTScript is that it will feed error messages back to the LLM, which allows the LLM to learn from its mistake and try again. -::: +## Running the tool -And that's the GitHub Agent. You can try it out now: +Now try running the tool: ``` -gptscript --sub-tool github-agent cli-demo.gpt +gptscript cli-demo.gpt ``` -Once you're chatting, try asking it do something like "Open an issue in gptscript-ai/gptscript with a title and body that says Hi from me and states how wonderful gptscript is but jazz it up and make it unique" - -## Your CLI Assistant - -Right now if you were to launch this script, you'd be dropped right into the Kubernetes agent. Let's create a new entrypoint whose job it is to handle your initial conversation and route to the appropriate agent. Add this to the **TOP** of your file: - -``` -Name: Your CLI Assistant -Description: An assistant to help you with local cli-based tasks for GitHub and Kubernetes -Agents: k8s-agent, github-agent -Context: shared-context -Chat: true - -Help the user acomplish their tasks using the tools you have. When the user starts this chat, just say hello and ask what you can help with. You donlt need to start off by guiding them. -``` - -By being at the top of the file, this tool will serve as the script's entrypoint. 
Here are the parts of this tool that are worth additional explanation: - -**Agents: k8s-agent, github-agent** puts these two agents into a group that can hand-off to each other. So, you can ask a GitHub question, then a Kubernetes question, and then a GitHub question again and the chat conversation will get transferred to the proper agent each time. - -Next is **Context: shared-context**. You're already familiar with contexts, but in the next section we'll explain what's unique about this one. - -### The shared-context tool - -Drop the shared-context tool in at the very bottom of the page: - -``` ---- -Name: shared-context -Share Context: github.com/gptscript-ai/context/history - -#!sys.echo -Always delegate to the best tool for the users request. -Ask the user for information needed to complete a task. -Provide the user with the exact action you will be taking and get the users confirmation when creating or updating resources. -ALWAYS ask the user to confirm deletions, provide as much detail about the action as possible. -``` - -and do one more thing: add it as a context tool to both the k8s-agent and github-agent. For k8s-agent, that means adding this line: `Context: shared-context` and for github-agent, it means modifying the existing Context line to: `Context: learn-gh, shared-context`. - -**Share Context: github.com/gptscript-ai/context/history** - In this line, "Share Context" means that the specified tool(s) will be part of the context for any tools that references this tool in their Context stanza. It's a way to compose and aggregate contexts. - - The specific tool referenced here - github.com/gptscript-ai/context/history - makes it so that when you transition from one agent to the next, your chat history is carried across. Using this file as an example, this would allow you to have a history of all the Kubernetes information you gathered available when talking to the GitHub tool. 
- -The **#!sys.echo** body is a simple way to directly output whatever text follows it. This is useful if you just have a static set of instructions you need to inject into the context. The actual text should make sense if you read it. We're telling the agents how we want them to behave and interact. - -## Putting it all together - -Let's take a look at this script as one cohesive file: - -``` -Name: Your CLI Assistant -Description: An assistant to help you with local cli-based dev tasks -Context: shared-context -Agents: k8s-agent, github-agent -Chat: true - -Help the user acomplish their tasks using the tools you have. When the user starts this chat, just say hello and ask what you can help with. You donlt need to start off by guiding them. - ---- -Name: k8s-agent -Description: An agent that can help you with your Kubernetes cluster by executing kubectl commands -Context: shared-context -Tools: sys.exec -Parameter: task: The kubectl releated task to accomplish -Chat: true - -You have the kubectl cli available to you. Use it to accomplish the tasks that the user asks of you. - ---- -Name: github-agent -Description: An agent to help you with GitHub related tasks using the gh cli -Context: learn-gh, shared-context -Tools: sys.exec -Parameter: task: The GitHub task to accomplish -Chat: true - -You have the gh cli available to you. Use it to accomplish the tasks that the user asks of you. - ---- -Name: learn-gh - -#!/usr/bin/env bash - -echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicate --sort flag." 
-gh --help
-gh repo --help
-gh issue --help
-gh issue list --help
-gh issue create --help
-gh issue comment --help
-gh issue delete --help
-gh issue edit --help
-gh pr --help
-gh pr create --help
-gh pr checkout --help
-gh release --help
-gh release create --help
-
-
----
-Name: shared-context
-Share Context: github.com/gptscript-ai/context/history
-
-#!sys.echo
-Always delegate to the best tool for the users request.
-Ask the user for information needed to complete a task.
-Provide the user with the exact action you will be taking and get the users confirmation when creating or updating resources.
-ALWAYS ask the user to confirm deletions, provide as much detail about the action as possible.
-```
-
-There isn't anything new to cover in this file, we just wanted you to get a holistic view of it. This script is now fully functional. You can launch it via:
-
-```
-gpscript cli-demo.gpt
-```
-
-### Adding your own CLI
-
-By now you should notice a simple pattern emerging that you can follow to add your own CLI-powered agents to a script. Here are the basics of what you need:
-
-```
-Name: {your cli}-agent
-Description: An agent to help you with {your taks} related tasks using the gh cli
-Context: {here's your biggest decsion to make}, shared-context
-Tools: sys.exec
-Parameter: task: The {your task}The GitHub task to accomplish
-Chat: true
-
-You have the {your cli} cli available to you. Use it to accomplish the tasks that the user asks of you.
-```
+Once you're chatting, try asking it to do something like "Open an issue in gptscript-ai/gptscript with a title and body that says Hi from me and states how wonderful gptscript is but jazz it up and make it unique".
+GPTScript will ask for confirmation before it runs each command, so you can make sure that it only runs the commands you want it to.
For well-known for CLIs/technologies like kubectl and Kubernetes, you probably won't need a custom context. For custom CLIs, you'll definitely need to help the LLM out. The best approach is to experiment and see what works best. +## A note on context tools -## Next steps +Context tools are a powerful way to provide additional information to the LLM, but they are not always necessary. +If you are working with a system that the LLM already understands well, you will probably not need to provide additional context. +When writing your own tools, it may take some trial and error to determine whether a context tool is needed. +If the LLM frequently hallucinates subcommands or arguments, it is probably worth adding a context tool to provide more information about the CLI. -Hopefully you've found this guide helpful. From here, you have several options: +## Next Steps -- You can checkout out some of our other guides available in this section of the docs -- You can dive deeper into the options available when [writing script](/tools/gpt-file-reference) +- You can check out some of our other guides available in this section of the docs +- You can dive deeper into the options available when [writing scripts](/tools/gpt-file-reference) diff --git a/docs/docs/02-examples/02-api.md b/docs/docs/02-examples/02-api.md index d84ae653..6c312c23 100644 --- a/docs/docs/02-examples/02-api.md +++ b/docs/docs/02-examples/02-api.md @@ -1,40 +1,27 @@ # Chat with an API -Interacting with cloud providers through dashboards, APIs, and CLIs is second nature to devops engineers. Using AI chat, the engineer can express a goal, and the AI can generate and execute the calls needed to achieve it. This saves the engineer time from having to look up the API calls needed themselves. GPTScript makes building a chat integration with an existing OpenAPI schema quick and easy. +GPTScript makes it easy to create a chatbot interface to interact with an API. 
-This guide will walk through the process of using the OpenAPI spec from Digital Ocean to build a chatbot capable of launching droplets and databases. The reader will be able to continue adding Digital Ocean capabilities or build their own chatbot with another OpenAPI schema. +This guide will demonstrate how to build a chatbot that interacts with the DigitalOcean API. -## Too Long; Didn't Read +## Getting Started -If you just want to try out the Digital Ocean chatbot first: - -Follow the [API credential](#api-access) settings here. - -Then you can run the following commands to get started: - -```bash -gptscript github.com/gptscript-ai/digital-ocean-agent -``` - -## Getting started - -First we will need to download a copy of the openapi.yaml. This spec technically can be accessed by URL, but initially, it is easier to download a copy and save it as openapi.yaml. - -### The Digital Ocean openapi.yaml spec - -Getting the openapi.yaml file from Digital Ocean can be done by running the following command in a terminal. +First, you will need to download a copy of DigitalOcean's OpenAPI definition. +While you can reference it by its URL, it is a bit easier to work with it locally. +You can download the file by running the following command: ```bash curl -o openapi.yaml -L https://api-engineering.nyc3.cdn.digitaloceanspaces.com/spec-ci/DigitalOcean-public.v2.yaml ``` -This will download a copy of the openapi yaml file to the local directory. +This will download a copy of the OpenAPI definition to the current directory. -Lets take a look at the spec file a little bit. The integration in GPTScript creates a tool named after each operationId in the OpenAPI spec. You can see what these tools would be by running the following. +Let's examine this OpenAPI file. GPTScript will create a tool named after each operationId in the file. +You can see the operationIds by running the following command: ```bash grep operationId openapi.yaml -# … +# ... 
# operationId: domains_delete_record # operationId: droplets_list # operationId: droplets_create @@ -50,189 +37,72 @@ grep operationId openapi.yaml # operationId: droplets_list_kernels # operationId: droplets_list_firewalls # operationId: droplets_list_neighbors -# … -``` - -If we look at the operationIds, you’ll notice they are structured around an object like droplet, database, or project. Each object has a collection of verb like list, get, delete, create, etc. Each tool in GPTScript has it’s own set of tools. So we can create agents, tools with chat enabled, that are experts in a specific set of objects and have access to all of the object_verb tools available to them. This allows us to fan out tools from a main entrypoint to multiple experts that can solve the users tasks. - -Lets explore this design pattern. - -## Creating Main Entrypoint - -Lets start by creating our main entrypoint to the Digital Ocean chatbot. The main tool in a GPTScript chat program is usually named agent.gpt. Let’s first setup the agents by giving it a name, the ability to chat, basic instructions, and the main greeting prompt. Create an agent.gpt file with the following contents. - -agent.gpt - -``` -Name: Digital Ocean Bot -Chat: true - -You are a helpful DevOps assistant that is an expert in Digital Ocean. -Using only the tools available, do not answer without using a tool, respond to the user task. -Greet the User with: "Hello! How can I help you with Digital Ocean?" -``` - -This file when run will show the following. - -![screenshot](/img/chat-api.png) - -In the current form, the chatbot will not be able to do anything since it doesn’t have access to any APIs. Let’s address that now, open our tool.gpt file and add the following. - -agent.gpt - +# ... ``` -Name: Digital Ocean Bot -Chat: true -Agents: droplets.gpt -You are a helpful DevOps assistant that is an expert in Digital Ocean -Using only the tools available, do not answer without using a tool, respond to the user task. 
-Greet the User with: "Hello! How can I help you with Digital Ocean?" -``` +The operationIds generally follow a pattern of `object_verb`. +This will be helpful for us, because we can use wildcard matching to refer to a subset of the operations. -Now lets create a droplets.gpt file to bring in the droplet tools. +## Creating the Script -droplets.gpt +Create a `tool.gpt` file with the following contents: ``` -Name: Droplet Agent +Name: DigitalOcean Bot Chat: true -Tools: droplets* from ./openapi.yaml -Description: Use this tool to work with droplets -Args: request: the task requested by the user +Tools: droplets* from openapi.yaml +Tools: databases* from openapi.yaml +Tools: images* from openapi.yaml +Tools: regions_list from openapi.yaml +Tools: tags* from openapi.yaml +Tools: sizes_list from openapi.yaml +Tools: sshKeys_list from openapi.yaml +Tools: sys.time.now -Help the user complete their Droplet operation requests using the tools available. -When creating droplets, always ask if the user would like to access via password or via SSHkey. +You are a helpful assistant with access to the DigitalOcean API to manage droplets and databases. +Before creating, updating, or deleting anything, tell the user about the exact action you are going to take, and get their confirmation. +Start the conversation by asking the user how you can help. ``` -Here we have defined the Droplet Agent, and enabled chat. We have also brought in an subset of the openapi.yaml tools that relate to droplets. By using droplets* we are making available everything droplet related into the available tools for this agent. We also provided the description to the main agent, and any other agent that has access to it, when to utilize this tool. We also have an argument called “request”, this is used when the LLM decides to call the agent it can smoothly pass off the user request without the Droplet Agent having to ask again. 
- -## Chat with Digital Ocean +This chatbot has access to several tools that correspond to various operations in the DigitalOcean OpenAPI file. +We give it access to all tools related to droplets and databases, since those are the main things we want it to work with. +In order to support this, we also need to give it access to images, regions, tags, etc. so that it can get the information it needs to create new droplets and databases. +Lastly, the `sys.time.now` tool is a tool that is built-in to GPTScript that provides the current date and time. -### API Access +:::note +We cannot give the entire `openapi.yaml` file to the tool because it contains too many API operations. +Most LLM providers, such as OpenAI, have a limit on the number of tools that you can provide to the model at one time. +The OpenAPI file contains over 300 operations, which is too many for most LLMs to handle at once. +::: -Now that we have brought in our first tool using the OpenAPI spec, we will need to setup authentication. Defined in the openapi.yaml is how the Digital Ocean API expects authenticated requests to work. If you look in the spec file path of components.securitySchemes you will see that Digital Ocean expects bearer_auth. So you will need to create an API key in the Digital Ocean dashboard with the access you want the LLM to be able to interact with Digital Ocean. For instance, you can do a read only key that will allow you to just query information, or you can provide it full access and the operator can work with the LLM to do anything in the project. It is up to you. For this example, we will be using a full access token, but you can adjust for your needs. You can create your API key by going to this link [Apps & API](https://cloud.digitalocean.com/account/api/tokens) section in your account. +## Creating an API Token -Once you have an API key, you will need to set an environment variable with that value stored. 
- -```bash -export GPTSCRIPT_API_DIGITALOCEAN_COM_BEARER_AUTH=****** -``` +Before you run this script, you need to have a DigitalOcean API token. -Where the *** is the API key created in the dashboard. +Go to [Applications & API](https://cloud.digitalocean.com/account/api/tokens) in the DigitalOcean dashboard and create a new token. +You can select whichever scopes you want, but you should at least give it the ability to read droplets and databases. -### Chatting with Digital Ocean APIs +## Running the Script -Now you can run gptscript to start your conversation with Digital Ocean. +Let's run the script and start chatting with it: ```bash -gptscript agent.gpt -``` - -You should now be able to ask how many droplets are running? - -And get an output from the chatbot. This is great, but not quite ready to use just yet. Lets keep adding some functionality. - -## Adding Database Support - -Now that we can do droplets, we can add support for databases just as easy. Lets create a databases.gpt file with the following contents. - -Ddtabases.gpt - -``` -Name: Database Agent -Chat: true -Tools: databases* from ./openapi.yaml -Description: Call this tool to manage databases on digital ocean -Args: request: the task requested by the user - -Help the user complete database operation requests with the tools available. -``` - -Here again, we are essentially scoping our agent to handle database calls with the Digital Ocean API. Now in order for this to be used, we need to add it to our agent list in the main agent.gpt file. - -Agent.gpt - -``` -Name: Digital Ocean Bot -Chat: true -Agents: droplets.gpt, databases.gpt - -You are a helpful DevOps assistant that is an expert in Digital Ocean -Using only the tools available, do not answer without using a tool, respond to the user task. -Greet the User with: "Hello! How can I help you with Digital Ocean?" -``` - -Now when we test it out we can ask how many databases are running? And it should give back the appropriate response. 
- -Now, when it comes to creating a database or droplet, we are missing some APIs to gather the correct information. We don’t have access to size information, regions, SSH Keys, etc. Since these are common tools, it would be a bit of a hassle to add lines to both the databases.gpt and droplets.gpt files. To avoid this, we can make use of the GPTScript Context to provide a common set of tools and instructions. - -## Context - -Context is a powerful concept in GPTScript that provides information to the system prompt, and provide a mechanism to compose a common set of tools reducing duplication in your GPTScript application. Lets add a context.gpt file to our chatbot here with the following contents. - -context.gpt - -``` -Share Tools: sys.time.now - -Share Tools: images* from ./openapi.yaml -Share Tools: regions_list from ./openapi.yaml -Share Tools: tags* from openapi.yaml -Share Tools: sizes_list from ./openapi.yaml -Share Tools: sshKeys_list from ./openapi.yaml - - -#!sys.echo -Always delegate to the best tool for the users request. -Ask the user for information needed to complete a task. -Provide the user with the exact action you will be taking and get the users confirmation when creating or updating resources. -ALWAYS ask the user to confirm deletions, provide as much detail about the action as possible. -``` - -There is quite a bit going on here, so lets break it down. Anywhere you see Share Tools it is making that tool available to anything uses the context. In this case, it is providing access to the time now tool so you can ask what was created yesterday and the LLM can get a frame of reference. Additionally, it provides a common set of Digital Ocean APIs that are needed for placement, organization(tags), sizes, and images, etc. Since multiple components in Digital Ocean use these values, it is useful to only need to define it once. Last we are providing a set of common instructions for how we want the chatbot to behave overall. 
This way, we do not need to provide this information in each agent. Also, since this is in the system prompt, it is given a higher weight to the instructions in the individual agents. - -Now lets add this to our agents. You will need to add the line: - -``` -Context: context.gpt -``` - -To each of our agents, so the droplets.gpt, agent.gpt, and databases.gpt will have this line. - -## Wrapping up - -Provided you have given API access through your token, you should now be able to run the chatbot and create a database or a droplet and be walked through the process. You should also be able to ask quesitons like What VMs were created this week? - -You now know how to add additional capabilities through agents to the chatbots. You can follow the same patterns outlined above to add more capabilities or you can checkout the chat bot repository to see additional functionality. - -### Use your own OpenAPI schema - -If you have your own OpenAPI schema, you can follow the same pattern to build a chatbot for your own APIs. The simplest way to get started is to create a gptscript file with the following contents. - -``` -Name: {Your API Name} Bot -Chat: true -Tools: openapi.yaml - -You are a helpful assistant. Say "Hello, how can I help you with {Your API Name} system today?" +gptscript tool.gpt ``` -You can then run that and the LLM will be able to interact with your API. +Try asking it to list your current databases or droplets, or to help you create a new one. -#### Note on OpenAI tool limits +The first time the LLM tries to make an API call, it will ask for your API token. +Paste it into the prompt. It will be used for all future API calls as well. +The LLM will never see or store your API token. It is only used client-side, on your computer. -As we mentioned before, GPTScript creates a tool for each operationId in the OpenAPI spec. If you have a large OpenAPI spec, you may run into a limit on the number of tools that can be created. 
OpenAI, the provider of the GPT-4o model only allows a total of 200 tools to be passed in at a single time. If you exceed this limit, you will see an error message from OpenAI. If you run into this issue, you can follow the same pattern we did above to create our Digital Ocean bot. +## Next Steps -A quick check to see how many tools total would be created, you can run the following: +Feel free to modify the script to add other parts of the DigitalOcean API. +You could also try creating a chatbot for a different API with an OpenAPI definition. -```bash -grep operationId openapi.yaml|wc -l - 306 -``` +For a more advanced DigitalOcean chatbot, see our [DigitalOcean Agent](https://github.com/gptscript-ai/digital-ocean-agent) tool. -In our case, there are 306 tools that would be created in the case of our Digital Ocean spec. This would not fit into a single agent, so breaking it up into multiple agents is the best way to handle this. - -## Next Steps +To read more about OpenAPI tools in GPTScript, see the [OpenAPI Tools](../03-tools/03-openapi.md) article. -Now that you have seen how to create a chatbot with an OpenAPI schema, checkout our other guides to see how to build other ChatBots and agents. +To read more about credential storage in GPTScript, see the [Credentials](../06-credentials.md) article. diff --git a/docs/docs/02-examples/04-local-files.md b/docs/docs/02-examples/04-local-files.md index 522deb01..252ddf96 100644 --- a/docs/docs/02-examples/04-local-files.md +++ b/docs/docs/02-examples/04-local-files.md @@ -1,6 +1,9 @@ # Chat with Local Files -With GPTScript interacting with local files is simple and powerful. This can help you streamline repetitive or data-intensive tasks. In this guide, we'll build a script that can query Excel files, CSVs, and PDFs. We'll then use the script to read, transform, and utilize the data in these files. +With GPTScript, interacting with local files is simple and powerful. 
+This can help you streamline repetitive or data-intensive tasks.
+In this guide, we'll build a script that can query Excel files, CSVs, and PDFs.
+We'll then use the script to read, transform, and utilize the data in these files.
 
 ## Too Long; Didn't Read
 
@@ -14,60 +17,79 @@ gptscript --workspace=~/Documents github.com/gptscript-ai/local-files-demo
 ```
 
 ## Getting Started
-The rest of this guide will walk you through building and using a data processing assistant. We'll be explaining the how, what, and why along the way.
+
+The rest of this guide will walk you through building and using a data processing assistant.
 
 First, let's get some sample data to work with. You can clone our repo with our sample data:
+
 ```
 git clone https://github.com/gptscript-ai/local-files-demo.git
 cd local-files-demo
 ```
 
-Next, open up a new gptscript file in your favorite editor. We'll call the file data-assistant.gpt.
+Next, open up a new gptscript file in your favorite editor. We'll call the file `data-assistant.gpt`.
+
 ```
 vim data-assistant.gpt
 ```
+
 All edits below are assumed to be in this file.
 
 ### Create the Assistant
-Put this in the gpt file:
+
+Add this to the file:
+
 ```
-Name: Your Data Processing Assitant
-Description: An asistant to help you with processing data found in files on your workstation. Helpful for querying spreadsheets, CSVs, JSON files, and pdfs.
+Name: Data Processing Assistant
+Description: An assistant to help you with processing data found in files on your workstation. Helpful for querying spreadsheets, CSVs, JSON files, and PDFs.
 Tools: github.com/gptscript-ai/structured-data-querier, github.com/gptscript-ai/pdf-reader
 Context: github.com/gptscript-ai/context/workspace
 Chat: true
 
-You are a helpful data processing assistant. Your goal is to help the user with data processing. Help the user accomplish their tasks using the tools you have. When the user starts this chat, just say hi, introduce yourself, and ask what you can help with. 
+You are a helpful data processing assistant. Your goal is to help the user with data processing. +Help the user accomplish their tasks using the tools you have. +When the user starts this chat, say hi, introduce yourself, and ask what you can help with. ``` -This is actually the entirety of the script. We're packing a lot of power into just a handful of lines here. Let's talk through them. -**Name and Description** help the LLM understand the purpose of this tool. You should always have meaningful names and descriptions. +This is the entire script. Here's what each part does: + +`Name and Description` help the LLM understand the purpose of this tool. You should always have meaningful names and descriptions. -The **Tools: ...** stanza pulls two useful tools into this assistant. +The `Tools: ...` line provides two useful tools to this assistant. -The [structured-data-querier](https://github.com/gptscript-ai/structured-data-querier) makes it possible to query csv, xlsx, and json files as though they SQL databases (using an application called [DuckDB](https://duckdb.org/)). This is extremely powerful when combined with the power of LLMs because it let's you ask natural language questions that the LLM can then translate to SQL. +The [structured-data-querier](https://github.com/gptscript-ai/structured-data-querier) makes it possible to query CSV, XLSX, and JSON files as though they were SQL databases (using an application called [DuckDB](https://duckdb.org/)). +This is extremely powerful when combined with the power of LLMs because it allows you to ask natural language questions that the LLM can then translate to SQL. -The [pdf-reader](https://github.com/gptscript-ai/pdf-reader) isn't quite as exciting, but still useful. It parses and reads PDFs and returns the contents to the LLM. This will put the entire contents in your chat context, so it's not appropriate for extremely large PDFs, but it's handy for smaller ones. 
+The [pdf-reader](https://github.com/gptscript-ai/pdf-reader) parses and reads PDFs and returns the contents to the LLM.
+This will put the entire contents in your chat context, so it's not appropriate for extremely large PDFs, but it's handy for smaller ones.
 
-**Context: github.com/gptscript-ai/context/workspace** introduces a context tool makes this assistant "workspace" aware. It's description reads:
+`Context: github.com/gptscript-ai/context/workspace` introduces a [context tool](../03-tools/05-context.md) that makes this assistant "workspace" aware. Its description reads:
 
 > Adds the workspace and tools needed to access the workspace to the current context
 
-That translates to telling the LLM what the workspace directory is and instructing it to use that directory for reading and writing files. As we saw above, you can specify a workspace like this:
+Basically, this context tool tells the LLM what the workspace directory is and instructs it to use that directory for reading and writing files.
+As we saw above, you can specify a workspace like this:
+
 ```
 gptscript --workspace=/Your/path/here ...
 ```
+
 If you don't specify one, a temporary directory will be created and used for the workspace.
 
-This context also shares the `sys.read`, `sys.write`, and `sys.ls` built-in tools with the assistant so that it automatically has access to them.
+This context tool also shares the `sys.read`, `sys.write`, and `sys.ls` built-in tools with the assistant.
 
-Next we have **Chat: true**, which you've seen if you looked at any of our other guides. This makes the current tool "chat-able". We refer to chatable tools as agents or assistants.
+Next we have `Chat: true`. This makes the current tool "chat-able". We refer to chat-able tools as agents or assistants.
 
 Finally, we have the prompt:
 
-> You are a helpful data processing assistant. Your goal is to help the user with data processing tasks. Help the user accomplish their tasks using the tools you have. 
When the user starts this chat, just say hi, introduce yourself, and ask what you can help with.
+> You are a helpful data processing assistant. Your goal is to help the user with data processing.
+> Help the user accomplish their tasks using the tools you have.
+> When the user starts this chat, say hi, introduce yourself, and ask what you can help with.
 
 ## Using the Assistant
-Once again, that's all there is to this assistant. You can start using it by specifying your own workspace or using our sample-data directory as the workspace. Assuming you're using our sample data and have followed these instructions, here's how you launch it:
+
+When you run the assistant, you can specify your own workspace folder or our sample data directory.
+Assuming you're using our sample data and have followed these instructions, here's how you run it:
+
 ```
 gptscript --workspace=./sample-data data-assistant.gpt
 ```
@@ -75,6 +97,7 @@ gptscript --workspace=./sample-data data-assistant.gpt
 Here's a few sample interactions with these files.
 
 ### Cleaning up data
+
 ```
 > whats in the key contacts file?
 
@@ -114,6 +137,7 @@ Here's a few sample interactions with these files.
 ```
 
 ### Identifying and fixing data gaps
+
 ```
 > is there any missing data in that csv?
 ...
@@ -126,6 +150,7 @@ Here's a few sample interactions with these files.
 ```
 
 ### Cross-referencing
+
 ```
 > what were sales like for Kevin's location?
 
@@ -149,7 +174,9 @@ Here's a few sample interactions with these files.
 Is there anything else you would like to know or do with this data?
 ```
 
+
 ### Pulling all the info together
+
 ```
 > Let's help Kevin raise sales. What promotions do we have going on?
 ...
@@ -234,11 +261,12 @@ Here's a few sample interactions with these files.
 Feel free to customize this email further to better suit your needs. Let me know if there's anything else I can assist you with!
 ```
+
 Try it out yourself and see what you can come up with.
 
 ## Next steps
 
 Hopefully you've found this guide helpful. 
From here, you have several options: -- You can checkout out some of our other guides available in this section of the docs +- You can check out some of our other guides available in this section of the docs - You can dive deeper into the options available when [writing script](/tools/gpt-file-reference) diff --git a/docs/docs/02-examples/05-workflow.md b/docs/docs/02-examples/05-workflow.md index 0aca6fc3..7f903536 100644 --- a/docs/docs/02-examples/05-workflow.md +++ b/docs/docs/02-examples/05-workflow.md @@ -1,15 +1,14 @@ # Run an Automated Workflow -Automating a sequence of tasks that integrate with one or more systems is a ubiquitous engineering problem that typically requires some degree of domain-specific knowledge up-front. However, workflows written with GPTScript all but eliminate this prerequisite, enabling developers to build their workflows by describing the high-level steps it should perform. +Automating a sequence of tasks that integrate with one or more systems is a ubiquitous engineering problem that typically requires some degree of domain-specific knowledge up-front. +However, workflows written with GPTScript all but eliminate this prerequisite, enabling developers to build their workflows by describing the high-level steps it should perform. This guide will show you how to build a GPTScript that encapsulates a workflow consisting of the following steps: -1. Get a selection of twitter posts +1. Get a selection of X (Twitter) posts 2. Summarize their content 3. Summarize the content of any links they directly reference 4. Write the results to a Markdown document -We'll be explaining the how, what, and why along the way. - ## Too long; didn't read Want to start using this script now? Just run: @@ -53,7 +52,7 @@ This tool: - imports two other tools - `sys.write` is a built-in tool which enables the entrypoint tool to write files to your system. - `summarize-tweet` is a custom tool that encapsulates how each tweet gets summarized. 
We'll define this tool in the next step. -- ensures tweets are never summarized in parallel to ensure they are summarized in the correct order +- ensures tweets are never summarized in parallel so that they are summarized in the correct order - defines the tweet URLs to summarize and the file to write them to At a high-level, it's getting the summaries for two tweets and storing them in the `tweets.md` file. @@ -87,7 +86,7 @@ This tool - imports three other tools to solve summarization sub-problems - `github.com/gptscript-ai/browser` is an external tool that is used to open the tweet URL in the browser and extract the page content - `get-hyperlinks` and `summarize-hyperlinks` are custom helper tools we'll define momentarily that extract hyperlinks from tweet text and summarize them -- describes the markdown document this tool should produce, leaving it up to the LLM to decide which of the available tools to call to make this happen +- describes the Markdown document this tool should produce, leaving it up to the LLM to decide which of the available tools to call to make this happen ## Hyperlink Summarization Tools diff --git a/docs/docs/03-tools/01-using.md b/docs/docs/03-tools/01-using.md index a2c8326b..27a59037 100644 --- a/docs/docs/03-tools/01-using.md +++ b/docs/docs/03-tools/01-using.md @@ -1,5 +1,9 @@ # Using Tools -In GPTScript, tools are used to extend the capabilities of a script. The idea behind them is that AI performs better when it has very specific instructions for a given task. Tools are a way to break-up the problem into smaller and more focused pieces where each tool is responsible for a specific task. A typical flow like this is to have a main script that imports a set of tools it can use to accomplish its goal. + +In GPTScript, tools are used to extend the capabilities of a script. +The idea behind them is that AI performs better when it has very specific instructions for a given task. 
+Tools are a way to break up the problem into smaller and more focused pieces where each tool is responsible for a specific task. +A typical pattern is to have a main script that imports a set of tools it can use to accomplish its goal. GPTScripts can utilize tools in one of three ways: 1. Built-in system tools @@ -7,6 +11,7 @@ GPTScripts can utilize tools in one of three ways: 3. External tools ### System Tools + All GPTScripts have access to system tools, like `sys.read` and `sys.write`, that can be used without any additional configuration. ```yaml @@ -16,11 +21,14 @@ Read all of the files in my current directory, do not recurse over any subdirect ``` System tools are a set of core tools that come packaged with GPTScript by default. +To see a list of the system tools, run `gptscript --list-tools`. ### In-Script Tools -Things get more interesting when you start to use custom tools. -The most basic example of this is an in-script tool that is defined in the same file as the main script. This is useful for breaking up a large script into smaller, more manageable pieces. +Things get more interesting when you start to write your own tools. + +The most basic example of this is an in-script tool that is defined in the same file as the main script. +This is useful for breaking up a large script into smaller, more manageable pieces. ```yaml tools: random-number @@ -35,7 +43,9 @@ Select a number at random between 1 and 100 and return only the number. ``` ### External Tools -You can refer to GPTScript tool files that are served on the web or stored locally. This is useful for sharing tools across multiple scripts or for using tools that are not part of the core GPTScript distribution. + +You can refer to GPTScript tool files that are served on the web or stored locally. +This is useful for sharing tools across multiple scripts or for using tools that are not part of the core GPTScript distribution. 
```yaml tools: https://get.gptscript.ai/echo.gpt @@ -51,9 +61,11 @@ tools: echo.gpt Echo the phrase "Hello, World!". ``` -You can also refer to OpenAPI definition files as though they were GPTScript tool files. GPTScript will treat each operation in the file as a separate tool. For more details, see [OpenAPI Tools](03-openapi.md). +You can also refer to OpenAPI definition files as though they were GPTScript tool files. +GPTScript will treat each operation in the file as a separate tool. For more details, see [OpenAPI Tools](03-openapi.md). ### Packaged Tools on GitHub + GPTScript tools can be packaged and shared on GitHub, and referred to by their GitHub URL. For example: ```yaml @@ -64,5 +76,9 @@ Generate an image of a city skyline at night and write the resulting image to a Take this image and write a description of it in the style of pirate. ``` +:::important +The GitHub URL must not be prefixed with `http://` or `https://`. +::: + When this script is run, GPTScript will locally clone the referenced GitHub repos and run the tools referenced inside them. For more info on how this works, see [Authoring Tools](02-authoring.md). diff --git a/docs/docs/03-tools/02-authoring.md b/docs/docs/03-tools/02-authoring.md index 3e81613a..b8757440 100644 --- a/docs/docs/03-tools/02-authoring.md +++ b/docs/docs/03-tools/02-authoring.md @@ -2,11 +2,12 @@ You can author your own tools for your use or to share with others. The process for authoring a tool is as simple as creating a `tool.gpt` file in the root directory of your project. -This file is itself a GPTScript that defines the tool's name, description, and what it should do. +This file is a GPTScript that defines the tool's name, description, and what it should do. ## Quickstart -This is a guide for writing portable tools for GPTScript. The supported languages currently are Python, NodeJS, and Go. This guide uses Python but you can see documentation for the other language below. 
+This is a guide for writing portable tools for GPTScript. The supported languages currently are Python, Node.js, and Go. +This guide uses Python, but you can see documentation for the other languages below. ### 1. Write the code @@ -65,7 +66,11 @@ gptscript github.com// '{"url": "https://github.com"}' ## Sharing Tools -GPTScript is designed to easily export and import tools. Doing this is currently based entirely around the use of GitHub repositories. You can export a tool by creating a GitHub repository and ensuring you have the `tool.gpt` file in the root of the repository. You can then import the tool into a GPTScript by specifying the URL of the repository in the `tools` section of the script. For example, we can leverage the `image-generation` tool by adding the following line to a GPTScript: +GPTScript is designed to easily export and import tools. +Doing this is currently based entirely around the use of GitHub repositories. +You can export a tool by creating a GitHub repository and ensuring you have the `tool.gpt` file in the root of the repository. +You can then import the tool into a GPTScript by specifying the URL of the repository in the `tools` section of the script. +For example, we can leverage the `image-generation` tool by adding the following line to a GPTScript: ```yaml tools: github.com/gptscript-ai/dalle-image-generation @@ -73,9 +78,12 @@ tools: github.com/gptscript-ai/dalle-image-generation Generate an image of a city skyline at night. ``` -### Supported Languages +## Supported Languages -GPTScript can execute any binary that you ask it to. However, it can also manage the installation of a language runtime and dependencies for you. Currently this is only supported for a few languages. Here are the supported languages and examples of tools written in those languages: +GPTScript can execute any binary that you ask it to. +However, it can also manage the installation of a language runtime and dependencies for you. 
+Currently, this is only supported for a few languages. +Here are the supported languages and examples of tools written in those languages: | Language | Example | |-----------|----------------------------------------------------------------------------------------------------------------| @@ -84,10 +92,13 @@ GPTScript can execute any binary that you ask it to. However, it can also manage | `Golang` | [Search](https://github.com/gptscript-ai/search) - Use various providers to search the internet | -### Automatic Documentation +## Automatic Documentation -Each GPTScript tool is self-documented using the `tool.gpt` file. You can automatically generate documentation for your tools by visiting `tools.gptscript.ai/`. This documentation site allows others to easily search and explore the tools that have been created. +Each GPTScript tool is self-documented using the `tool.gpt` file. +You can automatically generate documentation for your tools by visiting `https://tools.gptscript.ai/`. +This documentation site allows others to easily search and explore the tools that have been created. -You can add more information about how to use your tool by adding an `examples` directory to your repository and adding a collection of `.gpt` files that demonstrate how to use your tool. These examples will be automatically included in the documentation. +You can add more information about how to use your tool by adding an `examples` directory to your repository and adding a collection of `.gpt` files that demonstrate how to use your tool. +These examples will be automatically included in the documentation. For more information and to explore existing tools, visit [tools.gptscript.ai](https://tools.gptscript.ai). 
diff --git a/docs/docs/03-tools/03-openapi.md b/docs/docs/03-tools/03-openapi.md index 2069b331..0b0f4961 100644 --- a/docs/docs/03-tools/03-openapi.md +++ b/docs/docs/03-tools/03-openapi.md @@ -1,6 +1,6 @@ # OpenAPI Tools -GPTScript can treat OpenAPI v3 definition files as though they were tool files. +GPTScript can treat OpenAPI v2 and v3 definition files as though they were tool files. Each operation (a path and HTTP method) in the file will become a simple tool that makes an HTTP request. GPTScript will automatically and internally generate the necessary code to make the request and parse the response. @@ -42,8 +42,9 @@ Will be resolved as `https://api.example.com/v1`. ## Authentication :::warning -All authentication options will be completely ignored if the server uses HTTP and not HTTPS. +All authentication options will be completely ignored if the server uses HTTP and not HTTPS, unless the request is for `localhost` or 127.0.0.1. This is to protect users from accidentally sending credentials in plain text. +HTTP is only OK, if it's on localhost/127.0.0.1. ::: ### 1. Security Schemes diff --git a/docs/docs/03-tools/04-credential-tools.md b/docs/docs/03-tools/04-credential-tools.md index 9aaa7601..3e6a678a 100644 --- a/docs/docs/03-tools/04-credential-tools.md +++ b/docs/docs/03-tools/04-credential-tools.md @@ -13,7 +13,7 @@ Here is a simple example of a credential provider tool that uses the builtin `sy ```yaml # my-credential-tool.gpt -name: my-credential-tool +Name: my-credential-tool #!/usr/bin/env bash @@ -27,13 +27,17 @@ echo "{\"env\":{\"MY_ENV_VAR\":\"$credential\"}}" Continuing with the above example, this is how you can use it in a script: ```yaml -credentials: my-credential-tool.gpt +Credentials: my-credential-tool.gpt as myCred #!/usr/bin/env bash echo "The value of MY_ENV_VAR is $MY_ENV_VAR" ``` +:::note +GPTScript accepts `Cred:`, `Creds:`, `Credential:`, and `Credentials:` as valid directives. 
+::: + When you run the script, GPTScript will call the credential provider tool first, set the environment variables from its output, and then run the script body. The credential provider tool is called by GPTScript itself. GPTScript does not ask the LLM about it or even tell the LLM about the tool. @@ -41,11 +45,13 @@ LLM about it or even tell the LLM about the tool. If GPTScript has called the credential provider tool in the same context (more on that later), then it will use the stored credential instead of fetching it again. +To delete the credential that just got stored, run `gptscript credential delete myCred`. + You can also specify multiple credential tools for the same script, but they must be on separate lines: ```yaml -credentials: credential-tool-1.gpt -credentials: credential-tool-2.gpt +Credentials: credential-tool-1.gpt +Credentials: credential-tool-2.gpt (tool stuff here) ``` @@ -56,7 +62,7 @@ GPTScript also provides a generic credential tool (`github.com/gptscript-ai/cred where you only need to set one environment variable. Here is an example of how to use it: ```yaml -credentials: github.com/gptscript-ai/credential as myCredentialName with MY_ENV_VAR as env and "this message will be displayed to the user" as message and key as field +Credentials: github.com/gptscript-ai/credential as myCredentialName with MY_ENV_VAR as env and "this message will be displayed to the user" as message and key as field (tool stuff here) ``` @@ -66,24 +72,24 @@ the environment variable `MY_ENV_VAR` and stored in a credential called `myCrede See [the repo](https://github.com/gptscript-ai/credential) for more information. -## Credential Tool Arguments +## Credential Tool Parameters -A credential tool may define arguments. Here is an example: +A credential tool may define parameters. 
Here is an example: ```yaml -name: my-credential-tool -args: env: the environment variable to set -args: val: the value to set it to +Name: my-credential-tool +Parameter: env: the environment variable to set +Parameter: val: the value to set it to #!/usr/bin/env bash echo "{\"env\":{\"$ENV\":\"$VAL\"}}" ``` -When you reference this credential tool in another file, you can use syntax like this to set both arguments: +When you reference this credential tool in another file, you can use syntax like this to set both parameters: ```yaml -credential: my-credential-tool.gpt with MY_ENV_VAR as env and "my value" as val +Credential: my-credential-tool.gpt with MY_ENV_VAR as env and "my value" as val (tool stuff here) ``` @@ -92,7 +98,7 @@ In this example, the tool's output would be `{"env":{"MY_ENV_VAR":"my value"}}` ## Storing Credentials -By default, credentials are automatically stored in the credential store. Read the [main credentials page](../02-credentials.md) +By default, credentials are automatically stored in the credential store. Read the [main credentials page](../06-credentials.md) for more information about the credential store. :::note @@ -105,7 +111,7 @@ will not be stored in the credentials store. When you reference a credential tool in your script, you can give it an alias using the `as` keyword like this: ```yaml -credentials: my-credential-tool.gpt as myAlias +Credentials: my-credential-tool.gpt as myAlias (tool stuff here) ``` @@ -121,8 +127,7 @@ A credential context is basically a namespace for credentials. If you have multi you can switch between them by defining them in different credential contexts. The default context is called `default`, and this is used if none is specified. -You can set the credential context to use with the `--credential-context` flag when running GPTScript. For -example: +You can set the credential context to use with the `--credential-context` flag when running GPTScript. 
For example: ```bash gptscript --credential-context my-azure-workspace my-azure-script.gpt @@ -181,3 +186,21 @@ In this example, `toolA` provides the variables `ENV_VAR_1` and `ENV_VAR_2`, This will read the values of `ENV_VAR_1` through `ENV_VAR_4` from the current environment and set them for the credential. This is a direct mapping of environment variable names. **This is not recommended when overriding credentials for multiple tools that use the same environment variable names.** + +## Credential Refresh (Advanced) + +Some use cases (such as OAuth) may involve the need to refresh expired credentials. +To support this, your credential tool can return other fields besides `env` in its JSON output. +This is the full list of supported fields in the credential tool output: + +- `env` (type: object) - The environment variables to set. +- `expiresAt` (type: string, timestamp in RFC3339 format) - The time when the credential expires. +- `refreshToken` (type: string) - The refresh token to use to refresh the credential. + +When GPTScript tries to use a credential that has a defined `expiresAt` time, it will check if the credential has expired. +If the credential has expired, it will run the credential tool again, and the current value of the credential will be +set to the environment variable `GPTSCRIPT_EXISTING_CREDENTIAL` as a JSON string. This way, the credential tool can check for +that environment variable, and if it is set, get the refresh token from the existing credential and use it to refresh and return a new credential, +typically without user interaction. + +For an example of a tool that uses the refresh feature, see the [Gateway OAuth2 tool](https://github.com/gptscript-ai/gateway-oauth2). 
diff --git a/docs/docs/03-tools/05-context.md b/docs/docs/03-tools/05-context.md index 3a4e8c15..15e600e4 100644 --- a/docs/docs/03-tools/05-context.md +++ b/docs/docs/03-tools/05-context.md @@ -1,97 +1,94 @@ # Context -GPTScript provides a mechanism to share prompt information across many tools using the tool parameter `context`. It is used to provide additional information to the calling tool on when to use a specific tool by prepending the `context` to the instruction of the calling tool. +GPTScript provides a mechanism to share prompt information across many tools using the tool directive `Context`. +It is used to provide additional information to the calling tool on when to use a specific tool by prepending the context to the instruction of the calling tool. - Context can point to a static text or a GPTScript. -- Context tools are just regular GPTScript tools, and any valid gptscript field can be used. -- Exported tools from a context tool are made available to the calling tool. +- Context tools are just regular GPTScript tools, and any valid GPTScript fields can be used in them. +- Shared tools from a context tool are made available to the calling tool. - When context points to a GPTScript tool, output from the context tool gets prepended to the instruction of the calling tool. ## Writing a Context Provider Tool as static text ```yaml -# my-search-context.txt +# my-context.txt -You are an expert web researcher with access to the Search tool.If the search tool fails to return any information stop execution of the script with message "Sorry! Search did not return any results". Feel free to get the contents of the returned URLs in order to get more information. Provide as much detail as you can. Also return the source of the search results. +You have access to run commands on the user's system. Please ask for confirmation from the user before running a command. 
``` -## Using a Context Provider Tool +## Using a Context Tool -Continuing with the above example, this is how you can use the same context in tools that uses different search providers: +Continuing with the above example, this is how you can use the same context in different tools: ```yaml -# my-search-duduckgo.gpt -context: ./my-search-context.txt -tools: github.com/gptscript-ai/search/duckduckgo,sys.http.html2text - -What are some of the most popular tourist destinations in Scotland, and how many people visit them each year? +Context: ./my-context.txt +Tools: sys.exec, sys.write +Which processes on my system are using the most memory? Write their PIDs to a file called pids.txt. ``` ```yaml -# my-search-brave.gpt -context: ./my-search-context.txt -tools: github.com/gptscript-ai/search/brave,sys.http.html2text - -List out some of the main actors in the Christopher Nolan movie Inception, as well as the names of the other Christopher Nolan movies they have appeared in. +Context: ./my-context.txt +Tools: sys.exec +Which file in my current directory is the largest? ``` - ## Context Provider Tool with exported tools Here is a simple example of a context provider tool that provides additional context to search tool: ```yaml -# my-search-context-tool.gpt -export: sys.http.html2text? +# my-context-tool.gpt +Share Tools: sys.exec -#!/bin/bash -echo You are an expert web researcher with access to the Search tool.If the search tool fails to return any information stop execution of the script with message "Sorry! Search did not return any results". Feel free to get the contents of the returned URLs in order to get more information. Provide as much detail as you can. Also return the source of the search results. +#!sys.echo +You have access to run commands on the user's system. Please ask for confirmation from the user before running a command. ``` +The `#!sys.echo` at the start of the tool body tells GPTScript to return everything after it as the output of the tool. 
+ Continuing with the above example, this is how you can use it in a script: ```yaml -context: ./my-search-context-tool.gpt -tools: github.com/gptscript-ai/search/duckduckgo - -What are some of the most popular tourist destinations in Scotland, and how many people visit them each year? +Context: ./my-context-tool.gpt +Tools: sys.write +Which processes on my system are using the most memory? Write their PIDs to a file called pids.txt. ``` When you run this script, GPTScript will use the output from the context tool and add it to the user message along with the existing prompt in this tool to provide additional context to LLM. -## Context Provider Tool with args +## Context Provider Tool with Parameters -Here is an example of a context provider tool that uses args to decide which search tool to use when answering the user provided queries: +Here is an example of a context provider tool that takes a parameter: ```yaml -# context_with_arg.gpt -export: github.com/gptscript-ai/search/duckduckgo, github.com/gptscript-ai/search/brave, sys.http.html2text? -args: search_tool: tool to search with +# context_with_param.gpt +Param: tone: the tone to use when responding to the user's request #!/bin/bash -echo You are an expert web researcher with access to the ${search_tool} Search tool.If the search tool fails to return any information stop execution of the script with message "Sorry! Search did not return any results". Feel free to get the contents of the returned URLs in order to get more information. Provide as much detail as you can. Also return the source of the search results. +echo "Respond to the user's request in a ${tone} tone." 
``` Continuing with the above example, this is how you can use it in a script: ```yaml -# my_context_with_arg.gpt -context: ./context_with_arg.gpt with ${search} as search_tool -Args: search: Search tool to use +# tool.gpt +Context: ./context_with_param.gpt with ${tone} as tone +Param: tone: the tone to use when responding to the user's request +Tools: sys.http.html2text -What are some of the most popular tourist destinations in Scotland, and how many people visit them each year? +What are the top stories on Hacker News right now? ``` -This script can be used to search with `brave` or `duckduckdb` tools depending on the search parameter passed to the tool. -Example usage for using brave search tool: +Here's how you can run the script and define the tone parameter: + ```yaml -gptscript --disable-cache my_context_with_arg.gpt '{"search": "brave"}' +gptscript tool.gpt '{"tone": "obnoxious"}' ``` diff --git a/docs/docs/03-tools/06-how-it-works.md b/docs/docs/03-tools/06-how-it-works.md index c6538395..31dd17ce 100644 --- a/docs/docs/03-tools/06-how-it-works.md +++ b/docs/docs/03-tools/06-how-it-works.md @@ -1,33 +1,33 @@ # How it works -**_GPTScript is composed of tools._** Each tool performs a series of actions similar to a function. Tools have available -to them other tools that can be invoked similar to a function call. While similar to a function, the tools are -primarily implemented with a natural language prompt. **_The interaction of the tools is determined by the AI model_**, -the model determines if the tool needs to be invoked and what arguments to pass. Tools are intended to be implemented -with a natural language prompt but can also be implemented with a command or HTTP call. +**_GPTScript is fundamentally composed of tools._** Each tool is either a natural language prompt for the LLM, or is +programmatic (i.e. a command, script, or program to be run). Tools that use a natural language prompt can also invoke +other tools, similar to function calls. 
The LLM decides when a tool needs to be invoked and sets the parameters to pass to it. ## Example -Below are two tool definitions, separated by `---`. The first tool does not require a name or description, but -every tool after name and description are required. The first tool, has the parameter `tools: bob` meaning that the tool named `bob` is available to be called if needed. +Below are two tool definitions, separated by `---`. +The first tool in the file (often referred to as the "entrypoint tool") does not need a name and description, +but a name is required for all other tools in the file, and a description is recommended. +The entrypoint tool also has the line `Tools: bob` meaning that the tool named `bob` is available to be called if needed. ```yaml -tools: bob +Tools: bob Ask Bob how he is doing and let me know exactly what he said. --- -name: bob -description: I'm Bob, a friendly guy. -args: question: The question to ask Bob. +Name: bob +Description: I'm Bob, a friendly guy. +Param: question: The question to ask Bob. When asked how I am doing, respond with "Thanks for asking "${question}", I'm doing great fellow friendly AI tool!" ``` Put the above content in a file named `bob.gpt` and run the following command: -```shell -$ gptscript bob.gpt +```bash +gptscript bob.gpt ``` ``` @@ -36,8 +36,8 @@ OUTPUT: Bob said, "Thanks for asking 'How are you doing?', I'm doing great fellow friendly AI tool!" ``` -Tools can be implemented by invoking a program instead of a natural language prompt. The below -example is the same as the previous example but implements Bob using python. +Tools can be implemented by invoking a program instead of a natural language prompt. +The below example is the same as the previous example but implements Bob using Python. ```yaml Tools: bob @@ -47,7 +47,7 @@ Ask Bob how he is doing and let me know exactly what he said. --- Name: bob Description: I'm Bob, a friendly guy. -Args: question: The question to ask Bob. 
+Param: question: The question to ask Bob. #!python3 @@ -56,6 +56,4 @@ import os print(f"Thanks for asking {os.environ['question']}, I'm doing great fellow friendly AI tool!") ``` -With these basic building blocks you can create complex scripts with AI interacting with AI, your local system, data, -or external services. - +With these basic building blocks you can create complex scripts with AI interacting with AI, your local system, data, or external services. diff --git a/docs/docs/03-tools/07-gpt-file-reference.md b/docs/docs/03-tools/07-gpt-file-reference.md index c6207ad2..fdc3b363 100644 --- a/docs/docs/03-tools/07-gpt-file-reference.md +++ b/docs/docs/03-tools/07-gpt-file-reference.md @@ -23,10 +23,10 @@ Do more sample tool stuff. ## Tool Definition -A tool starts with a preamble that defines the tool's name, description, args, available tools and additional parameters. +A tool starts with a preamble that defines the tool's name, description, parameters, available tools, and additional directives. The preamble is followed by the tool's body, which contains the instructions for the tool. Comments in the preamble are lines starting with `#` and are ignored by the parser. Comments are not really encouraged -as the text is typically more useful in the description, argument descriptions or instructions. +as the text is typically more useful in the description, parameter descriptions, or body. ```yaml Name: tool-name @@ -34,51 +34,53 @@ Name: tool-name Description: Tool description # This tool can invoke tool1 or tool2 if needed Tools: tool1, tool2 -Args: arg1: The description of arg1 +Param: param1: The description of param1 Tool instructions go here. ``` -## Tool Parameters - -Tool parameters are key-value pairs defined at the beginning of a tool block, before any instructional text. They are specified in the format `key: value`. 
The parser recognizes the following keys (case-insensitive and spaces are ignored): - -| Key | Description | -|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| -| `Name` | The name of the tool. | -| `Model Name` | The LLM model to use, by default it uses "gpt-4-turbo". | -| `Global Model Name`| The LLM model to use for all the tools. | -| `Description` | The description of the tool. It is important that this properly describes the tool's purpose as the description is used by the LLM. | -| `Internal Prompt` | Setting this to `false` will disable the built-in system prompt for this tool. | -| `Tools` | A comma-separated list of tools that are available to be called by this tool. | -| `Global Tools` | A comma-separated list of tools that are available to be called by all tools. | -| `Credentials` | A comma-separated list of credential tools to run before the main tool. | -| `Args` | Arguments for the tool. Each argument is defined in the format `arg-name: description`. | -| `Max Tokens` | Set to a number if you wish to limit the maximum number of tokens that can be generated by the LLM. | -| `JSON Response` | Setting to `true` will cause the LLM to respond in a JSON format. If you set true you must also include instructions in the tool. | -| `Temperature` | A floating-point number representing the temperature parameter. By default, the temperature is 0. Set to a higher number for more creativity. | -| `Chat` | Setting it to `true` will enable an interactive chat session for the tool. | - - +## Tool Directives + +Tool directives are key-value pairs defined at the beginning of a tool block, before the tool body. +They are specified in the format `Key: value`. 
The parser recognizes the following keys (case-insensitive and spaces are ignored): + +| Key | Description | +|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| +| `Name` | The name of the tool. | +| `Model Name` | The LLM model to use, by default it uses "gpt-4-turbo". | +| `Global Model Name` | The LLM model to use for all the tools. | +| `Description` | The description of the tool. It is important that this properly describes the tool's purpose as the description is used by the LLM. | +| `Internal Prompt` | Setting this to `false` will disable the built-in system prompt for this tool. | +| `Tools` | A comma-separated list of tools that are available to be called by this tool. | +| `Global Tools` | A comma-separated list of tools that are available to be called by all tools. | +| `Parameter` / `Args` | Parameters for the tool. Each parameter is defined in the format `param-name: description`. | +| `Max Tokens` | Set to a number if you wish to limit the maximum number of tokens that can be generated by the LLM. | +| `JSON Response` | Setting to `true` will cause the LLM to respond in a JSON format. If you set true you must also include instructions in the tool. | +| `Temperature` | A floating-point number representing the temperature parameter. By default, the temperature is 0. Set to a higher number for more creativity. | +| `Chat` | Setting it to `true` will enable an interactive chat session for the tool. | +| `Credential` | Credential tool to call to set credentials as environment variables before doing anything else. One per line. | +| `Agents` | A comma-separated list of agents that are available to the tool. | +| `Share Tools` | A comma-separated list of tools that are shared by the tool. | +| `Context` | A comma-separated list of context tools available to the tool. 
| +| `Share Context` | A comma-separated list of context tools shared by this tool with any tool including this tool in its context. | ## Tool Body -The tool body contains the instructions for the tool which can be a natural language prompt or -a command to execute. Commands must start with `#!` followed by the interpreter (e.g. `#!/bin/bash`, `#!python3`) -a text that will be placed in a file and passed to the interpreter. Arguments can be references in the instructions -using the format `${arg1}`. +The tool body contains the instructions for the tool. It can be a natural language prompt or +a command to execute. Commands must start with `#!` followed by the interpreter (e.g. `#!/bin/bash`, `#!python3`). +Parameters can be referenced in the body using the format `${param1}`. ```yaml -name: echo-ai -description: A tool that echos the input -args: input: The input +Name: echo-ai +Description: A tool that echos the input +Parameter: input: The input Just return only "${input}" --- -name: echo-command -description: A tool that echos the input -args: input: The input +Name: echo-command +Description: A tool that echos the input +Parameter: input: The input #!/bin/bash diff --git a/docs/docs/03-tools/08-workspace.md b/docs/docs/03-tools/08-workspace.md new file mode 100644 index 00000000..8756cb2a --- /dev/null +++ b/docs/docs/03-tools/08-workspace.md @@ -0,0 +1,37 @@ +# Workspace + +One concept in GPTScript is the workspace directory. +This is a directory meant to be used by tools that need to interact with the local file system. +By default, the workspace directory is a one-off temporary directory. +The workspace directory can be set with the `--workspace` argument when running GPTScript, like this: + +```bash +gptscript --workspace . my-script.gpt +``` + +In the above example, the user’s current directory (denoted by `.`) will be set as the workspace. 
+The workspace directory is no longer temporary if it is explicitly set, and everything in it will persist after the script has finished running. +Both absolute and relative paths are supported. + +Regardless of whether it is set implicitly or explicitly, the workspace is then made available to the script execution as the `GPTSCRIPT_WORKSPACE_DIR` environment variable. + +:::info +GPTScript does not force scripts or tools to write to, read from, or otherwise use the workspace. +The tools must decide to make use of the workspace environment variable. +::: + +## The Workspace Context Tool + +To make a non-code tool that uses the LLM aware of the workspace, you can reference the workspace context tool: + +``` +Context: github.com/gptscript-ai/context/workspace +``` + +This tells the LLM (by way of a [system message](https://platform.openai.com/docs/guides/text-generation/chat-completions-api)) what the workspace directory is, +what its initial contents are, and that if it decides to create a file or directory, it should do so in the workspace directory. +This will not, however, have any impact on code-based tools (i.e. Python, Bash, or Go tools). +Such tools will have the `GPTSCRIPT_WORKSPACE_DIR` environment variable available to them, but they must be written in such a way that they make use of it. + +This context tool also automatically shares the `sys.ls`, `sys.read`, and `sys.write` tools with the tool that is using it as a context. +This is because if a tool intends to interact with the workspace, it minimally needs these tools. diff --git a/docs/docs/03-tools/09-code-tool-guidelines.md b/docs/docs/03-tools/09-code-tool-guidelines.md new file mode 100644 index 00000000..0f13ac7f --- /dev/null +++ b/docs/docs/03-tools/09-code-tool-guidelines.md @@ -0,0 +1,135 @@ +# Code Tool Guidelines + +GPTScript can handle the packaging and distribution of code-based tools via GitHub repos. 
+For more information on how this works, see the [authoring guide](02-authoring.md#sharing-tools). + +This guide provides guidelines for setting up GitHub repos for proper tool distribution. + +## Common Guidelines + +### `tool.gpt` or `agent.gpt` file + +Every repo should have a `tool.gpt` or `agent.gpt` file. This is the main logic of the tool. +If both files exist, GPTScript will use the `agent.gpt` file and ignore the `tool.gpt` file. +Your repo can have other `.gpt` files that are referenced by the main file, but there must be a `tool.gpt` or `agent.gpt` file present. + +Under most circumstances, this file should live in the root of the repo. +If you are using a single repo for the distribution of multiple tools (see [gptscript-ai/context](https://github.com/gptscript-ai/context) for an example), +then you can have the `tool.gpt`/`agent.gpt` file in a subdirectory, and the tool will now be able to be referenced as `github.com/<user>/<repo>/<subdirectory>`. + +### Name and Description directives + +We recommend including a `Name` and `Description` directive for your tool. +This is useful for both people and LLMs to understand what the tool will do and when to use it. + +### Parameters + +Any parameters specified in the tool will be available as environment variables in your code. +We recommend handling parameters that way, rather than using command-line arguments. + +## Python Guidelines + +### Calling Python in the tool body + +The body of the `tool.gpt`/`agent.gpt` file needs to call Python. This can be done as an inline script like this: + +``` +Name: my-python-tool + +#!python3 + +print('hello world') +``` + +An inline script like this is only recommended for simple use cases that don't need external dependencies. 
+ +If your use case is more complex or requires external dependencies, you can reference a Python script in your repo, like this: + +``` +Name: my-python-tool + +#!/usr/bin/env python3 ${GPTSCRIPT_TOOL_DIR}/tool.py +``` + +(This example assumes that your entrypoint to your Python program is in a file called `tool.py`. You can call it what you want.) + +### `requirements.txt` file + +If your Python program needs any external dependencies, you can create a `requirements.txt` file at the same level as +your `tool.gpt`/`agent.gpt` file. GPTScript will handle downloading the dependencies before it runs the tool. + +The file structure should look something like this: + +``` +. +├── requirements.txt +├── tool.py +└── tool.gpt +``` + +## JavaScript (Node.js) Guidelines + +### Calling Node.js in the tool body + +The body of the `tool.gpt`/`agent.gpt` file needs to call Node. This can be done as an inline script like this: + +``` +Name: my-node-tool + +#!node + +console.log('hello world') +``` + +An inline script like this is only recommended for simple use cases that don't need external dependencies. + +If your use case is more complex or requires external dependencies, you can reference a Node script in your repo, like this: + +``` +Name: my-node-tool + +#!/usr/bin/env node ${GPTSCRIPT_TOOL_DIR}/tool.js +``` + +(This example assumes that your entrypoint to your Node program is in a file called `tool.js`. You can call it what you want.) + +### `package.json` file + +If your Node program needs any external dependencies, you can create a `package.json` file at the same level as +your `tool.gpt`/`agent.gpt` file. GPTScript will handle downloading the dependencies before it runs the tool. + +The file structure should look something like this: + +``` +. 
+├── package.json +├── tool.js +└── tool.gpt +``` + +## Go Guidelines + +GPTScript does not support inline code for Go, so you must call to an external program from the tool body like this: + +``` +Name: my-go-tool + +#!${GPTSCRIPT_TOOL_DIR}/bin/gptscript-go-tool +``` + +:::important +Unlike the Python and Node cases above where you can name the file anything you want, Go tools must be `#!${GPTSCRIPT_TOOL_DIR}/bin/gptscript-go-tool`. +::: + +GPTScript will build the Go program located at `./main.go` to a file called `./bin/gptscript-go-tool` before running the tool. +All of your dependencies need to be properly specified in a `go.mod` file. + +The file structure should look something like this: + +``` +. +├── go.mod +├── go.sum +├── main.go +└── tool.gpt +``` diff --git a/docs/docs/03-tools/10-daemon.md b/docs/docs/03-tools/10-daemon.md new file mode 100644 index 00000000..128c161a --- /dev/null +++ b/docs/docs/03-tools/10-daemon.md @@ -0,0 +1,108 @@ +# Daemon Tools (Advanced) + +One advanced use case that GPTScript supports is daemon tools. +A daemon tool is a tool that starts a long-running HTTP server in the background, that will continue running until GPTScript is done executing. +Other tools can easily send HTTP POST requests to the daemon tool. 
+ +## Example + +Here is an example of a daemon tool with a simple echo server written in an inline Node.js script: + +``` +Tools: my-daemon +Param: first: the first parameter +Param: second: the second parameter + +#!http://my-daemon.daemon.gptscript.local/myPath + +--- +Name: my-daemon + +#!sys.daemon node + +const http = require('http'); + +const server = http.createServer((req, res) => { + if (req.method === 'GET' || req.method === 'POST') { + // Extract the path from the request URL + const path = req.url; + + let body = ''; + + req.on('data', chunk => { + body += chunk.toString(); + }) + + // Respond with the path and body + req.on('end', () => { + res.writeHead(200, { 'Content-Type': 'text/plain' }); + res.write(`Body: ${body}\n`); + res.end(`Path: ${path}`); + }) + } else { + res.writeHead(405, { 'Content-Type': 'text/plain' }); + res.end('Method Not Allowed'); + } +}); + +const PORT = process.env.PORT || 3000; +server.listen(PORT, () => { + console.log(`Server is listening on port ${PORT}`); +}); +``` + +Let's talk about the daemon tool, called `my-daemon`, first. + +### The Daemon Tool + +The body of this tool begins with `#!sys.daemon`. This tells GPTScript to take the rest of the body as a command to be +run in the background that will listen for HTTP requests. GPTScript will run this command (in this case, a Node script). +GPTScript will assign a port number for the server and set the `PORT` environment variable to that number, so the +server needs to check that variable and listen on the proper port. + +After GPTScript runs the daemon, it will send it an HTTP GET request to make sure that it is running properly. +The daemon needs to respond with a 200 OK to this request. +By default, the request goes to `/`, but this can be configured with the following syntax: + +``` +#!sys.daemon (path=/api/ready) node + +// (node script here) +``` + +### The Entrypoint Tool + +The entrypoint tool at the top of this script sends an HTTP request to the daemon tool. 
+There are a few important things to note here: + +- The `Tools: my-daemon` directive is needed to show that this tool requires the `my-daemon` tool to already be running. + - When the entrypoint tool runs, GPTScript will check if `my-daemon` is already running. If it is not, GPTScript will start it. +- The `#!http://my-daemon.daemon.gptscript.local/myPath` in the body tells GPTScript to send an HTTP request to the daemon tool. + - The request will be a POST request, with the body of the request being a JSON string of the parameters passed to the entrypoint tool. + - For example, if the script is run like `gptscript script.gpt '{"first":"hello","second":"world"}'`, then the body of the request will be `{"first":"hello","second":"world"}`. + - The path of the request will be `/myPath`. + - The hostname is `my-daemon.daemon.gptscript.local`. When sending a request to a daemon tool, the hostname must always start with the daemon tool's name, followed by `.daemon.gptscript.local`. + - GPTScript recognizes this hostname and determines the correct port number to send the request to, on localhost. + +### Running the Example + +Now let's try running it: + +```bash +gptscript script.gpt '{"first":"hello","second":"world"}' +``` + +``` +OUTPUT: + +Body: {"first":"hello","second":"world"} +Path: /myPath +``` + +This is exactly what we expected. This is a silly, small example just to demonstrate how this feature works. +A real-world situation would involve several different tools sending different HTTP requests to the daemon tool, +likely with an LLM determining when to call which tool. + +## Real-World Example + +To see a real-world example of a daemon tool, check out the [GPTScript Browser tool](https://github.com/gptscript-ai/browser). 
diff --git a/docs/docs/03-tools/11-input-output-filters.md b/docs/docs/03-tools/11-input-output-filters.md new file mode 100644 index 00000000..1db8f937 --- /dev/null +++ b/docs/docs/03-tools/11-input-output-filters.md @@ -0,0 +1,137 @@ +# Input and Output Filters (Advanced) + +GPTScript supports input and output filters, which are tools that can modify the input to a tool or the output from a tool. +These are best explained with examples. + +## Input Filter Example + +In this example, the entrypoint tool uses an input filter to modify the `message` parameter, before calling the subtool. +Then, the subtool uses another input filter to modify the message, then writes it to a file. + +``` +# File name: script.gpt +Param: message: the message from the user +Tools: subtool +Input Filter: appleToOrange + +Take the message and give it to the subtool. Then say "Done". + +--- +Name: subtool +Param: message: the message from the user +Input Filter: orangeToBanana + +#!python3 + +import os + +message = os.getenv("message", "") +with open("gptscript_output.txt", "w") as f: + f.write(message) + +--- +Name: appleToOrange + +#!python3 + +import os + +def output(input: str): + return input.replace("apple", "orange") + +print(output(os.getenv("INPUT", ""))) + +--- +Name: orangeToBanana + +#!python3 + +import os + +def output(input: str): + return input.replace("orange", "banana") + +print(output(os.getenv("INPUT", ""))) +``` + +Try running this tool with the following command: + +```bash +gptscript script.gpt '{"message":"apple is great"}' + +# Then view the output: +cat gptscript_output.txt +``` + +The output should say "banana is great". +This matches what we expect, because the input filter `appleToOrange` changes "apple" to "orange", +and the input filter `orangeToBanana` changes "orange" to "banana". +If we run the tool again with a different message, like "hello world", the final message will be unmodified, +since it did not include the words "apple" or "orange". 
+ +The input filter tools both read the input from the environment variable `INPUT`. +They write their modified input to stdout. +This variable is set by GPTScript before running the input filter tool. + +### Input Filter Real-World Example + +For a real-world example of an input filter tool, check out the [gptscript-ai/context/at-syntax](https://github.com/gptscript-ai/context/tree/main/at-syntax) tool. + +## Output Filter Example + +In this example, the tool is asked to write a poem about apples. +The output filter then replaces all references to apples with oranges. + +``` +Output Filter: applesToOranges + +Write a poem about apples. + +--- +Name: applesToOranges + +#!python3 + +import os + +replacements = { + "Apples": "Oranges", + "apples": "oranges", + "apple": "orange", + "Apple": "Orange", +} + +def applesToOranges(input: str) -> str: + for key, value in replacements.items(): + if input.startswith(key): + # This approach doesn't maintain whitespace, but it's good enough for this example + input = input.replace(key, value) + return input + +output: str = os.getenv("OUTPUT", "") +new_output: str = "" +for i in output.split(): + new_output += applesToOranges(i) + " " +print(new_output.strip()) +``` + +``` +OUTPUT: + +In orchards where the sunlight gleams, Among the leaves, in golden beams, The oranges hang on branches high, A feast for both the heart and eye. +Their skins, a palette rich and bright, In hues of red and green delight, With every bite, a crisp surprise, A taste of autumn, pure and wise. +From pies to cider, sweet and bold, Their stories through the seasons told, In every crunch, a memory, Of nature's gift, so wild and free. +Oh, oranges, treasures of the earth, In every form, you bring us mirth, A simple fruit, yet so profound, In you, a world of joy is found. +``` + +The output tool reads the output from the environment variable `OUTPUT`. +It can then modify the output as needed, and print the new output to stdout. 
+ +Output filter tools can also access the following environment variables if needed: + +- `CHAT` (boolean): indicates whether the current script is being run in chat mode or not +- `CONTINUATION` (boolean): if `CHAT` is true, indicates whether the current chat will continue executing, or if this is the final message + +### Output Filter Real-World Example + +For a real-world example of an output filter tool, check out the [gptscript-ai/context/chat-summary](https://github.com/gptscript-ai/context/tree/main/chat-summary) tool. diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md index 0c485603..de29a97f 100644 --- a/docs/docs/04-command-line-reference/gptscript.md +++ b/docs/docs/04-command-line-reference/gptscript.md @@ -12,37 +12,38 @@ gptscript [flags] PROGRAM_FILE [INPUT...] ### Options ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - --chat-state string The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - 
--disable-tui Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - --force-chat Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT) - --force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL) - -h, --help help for gptscript - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS) - --list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --save-chat-state-file string A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE) - --sub-tool string Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL) - --ui Launch the UI ($GPTSCRIPT_UI) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + --chat-state string The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color 
Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --disable-tui Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + --force-chat Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT) + --force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL) + -h, --help help for gptscript + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS) + --list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --save-chat-state-file string A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE) + --sub-tool string Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL) + --ui Launch the UI ($GPTSCRIPT_UI) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO @@ -50,5 +51,6 @@ gptscript [flags] PROGRAM_FILE [INPUT...] 
* [gptscript credential](gptscript_credential.md) - List stored credentials * [gptscript eval](gptscript_eval.md) - * [gptscript fmt](gptscript_fmt.md) - +* [gptscript getenv](gptscript_getenv.md) - Looks up an environment variable for use in GPTScript tools * [gptscript parse](gptscript_parse.md) - diff --git a/docs/docs/04-command-line-reference/gptscript_eval.md b/docs/docs/04-command-line-reference/gptscript_eval.md index 0fdd0249..ff9e6446 100644 --- a/docs/docs/04-command-line-reference/gptscript_eval.md +++ b/docs/docs/04-command-line-reference/gptscript_eval.md @@ -25,27 +25,28 @@ gptscript eval [flags] ### Options inherited from parent commands ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a 
file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_fmt.md b/docs/docs/04-command-line-reference/gptscript_fmt.md index 5780c838..7aceb957 100644 --- a/docs/docs/04-command-line-reference/gptscript_fmt.md +++ b/docs/docs/04-command-line-reference/gptscript_fmt.md @@ -19,27 +19,28 @@ gptscript fmt [flags] ### Options inherited from parent commands ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default 
LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + 
--default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/docs/docs/04-command-line-reference/gptscript_getenv.md b/docs/docs/04-command-line-reference/gptscript_getenv.md new file mode 100644 index 00000000..80fea614 --- /dev/null +++ b/docs/docs/04-command-line-reference/gptscript_getenv.md @@ -0,0 +1,48 @@ +--- +title: "gptscript getenv" +--- +## gptscript getenv + +Looks up an environment variable for use in GPTScript tools + +``` +gptscript getenv [flags] KEY [DEFAULT] +``` + +### Options + +``` + -h, --help help for getenv +``` + +### Options inherited from parent commands + +``` + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript 
config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + --default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) +``` + +### SEE ALSO + +* [gptscript](gptscript.md) - + diff --git a/docs/docs/04-command-line-reference/gptscript_parse.md b/docs/docs/04-command-line-reference/gptscript_parse.md index 680aebf6..3d84622b 100644 --- a/docs/docs/04-command-line-reference/gptscript_parse.md +++ b/docs/docs/04-command-line-reference/gptscript_parse.md @@ -19,27 +19,28 @@ gptscript parse [flags] ### Options inherited from parent commands ``` - --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) - -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) - --color Use color in output (default true) ($GPTSCRIPT_COLOR) - --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) - --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) - --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") - --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) - --debug Enable debug logging ($GPTSCRIPT_DEBUG) - --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) - --default-model string Default LLM model to use 
($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") - --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) - --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) - --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) - -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) - --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) - --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) - --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) - --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) - -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) - -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) - --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) + --cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR) + -C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR) + --color Use color in output (default true) ($GPTSCRIPT_COLOR) + --config string Path to GPTScript config file ($GPTSCRIPT_CONFIG) + --confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM) + --credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default") + --credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE) + --debug Enable debug logging ($GPTSCRIPT_DEBUG) + --debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES) + --default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o") + 
--default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER) + --disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE) + --dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE) + --events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO) + -f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE) + --no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC) + --openai-api-key string OpenAI API KEY ($OPENAI_API_KEY) + --openai-base-url string OpenAI base URL ($OPENAI_BASE_URL) + --openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID) + -o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT) + -q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET) + --workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE) ``` ### SEE ALSO diff --git a/docs/docs/05-alternative-model-providers.md b/docs/docs/05-alternative-model-providers.md index aa637136..8e32ca21 100644 --- a/docs/docs/05-alternative-model-providers.md +++ b/docs/docs/05-alternative-model-providers.md @@ -1,41 +1,31 @@ # Supported Models and Platforms -## Usage +GPTScript can be used against alternative models that expose an OpenAI-compatible API or have a provider available. +Here is an example using Claude: -GPTScript can be used against alternative models that expose an OpenAI compatible API or have a provider shim available. 
- -### Using a model with an OpenAI compatible API - -```gptscript -model: mistral-large-latest from https://api.mistral.ai/v1 - -Say hello world -``` - -#### Note -Mistral's La Plateforme has an OpenAI compatible API, but the model does not behave identically to gpt-4. For that reason, we also have a provider for it that might get better results in some cases. - - -### Using a model that requires a provider ```gptscript model: claude-3-haiku-20240307 from github.com/gptscript-ai/claude3-anthropic-provider Say hello world ``` -### Authentication +A note on model compatibility: -For OpenAI compatible providers, GPTScript will look for an API key to be configured with the -prefix `GPTSCRIPT_PROVIDER_`, the base domain converted to environment variable format, and a suffix of `_API_KEY`. -As an example if you are using `mistral-large-latest from https://api.mistral.ai/v1`, the environment variable would -be `GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY` +:::important +While the providers allow GPTScript to work with other models, the effectiveness of using a +different model will depend on a combination of prompt engineering and the quality of the model. You may need to change +wording or add more description if you are not getting the results you want. In some cases, the model might not be +capable of intelligently handling the complex function calls. +::: -Each provider shim has different requirements for authentication. Please check the readme for the provider you are +## Authentication + +Each provider has different requirements for authentication. Please check the readme for the provider you are trying to use. 
## Available Model Providers -The following shims are currently available: +The following providers are currently available: * [github.com/gptscript-ai/azure-openai-provider](https://github.com/gptscript-ai/azure-openai-provider) * [github.com/gptscript-ai/azure-other-provider](https://github.com/gptscript-ai/azure-other-provider) @@ -50,16 +40,40 @@ The following shims are currently available: For any provider that supports listing models, you can use this command: ```bash -# With a shim gptscript --list-models github.com/gptscript-ai/claude3-anthropic-provider +``` -# To OpenAI compatible endpoint -gptscript --list-models https://api.mistral.ai/v1 +## OpenAI-Compatible APIs (Advanced) + +:::warning +Even if a non-OpenAI service has an API that claims to be OpenAI-compatible, there are usually subtle differences that cause things to break. +The approach described in this section often does not work. +::: + +You can use a model from an OpenAI-compatible API like this: + +```gptscript +model: mistral-large-latest from https://api.mistral.ai/v1 + +Say hello world ``` -## Compatibility +:::note +Mistral's La Plateforme has an OpenAI-compatible API, but the model does not behave identically to gpt-4. +For that reason, we also have a provider for it that might get better results in some cases. +::: -While the shims provide support for using GPTScript with other models, the effectiveness of using a -different model will depend on a combination of prompt engineering and the quality of the model. You may need to change -wording or add more description if you are not getting the results you want. In some cases, the model might not be -capable of intelligently handling the complex function calls. +### Authentication + +For OpenAI-compatible providers, GPTScript will look for an API key to be configured with the +prefix `GPTSCRIPT_PROVIDER_`, the base domain converted to environment variable format, and a suffix of `_API_KEY`. 
+For example, if you are using `mistral-large-latest from https://api.mistral.ai/v1`, the environment variable would +be `GPTSCRIPT_PROVIDER_API_MISTRAL_AI_API_KEY`. + +### Listing available models + +You can list models from an OpenAI-compatible API like this: + +```bash +gptscript --list-models https://api.mistral.ai/v1 +``` diff --git a/docs/docs/02-credentials.md b/docs/docs/06-credentials.md similarity index 96% rename from docs/docs/02-credentials.md rename to docs/docs/06-credentials.md index 5ba349b9..1d3431de 100644 --- a/docs/docs/02-credentials.md +++ b/docs/docs/06-credentials.md @@ -18,7 +18,9 @@ The configuration file is located in the following location based on your operat - macOS: `$HOME/Library/Application Support/gptscript/config.json` - Linux: `$XDG_CONFIG_HOME/gptscript/config.json` -(Note: if you set the `XDG_CONFIG_HOME` environment variable on macOS, then the same path as Linux will be used.) +:::note +If you set the `XDG_CONFIG_HOME` environment variable on macOS, then the same path as Linux will be used. +::: The configured credential store will be automatically downloaded and compiled from the [gptscript-ai/gptscript-credential-helpers](https://github.com/gptscript-ai/gptscript-credential-helpers) repository, other than the `file` store, which is built-in to GPTScript itself. diff --git a/docs/docs/09-faqs.md b/docs/docs/09-faqs.md index 00f26700..20196011 100644 --- a/docs/docs/09-faqs.md +++ b/docs/docs/09-faqs.md @@ -2,14 +2,15 @@ ### I don't have Homebrew, how can I install GPTScript? -On MacOS and Linux, you can alternatively install via: `curl https://get.gptscript.ai/install.sh | sh` +On macOS and Linux, you can alternatively install via: `curl https://get.gptscript.ai/install.sh | sh` On all supported systems, you download and install the archive for your platform and architecture from the [releases page](https://github.com/gptscript-ai/gptscript/releases). - ### Does GPTScript have an SDK or API I can program against? 
-Currently, there are three SDKs being maintained: [Python](https://github.com/gptscript-ai/py-gptscript), [Node](https://github.com/gptscript-ai/node-gptscript), and [Go](https://github.com/gptscript-ai/go-gptscript). They are currently under development and are being iterated on relatively rapidly. The READMEs in each repository contain the most up-to-date documentation for the functionality of each. +Currently, there are three SDKs being maintained: [Python](https://github.com/gptscript-ai/py-gptscript), [Node](https://github.com/gptscript-ai/node-gptscript), and [Go](https://github.com/gptscript-ai/go-gptscript). +They are under development and are being iterated on relatively rapidly. +The READMEs in each repository contain the most up-to-date documentation for the functionality of each. ### I see there's a --disable-cache flag. How does caching working in GPTScript? @@ -17,47 +18,65 @@ GPTScript leverages caching to speed up execution and reduce LLM costs. There ar - Git commit hash lookups for tools - LLM responses -Caching is enabled for both of these by default. It can be disabled via the `--disable-cache` flag. Below is an explanation of how these areas behave when caching is enabled and disabled. +Caching is enabled for both of these by default. It can be disabled via the `--disable-cache` flag. +Below is an explanation of how these areas behave when caching is enabled and disabled. #### Git commit hash lookups for tools -When a remote tool or context is included in your script (like so: `Tools: github.com/gptscript-ai/browser`) and then invoked during script execution, GPTScript will pull the Git repo for that tool and build it. The tool’s repo and build will be stored in your system’s cache directory (at [$XDG_CACHE_HOME](https://pkg.go.dev/os#UserCacheDir)/gptscript/repos). Subsequent invocations of the tool leverage that cache. 
When the cache is enabled, GPTScript will only check for a newer version of the tool once an hour; if an hour hasn’t passed since the last check, it will just use the one it has. If this is the first invocation and the tool doesn’t yet exist in the cache, it will be pulled and built as normal. +When a remote tool or context is included in your script (like so: `Tools: github.com/gptscript-ai/browser`) and then invoked during script execution, +GPTScript will pull the Git repo for that tool and build it. +The tool's repo and build will be stored in your system's cache directory (at [$XDG_CACHE_HOME](https://pkg.go.dev/os#UserCacheDir)/gptscript/repos). +Subsequent invocations of the tool leverage that cache. +When the cache is enabled, GPTScript will only check for a newer version of the tool once an hour; +if an hour hasn't passed since the last check, it will just use the one it has. +If this is the first invocation and the tool doesn't yet exist in the cache, it will be pulled and built as normal. -When the cache is disabled, GPTScript will check that it has the latest version of the tool (meaning the latest git commit for the repo) on every single invocation of the tool. If GPTScript determines it already has the latest version, that build will be used as-is. In other words, disabling the cache DOES NOT force GPTScript to rebuild the tool, it only forces GPTScript to always check if it has the latest version. +When the cache is disabled, GPTScript will check that it has the latest version of the tool (meaning the latest git commit for the repo) on every single invocation of the tool. +If GPTScript determines it already has the latest version, that build will be used as-is. +In other words, disabling the cache DOES NOT force GPTScript to rebuild the tool, it only forces GPTScript to always check if it has the latest version. 
#### LLM responses -With regards to LLM responses, when the cache is enabled GPTScript will cache the LLM’s response to a chat completion request. Each response is stored as a gob-encoded file in $XDG_CACHE_HOME/gptscript, where the file name is a hash of the chat completion request. +In regard to LLM responses, when the cache is enabled, GPTScript will cache the LLM's response to a chat completion request. +Each response is stored as a gob-encoded file in $XDG_CACHE_HOME/gptscript, where the file name is a hash of the chat completion request. -It is important to note that all [messages in chat completion request](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) are used to generate the hash that is used as the file name. This means that every message between user and LLM affects the cache lookup. So, when using GPTScript in chat mode, it is very unlikely you’ll receive a cached LLM response. Conversely, non-chat GPTScript automations are much more likely to be consistent and thus make use of cached LLM responses. +It is important to note that all [messages in chat completion request](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) are used to generate the hash that is used as the file name. +This means that every message between user and LLM affects the cache lookup. +So, when using GPTScript in chat mode, it is very unlikely you'll receive a cached LLM response. +Conversely, non-chat GPTScript automations are much more likely to be consistent and thus make use of cached LLM responses. ### I see there's a --workspace flag. How do I make use of that? -Every invocation of GPTScript has a workspace directory available to it. By default, this directory is a one-off temp directory, but you can override this and explicitly set a workspace using the `--workspace` flag, like so: +Every invocation of GPTScript has a workspace directory available to it. 
+By default, this directory is a one-off temp directory, but you can override this and explicitly set a workspace using the `--workspace` flag, like so: + ``` gptscript --workspace . my-script.gpt ``` -In the above example, the user’s current directory (denoted by `.`) will be set as the workspace. Both absolute and relative paths are supported. -Regardless of whether it is set implicitly or explicitly, the workspace is then made available to the script execution as the `GPTSCRIPT_WORKSPACE_DIR` environment variable. +For more info, see the [Workspace](03-tools/08-workspace.md) page. -:::info -GPTScript does not force scripts or tools to write to, read from, or otherwise use the workspace. The tools must decide to make use of the workspace environment variable. -::: +### I'm hitting GitHub's rate limit for unauthenticated requests when using GPTScript. -To make prompt-based tools workspace aware, you can add our workspace context, like so: -``` -Context: github.com/gptscript-ai/context/workspace +By default, GPTScript makes unauthenticated requests to GitHub when pulling tools. +Since GitHub's rate limits for unauthenticated requests are fairly low, running into them when developing with GPTScript is a common issue. +To avoid this, you can get GPTScript to make authenticated requests -- which have higher rate limits -- by setting the `GITHUB_AUTH_TOKEN` environment variable to your github account's PAT (Personal Access Token). +If you're already authenticated with the `gh` CLI, you can use its token by running: + +```bash +export GITHUB_AUTH_TOKEN="$(gh auth token)" ``` -This tells the LLM (by way of a [system message](https://platform.openai.com/docs/guides/text-generation/chat-completions-api)) what the workspace directory is, what its initial contents are, and that if it decides to create a file or directory, it should do so in the workspace directory. This will not, however, have any impact on code-based tools (ie python, bash, or go tools). 
Such tools will have the `GPTSCRIPT_WORKSPACE_DIR` environment variable available to them, but they must be written in such a way that they make use of it. -This context also automatically shares the `sys.ls`, `sys.read`, and `sys.write` tools with the tool that is using it as a context. This is because if a tool intends to interact with the workspace, it minimally needs these tools. +### Can I save my chat and resume it later? -### I'm hitting GitHub's rate limit for unauthenticated requests when using GPTScript. +Yes! When you run GPTScript, be sure to specify the `--save-chat-state-file` argument like this: -By default, GPTScript makes unauthenticated requests to GitHub when pulling tools. Since GitHub's rate limits for unauthenticated requests are fairly low, running into them when developing with GPTScript is a common issue. To avoid this, you can get GPTScript to make authenticated requests -- which have higher rate limits -- by setting the `GITHUB_AUTH_TOKEN` environment variable to your github account's PAT (Personal Access Token). -If you're already authenticated with the `gh` CLI, you can use its token by running: +```bash +gptscript --save-chat-state-file chat-state.json my-script.gpt +``` + +Then, when you want to resume your chat, you can use the `--chat-state` argument to specify the file you saved: ```bash -export GITHUB_AUTH_TOKEN="$(gh auth token)" +gptscript --chat-state chat-state.json my-script.gpt ``` diff --git a/examples/gptreview-jenkins/Jenkinsfile b/examples/gptreview-jenkins/Jenkinsfile new file mode 100644 index 00000000..b65780d7 --- /dev/null +++ b/examples/gptreview-jenkins/Jenkinsfile @@ -0,0 +1,56 @@ +pipeline { + agent any + + stages { + stage('Clean Workspace') { + steps { + deleteDir() + } + } + + stage('GPT Review') { + steps { + script { + checkout([ + $class: 'GitSCM', + branches: [[name: '*/main']], // Specify branch + userRemoteConfigs: [[ + url: '' // Provide the URL for your repo that has the codereview.gpt file. 
+ ]] + ]) + + withCredentials([string(credentialsId: 'OPENAI_API_KEY', variable: 'OPENAI_API_KEY')]){ + withCredentials([string(credentialsId: 'GH_TOKEN', variable: 'GH_TOKEN')]) { + // GPTSCript reviews the code + REVIEW = sh(script: "gptscript codereview.gpt --PR_URL=${PR_URL}", returnStdout: true).trim() + + // Construct the JSON payload using Groovy's JSON library + def jsonPayload = groovy.json.JsonOutput.toJson([body: REVIEW]) + + // Post the review comment to the GitHub PR + sh "curl -H \"Authorization: token ${GH_TOKEN}\" -H \"Content-Type: application/json\" -X POST -d '${jsonPayload}' '${PR_COMMENTS_URL}'" + } + } + } + } + } + + stage('Check PR Status') { + steps { + script { + // Check if REVIEW contains 'Require Changes' + if (REVIEW.contains('Require Changes')) { + echo 'Code Requires Changes' + currentBuild.result = 'FAILURE' // Mark the build as failed + error 'Code Requires Changes' // Terminate the build with an error + } + + // Check if REVIEW contains 'Approved' + if (REVIEW.contains('Approved')) { + echo 'Code Approved' + } + } + } + } + } +} \ No newline at end of file diff --git a/examples/gptreview-jenkins/README.md b/examples/gptreview-jenkins/README.md new file mode 100644 index 00000000..60fdb663 --- /dev/null +++ b/examples/gptreview-jenkins/README.md @@ -0,0 +1,31 @@ +# GPTReview With Jenkins + +This folder contains an example of building and implementing your own code reviewer as part of Jenkins Pipeline. + +Below are the files present here: + +- `codereview.gpt`: Contains the GPTScript code and prompts. +- `Jenkinsfile`: Jenkins pipeline file. + +## Pre-requisites + +- An OpenAI API Key. +- GitHub repository. +- Jenkins. +- [GPTScript](https://github.com/gptscript-ai/gptscript) and [GH](https://github.com/cli/cli) CLI installed on the system running Jenkins. 
+
+## How To Run This Example
+
+- Create a new repository in your GitHub account and create a `codereview.gpt` file in the root of that repo based on the contents provided in this file.
+- Configure Jenkins:
+    - Install required plugins - [GitHub](https://plugins.jenkins.io/github/), [Generic Webhook Trigger Plugin](https://plugins.jenkins.io/generic-webhook-trigger/) & [HTTP Request Plugin](https://plugins.jenkins.io/http_request/).
+    - Create a Pipeline
+    - Configure the “OPENAI_API_KEY” and “GH_TOKEN” credentials (the IDs must match the `withCredentials` blocks in the Jenkinsfile)
+
+- Configure GitHub:
+    - Set up a webhook by providing your Jenkins pipeline URL: `http://<your-jenkins-host>/generic-webhook-trigger/invoke?token=<your-token>`
+    - Add `Jenkinsfile` in the root of the repo. *Note: Replace the repository URL with your repo URL in the Jenkinsfile provided.*
+
+- Executing the Script:
+    - Create a new branch, add a code file to the repository, and open a new pull request.
+    - The Jenkins pipeline will trigger and our GPTReview will review your code and provide review comments.
diff --git a/examples/gptreview-jenkins/codereview.gpt b/examples/gptreview-jenkins/codereview.gpt
new file mode 100644
index 00000000..f2502b50
--- /dev/null
+++ b/examples/gptreview-jenkins/codereview.gpt
@@ -0,0 +1,26 @@
+Name: Code Reviewer
+Description: A tool to help you perform code review of open PRs
+Context: learn-gh
+Tools: sys.exec, sys.http.html2text?, sys.find, sys.read, sys.write
+Args: PR_URL: The GitHub PR_URL
+
+You have the gh cli available to you. Use it to perform code review for a pr from the $(repo) provided.
+
+Perform the following steps in order:
+1. Identify the files changed in the pull request ($PR_URL) using the pr number and perform a diff.
+    1. Analyze the complete code of each identified file and perform a detailed line by line code review.
+    2. Repeat the process for each changed file in the pr.
+2. Share your review comments separately for each file.
+3. 
In a new line write "Code: Approved" or "Code: Require Changes" based on the review comments. +--- +Name: learn-gh +Description: A tool to help you learn gh cli + +#!/usr/bin/env bash + +echo "The following is the help text for the gh cli and some of its sub-commands. Use these when figuring out how to construct new commands. Note that the --search flag is used for filtering and sorting as well; there is no dedicate --sort flag." +gh --help +gh repo --help +gh pr --help +gh pr checkout --help +gh pr diff --help diff --git a/go.mod b/go.mod index 9feaef4f..783e638b 100644 --- a/go.mod +++ b/go.mod @@ -16,9 +16,9 @@ require ( github.com/google/uuid v1.6.0 github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 - github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d - github.com/gptscript-ai/go-gptscript v0.9.3-0.20240728044543-20d868b5baa6 - github.com/gptscript-ai/tui v0.0.0-20240728045051-19ba83cd96c1 + github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb + github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 + github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e github.com/hexops/autogold/v2 v2.2.1 github.com/hexops/valast v1.4.4 github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 @@ -30,6 +30,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 github.com/tidwall/gjson v1.17.1 + github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc golang.org/x/sync v0.7.0 golang.org/x/term v0.20.0 @@ -101,6 +102,8 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/ulikunitz/xz v0.5.10 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xo/terminfo 
v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/yuin/goldmark v1.5.4 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect diff --git a/go.sum b/go.sum index 9c288064..fd165d3f 100644 --- a/go.sum +++ b/go.sum @@ -169,12 +169,12 @@ github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86 h1:m9yLtI github.com/gptscript-ai/broadcaster v0.0.0-20240625175512-c43682019b86/go.mod h1:lK3K5EZx4dyT24UG3yCt0wmspkYqrj4D/8kxdN3relk= github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379 h1:vYnXoIyCXzaCEw0sYifQ4bDpsv3/fO/dZ2suEsTwCIo= github.com/gptscript-ai/chat-completion-client v0.0.0-20240531200700-af8e7ecf0379/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo= -github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d h1:sKf7T7twhGXs6AVbvD9pKDVewykkwSAPwEpmIEQIR/4= -github.com/gptscript-ai/cmd v0.0.0-20240625175447-4250b42feb7d/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240728044543-20d868b5baa6 h1:hF9Q8KdQhuoXSGKVh4ywRvwn5RJt9rbPraigpXqbGYU= -github.com/gptscript-ai/go-gptscript v0.9.3-0.20240728044543-20d868b5baa6/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= -github.com/gptscript-ai/tui v0.0.0-20240728045051-19ba83cd96c1 h1:gJXswjjwoiWdOS+s73mliWbN9dyJpiUkb3T+EiV7EFc= -github.com/gptscript-ai/tui v0.0.0-20240728045051-19ba83cd96c1/go.mod h1:Llh3vi87gyry6j/sgJxhkHHvgv9uQRzEiMWuQtmpW1w= +github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb h1:ky2J2CzBOskC7Jgm2VJAQi2x3p7FVGa+2/PcywkFJuc= +github.com/gptscript-ai/cmd v0.0.0-20240802230653-326b7baf6fcb/go.mod h1:DJAo1xTht1LDkNYFNydVjTHd576TC7MlpsVRl3oloVw= +github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17 h1:BTfJ6ls31Roq42lznlZnuPzRf0wrT8jT+tWcvq7wDXY= +github.com/gptscript-ai/go-gptscript v0.9.4-0.20240801203434-840b14393b17/go.mod h1:Dh6vYRAiVcyC3ElZIGzTvNF1FxtYwA07BHfSiFKQY7s= +github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e 
h1:OO/b8gGQi3jIpDoII+jf7fc4ssqOZdFcb9zB+QjsxRQ= +github.com/gptscript-ai/tui v0.0.0-20240804004233-efc5673dc76e/go.mod h1:KGtCo7cjH6qR6Wp6AyI1dL1R8bln8wVpdDEoopRUckY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -317,6 +317,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf h1:pvbZ0lM0XWPBqUKqFU8cmavspvIl9nulOYwdy6IFRRo= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -336,6 +337,12 @@ github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95 github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod 
h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= diff --git a/pkg/cli/getenv.go b/pkg/cli/getenv.go new file mode 100644 index 00000000..6d81944d --- /dev/null +++ b/pkg/cli/getenv.go @@ -0,0 +1,60 @@ +package cli + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "fmt" + "io" + "os" + "strings" + + "github.com/spf13/cobra" +) + +type Getenv struct { +} + +func (e *Getenv) Customize(cmd *cobra.Command) { + cmd.Use = "getenv [flags] KEY [DEFAULT]" + cmd.Short = "Looks up an environment variable for use in GPTScript tools" + cmd.Args = cobra.RangeArgs(1, 2) +} + +func (e *Getenv) Run(_ *cobra.Command, args []string) error { + var ( + key = args[0] + def string + ) + if len(args) > 1 { + def = args[1] + } + value := getEnv(key, def) + fmt.Print(value) + return nil +} + +func getEnv(key, def string) string { + v := os.Getenv(key) + if v == "" { + return def + } + + if strings.HasPrefix(v, `{"_gz":"`) && strings.HasSuffix(v, `"}`) { + data, err := base64.StdEncoding.DecodeString(v[8 : len(v)-2]) + if err != nil { + return v + } + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return v + } + strBytes, err := io.ReadAll(gz) + if err != nil { + return v + } + return string(strBytes) + } + + return v +} diff --git a/pkg/cli/getenv_test.go b/pkg/cli/getenv_test.go new file mode 100644 index 00000000..8cc9e05f --- /dev/null +++ b/pkg/cli/getenv_test.go @@ -0,0 +1,57 @@ +package cli + +import ( + "os" + "testing" +) + +func TestGetEnv(t 
*testing.T) { + // Cleaning up + defer func(currentEnvValue string) { + os.Setenv("testKey", currentEnvValue) + }(os.Getenv("testKey")) + + // Tests + testCases := []struct { + name string + key string + def string + envValue string + expectedResult string + }{ + { + name: "NoValueUseDefault", + key: "testKey", + def: "defaultValue", + envValue: "", + expectedResult: "defaultValue", + }, + { + name: "ValueExistsNoCompress", + key: "testKey", + def: "defaultValue", + envValue: "testValue", + expectedResult: "testValue", + }, + { + name: "ValueExistsCompressed", + key: "testKey", + def: "defaultValue", + envValue: `{"_gz":"H4sIAEosrGYC/ytJLS5RKEvMKU0FACtB3ewKAAAA"}`, + + expectedResult: "test value", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + os.Setenv(test.key, test.envValue) + + result := getEnv(test.key, test.def) + + if result != test.expectedResult { + t.Errorf("expected: %s, got: %s", test.expectedResult, result) + } + }) + } +} diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go index c22a25d2..4458d87b 100644 --- a/pkg/cli/gptscript.go +++ b/pkg/cli/gptscript.go @@ -54,24 +54,25 @@ type GPTScript struct { Output string `usage:"Save output to a file, or - for stdout" short:"o"` EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. 
\\\\.\\pipe\\my-pipe)" name:"events-stream-to"` // Input should not be using GPTSCRIPT_INPUT env var because that is the same value that is set in tool executions - Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"` - SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` - Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` - ListModels bool `usage:"List the models available and exit" local:"true"` - ListTools bool `usage:"List built-in tools and exit" local:"true"` - ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"` - Chdir string `usage:"Change current working directory" short:"C"` - Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"` - Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"` - CredentialContext string `usage:"Context name in which to store credentials" default:"default"` - CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"` - ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"` - ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"` - ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"` - Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"` - UI bool `usage:"Launch the UI" local:"true" name:"ui"` - DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"` - SaveChatStateFile string `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"` + Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" 
env:"GPTSCRIPT_INPUT_FILE"` + SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"` + Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"` + ListModels bool `usage:"List the models available and exit" local:"true"` + ListTools bool `usage:"List built-in tools and exit" local:"true"` + ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"` + Chdir string `usage:"Change current working directory" short:"C"` + Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"` + Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"` + CredentialContext string `usage:"Context name in which to store credentials" default:"default"` + CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"` + ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"` + ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"` + ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"` + Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"` + UI bool `usage:"Launch the UI" local:"true" name:"ui"` + DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"` + SaveChatStateFile string `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"` + DefaultModelProvider string `usage:"Default LLM model provider to use, this will override OpenAI settings"` readData []byte } @@ -84,6 +85,7 @@ func New() *cobra.Command { &Credential{root: root}, &Parse{}, &Fmt{}, + &Getenv{}, &SDKServer{ GPTScript: root, }, @@ -136,11 +138,12 @@ func (r *GPTScript) 
NewGPTScriptOpts() (gptscript.Options, error) { CredentialOverrides: r.CredentialOverride, Sequential: r.ForceSequential, }, - Quiet: r.Quiet, - Env: os.Environ(), - CredentialContext: r.CredentialContext, - Workspace: r.Workspace, - DisablePromptServer: r.UI, + Quiet: r.Quiet, + Env: os.Environ(), + CredentialContext: r.CredentialContext, + Workspace: r.Workspace, + DisablePromptServer: r.UI, + DefaultModelProvider: r.DefaultModelProvider, } if r.Confirm { @@ -466,9 +469,10 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) { // Don't use cmd.Context() because then sigint will cancel everything return tui.Run(context.Background(), args[0], tui.RunOptions{ ClientOpts: &gptscript2.GlobalOptions{ - OpenAIAPIKey: r.OpenAIOptions.APIKey, - OpenAIBaseURL: r.OpenAIOptions.BaseURL, - DefaultModel: r.DefaultModel, + OpenAIAPIKey: r.OpenAIOptions.APIKey, + OpenAIBaseURL: r.OpenAIOptions.BaseURL, + DefaultModel: r.DefaultModel, + DefaultModelProvider: r.DefaultModelProvider, }, TrustedRepoPrefixes: []string{"github.com/gptscript-ai"}, DisableCache: r.DisableCache, diff --git a/pkg/cli/main.go b/pkg/cli/main.go index d06f614f..b607281b 100644 --- a/pkg/cli/main.go +++ b/pkg/cli/main.go @@ -1,7 +1,9 @@ package cli import ( + "context" "os" + "os/signal" "github.com/gptscript-ai/cmd" "github.com/gptscript-ai/gptscript/pkg/daemon" @@ -18,5 +20,7 @@ func Main() { } os.Exit(0) } - cmd.Main(New()) + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + cmd.MainCtx(ctx, New()) } diff --git a/pkg/engine/cmd.go b/pkg/engine/cmd.go index 9e4b94fc..d62aad2e 100644 --- a/pkg/engine/cmd.go +++ b/pkg/engine/cmd.go @@ -2,7 +2,9 @@ package engine import ( "bytes" + "compress/gzip" "context" + "encoding/base64" "encoding/json" "fmt" "io" @@ -44,6 +46,25 @@ func (o *outputWriter) Write(p []byte) (n int, err error) { return len(p), nil } +func compressEnv(envs []string) (result []string) { + for _, env := range envs { + k, v, ok := 
strings.Cut(env, "=") + if !ok || len(v) < 40_000 { + result = append(result, env) + continue + } + + out := bytes.NewBuffer(nil) + b64 := base64.NewEncoder(base64.StdEncoding, out) + gz := gzip.NewWriter(b64) + _, _ = gz.Write([]byte(v)) + _ = gz.Close() + _ = b64.Close() + result = append(result, k+`={"_gz":"`+out.String()+`"}`) + } + return +} + func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCategory ToolCategory) (cmdOut string, cmdErr error) { id := counter.Next() @@ -95,10 +116,10 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate for _, inputContext := range ctx.InputContext { instructions = append(instructions, inputContext.Content) } + var extraEnv = []string{ strings.TrimSpace("GPTSCRIPT_CONTEXT=" + strings.Join(instructions, "\n")), } - cmd, stop, err := e.newCommand(ctx.Ctx, extraEnv, tool, input) if err != nil { return "", err @@ -113,24 +134,34 @@ func (e *Engine) runCommand(ctx Context, tool types.Tool, input string, toolCate }, } - output := &bytes.Buffer{} - all := &bytes.Buffer{} - cmd.Stderr = io.MultiWriter(all, os.Stderr) - cmd.Stdout = io.MultiWriter(all, output, &outputWriter{ - id: id, - progress: e.Progress, - }) + var ( + stdout = &bytes.Buffer{} + stdoutAndErr = &bytes.Buffer{} + progressOut = &outputWriter{ + id: id, + progress: e.Progress, + } + result *bytes.Buffer + ) + + cmd.Stdout = io.MultiWriter(stdout, stdoutAndErr, progressOut) + if toolCategory == NoCategory || toolCategory == ContextToolCategory { + cmd.Stderr = io.MultiWriter(stdoutAndErr, progressOut) + result = stdoutAndErr + } else { + cmd.Stderr = io.MultiWriter(stdoutAndErr, progressOut, os.Stderr) + result = stdout + } if err := cmd.Run(); err != nil { if toolCategory == NoCategory { - return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, all), nil + return fmt.Sprintf("ERROR: got (%v) while running tool, OUTPUT: %s", err, stdoutAndErr), nil } - _, _ = os.Stderr.Write(output.Bytes()) 
log.Errorf("failed to run tool [%s] cmd %v: %v", tool.Parameters.Name, cmd.Args, err) - return "", fmt.Errorf("ERROR: %s: %w", all, err) + return "", fmt.Errorf("ERROR: %s: %w", result, err) } - return output.String(), IsChatFinishMessage(output.String()) + return result.String(), IsChatFinishMessage(result.String()) } func (e *Engine) getRuntimeEnv(ctx context.Context, tool types.Tool, cmd, env []string) ([]string, error) { @@ -277,6 +308,6 @@ func (e *Engine) newCommand(ctx context.Context, extraEnv []string, tool types.T } cmd := exec.CommandContext(ctx, env.Lookup(envvars, args[0]), cmdArgs...) - cmd.Env = envvars + cmd.Env = compressEnv(envvars) return cmd, stop, nil } diff --git a/pkg/engine/openapi.go b/pkg/engine/openapi.go index 2e338ca4..0bd5f599 100644 --- a/pkg/engine/openapi.go +++ b/pkg/engine/openapi.go @@ -8,83 +8,148 @@ import ( "mime/multipart" "net/http" "net/url" + "os" "strings" "github.com/gptscript-ai/gptscript/pkg/env" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/gptscript-ai/gptscript/pkg/types" "github.com/tidwall/gjson" - "golang.org/x/exp/maps" ) -var ( - SupportedMIMETypes = []string{"application/json", "text/plain", "multipart/form-data"} - SupportedSecurityTypes = []string{"apiKey", "http"} -) +func (e *Engine) runOpenAPIRevamp(tool types.Tool, input string) (*Return, error) { + envMap := make(map[string]string, len(e.Env)) + for _, env := range e.Env { + k, v, _ := strings.Cut(env, "=") + envMap[k] = v + } -type Parameter struct { - Name string `json:"name"` - Style string `json:"style"` - Explode *bool `json:"explode"` -} + _, inst, _ := strings.Cut(tool.Instructions, types.OpenAPIPrefix+" ") + args := strings.Fields(inst) -// A SecurityInfo represents a security scheme in OpenAPI. 
-type SecurityInfo struct { - Name string `json:"name"` // name as defined in the security schemes - Type string `json:"type"` // http or apiKey - Scheme string `json:"scheme"` // bearer or basic, for type==http - APIKeyName string `json:"apiKeyName"` // name of the API key, for type==apiKey - In string `json:"in"` // header, query, or cookie, for type==apiKey -} + if len(args) != 3 { + return nil, fmt.Errorf("expected 3 arguments to %s", types.OpenAPIPrefix) + } -func (i SecurityInfo) GetCredentialToolStrings(hostname string) []string { - vars := i.getCredentialNamesAndEnvVars(hostname) - var tools []string - - for cred, v := range vars { - field := "value" - switch i.Type { - case "apiKey": - field = i.APIKeyName - case "http": - if i.Scheme == "bearer" { - field = "bearer token" - } else { - if strings.Contains(v, "PASSWORD") { - field = "password" - } else { - field = "username" - } + command := args[0] + source := args[1] + filter := args[2] + + var res *Return + switch command { + case openapi.ListTool: + t, err := openapi.Load(source) + if err != nil { + return nil, fmt.Errorf("failed to load OpenAPI file %s: %w", source, err) + } + + opList, err := openapi.List(t, filter) + if err != nil { + return nil, fmt.Errorf("failed to list operations: %w", err) + } + + opListJSON, err := json.MarshalIndent(opList, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal operation list: %w", err) + } + + res = &Return{ + Result: ptr(string(opListJSON)), + } + case openapi.GetSchemaTool: + operation := gjson.Get(input, "operation").String() + + if filter != "" && filter != openapi.NoFilter { + match, err := openapi.MatchFilters(strings.Split(filter, "|"), operation) + if err != nil { + return nil, err + } else if !match { + // Report to the LLM that the operation was not found + return &Return{ + Result: ptr(fmt.Sprintf("operation %s not found", operation)), + }, nil } } - tools = append(tools, fmt.Sprintf("github.com/gptscript-ai/credential as %s with %s 
as env and %q as message and %q as field", - cred, v, "Please provide a value for the "+v+" environment variable", field)) - } - return tools -} + t, err := openapi.Load(source) + if err != nil { + return nil, fmt.Errorf("failed to load OpenAPI file %s: %w", source, err) + } -func (i SecurityInfo) getCredentialNamesAndEnvVars(hostname string) map[string]string { - if i.Type == "http" && i.Scheme == "basic" { - return map[string]string{ - hostname + i.Name + "Username": "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name) + "_USERNAME", - hostname + i.Name + "Password": "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name) + "_PASSWORD", + var defaultHost string + if strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") { + u, err := url.Parse(source) + if err != nil { + return nil, fmt.Errorf("failed to parse server URL %s: %w", source, err) + } + defaultHost = u.Scheme + "://" + u.Hostname() + } + + schema, _, found, err := openapi.GetSchema(operation, defaultHost, t) + if err != nil { + return nil, fmt.Errorf("failed to get schema: %w", err) + } + if !found { + // Report to the LLM that the operation was not found + return &Return{ + Result: ptr(fmt.Sprintf("operation %s not found", operation)), + }, nil + } + + schemaJSON, err := json.MarshalIndent(schema, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal schema: %w", err) + } + + res = &Return{ + Result: ptr(string(schemaJSON)), + } + case openapi.RunTool: + operation := gjson.Get(input, "operation").String() + args := gjson.Get(input, "args").String() + + if filter != "" && filter != openapi.NoFilter { + match, err := openapi.MatchFilters(strings.Split(filter, "|"), operation) + if err != nil { + return nil, err + } else if !match { + // Report to the LLM that the operation was not found + return &Return{ + Result: ptr(fmt.Sprintf("operation %s not found", operation)), + }, nil + } + } + + t, err := openapi.Load(source) + if err != nil 
{ + return nil, fmt.Errorf("failed to load OpenAPI file %s: %w", source, err) + } + + var defaultHost string + if strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") { + u, err := url.Parse(source) + if err != nil { + return nil, fmt.Errorf("failed to parse server URL %s: %w", source, err) + } + defaultHost = u.Scheme + "://" + u.Hostname() + } + + result, found, err := openapi.Run(operation, defaultHost, args, t, e.Env) + if err != nil { + return nil, fmt.Errorf("failed to run operation %s: %w", operation, err) + } else if !found { + // Report to the LLM that the operation was not found + return &Return{ + Result: ptr(fmt.Sprintf("operation %s not found", operation)), + }, nil + } + + res = &Return{ + Result: ptr(result), } } - return map[string]string{ - hostname + i.Name: "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name), - } -} -type OpenAPIInstructions struct { - Server string `json:"server"` - Path string `json:"path"` - Method string `json:"method"` - BodyContentMIME string `json:"bodyContentMIME"` - SecurityInfos [][]SecurityInfo `json:"apiKeyInfos"` - QueryParameters []Parameter `json:"queryParameters"` - PathParameters []Parameter `json:"pathParameters"` - HeaderParameters []Parameter `json:"headerParameters"` - CookieParameters []Parameter `json:"cookieParameters"` + return res, nil } // runOpenAPI runs a tool that was generated from an OpenAPI definition. @@ -92,6 +157,10 @@ type OpenAPIInstructions struct { // The tools Instructions field will be in the format "#!sys.openapi '{Instructions JSON}'", // where {Instructions JSON} is a JSON string of type OpenAPIInstructions. 
func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { + if os.Getenv("GPTSCRIPT_OPENAPI_REVAMP") == "true" { + return e.runOpenAPIRevamp(tool, input) + } + envMap := map[string]string{} for _, env := range e.Env { @@ -100,7 +169,7 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { } // Extract the instructions from the tool to determine server, path, method, etc. - var instructions OpenAPIInstructions + var instructions openapi.OperationInfo _, inst, _ := strings.Cut(tool.Instructions, types.OpenAPIPrefix+" ") inst = strings.TrimPrefix(inst, "'") inst = strings.TrimSuffix(inst, "'") @@ -109,7 +178,7 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { } // Handle path parameters - instructions.Path = handlePathParameters(instructions.Path, instructions.PathParameters, input) + instructions.Path = openapi.HandlePathParameters(instructions.Path, instructions.PathParams, input) // Parse the URL path, err := url.JoinPath(instructions.Server, instructions.Path) @@ -131,7 +200,7 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { // Check for authentication (only if using HTTPS or localhost) if u.Scheme == "https" || u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" { if len(instructions.SecurityInfos) > 0 { - if err := handleAuths(req, envMap, instructions.SecurityInfos); err != nil { + if err := openapi.HandleAuths(req, envMap, instructions.SecurityInfos); err != nil { return nil, fmt.Errorf("error setting up authentication: %w", err) } } @@ -145,11 +214,11 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { } // Handle query parameters - req.URL.RawQuery = handleQueryParameters(req.URL.Query(), instructions.QueryParameters, input).Encode() + req.URL.RawQuery = openapi.HandleQueryParameters(req.URL.Query(), instructions.QueryParams, input).Encode() // Handle header and cookie parameters - handleHeaderParameters(req, 
instructions.HeaderParameters, input) - handleCookieParameters(req, instructions.CookieParameters, input) + openapi.HandleHeaderParameters(req, instructions.HeaderParams, input) + openapi.HandleCookieParameters(req, instructions.CookieParams, input) // Handle request body if instructions.BodyContentMIME != "" { @@ -217,299 +286,6 @@ func (e *Engine) runOpenAPI(tool types.Tool, input string) (*Return, error) { }, nil } -// handleAuths will set up the request with the necessary authentication information. -// A set of sets of SecurityInfo is passed in, where each represents a possible set of security options. -func handleAuths(req *http.Request, envMap map[string]string, infoSets [][]SecurityInfo) error { - var missingVariables [][]string - - // We need to find a set of infos where we have all the needed environment variables. - for _, infoSet := range infoSets { - var missing []string // Keep track of any missing environment variables - for _, info := range infoSet { - vars := info.getCredentialNamesAndEnvVars(req.URL.Hostname()) - - for _, envName := range vars { - if _, ok := envMap[envName]; !ok { - missing = append(missing, envName) - } - } - } - if len(missing) > 0 { - missingVariables = append(missingVariables, missing) - continue - } - - // We're using this info set, because no environment variables were missing. - // Set up the request as needed. 
- for _, info := range infoSet { - envNames := maps.Values(info.getCredentialNamesAndEnvVars(req.URL.Hostname())) - switch info.Type { - case "apiKey": - switch info.In { - case "header": - req.Header.Set(info.APIKeyName, envMap[envNames[0]]) - case "query": - v := url.Values{} - v.Add(info.APIKeyName, envMap[envNames[0]]) - req.URL.RawQuery = v.Encode() - case "cookie": - req.AddCookie(&http.Cookie{ - Name: info.APIKeyName, - Value: envMap[envNames[0]], - }) - } - case "http": - switch info.Scheme { - case "bearer": - req.Header.Set("Authorization", "Bearer "+envMap[envNames[0]]) - case "basic": - req.SetBasicAuth(envMap[envNames[0]], envMap[envNames[1]]) - } - } - } - return nil - } - - return fmt.Errorf("did not find the needed environment variables for any of the security options. "+ - "At least one of these sets of environment variables must be provided: %v", missingVariables) -} - -// handleQueryParameters extracts each query parameter from the input JSON and adds it to the URL query. 
-func handleQueryParameters(q url.Values, params []Parameter, input string) url.Values { - for _, param := range params { - res := gjson.Get(input, param.Name) - if res.Exists() { - // If it's an array or object, handle the serialization style - if res.IsArray() { - switch param.Style { - case "form", "": // form is the default style for query parameters - if param.Explode == nil || *param.Explode { // default is to explode - for _, item := range res.Array() { - q.Add(param.Name, item.String()) - } - } else { - var strs []string - for _, item := range res.Array() { - strs = append(strs, item.String()) - } - q.Add(param.Name, strings.Join(strs, ",")) - } - case "spaceDelimited": - if param.Explode == nil || *param.Explode { - for _, item := range res.Array() { - q.Add(param.Name, item.String()) - } - } else { - var strs []string - for _, item := range res.Array() { - strs = append(strs, item.String()) - } - q.Add(param.Name, strings.Join(strs, " ")) - } - case "pipeDelimited": - if param.Explode == nil || *param.Explode { - for _, item := range res.Array() { - q.Add(param.Name, item.String()) - } - } else { - var strs []string - for _, item := range res.Array() { - strs = append(strs, item.String()) - } - q.Add(param.Name, strings.Join(strs, "|")) - } - } - } else if res.IsObject() { - switch param.Style { - case "form", "": // form is the default style for query parameters - if param.Explode == nil || *param.Explode { // default is to explode - for k, v := range res.Map() { - q.Add(k, v.String()) - } - } else { - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - q.Add(param.Name, strings.Join(strs, ",")) - } - case "deepObject": - for k, v := range res.Map() { - q.Add(param.Name+"["+k+"]", v.String()) - } - } - } else { - q.Add(param.Name, res.String()) - } - } - } - return q -} - -// handlePathParameters extracts each path parameter from the input JSON and replaces its placeholder in the URL path. 
-func handlePathParameters(path string, params []Parameter, input string) string { - for _, param := range params { - res := gjson.Get(input, param.Name) - if res.Exists() { - // If it's an array or object, handle the serialization style - if res.IsArray() { - switch param.Style { - case "simple", "": // simple is the default style for path parameters - // simple looks the same regardless of whether explode is true - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) - case "label": - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - - if param.Explode == nil || !*param.Explode { // default is to not explode - path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, ","), 1) - } else { - path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, "."), 1) - } - case "matrix": - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - - if param.Explode == nil || !*param.Explode { // default is to not explode - path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+strings.Join(strs, ","), 1) - } else { - s := "" - for _, str := range strs { - s += ";" + param.Name + "=" + str - } - path = strings.Replace(path, "{"+param.Name+"}", s, 1) - } - } - } else if res.IsObject() { - switch param.Style { - case "simple", "": - if param.Explode == nil || !*param.Explode { // default is to not explode - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) - } else { - var strs []string - for k, v := range res.Map() { - strs = append(strs, k+"="+v.String()) - } - path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) - } - case "label": - if param.Explode == 
nil || !*param.Explode { // default is to not explode - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, ","), 1) - } else { - s := "" - for k, v := range res.Map() { - s += "." + k + "=" + v.String() - } - path = strings.Replace(path, "{"+param.Name+"}", s, 1) - } - case "matrix": - if param.Explode == nil || !*param.Explode { // default is to not explode - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+strings.Join(strs, ","), 1) - } else { - s := "" - for k, v := range res.Map() { - s += ";" + k + "=" + v.String() - } - path = strings.Replace(path, "{"+param.Name+"}", s, 1) - } - } - } else { - // Serialization is handled slightly differently even for basic types. - // Explode doesn't do anything though. - switch param.Style { - case "simple", "": - path = strings.Replace(path, "{"+param.Name+"}", res.String(), 1) - case "label": - path = strings.Replace(path, "{"+param.Name+"}", "."+res.String(), 1) - case "matrix": - path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+res.String(), 1) - } - } - } - } - return path -} - -// handleHeaderParameters extracts each header parameter from the input JSON and adds it to the request headers. 
-func handleHeaderParameters(req *http.Request, params []Parameter, input string) { - for _, param := range params { - res := gjson.Get(input, param.Name) - if res.Exists() { - if res.IsArray() { - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - req.Header.Add(param.Name, strings.Join(strs, ",")) - } else if res.IsObject() { - // Handle explosion - var strs []string - if param.Explode == nil || !*param.Explode { // default is to not explode - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - } else { - for k, v := range res.Map() { - strs = append(strs, k+"="+v.String()) - } - } - req.Header.Add(param.Name, strings.Join(strs, ",")) - } else { // basic type - req.Header.Add(param.Name, res.String()) - } - } - } -} - -// handleCookieParameters extracts each cookie parameter from the input JSON and adds it to the request cookies. -func handleCookieParameters(req *http.Request, params []Parameter, input string) { - for _, param := range params { - res := gjson.Get(input, param.Name) - if res.Exists() { - if res.IsArray() { - strs := make([]string, len(res.Array())) - for i, item := range res.Array() { - strs[i] = item.String() - } - req.AddCookie(&http.Cookie{ - Name: param.Name, - Value: strings.Join(strs, ","), - }) - } else if res.IsObject() { - var strs []string - for k, v := range res.Map() { - strs = append(strs, k, v.String()) - } - req.AddCookie(&http.Cookie{ - Name: param.Name, - Value: strings.Join(strs, ","), - }) - } else { // basic type - req.AddCookie(&http.Cookie{ - Name: param.Name, - Value: res.String(), - }) - } - } - } +func ptr[T any](t T) *T { + return &t } diff --git a/pkg/engine/openapi_test.go b/pkg/engine/openapi_test.go index df1e00fc..9fd5d34e 100644 --- a/pkg/engine/openapi_test.go +++ b/pkg/engine/openapi_test.go @@ -5,6 +5,7 @@ import ( "net/url" "testing" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/stretchr/testify/require" ) @@ -89,7 
+90,7 @@ func TestPathParameterSerialization(t *testing.T) { t.Run(test.name, func(t *testing.T) { path := path params := getParameters(test.style, test.explode) - path = handlePathParameters(path, params, string(inputStr)) + path = openapi.HandlePathParameters(path, params, string(inputStr)) require.Contains(t, test.expectedPaths, path) }) } @@ -111,13 +112,13 @@ func TestQueryParameterSerialization(t *testing.T) { tests := []struct { name string input string - param Parameter + param openapi.Parameter expectedQueries []string // We use multiple expected queries due to randomness in map iteration }{ { name: "value", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "v", }, expectedQueries: []string{"v=42"}, @@ -125,7 +126,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array form + explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "form", Explode: boolPointer(true), @@ -135,7 +136,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array form + no explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "form", Explode: boolPointer(false), @@ -145,7 +146,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array spaceDelimited + explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "spaceDelimited", Explode: boolPointer(true), @@ -155,7 +156,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array spaceDelimited + no explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "spaceDelimited", Explode: boolPointer(false), @@ -165,7 +166,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "array pipeDelimited + explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "pipeDelimited", Explode: boolPointer(true), @@ -175,7 +176,7 @@ func 
TestQueryParameterSerialization(t *testing.T) { { name: "array pipeDelimited + no explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "a", Style: "pipeDelimited", Explode: boolPointer(false), @@ -185,7 +186,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "object form + explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "o", Style: "form", Explode: boolPointer(true), @@ -198,7 +199,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "object form + no explode", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "o", Style: "form", Explode: boolPointer(false), @@ -211,7 +212,7 @@ func TestQueryParameterSerialization(t *testing.T) { { name: "object deepObject", input: string(inputStr), - param: Parameter{ + param: openapi.Parameter{ Name: "o", Style: "deepObject", }, @@ -224,14 +225,14 @@ func TestQueryParameterSerialization(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - q := handleQueryParameters(url.Values{}, []Parameter{test.param}, test.input) + q := openapi.HandleQueryParameters(url.Values{}, []openapi.Parameter{test.param}, test.input) require.Contains(t, test.expectedQueries, q.Encode()) }) } } -func getParameters(style string, explode bool) []Parameter { - return []Parameter{ +func getParameters(style string, explode bool) []openapi.Parameter { + return []openapi.Parameter{ { Name: "v", Style: style, diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go index 462ee8b5..43f429fc 100644 --- a/pkg/gptscript/gptscript.go +++ b/pkg/gptscript/gptscript.go @@ -40,15 +40,16 @@ type GPTScript struct { } type Options struct { - Cache cache.Options - OpenAI openai.Options - Monitor monitor.Options - Runner runner.Options - CredentialContext string - Quiet *bool - Workspace string - DisablePromptServer bool - Env []string + Cache cache.Options + OpenAI openai.Options + Monitor 
monitor.Options + Runner runner.Options + DefaultModelProvider string + CredentialContext string + Quiet *bool + Workspace string + DisablePromptServer bool + Env []string } func Complete(opts ...Options) Options { @@ -64,6 +65,7 @@ func Complete(opts ...Options) Options { result.Workspace = types.FirstSet(opt.Workspace, result.Workspace) result.Env = append(result.Env, opt.Env...) result.DisablePromptServer = types.FirstSet(opt.DisablePromptServer, result.DisablePromptServer) + result.DefaultModelProvider = types.FirstSet(opt.DefaultModelProvider, result.DefaultModelProvider) } if result.Quiet == nil { @@ -106,16 +108,18 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { return nil, err } - oaiClient, err := openai.NewClient(ctx, credStore, opts.OpenAI, openai.Options{ - Cache: cacheClient, - SetSeed: true, - }) - if err != nil { - return nil, err - } + if opts.DefaultModelProvider == "" { + oaiClient, err := openai.NewClient(ctx, credStore, opts.OpenAI, openai.Options{ + Cache: cacheClient, + SetSeed: true, + }) + if err != nil { + return nil, err + } - if err := registry.AddClient(oaiClient); err != nil { - return nil, err + if err := registry.AddClient(oaiClient); err != nil { + return nil, err + } } if opts.Runner.MonitorFactory == nil { @@ -143,7 +147,7 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) { fullEnv := append(opts.Env, extraEnv...) 
- remoteClient := remote.New(runner, fullEnv, cacheClient, credStore) + remoteClient := remote.New(runner, fullEnv, cacheClient, credStore, opts.DefaultModelProvider) if err := registry.AddClient(remoteClient); err != nil { closeServer() return nil, err diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go index d7634058..3d2ae8ed 100644 --- a/pkg/loader/loader.go +++ b/pkg/loader/loader.go @@ -8,26 +8,23 @@ import ( "fmt" "io" "io/fs" + "os" "path" "path/filepath" - "strconv" "strings" "time" "unicode/utf8" - "github.com/getkin/kin-openapi/openapi2" - "github.com/getkin/kin-openapi/openapi2conv" "github.com/getkin/kin-openapi/openapi3" "github.com/gptscript-ai/gptscript/internal" "github.com/gptscript-ai/gptscript/pkg/assemble" "github.com/gptscript-ai/gptscript/pkg/builtin" "github.com/gptscript-ai/gptscript/pkg/cache" "github.com/gptscript-ai/gptscript/pkg/hash" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/gptscript-ai/gptscript/pkg/parser" "github.com/gptscript-ai/gptscript/pkg/system" "github.com/gptscript-ai/gptscript/pkg/types" - "gopkg.in/yaml.v3" - kyaml "sigs.k8s.io/yaml" ) const CacheTimeout = time.Hour @@ -157,33 +154,8 @@ func loadOpenAPI(prg *types.Program, data []byte) *openapi3.T { prg.OpenAPICache = map[string]any{} } - switch isOpenAPI(data) { - case 2: - // Convert OpenAPI v2 to v3 - jsondata := data - if !json.Valid(data) { - jsondata, err = kyaml.YAMLToJSON(data) - if err != nil { - return nil - } - } - - doc := &openapi2.T{} - if err := doc.UnmarshalJSON(jsondata); err != nil { - return nil - } - - openAPIDocument, err = openapi2conv.ToV3(doc) - if err != nil { - return nil - } - case 3: - // Use OpenAPI v3 as is - openAPIDocument, err = openapi3.NewLoader().LoadFromData(data) - if err != nil { - return nil - } - default: + openAPIDocument, err = openapi.LoadFromBytes(data) + if err != nil { return nil } @@ -202,14 +174,18 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base return 
[]types.Tool{tool}, nil } - var tools []types.Tool + var ( + tools []types.Tool + isOpenAPI bool + ) if openAPIDocument := loadOpenAPI(prg, data); openAPIDocument != nil { + isOpenAPI = true var err error if base.Remote { - tools, err = getOpenAPITools(openAPIDocument, base.Location) + tools, err = getOpenAPITools(openAPIDocument, base.Location, base.Location, targetToolName) } else { - tools, err = getOpenAPITools(openAPIDocument, "") + tools, err = getOpenAPITools(openAPIDocument, "", base.Name, targetToolName) } if err != nil { return nil, fmt.Errorf("error parsing OpenAPI definition: %w", err) @@ -257,10 +233,6 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base // Probably a better way to come up with an ID tool.ID = tool.Source.Location + ":" + tool.Name - if i == 0 && targetToolName == "" { - targetTools = append(targetTools, tool) - } - if i != 0 && tool.Parameters.Name == "" { return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, fmt.Errorf("only the first tool in a file can have no name")) } @@ -273,16 +245,35 @@ func readTool(ctx context.Context, cache *cache.Client, prg *types.Program, base return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, fmt.Errorf("only the first tool in a file can have global tools")) } - if targetToolName != "" && tool.Parameters.Name != "" { - if strings.EqualFold(tool.Parameters.Name, targetToolName) { + // Determine targetTools + if isOpenAPI && os.Getenv("GPTSCRIPT_OPENAPI_REVAMP") == "true" { + targetTools = append(targetTools, tool) + } else { + if i == 0 && targetToolName == "" { targetTools = append(targetTools, tool) - } else if strings.Contains(targetToolName, "*") { - match, err := filepath.Match(strings.ToLower(targetToolName), strings.ToLower(tool.Parameters.Name)) - if err != nil { - return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, err) - } - if match { + } + + if targetToolName != "" && tool.Parameters.Name != "" { + if 
strings.EqualFold(tool.Parameters.Name, targetToolName) { targetTools = append(targetTools, tool) + } else if strings.Contains(targetToolName, "*") { + var patterns []string + if strings.Contains(targetToolName, "|") { + patterns = strings.Split(targetToolName, "|") + } else { + patterns = []string{targetToolName} + } + + for _, pattern := range patterns { + match, err := filepath.Match(strings.ToLower(pattern), strings.ToLower(tool.Parameters.Name)) + if err != nil { + return nil, parser.NewErrLine(tool.Source.Location, tool.Source.LineNo, err) + } + if match { + targetTools = append(targetTools, tool) + break + } + } } } } @@ -491,42 +482,3 @@ func input(ctx context.Context, cache *cache.Client, base *source, name string) return nil, fmt.Errorf("can not load tools path=%s name=%s", base.Path, name) } - -// isOpenAPI checks if the data is an OpenAPI definition and returns the version if it is. -func isOpenAPI(data []byte) int { - var fragment struct { - Paths map[string]any `json:"paths,omitempty"` - Swagger string `json:"swagger,omitempty"` - OpenAPI string `json:"openapi,omitempty"` - } - - if err := json.Unmarshal(data, &fragment); err != nil { - if err := yaml.Unmarshal(data, &fragment); err != nil { - return 0 - } - } - if len(fragment.Paths) == 0 { - return 0 - } - - if v, _, _ := strings.Cut(fragment.OpenAPI, "."); v != "" { - ver, err := strconv.Atoi(v) - if err != nil { - log.Debugf("invalid OpenAPI version: openapi=%q", fragment.OpenAPI) - return 0 - } - return ver - } - - if v, _, _ := strings.Cut(fragment.Swagger, "."); v != "" { - ver, err := strconv.Atoi(v) - if err != nil { - log.Debugf("invalid Swagger version: swagger=%q", fragment.Swagger) - return 0 - } - return ver - } - - log.Debugf("no OpenAPI version found in input data: openapi=%q, swagger=%q", fragment.OpenAPI, fragment.Swagger) - return 0 -} diff --git a/pkg/loader/loader_test.go b/pkg/loader/loader_test.go index d70c45f5..7c480034 100644 --- a/pkg/loader/loader_test.go +++ 
b/pkg/loader/loader_test.go @@ -10,6 +10,7 @@ import ( "path/filepath" "testing" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/hexops/autogold/v2" "github.com/stretchr/testify/require" ) @@ -53,17 +54,17 @@ Stuff func TestIsOpenAPI(t *testing.T) { datav2, err := os.ReadFile("testdata/openapi_v2.yaml") require.NoError(t, err) - v := isOpenAPI(datav2) + v := openapi.IsOpenAPI(datav2) require.Equal(t, 2, v, "(yaml) expected openapi v2") datav2, err = os.ReadFile("testdata/openapi_v2.json") require.NoError(t, err) - v = isOpenAPI(datav2) + v = openapi.IsOpenAPI(datav2) require.Equal(t, 2, v, "(json) expected openapi v2") datav3, err := os.ReadFile("testdata/openapi_v3.yaml") require.NoError(t, err) - v = isOpenAPI(datav3) + v = openapi.IsOpenAPI(datav3) require.Equal(t, 3, v, "(json) expected openapi v3") } diff --git a/pkg/loader/openapi.go b/pkg/loader/openapi.go index 45254c9d..bc469a4e 100644 --- a/pkg/loader/openapi.go +++ b/pkg/loader/openapi.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/url" + "os" "regexp" "slices" "sort" @@ -11,7 +12,7 @@ import ( "time" "github.com/getkin/kin-openapi/openapi3" - "github.com/gptscript-ai/gptscript/pkg/engine" + "github.com/gptscript-ai/gptscript/pkg/openapi" "github.com/gptscript-ai/gptscript/pkg/types" ) @@ -20,8 +21,12 @@ var toolNameRegex = regexp.MustCompile(`[^a-zA-Z0-9_-]+`) // getOpenAPITools parses an OpenAPI definition and generates a set of tools from it. // Each operation will become a tool definition. // The tool's Instructions will be in the format "#!sys.openapi '{JSON Instructions}'", -// where the JSON Instructions are a JSON-serialized engine.OpenAPIInstructions struct. -func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { +// where the JSON Instructions are a JSON-serialized openapi.OperationInfo struct. 
+func getOpenAPITools(t *openapi3.T, defaultHost, source, targetToolName string) ([]types.Tool, error) { + if os.Getenv("GPTSCRIPT_OPENAPI_REVAMP") == "true" { + return getOpenAPIToolsRevamp(t, source, targetToolName) + } + if log.IsDebug() { start := time.Now() defer func() { @@ -51,7 +56,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { for _, item := range t.Security { current := map[string]struct{}{} for name := range item { - if scheme, ok := t.Components.SecuritySchemes[name]; ok && slices.Contains(engine.SupportedSecurityTypes, scheme.Value.Type) { + if scheme, ok := t.Components.SecuritySchemes[name]; ok && slices.Contains(openapi.GetSupportedSecurityTypes(), scheme.Value.Type) { current[name] = struct{}{} } } @@ -134,10 +139,10 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { // - C // D auths []map[string]struct{} - queryParameters []engine.Parameter - pathParameters []engine.Parameter - headerParameters []engine.Parameter - cookieParameters []engine.Parameter + queryParameters []openapi.Parameter + pathParameters []openapi.Parameter + headerParameters []openapi.Parameter + cookieParameters []openapi.Parameter bodyMIME string ) tool := types.Tool{ @@ -177,7 +182,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { } // Add the parameter to the appropriate list for the tool's instructions - p := engine.Parameter{ + p := openapi.Parameter{ Name: param.Value.Name, Style: param.Value.Style, Explode: param.Value.Explode, @@ -199,7 +204,7 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { for mime, content := range operation.RequestBody.Value.Content { // Each MIME type needs to be handled individually, so we // keep a list of the ones we support. 
- if !slices.Contains(engine.SupportedMIMETypes, mime) { + if !slices.Contains(openapi.GetSupportedMIMETypes(), mime) { continue } bodyMIME = mime @@ -250,18 +255,18 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { } // For each set of auths, turn them into SecurityInfos, and drop ones that contain unsupported types. - var infos [][]engine.SecurityInfo + var infos [][]openapi.SecurityInfo outer: for _, auth := range auths { - var current []engine.SecurityInfo + var current []openapi.SecurityInfo for name := range auth { if scheme, ok := t.Components.SecuritySchemes[name]; ok { - if !slices.Contains(engine.SupportedSecurityTypes, scheme.Value.Type) { + if !slices.Contains(openapi.GetSupportedSecurityTypes(), scheme.Value.Type) { // There is an unsupported type in this auth, so move on to the next one. continue outer } - current = append(current, engine.SecurityInfo{ + current = append(current, openapi.SecurityInfo{ Type: scheme.Value.Type, Name: name, In: scheme.Value.In, @@ -324,17 +329,17 @@ func getOpenAPITools(t *openapi3.T, defaultHost string) ([]types.Tool, error) { return tools, nil } -func instructionString(server, method, path, bodyMIME string, queryParameters, pathParameters, headerParameters, cookieParameters []engine.Parameter, infos [][]engine.SecurityInfo) (string, error) { - inst := engine.OpenAPIInstructions{ - Server: server, - Path: path, - Method: method, - BodyContentMIME: bodyMIME, - SecurityInfos: infos, - QueryParameters: queryParameters, - PathParameters: pathParameters, - HeaderParameters: headerParameters, - CookieParameters: cookieParameters, +func instructionString(server, method, path, bodyMIME string, queryParameters, pathParameters, headerParameters, cookieParameters []openapi.Parameter, infos [][]openapi.SecurityInfo) (string, error) { + inst := openapi.OperationInfo{ + Server: server, + Path: path, + Method: method, + BodyContentMIME: bodyMIME, + SecurityInfos: infos, + QueryParams: queryParameters, 
+ PathParams: pathParameters, + HeaderParams: headerParameters, + CookieParams: cookieParameters, } instBytes, err := json.Marshal(inst) if err != nil { @@ -362,3 +367,95 @@ func parseServer(server *openapi3.Server) (string, error) { } return s, nil } + +func getOpenAPIToolsRevamp(t *openapi3.T, source, targetToolName string) ([]types.Tool, error) { + if t == nil { + return nil, fmt.Errorf("OpenAPI spec is nil") + } else if t.Info == nil { + return nil, fmt.Errorf("OpenAPI spec is missing info field") + } + + if targetToolName == "" { + targetToolName = openapi.NoFilter + } + + list := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: types.ToolNormalizer("list-operations-" + t.Info.Title), + Description: fmt.Sprintf("List available operations for %s. Each of these operations is an OpenAPI operation. Run this tool before you do anything else.", t.Info.Title), + }, + Instructions: fmt.Sprintf("%s %s %s %s", types.OpenAPIPrefix, openapi.ListTool, source, targetToolName), + }, + Source: types.ToolSource{ + LineNo: 0, + }, + } + + getSchema := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: types.ToolNormalizer("get-schema-" + t.Info.Title), + Description: fmt.Sprintf("Get the JSONSchema for the arguments for an operation for %s. 
You must do this before you run the operation.", t.Info.Title), + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeObject}, + Properties: openapi3.Schemas{ + "operation": { + Value: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeString}, + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{"operation"}, + }, + }, + }, + }, + }, + Instructions: fmt.Sprintf("%s %s %s %s", types.OpenAPIPrefix, openapi.GetSchemaTool, source, targetToolName), + }, + Source: types.ToolSource{ + LineNo: 1, + }, + } + + run := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: types.ToolNormalizer("run-operation-" + t.Info.Title), + Description: fmt.Sprintf("Run an operation for %s. You MUST call %s for the operation before you use this tool.", t.Info.Title, openapi.GetSchemaTool), + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeObject}, + Properties: openapi3.Schemas{ + "operation": { + Value: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeString}, + Title: "operation", + Description: "the name of the operation to run", + Required: []string{"operation"}, + }, + }, + "args": { + Value: &openapi3.Schema{ + Type: &openapi3.Types{openapi3.TypeString}, + Title: "args", + Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Required: []string{"args"}, + }, + }, + }, + }, + }, + Instructions: fmt.Sprintf("%s %s %s %s", types.OpenAPIPrefix, openapi.RunTool, source, targetToolName), + }, + } + + exportTool := types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Export: []string{list.Parameters.Name, getSchema.Parameters.Name, run.Parameters.Name}, + }, + }, + } + + return []types.Tool{exportTool, list, getSchema, run}, nil +} diff --git a/pkg/loader/openapi_test.go b/pkg/loader/openapi_test.go index d00ffcca..1a7eaa76 100644 --- a/pkg/loader/openapi_test.go +++ b/pkg/loader/openapi_test.go @@ -86,3 
+86,42 @@ func TestOpenAPIv2(t *testing.T) { autogold.ExpectFile(t, prgv2.ToolSet, autogold.Dir("testdata/openapi")) } + +func TestOpenAPIv3Revamp(t *testing.T) { + os.Setenv("GPTSCRIPT_OPENAPI_REVAMP", "true") + prgv3 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav3, err := os.ReadFile("testdata/openapi_v3.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + require.NoError(t, err) + + autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) +} + +func TestOpenAPIv3NoOperationIDsRevamp(t *testing.T) { + os.Setenv("GPTSCRIPT_OPENAPI_REVAMP", "true") + prgv3 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav3, err := os.ReadFile("testdata/openapi_v3_no_operation_ids.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv3, &source{Content: datav3}, "") + require.NoError(t, err) + + autogold.ExpectFile(t, prgv3.ToolSet, autogold.Dir("testdata/openapi")) +} + +func TestOpenAPIv2Revamp(t *testing.T) { + os.Setenv("GPTSCRIPT_OPENAPI_REVAMP", "true") + prgv2 := types.Program{ + ToolSet: types.ToolSet{}, + } + datav2, err := os.ReadFile("testdata/openapi_v2.yaml") + require.NoError(t, err) + _, err = readTool(context.Background(), nil, &prgv2, &source{Content: datav2}, "") + require.NoError(t, err) + + autogold.ExpectFile(t, prgv2.ToolSet, autogold.Dir("testdata/openapi")) +} diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv2.golden b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden index 90dd1967..39b0b2c1 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv2.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv2.golden @@ -38,7 +38,7 @@ types.ToolSet{ Description: "Create a pet", ModelName: "gpt-4o", }, - Instructions: `#!sys.openapi 
'{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":createPets", ToolMapping: map[string][]types.ToolReference{}, @@ -68,7 +68,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":listPets", ToolMapping: map[string][]types.ToolReference{}, @@ -95,7 +95,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, }, ID: ":showPetById", ToolMapping: map[string][]types.ToolReference{}, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden 
b/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden new file mode 100644 index 00000000..ebe68cc2 --- /dev/null +++ b/pkg/loader/testdata/openapi/TestOpenAPIv2Revamp.golden @@ -0,0 +1,116 @@ +types.ToolSet{ + ":": types.Tool{ + ToolDef: types.ToolDef{Parameters: types.Parameters{ + ModelName: "gpt-4o", + Export: []string{ + "listOperationsSwaggerPetstore", + "getSchemaSwaggerPetstore", + "runOperationSwaggerPetstore", + }, + }}, + ID: ":", + ToolMapping: map[string][]types.ToolReference{ + "getSchemaSwaggerPetstore": {{ + Reference: "getSchemaSwaggerPetstore", + ToolID: ":getSchemaSwaggerPetstore", + }}, + "listOperationsSwaggerPetstore": {{ + Reference: "listOperationsSwaggerPetstore", + ToolID: ":listOperationsSwaggerPetstore", + }}, + "runOperationSwaggerPetstore": {{ + Reference: "runOperationSwaggerPetstore", + ToolID: ":runOperationSwaggerPetstore", + }}, + }, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":getSchemaSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "getSchemaSwaggerPetstore", + Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. 
You must do this before you run the operation.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{ + "object", + }, + Properties: openapi3.Schemas{"operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{"operation"}, + }}}, + }, + }, + Instructions: "#!sys.openapi get-schema ", + }, + ID: ":getSchemaSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + Source: types.ToolSource{LineNo: 1}, + }, + ":listOperationsSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "listOperationsSwaggerPetstore", + Description: "List available operations for Swagger Petstore. Each of these operations is an OpenAPI operation. Run this tool before you do anything else.", + ModelName: "gpt-4o", + }, + Instructions: "#!sys.openapi list ", + }, + ID: ":listOperationsSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":runOperationSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "runOperationSwaggerPetstore", + Description: "Run an operation for Swagger Petstore. 
You MUST call get-schema for the operation before you use this tool.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{ + "args": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "args", + Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Required: []string{"args"}, + }}, + "operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to run", + Required: []string{"operation"}, + }}, + }, + }, + }, + Instructions: "#!sys.openapi run ", + }, + ID: ":runOperationSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, +} diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden index 72ccafae..37ac2fe2 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3.golden @@ -63,7 +63,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","apiKeyInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","securityInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":createPets", ToolMapping: map[string][]types.ToolReference{}, @@ -92,7 +92,7 @@ types.ToolSet{ }}}, }, }, - Instructions: 
`#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":listPets", ToolMapping: map[string][]types.ToolReference{}, @@ -119,7 +119,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, }, ID: ":showPetById", ToolMapping: map[string][]types.ToolReference{}, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden index 3bcfd9e5..e950e19c 100644 --- a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDs.golden @@ -50,7 +50,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi 
'{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":[{"name":"limit","style":"","explode":null}],"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":get_pets", ToolMapping: map[string][]types.ToolReference{}, @@ -77,7 +77,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","apiKeyInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets/{petId}","method":"GET","bodyContentMIME":"","securityInfos":null,"queryParameters":null,"pathParameters":[{"name":"petId","style":"","explode":null}],"headerParameters":null,"cookieParameters":null}'`, }, ID: ":get_pets_petId", ToolMapping: map[string][]types.ToolReference{}, @@ -119,7 +119,7 @@ types.ToolSet{ }}}, }, }, - Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","apiKeyInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, + Instructions: `#!sys.openapi '{"server":"http://petstore.swagger.io/v1","path":"/pets","method":"POST","bodyContentMIME":"application/json","securityInfos":null,"queryParameters":null,"pathParameters":null,"headerParameters":null,"cookieParameters":null}'`, }, ID: ":post_pets", ToolMapping: map[string][]types.ToolReference{}, diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden new file mode 100644 index 00000000..ebe68cc2 --- /dev/null +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3NoOperationIDsRevamp.golden @@ -0,0 +1,116 @@ +types.ToolSet{ + ":": types.Tool{ + 
ToolDef: types.ToolDef{Parameters: types.Parameters{ + ModelName: "gpt-4o", + Export: []string{ + "listOperationsSwaggerPetstore", + "getSchemaSwaggerPetstore", + "runOperationSwaggerPetstore", + }, + }}, + ID: ":", + ToolMapping: map[string][]types.ToolReference{ + "getSchemaSwaggerPetstore": {{ + Reference: "getSchemaSwaggerPetstore", + ToolID: ":getSchemaSwaggerPetstore", + }}, + "listOperationsSwaggerPetstore": {{ + Reference: "listOperationsSwaggerPetstore", + ToolID: ":listOperationsSwaggerPetstore", + }}, + "runOperationSwaggerPetstore": {{ + Reference: "runOperationSwaggerPetstore", + ToolID: ":runOperationSwaggerPetstore", + }}, + }, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":getSchemaSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "getSchemaSwaggerPetstore", + Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. 
You must do this before you run the operation.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{ + "object", + }, + Properties: openapi3.Schemas{"operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{"operation"}, + }}}, + }, + }, + Instructions: "#!sys.openapi get-schema ", + }, + ID: ":getSchemaSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + Source: types.ToolSource{LineNo: 1}, + }, + ":listOperationsSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "listOperationsSwaggerPetstore", + Description: "List available operations for Swagger Petstore. Each of these operations is an OpenAPI operation. Run this tool before you do anything else.", + ModelName: "gpt-4o", + }, + Instructions: "#!sys.openapi list ", + }, + ID: ":listOperationsSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":runOperationSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "runOperationSwaggerPetstore", + Description: "Run an operation for Swagger Petstore. 
You MUST call get-schema for the operation before you use this tool.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{ + "args": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "args", + Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Required: []string{"args"}, + }}, + "operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to run", + Required: []string{"operation"}, + }}, + }, + }, + }, + Instructions: "#!sys.openapi run ", + }, + ID: ":runOperationSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, +} diff --git a/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden b/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden new file mode 100644 index 00000000..ebe68cc2 --- /dev/null +++ b/pkg/loader/testdata/openapi/TestOpenAPIv3Revamp.golden @@ -0,0 +1,116 @@ +types.ToolSet{ + ":": types.Tool{ + ToolDef: types.ToolDef{Parameters: types.Parameters{ + ModelName: "gpt-4o", + Export: []string{ + "listOperationsSwaggerPetstore", + "getSchemaSwaggerPetstore", + "runOperationSwaggerPetstore", + }, + }}, + ID: ":", + ToolMapping: map[string][]types.ToolReference{ + "getSchemaSwaggerPetstore": {{ + Reference: "getSchemaSwaggerPetstore", + ToolID: ":getSchemaSwaggerPetstore", + }}, + "listOperationsSwaggerPetstore": {{ + Reference: "listOperationsSwaggerPetstore", + ToolID: ":listOperationsSwaggerPetstore", + }}, + "runOperationSwaggerPetstore": {{ + Reference: "runOperationSwaggerPetstore", + ToolID: ":runOperationSwaggerPetstore", + }}, + }, + 
LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":getSchemaSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "getSchemaSwaggerPetstore", + Description: "Get the JSONSchema for the arguments for an operation for Swagger Petstore. You must do this before you run the operation.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{ + "object", + }, + Properties: openapi3.Schemas{"operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to get the schema for", + Required: []string{"operation"}, + }}}, + }, + }, + Instructions: "#!sys.openapi get-schema ", + }, + ID: ":getSchemaSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + Source: types.ToolSource{LineNo: 1}, + }, + ":listOperationsSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "listOperationsSwaggerPetstore", + Description: "List available operations for Swagger Petstore. Each of these operations is an OpenAPI operation. 
Run this tool before you do anything else.", + ModelName: "gpt-4o", + }, + Instructions: "#!sys.openapi list ", + }, + ID: ":listOperationsSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, + ":runOperationSwaggerPetstore": types.Tool{ + ToolDef: types.ToolDef{ + Parameters: types.Parameters{ + Name: "runOperationSwaggerPetstore", + Description: "Run an operation for Swagger Petstore. You MUST call get-schema for the operation before you use this tool.", + ModelName: "gpt-4o", + Arguments: &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{ + "args": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "args", + Description: "the JSON string containing arguments; must match the JSONSchema for the operation", + Required: []string{"args"}, + }}, + "operation": &openapi3.SchemaRef{Value: &openapi3.Schema{ + Type: &openapi3.Types{"string"}, + Title: "operation", + Description: "the name of the operation to run", + Required: []string{"operation"}, + }}, + }, + }, + }, + Instructions: "#!sys.openapi run ", + }, + ID: ":runOperationSwaggerPetstore", + ToolMapping: map[string][]types.ToolReference{}, + LocalTools: map[string]string{ + "": ":", + "getschemaswaggerpetstore": ":getSchemaSwaggerPetstore", + "listoperationsswaggerpetstore": ":listOperationsSwaggerPetstore", + "runoperationswaggerpetstore": ":runOperationSwaggerPetstore", + }, + }, +} diff --git a/pkg/openapi/getschema.go b/pkg/openapi/getschema.go new file mode 100644 index 00000000..3550afcf --- /dev/null +++ b/pkg/openapi/getschema.go @@ -0,0 +1,285 @@ +package openapi + +import ( + "encoding/json" + "fmt" + "slices" + "strings" + + "github.com/getkin/kin-openapi/openapi3" +) + 
+type Parameter struct { + Name string `json:"name"` + Style string `json:"style"` + Explode *bool `json:"explode"` +} + +type OperationInfo struct { + Server string `json:"server"` + Path string `json:"path"` + Method string `json:"method"` + BodyContentMIME string `json:"bodyContentMIME"` + SecurityInfos [][]SecurityInfo `json:"securityInfos"` + QueryParams []Parameter `json:"queryParameters"` + PathParams []Parameter `json:"pathParameters"` + HeaderParams []Parameter `json:"headerParameters"` + CookieParams []Parameter `json:"cookieParameters"` +} + +var ( + supportedMIMETypes = []string{"application/json", "application/x-www-form-urlencoded", "multipart/form-data"} + supportedSecurityTypes = []string{"apiKey", "http"} +) + +const GetSchemaTool = "get-schema" + +func GetSupportedMIMETypes() []string { + return supportedMIMETypes +} + +func GetSupportedSecurityTypes() []string { + return supportedSecurityTypes +} + +// GetSchema returns the JSONSchema and OperationInfo for a particular OpenAPI operation. +// Return values in order: JSONSchema (string), OperationInfo, found (bool), error. +func GetSchema(operationID, defaultHost string, t *openapi3.T) (string, OperationInfo, bool, error) { + arguments := &openapi3.Schema{ + Type: &openapi3.Types{"object"}, + Properties: openapi3.Schemas{}, + Required: []string{}, + } + + info := OperationInfo{} + + // Determine the default server. 
+ var ( + defaultServer = defaultHost + err error + ) + if len(t.Servers) > 0 { + defaultServer, err = parseServer(t.Servers[0]) + if err != nil { + return "", OperationInfo{}, false, err + } + } + + var globalSecurity []map[string]struct{} + if t.Security != nil { + for _, item := range t.Security { + current := map[string]struct{}{} + for name := range item { + if scheme, ok := t.Components.SecuritySchemes[name]; ok && slices.Contains(supportedSecurityTypes, scheme.Value.Type) { + current[name] = struct{}{} + } + } + if len(current) > 0 { + globalSecurity = append(globalSecurity, current) + } + } + } + + for path, pathItem := range t.Paths.Map() { + // Handle path-level server override, if one exists. + pathServer := defaultServer + if pathItem.Servers != nil && len(pathItem.Servers) > 0 { + pathServer, err = parseServer(pathItem.Servers[0]) + if err != nil { + return "", OperationInfo{}, false, err + } + } + + for method, operation := range pathItem.Operations() { + if operation.OperationID == operationID { + // Handle operation-level server override, if one exists. + operationServer := pathServer + if operation.Servers != nil && len(*operation.Servers) > 0 { + operationServer, err = parseServer((*operation.Servers)[0]) + if err != nil { + return "", OperationInfo{}, false, err + } + } + + info.Server = operationServer + info.Path = path + info.Method = method + + // We found our operation. Now we need to process it and build the arguments. + // Handle query, path, header, and cookie parameters first. + for _, param := range append(operation.Parameters, pathItem.Parameters...) 
{ + removeRefs(param.Value.Schema) + arg := param.Value.Schema.Value + + if arg.Description == "" { + arg.Description = param.Value.Description + } + + // Store the arg + arguments.Properties[param.Value.Name] = &openapi3.SchemaRef{Value: arg} + + // Check whether it is required + if param.Value.Required { + arguments.Required = append(arguments.Required, param.Value.Name) + } + + // Save the parameter to the correct set of params. + p := Parameter{ + Name: param.Value.Name, + Style: param.Value.Style, + Explode: param.Value.Explode, + } + switch param.Value.In { + case "query": + info.QueryParams = append(info.QueryParams, p) + case "path": + info.PathParams = append(info.PathParams, p) + case "header": + info.HeaderParams = append(info.HeaderParams, p) + case "cookie": + info.CookieParams = append(info.CookieParams, p) + } + } + + // Next, handle the request body, if one exists. + if operation.RequestBody != nil { + for mime, content := range operation.RequestBody.Value.Content { + // Each MIME type needs to be handled individually, so we keep a list of the ones we support. + if !slices.Contains(supportedMIMETypes, mime) { + continue + } + info.BodyContentMIME = mime + + removeRefs(content.Schema) + + arg := content.Schema.Value + if arg.Description == "" { + arg.Description = content.Schema.Value.Description + } + + // Read Only cannot be sent in the request body, so we remove it + for key, property := range arg.Properties { + if property.Value.ReadOnly { + delete(arg.Properties, key) + } + } + + // Unfortunately, the request body doesn't contain any good descriptor for it, + // so we just use "requestBodyContent" as the name of the arg. 
+ arguments.Properties["requestBodyContent"] = &openapi3.SchemaRef{Value: arg} + arguments.Required = append(arguments.Required, "requestBodyContent") + break + } + + if info.BodyContentMIME == "" { + return "", OperationInfo{}, false, fmt.Errorf("no supported MIME type found for request body in operation %s", operationID) + } + } + + // See if there is any auth defined for this operation + var ( + noAuth bool + auths []map[string]struct{} + ) + if operation.Security != nil { + if len(*operation.Security) == 0 { + noAuth = true + } + for _, req := range *operation.Security { + current := map[string]struct{}{} + for name := range req { + current[name] = struct{}{} + } + if len(current) > 0 { + auths = append(auths, current) + } + } + } + + // Use the global security if it was not overridden for this operation + if !noAuth && len(auths) == 0 { + auths = append(auths, globalSecurity...) + } + + // For each set of auths, turn them into SecurityInfos, and drop ones that contain unsupported types. + outer: + for _, auth := range auths { + var current []SecurityInfo + for name := range auth { + if scheme, ok := t.Components.SecuritySchemes[name]; ok { + if !slices.Contains(supportedSecurityTypes, scheme.Value.Type) { + // There is an unsupported type in this auth, so move on to the next one. 
+ continue outer + } + + current = append(current, SecurityInfo{ + Type: scheme.Value.Type, + Name: name, + In: scheme.Value.In, + Scheme: scheme.Value.Scheme, + APIKeyName: scheme.Value.Name, + }) + } + } + + if len(current) > 0 { + info.SecurityInfos = append(info.SecurityInfos, current) + } + } + + argumentsJSON, err := json.MarshalIndent(arguments, "", " ") + if err != nil { + return "", OperationInfo{}, false, err + } + return string(argumentsJSON), info, true, nil + } + } + } + + return "", OperationInfo{}, false, nil +} + +func parseServer(server *openapi3.Server) (string, error) { + s := server.URL + for name, variable := range server.Variables { + if variable == nil { + continue + } + + if variable.Default != "" { + s = strings.Replace(s, "{"+name+"}", variable.Default, 1) + } else if len(variable.Enum) > 0 { + s = strings.Replace(s, "{"+name+"}", variable.Enum[0], 1) + } + } + + if !strings.HasPrefix(s, "http") { + return "", fmt.Errorf("invalid server URL: %s (must use HTTP or HTTPS; relative URLs not supported)", s) + } + return s, nil +} + +func removeRefs(r *openapi3.SchemaRef) { + if r == nil { + return + } + + r.Ref = "" + r.Value.Discriminator = nil // Discriminators are not very useful and can junk up the schema. 
+ + for i := range r.Value.OneOf { + removeRefs(r.Value.OneOf[i]) + } + for i := range r.Value.AnyOf { + removeRefs(r.Value.AnyOf[i]) + } + for i := range r.Value.AllOf { + removeRefs(r.Value.AllOf[i]) + } + removeRefs(r.Value.Not) + removeRefs(r.Value.Items) + + for i := range r.Value.Properties { + removeRefs(r.Value.Properties[i]) + } +} diff --git a/pkg/openapi/list.go b/pkg/openapi/list.go new file mode 100644 index 00000000..857c7014 --- /dev/null +++ b/pkg/openapi/list.go @@ -0,0 +1,68 @@ +package openapi + +import ( + "path/filepath" + "strings" + + "github.com/getkin/kin-openapi/openapi3" +) + +type OperationList struct { + Operations map[string]Operation `json:"operations"` +} + +type Operation struct { + Description string `json:"description,omitempty"` + Summary string `json:"summary,omitempty"` +} + +const ( + ListTool = "list" + NoFilter = "" +) + +func List(t *openapi3.T, filter string) (OperationList, error) { + operations := make(map[string]Operation) + for _, pathItem := range t.Paths.Map() { + for _, operation := range pathItem.Operations() { + var ( + match bool + err error + ) + if filter != "" && filter != NoFilter { + if strings.Contains(filter, "*") { + match, err = MatchFilters(strings.Split(filter, "|"), operation.OperationID) + if err != nil { + return OperationList{}, err + } + } else { + match = operation.OperationID == filter + } + } else { + match = true + } + + if match { + operations[operation.OperationID] = Operation{ + Description: operation.Description, + Summary: operation.Summary, + } + } + } + } + + return OperationList{Operations: operations}, nil +} + +func MatchFilters(filters []string, operationID string) (bool, error) { + for _, filter := range filters { + match, err := filepath.Match(filter, operationID) + if err != nil { + return false, err + } + if match { + return true, nil + } + } + return false, nil +} diff --git a/pkg/openapi/load.go b/pkg/openapi/load.go new file mode 100644 index 00000000..0ff82fdb --- /dev/null 
+++ b/pkg/openapi/load.go @@ -0,0 +1,121 @@ +package openapi + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strconv" + "strings" + + "github.com/getkin/kin-openapi/openapi2" + "github.com/getkin/kin-openapi/openapi2conv" + "github.com/getkin/kin-openapi/openapi3" + "gopkg.in/yaml.v3" + kyaml "sigs.k8s.io/yaml" +) + +func Load(source string) (*openapi3.T, error) { + if strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") { + return loadFromURL(source) + } + return loadFromFile(source) +} + +func loadFromURL(source string) (*openapi3.T, error) { + resp, err := http.DefaultClient.Get(source) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + contents, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return LoadFromBytes(contents) +} + +func loadFromFile(source string) (*openapi3.T, error) { + contents, err := os.ReadFile(source) + if err != nil { + return nil, err + } + + return LoadFromBytes(contents) +} + +func LoadFromBytes(content []byte) (*openapi3.T, error) { + var ( + openAPIDocument *openapi3.T + err error + ) + + switch IsOpenAPI(content) { + case 2: + // Convert OpenAPI v2 to v3 + if !json.Valid(content) { + content, err = kyaml.YAMLToJSON(content) + if err != nil { + return nil, err + } + } + + doc := &openapi2.T{} + if err := doc.UnmarshalJSON(content); err != nil { + return nil, fmt.Errorf("failed to unmarshal OpenAPI v2 document: %w", err) + } + + openAPIDocument, err = openapi2conv.ToV3(doc) + if err != nil { + return nil, fmt.Errorf("failed to convert OpenAPI v2 to v3: %w", err) + } + case 3: + openAPIDocument, err = openapi3.NewLoader().LoadFromData(content) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported OpenAPI version") + } + + return openAPIDocument, nil +} + +// IsOpenAPI checks if the data is an OpenAPI definition and returns the version if it is. 
+func IsOpenAPI(data []byte) int { + var fragment struct { + Paths map[string]any `json:"paths,omitempty"` + Swagger string `json:"swagger,omitempty"` + OpenAPI string `json:"openapi,omitempty"` + } + + if err := json.Unmarshal(data, &fragment); err != nil { + if err := yaml.Unmarshal(data, &fragment); err != nil { + return 0 + } + } + if len(fragment.Paths) == 0 { + return 0 + } + + if v, _, _ := strings.Cut(fragment.OpenAPI, "."); v != "" { + ver, err := strconv.Atoi(v) + if err != nil { + return 0 + } + return ver + } + + if v, _, _ := strings.Cut(fragment.Swagger, "."); v != "" { + ver, err := strconv.Atoi(v) + if err != nil { + return 0 + } + return ver + } + + return 0 +} diff --git a/pkg/openapi/run.go b/pkg/openapi/run.go new file mode 100644 index 00000000..17199851 --- /dev/null +++ b/pkg/openapi/run.go @@ -0,0 +1,451 @@ +package openapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/url" + "os" + "strings" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/gptscript-ai/gptscript/pkg/env" + "github.com/tidwall/gjson" + "github.com/xeipuuv/gojsonschema" + "golang.org/x/exp/maps" +) + +const RunTool = "run" + +func Run(operationID, defaultHost, args string, t *openapi3.T, envs []string) (string, bool, error) { + envMap := make(map[string]string, len(envs)) + for _, e := range envs { + k, v, _ := strings.Cut(e, "=") + envMap[k] = v + } + + if args == "" { + args = "{}" + } + schemaJSON, opInfo, found, err := GetSchema(operationID, defaultHost, t) + if err != nil || !found { + return "", false, err + } + + // Validate args against the schema. 
+ validationResult, err := gojsonschema.Validate(gojsonschema.NewStringLoader(schemaJSON), gojsonschema.NewStringLoader(args)) + if err != nil { + return "", false, err + } + + if !validationResult.Valid() { + return "", false, fmt.Errorf("invalid arguments for operation %s: %s", operationID, validationResult.Errors()) + } + + // Construct and execute the HTTP request. + + // Handle path parameters. + opInfo.Path = HandlePathParameters(opInfo.Path, opInfo.PathParams, args) + + // Parse the URL + path, err := url.JoinPath(opInfo.Server, opInfo.Path) + if err != nil { + return "", false, fmt.Errorf("failed to join server and path: %w", err) + } + + u, err := url.Parse(path) + if err != nil { + return "", false, fmt.Errorf("failed to parse server URL %s: %w", opInfo.Server+opInfo.Path, err) + } + + // Set up the request + req, err := http.NewRequest(opInfo.Method, u.String(), nil) + if err != nil { + return "", false, fmt.Errorf("failed to create request: %w", err) + } + + // Check for authentication (only if using HTTPS or localhost) + if u.Scheme == "https" || u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" { + if len(opInfo.SecurityInfos) > 0 { + if err := HandleAuths(req, envMap, opInfo.SecurityInfos); err != nil { + return "", false, fmt.Errorf("error setting up authentication: %w", err) + } + } + + // If there is a bearer token set for the whole server, and no Authorization header has been defined, use it. 
+ if token, ok := envMap["GPTSCRIPT_"+env.ToEnvLike(u.Hostname())+"_BEARER_TOKEN"]; ok { + if req.Header.Get("Authorization") == "" { + req.Header.Set("Authorization", "Bearer "+token) + } + } + } else { + fmt.Fprintf(os.Stderr, "no auth") + } + + // Handle query parameters + req.URL.RawQuery = HandleQueryParameters(req.URL.Query(), opInfo.QueryParams, args).Encode() + + // Handle header and cookie parameters + HandleHeaderParameters(req, opInfo.HeaderParams, args) + HandleCookieParameters(req, opInfo.CookieParams, args) + + // Handle request body + if opInfo.BodyContentMIME != "" { + res := gjson.Get(args, "requestBodyContent") + var body bytes.Buffer + switch opInfo.BodyContentMIME { + case "application/json": + var reqBody any = struct{}{} + if res.Exists() { + reqBody = res.Value() + } + if err := json.NewEncoder(&body).Encode(reqBody); err != nil { + return "", false, fmt.Errorf("failed to encode JSON: %w", err) + } + req.Header.Set("Content-Type", "application/json") + + case "text/plain": + reqBody := "" + if res.Exists() { + reqBody = res.String() + } + body.WriteString(reqBody) + + req.Header.Set("Content-Type", "text/plain") + + case "multipart/form-data": + multiPartWriter := multipart.NewWriter(&body) + req.Header.Set("Content-Type", multiPartWriter.FormDataContentType()) + if res.Exists() && res.IsObject() { + for k, v := range res.Map() { + if err := multiPartWriter.WriteField(k, v.String()); err != nil { + return "", false, fmt.Errorf("failed to write multipart field: %w", err) + } + } + } else { + return "", false, fmt.Errorf("multipart/form-data requires an object as the requestBodyContent") + } + if err := multiPartWriter.Close(); err != nil { + return "", false, fmt.Errorf("failed to close multipart writer: %w", err) + } + + default: + return "", false, fmt.Errorf("unsupported MIME type: %s", opInfo.BodyContentMIME) + } + req.Body = io.NopCloser(&body) + } + + // Make the request + resp, err := http.DefaultClient.Do(req) + if err != nil { + 
return "", false, fmt.Errorf("failed to make request: %w", err) + } + defer resp.Body.Close() + + result, err := io.ReadAll(resp.Body) + if err != nil { + return "", false, fmt.Errorf("failed to read response: %w", err) + } + + return string(result), true, nil +} + +// HandleAuths will set up the request with the necessary authentication information. +// A set of sets of SecurityInfo is passed in, where each represents a possible set of security options. +func HandleAuths(req *http.Request, envMap map[string]string, infoSets [][]SecurityInfo) error { + var missingVariables [][]string + + // We need to find a set of infos where we have all the needed environment variables. + for _, infoSet := range infoSets { + var missing []string // Keep track of any missing environment variables + for _, info := range infoSet { + vars := info.getCredentialNamesAndEnvVars(req.URL.Hostname()) + + for _, envName := range vars { + if _, ok := envMap[envName]; !ok { + missing = append(missing, envName) + } + } + } + if len(missing) > 0 { + missingVariables = append(missingVariables, missing) + continue + } + + // We're using this info set, because no environment variables were missing. + // Set up the request as needed. 
+ for _, info := range infoSet { + envNames := maps.Values(info.getCredentialNamesAndEnvVars(req.URL.Hostname())) + switch info.Type { + case "apiKey": + switch info.In { + case "header": + req.Header.Set(info.APIKeyName, envMap[envNames[0]]) + case "query": + v := url.Values{} + v.Add(info.APIKeyName, envMap[envNames[0]]) + req.URL.RawQuery = v.Encode() + case "cookie": + req.AddCookie(&http.Cookie{ + Name: info.APIKeyName, + Value: envMap[envNames[0]], + }) + } + case "http": + switch info.Scheme { + case "bearer": + req.Header.Set("Authorization", "Bearer "+envMap[envNames[0]]) + case "basic": + req.SetBasicAuth(envMap[envNames[0]], envMap[envNames[1]]) + } + } + } + return nil + } + + return fmt.Errorf("did not find the needed environment variables for any of the security options. "+ + "At least one of these sets of environment variables must be provided: %v", missingVariables) +} + +// HandlePathParameters extracts each path parameter from the input JSON and replaces its placeholder in the URL path. 
+func HandlePathParameters(path string, params []Parameter, input string) string { + for _, param := range params { + res := gjson.Get(input, param.Name) + if res.Exists() { + // If it's an array or object, handle the serialization style + if res.IsArray() { + switch param.Style { + case "simple", "": // simple is the default style for path parameters + // simple looks the same regardless of whether explode is true + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) + case "label": + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + + if param.Explode == nil || !*param.Explode { // default is to not explode + path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, ","), 1) + } else { + path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, "."), 1) + } + case "matrix": + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + + if param.Explode == nil || !*param.Explode { // default is to not explode + path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+strings.Join(strs, ","), 1) + } else { + s := "" + for _, str := range strs { + s += ";" + param.Name + "=" + str + } + path = strings.Replace(path, "{"+param.Name+"}", s, 1) + } + } + } else if res.IsObject() { + switch param.Style { + case "simple", "": + if param.Explode == nil || !*param.Explode { // default is to not explode + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) + } else { + var strs []string + for k, v := range res.Map() { + strs = append(strs, k+"="+v.String()) + } + path = strings.Replace(path, "{"+param.Name+"}", strings.Join(strs, ","), 1) + } + case "label": + if param.Explode == 
nil || !*param.Explode { // default is to not explode + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + path = strings.Replace(path, "{"+param.Name+"}", "."+strings.Join(strs, ","), 1) + } else { + s := "" + for k, v := range res.Map() { + s += "." + k + "=" + v.String() + } + path = strings.Replace(path, "{"+param.Name+"}", s, 1) + } + case "matrix": + if param.Explode == nil || !*param.Explode { // default is to not explode + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+strings.Join(strs, ","), 1) + } else { + s := "" + for k, v := range res.Map() { + s += ";" + k + "=" + v.String() + } + path = strings.Replace(path, "{"+param.Name+"}", s, 1) + } + } + } else { + // Serialization is handled slightly differently even for basic types. + // Explode doesn't do anything though. + switch param.Style { + case "simple", "": + path = strings.Replace(path, "{"+param.Name+"}", res.String(), 1) + case "label": + path = strings.Replace(path, "{"+param.Name+"}", "."+res.String(), 1) + case "matrix": + path = strings.Replace(path, "{"+param.Name+"}", ";"+param.Name+"="+res.String(), 1) + } + } + } + } + return path +} + +// HandleQueryParameters extracts each query parameter from the input JSON and adds it to the URL query. 
+func HandleQueryParameters(q url.Values, params []Parameter, input string) url.Values { + for _, param := range params { + res := gjson.Get(input, param.Name) + if res.Exists() { + // If it's an array or object, handle the serialization style + if res.IsArray() { + switch param.Style { + case "form", "": // form is the default style for query parameters + if param.Explode == nil || *param.Explode { // default is to explode + for _, item := range res.Array() { + q.Add(param.Name, item.String()) + } + } else { + var strs []string + for _, item := range res.Array() { + strs = append(strs, item.String()) + } + q.Add(param.Name, strings.Join(strs, ",")) + } + case "spaceDelimited": + if param.Explode == nil || *param.Explode { + for _, item := range res.Array() { + q.Add(param.Name, item.String()) + } + } else { + var strs []string + for _, item := range res.Array() { + strs = append(strs, item.String()) + } + q.Add(param.Name, strings.Join(strs, " ")) + } + case "pipeDelimited": + if param.Explode == nil || *param.Explode { + for _, item := range res.Array() { + q.Add(param.Name, item.String()) + } + } else { + var strs []string + for _, item := range res.Array() { + strs = append(strs, item.String()) + } + q.Add(param.Name, strings.Join(strs, "|")) + } + } + } else if res.IsObject() { + switch param.Style { + case "form", "": // form is the default style for query parameters + if param.Explode == nil || *param.Explode { // default is to explode + for k, v := range res.Map() { + q.Add(k, v.String()) + } + } else { + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + q.Add(param.Name, strings.Join(strs, ",")) + } + case "deepObject": + for k, v := range res.Map() { + q.Add(param.Name+"["+k+"]", v.String()) + } + } + } else { + q.Add(param.Name, res.String()) + } + } + } + return q +} + +// HandleHeaderParameters extracts each header parameter from the input JSON and adds it to the request headers. 
+func HandleHeaderParameters(req *http.Request, params []Parameter, input string) { + for _, param := range params { + res := gjson.Get(input, param.Name) + if res.Exists() { + if res.IsArray() { + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + req.Header.Add(param.Name, strings.Join(strs, ",")) + } else if res.IsObject() { + // Handle explosion + var strs []string + if param.Explode == nil || !*param.Explode { // default is to not explode + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + } else { + for k, v := range res.Map() { + strs = append(strs, k+"="+v.String()) + } + } + req.Header.Add(param.Name, strings.Join(strs, ",")) + } else { // basic type + req.Header.Add(param.Name, res.String()) + } + } + } +} + +// HandleCookieParameters extracts each cookie parameter from the input JSON and adds it to the request cookies. +func HandleCookieParameters(req *http.Request, params []Parameter, input string) { + for _, param := range params { + res := gjson.Get(input, param.Name) + if res.Exists() { + if res.IsArray() { + strs := make([]string, len(res.Array())) + for i, item := range res.Array() { + strs[i] = item.String() + } + req.AddCookie(&http.Cookie{ + Name: param.Name, + Value: strings.Join(strs, ","), + }) + } else if res.IsObject() { + var strs []string + for k, v := range res.Map() { + strs = append(strs, k, v.String()) + } + req.AddCookie(&http.Cookie{ + Name: param.Name, + Value: strings.Join(strs, ","), + }) + } else { // basic type + req.AddCookie(&http.Cookie{ + Name: param.Name, + Value: res.String(), + }) + } + } + } +} diff --git a/pkg/openapi/security.go b/pkg/openapi/security.go new file mode 100644 index 00000000..dd4521fc --- /dev/null +++ b/pkg/openapi/security.go @@ -0,0 +1,56 @@ +package openapi + +import ( + "fmt" + "strings" + + "github.com/gptscript-ai/gptscript/pkg/env" +) + +// A SecurityInfo represents a security scheme in OpenAPI. 
+type SecurityInfo struct { + Name string `json:"name"` // name as defined in the security schemes + Type string `json:"type"` // http or apiKey + Scheme string `json:"scheme"` // bearer or basic, for type==http + APIKeyName string `json:"apiKeyName"` // name of the API key, for type==apiKey + In string `json:"in"` // header, query, or cookie, for type==apiKey +} + +func (i SecurityInfo) GetCredentialToolStrings(hostname string) []string { + vars := i.getCredentialNamesAndEnvVars(hostname) + var tools []string + + for cred, v := range vars { + field := "value" + switch i.Type { + case "apiKey": + field = i.APIKeyName + case "http": + if i.Scheme == "bearer" { + field = "bearer token" + } else { + if strings.Contains(v, "PASSWORD") { + field = "password" + } else { + field = "username" + } + } + } + + tools = append(tools, fmt.Sprintf("github.com/gptscript-ai/credential as %s with %s as env and %q as message and %q as field", + cred, v, "Please provide a value for the "+v+" environment variable", field)) + } + return tools +} + +func (i SecurityInfo) getCredentialNamesAndEnvVars(hostname string) map[string]string { + if i.Type == "http" && i.Scheme == "basic" { + return map[string]string{ + hostname + i.Name + "Username": "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name) + "_USERNAME", + hostname + i.Name + "Password": "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name) + "_PASSWORD", + } + } + return map[string]string{ + hostname + i.Name: "GPTSCRIPT_" + env.ToEnvLike(hostname) + "_" + env.ToEnvLike(i.Name), + } +} diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index d12f838e..ff5d1374 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -150,6 +150,8 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) { tool.Parameters.Credentials = append(tool.Parameters.Credentials, value) case "sharecredentials", "sharecreds", "sharecredential", "sharecred", "sharedcredentials", "sharedcreds", 
"sharedcredential", "sharedcred": tool.Parameters.ExportCredentials = append(tool.Parameters.ExportCredentials, value) + case "type": + tool.Type = types.ToolType(strings.ToLower(value)) default: return false, nil } diff --git a/pkg/prompt/prompt.go b/pkg/prompt/prompt.go index c1b693be..44cb20f1 100644 --- a/pkg/prompt/prompt.go +++ b/pkg/prompt/prompt.go @@ -61,9 +61,13 @@ func SysPrompt(ctx context.Context, envs []string, input string, _ chan<- string for _, env := range envs { if url, ok := strings.CutPrefix(env, types.PromptURLEnvVar+"="); ok { + var fields []string + if params.Fields != "" { + fields = strings.Split(params.Fields, ",") + } httpPrompt := types.Prompt{ Message: params.Message, - Fields: strings.Split(params.Fields, ","), + Fields: fields, Sensitive: params.Sensitive == "true", } return sysPromptHTTP(ctx, envs, url, httpPrompt) @@ -76,7 +80,7 @@ func SysPrompt(ctx context.Context, envs []string, input string, _ chan<- string func sysPrompt(ctx context.Context, req types.Prompt) (_ string, err error) { defer context2.GetPauseFuncFromCtx(ctx)()() - if req.Message != "" && len(req.Fields) == 1 && strings.TrimSpace(req.Fields[0]) == "" { + if req.Message != "" && len(req.Fields) == 0 { var errs []error _, err := fmt.Fprintln(os.Stderr, req.Message) errs = append(errs, err) diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go index 6cb3644e..6d83e6cc 100644 --- a/pkg/remote/remote.go +++ b/pkg/remote/remote.go @@ -22,21 +22,23 @@ import ( ) type Client struct { - clientsLock sync.Mutex - cache *cache.Client - clients map[string]*openai.Client - models map[string]*openai.Client - runner *runner.Runner - envs []string - credStore credentials.CredentialStore + clientsLock sync.Mutex + cache *cache.Client + clients map[string]*openai.Client + models map[string]*openai.Client + runner *runner.Runner + envs []string + credStore credentials.CredentialStore + defaultProvider string } -func New(r *runner.Runner, envs []string, cache *cache.Client, 
credStore credentials.CredentialStore) *Client { +func New(r *runner.Runner, envs []string, cache *cache.Client, credStore credentials.CredentialStore, defaultProvider string) *Client { return &Client{ - cache: cache, - runner: r, - envs: envs, - credStore: credStore, + cache: cache, + runner: r, + envs: envs, + credStore: credStore, + defaultProvider: defaultProvider, } } @@ -49,7 +51,12 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques return nil, fmt.Errorf("failed to find remote model %s", messageRequest.Model) } - _, modelName := types.SplitToolRef(messageRequest.Model) + toolName, modelName := types.SplitToolRef(messageRequest.Model) + if modelName == "" { + // modelName is empty, then the messageRequest.Model is not of the form 'modelName from provider' + // Therefore, the modelName is the toolName + modelName = toolName + } messageRequest.Model = modelName return client.Call(ctx, messageRequest, status) } @@ -73,13 +80,23 @@ func (c *Client) ListModels(ctx context.Context, providers ...string) (result [] return } -func (c *Client) Supports(ctx context.Context, modelName string) (bool, error) { - toolName, modelNameSuffix := types.SplitToolRef(modelName) - if modelNameSuffix == "" { +func (c *Client) parseModel(modelString string) (modelName, providerName string) { + toolName, subTool := types.SplitToolRef(modelString) + if subTool == "" { + // This is just a plain model string "gpt4o" + return toolName, c.defaultProvider + } + // This is a provider string "modelName from provider" + return subTool, toolName +} + +func (c *Client) Supports(ctx context.Context, modelString string) (bool, error) { + _, providerName := c.parseModel(modelString) + if providerName == "" { return false, nil } - client, err := c.load(ctx, toolName) + client, err := c.load(ctx, providerName) if err != nil { return false, err } @@ -91,7 +108,7 @@ func (c *Client) Supports(ctx context.Context, modelName string) (bool, error) { c.models = 
map[string]*openai.Client{} } - c.models[modelName] = client + c.models[modelString] = client return true, nil } diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 9e8695a7..3a33c720 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -332,7 +332,7 @@ func getToolRefInput(prg *types.Program, ref types.ToolReference, input string) } func (r *Runner) getContext(callCtx engine.Context, state *State, monitor Monitor, env []string, input string) (result []engine.InputContext, _ *State, _ error) { - toolRefs, err := callCtx.Program.GetContextToolRefs(callCtx.Tool.ID) + toolRefs, err := callCtx.Tool.GetContextTools(*callCtx.Program) if err != nil { return nil, nil, err } diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go index e0977c9e..2e709e3f 100644 --- a/pkg/sdkserver/routes.go +++ b/pkg/sdkserver/routes.go @@ -1,7 +1,6 @@ package sdkserver import ( - "context" "encoding/json" "fmt" "io" @@ -10,7 +9,6 @@ import ( "sort" "strings" "sync" - "time" "github.com/gptscript-ai/broadcaster" "github.com/gptscript-ai/gptscript/pkg/cache" @@ -26,8 +24,6 @@ import ( "github.com/gptscript-ai/gptscript/pkg/version" ) -const toolRunTimeout = 15 * time.Minute - type server struct { gptscriptOpts gptscript.Options address, token string @@ -158,8 +154,6 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { ctx := gserver.ContextWithNewRunID(r.Context()) runID := gserver.RunIDFromContext(ctx) - ctx, cancel := context.WithTimeout(ctx, toolRunTimeout) - defer cancel() // Ensure chat state is not empty. 
if reqObject.ChatState == "" { @@ -204,6 +198,7 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) { CredentialOverrides: reqObject.CredentialOverrides, Sequential: reqObject.ForceSequential, }, + DefaultModelProvider: reqObject.DefaultModelProvider, } if reqObject.Confirm { diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go index 06119c35..9736f045 100644 --- a/pkg/sdkserver/types.go +++ b/pkg/sdkserver/types.go @@ -52,17 +52,18 @@ type toolOrFileRequest struct { cacheOptions `json:",inline"` openAIOptions `json:",inline"` - ToolDefs toolDefs `json:"toolDefs,inline"` - SubTool string `json:"subTool"` - Input string `json:"input"` - ChatState string `json:"chatState"` - Workspace string `json:"workspace"` - Env []string `json:"env"` - CredentialContext string `json:"credentialContext"` - CredentialOverrides []string `json:"credentialOverrides"` - Confirm bool `json:"confirm"` - Location string `json:"location,omitempty"` - ForceSequential bool `json:"forceSequential"` + ToolDefs toolDefs `json:"toolDefs,inline"` + SubTool string `json:"subTool"` + Input string `json:"input"` + ChatState string `json:"chatState"` + Workspace string `json:"workspace"` + Env []string `json:"env"` + CredentialContext string `json:"credentialContext"` + CredentialOverrides []string `json:"credentialOverrides"` + Confirm bool `json:"confirm"` + Location string `json:"location,omitempty"` + ForceSequential bool `json:"forceSequential"` + DefaultModelProvider string `json:"DefaultModelProvider,omitempty"` } type content struct { diff --git a/pkg/tests/runner_test.go b/pkg/tests/runner_test.go index 70d5346c..12eff23a 100644 --- a/pkg/tests/runner_test.go +++ b/pkg/tests/runner_test.go @@ -1,8 +1,11 @@ package tests import ( + "bytes" + "compress/gzip" "context" "encoding/json" + "io" "os" "runtime" "testing" @@ -919,6 +922,35 @@ func TestOutput(t *testing.T) { autogold.ExpectFile(t, toJSONString(t, resp), autogold.Name(t.Name()+"/step3")) } +func 
TestEnvOverflow(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + } + + r := tester.NewRunner(t) + + out := r.RunDefault() + autogold.Expect(`{"_gz":"H4sIAAAAAAAA/+zAgQAAAADCMNb8JQK4wjYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgHgAA//+94pKFQBkBAA=="} +`).Equal(t, out) + + data, err := os.ReadFile("testdata/TestEnvOverflow/context.json") + require.NoError(t, err) + + compressed := struct { + Data []byte `json:"_gz"` + }{} + err = json.Unmarshal(data, &compressed) + require.NoError(t, err) + + gunzip, err := gzip.NewReader(bytes.NewReader(compressed.Data)) + require.NoError(t, err) + + content, err := io.ReadAll(gunzip) + require.NoError(t, err) + + autogold.Expect("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").Equal(t, string(content)) +} + func TestSysContext(t *testing.T) { if runtime.GOOS == "windows" { t.Skip() @@ -963,3 +995,8 @@ func TestMissingTool(t *testing.T) { r.AssertResponded(t) autogold.Expect("TEST RESULT CALL: 2").Equal(t, resp) } + +func TestToolRefAll(t *testing.T) { + r := tester.NewRunner(t) + r.RunDefault() +} diff --git a/pkg/tests/testdata/TestCwd/subtool/test.gpt b/pkg/tests/testdata/TestCwd/subtool/test.gpt index 41314bbe..29cf1ed0 100644 --- a/pkg/tests/testdata/TestCwd/subtool/test.gpt +++ b/pkg/tests/testdata/TestCwd/subtool/test.gpt @@ -1,6 +1,6 @@ # #!/usr/bin/env X=${GPTSCRIPT_TOOL_DIR} /bin/bash -set -e -x +set -e [ ${X} = ${GPTSCRIPT_TOOL_DIR} ] cd $X diff --git a/pkg/tests/testdata/TestCwd/test.gpt b/pkg/tests/testdata/TestCwd/test.gpt index 5185635a..e053571c 100644 --- a/pkg/tests/testdata/TestCwd/test.gpt +++ b/pkg/tests/testdata/TestCwd/test.gpt @@ -6,7 +6,7 @@ noop name: local #!/bin/bash -set -e -x +set -e [ "" = "${TOOL_DIR}" ] P=$(pwd) diff --git a/pkg/tests/testdata/TestEnvOverflow/context.json b/pkg/tests/testdata/TestEnvOverflow/context.json new file mode 100644 index 00000000..eb99ddb6 --- /dev/null +++ b/pkg/tests/testdata/TestEnvOverflow/context.json @@ -0,0 +1 @@ 
+{"_gz":"H4sIAAAAAAAA/+zAgQAAAADCMNb8JQK4wjYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgHgAA//+94pKFQBkBAA=="} diff --git a/pkg/tests/testdata/TestEnvOverflow/test.gpt b/pkg/tests/testdata/TestEnvOverflow/test.gpt new file mode 100644 index 00000000..406e6de7 --- /dev/null +++ b/pkg/tests/testdata/TestEnvOverflow/test.gpt @@ -0,0 +1,14 @@ +context: c + +#!/bin/bash + +echo "${GPTSCRIPT_CONTEXT}" +echo "${GPTSCRIPT_CONTEXT}" > ${GPTSCRIPT_TOOL_DIR}/context.json + +--- +name: c + +#!/bin/bash + +string=$(printf 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa%.0s' {1..1000}) +echo "$string" \ No newline at end of file diff --git a/pkg/tests/testdata/TestToolRefAll/call1-resp.golden b/pkg/tests/testdata/TestToolRefAll/call1-resp.golden new file mode 100644 index 00000000..2861a036 --- /dev/null +++ b/pkg/tests/testdata/TestToolRefAll/call1-resp.golden @@ -0,0 +1,9 @@ +`{ + "role": "assistant", + "content": [ + { + "text": "TEST RESULT CALL: 1" + } + ], + "usage": {} +}` diff --git a/pkg/tests/testdata/TestToolRefAll/call1.golden b/pkg/tests/testdata/TestToolRefAll/call1.golden new file mode 100644 index 00000000..4957014d --- /dev/null +++ b/pkg/tests/testdata/TestToolRefAll/call1.golden @@ -0,0 +1,61 @@ +`{ + "model": "gpt-4o", + "tools": [ + { + "function": { + "toolID": "testdata/TestToolRefAll/test.gpt:tool", + "name": "tool", + "parameters": { + "properties": { + "toolArg": { + "description": "stuff", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "testdata/TestToolRefAll/test.gpt:none", + "name": "none", + "parameters": { + "properties": { + "noneArg": { + "description": "stuff", + "type": "string" + } + }, + "type": "object" + } + } + }, + { + "function": { + "toolID": "testdata/TestToolRefAll/test.gpt:agentAssistant", + "name": "agent", + "parameters": { + "properties": { + "defaultPromptParameter": { + "description": "Prompt to send to the 
tool. This may be an instruction or question.", + "type": "string" + } + }, + "type": "object" + } + } + } + ], + "messages": [ + { + "role": "system", + "content": [ + { + "text": "\nContext Body\nMain tool" + } + ], + "usage": {} + } + ] +}` diff --git a/pkg/tests/testdata/TestToolRefAll/test.gpt b/pkg/tests/testdata/TestToolRefAll/test.gpt new file mode 100644 index 00000000..93c4ea05 --- /dev/null +++ b/pkg/tests/testdata/TestToolRefAll/test.gpt @@ -0,0 +1,30 @@ +tools: tool, agentAssistant, context, none + +Main tool + +--- +name: agentAssistant +type: agent + +Agent body + +--- +name: context +type: context + +#!sys.echo + +Context Body + +--- +name: none +param: noneArg: stuff + +Default type + +--- +name: tool +type: Tool +param: toolArg: stuff + +Typed tool \ No newline at end of file diff --git a/pkg/types/tool.go b/pkg/types/tool.go index 82effad4..54d5d817 100644 --- a/pkg/types/tool.go +++ b/pkg/types/tool.go @@ -26,6 +26,20 @@ var ( DefaultFiles = []string{"agent.gpt", "tool.gpt"} ) +type ToolType string + +const ( + ToolTypeContext = ToolType("context") + ToolTypeAgent = ToolType("agent") + ToolTypeOutput = ToolType("output") + ToolTypeInput = ToolType("input") + ToolTypeAssistant = ToolType("assistant") + ToolTypeTool = ToolType("tool") + ToolTypeCredential = ToolType("credential") + ToolTypeProvider = ToolType("provider") + ToolTypeDefault = ToolType("") +) + type ErrToolNotFound struct { ToolName string } @@ -77,28 +91,6 @@ type ToolReference struct { ToolID string `json:"toolID,omitempty"` } -func (p Program) GetContextToolRefs(toolID string) ([]ToolReference, error) { - return p.ToolSet[toolID].GetContextTools(p) -} - -func (p Program) GetCompletionTools() (result []CompletionTool, err error) { - return Tool{ - ToolDef: ToolDef{ - Parameters: Parameters{ - Tools: []string{"main"}, - }, - }, - ToolMapping: map[string][]ToolReference{ - "main": { - { - Reference: "main", - ToolID: p.EntryToolID, - }, - }, - }, - }.GetCompletionTools(p) -} - func 
(p Program) TopLevelTools() (result []Tool) { for _, tool := range p.ToolSet[p.EntryToolID].LocalTools { if target, ok := p.ToolSet[tool]; ok { @@ -145,6 +137,7 @@ type Parameters struct { OutputFilters []string `json:"outputFilters,omitempty"` ExportOutputFilters []string `json:"exportOutputFilters,omitempty"` Blocking bool `json:"-"` + Type ToolType `json:"type,omitempty"` } func (p Parameters) ToolRefNames() []string { @@ -347,6 +340,13 @@ func (t Tool) GetAgents(prg Program) (result []ToolReference, _ error) { return nil, err } + genericToolRefs, err := t.getCompletionToolRefs(prg, nil, ToolTypeAgent) + if err != nil { + return nil, err + } + + toolRefs = append(toolRefs, genericToolRefs...) + // Agent Tool refs must be named for i, toolRef := range toolRefs { if toolRef.Named != "" { @@ -358,7 +358,9 @@ func (t Tool) GetAgents(prg Program) (result []ToolReference, _ error) { name = toolRef.Reference } normed := ToolNormalizer(name) - normed = strings.TrimSuffix(strings.TrimSuffix(normed, "Agent"), "Assistant") + if trimmed := strings.TrimSuffix(strings.TrimSuffix(normed, "Agent"), "Assistant"); trimmed != "" { + normed = trimmed + } toolRefs[i].Named = normed } @@ -404,6 +406,9 @@ func (t ToolDef) String() string { if t.Parameters.Description != "" { _, _ = fmt.Fprintf(buf, "Description: %s\n", t.Parameters.Description) } + if t.Parameters.Type != ToolTypeDefault { + _, _ = fmt.Fprintf(buf, "Type: %s\n", strings.ToUpper(string(t.Type[0]))+string(t.Type[1:])) + } if len(t.Parameters.Agents) != 0 { _, _ = fmt.Fprintf(buf, "Agents: %s\n", strings.Join(t.Parameters.Agents, ", ")) } @@ -486,7 +491,7 @@ func (t ToolDef) String() string { return buf.String() } -func (t Tool) GetExportedContext(prg Program) ([]ToolReference, error) { +func (t Tool) getExportedContext(prg Program) ([]ToolReference, error) { result := &toolRefSet{} exportRefs, err := t.GetToolRefsFromNames(t.ExportContext) @@ -498,13 +503,13 @@ func (t Tool) GetExportedContext(prg Program) 
([]ToolReference, error) { result.Add(exportRef) tool := prg.ToolSet[exportRef.ToolID] - result.AddAll(tool.GetExportedContext(prg)) + result.AddAll(tool.getExportedContext(prg)) } return result.List() } -func (t Tool) GetExportedTools(prg Program) ([]ToolReference, error) { +func (t Tool) getExportedTools(prg Program) ([]ToolReference, error) { result := &toolRefSet{} exportRefs, err := t.GetToolRefsFromNames(t.Export) @@ -514,7 +519,7 @@ func (t Tool) GetExportedTools(prg Program) ([]ToolReference, error) { for _, exportRef := range exportRefs { result.Add(exportRef) - result.AddAll(prg.ToolSet[exportRef.ToolID].GetExportedTools(prg)) + result.AddAll(prg.ToolSet[exportRef.ToolID].getExportedTools(prg)) } return result.List() @@ -524,6 +529,15 @@ func (t Tool) GetExportedTools(prg Program) ([]ToolReference, error) { // contexts that are exported by the context tools. This will recurse all exports. func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { result := &toolRefSet{} + result.AddAll(t.getDirectContextToolRefs(prg)) + result.AddAll(t.getCompletionToolRefs(prg, nil, ToolTypeContext)) + return result.List() +} + +// GetContextTools returns all tools that are in the context of the tool including all the +// contexts that are exported by the context tools. This will recurse all exports. 
+func (t Tool) getDirectContextToolRefs(prg Program) ([]ToolReference, error) { + result := &toolRefSet{} contextRefs, err := t.GetToolRefsFromNames(t.Context) if err != nil { @@ -531,7 +545,7 @@ func (t Tool) GetContextTools(prg Program) ([]ToolReference, error) { } for _, contextRef := range contextRefs { - result.AddAll(prg.ToolSet[contextRef.ToolID].GetExportedContext(prg)) + result.AddAll(prg.ToolSet[contextRef.ToolID].getExportedContext(prg)) result.Add(contextRef) } @@ -550,7 +564,9 @@ func (t Tool) GetOutputFilterTools(program Program) ([]ToolReference, error) { result.Add(outputFilterRef) } - contextRefs, err := t.GetContextTools(program) + result.AddAll(t.getCompletionToolRefs(program, nil, ToolTypeOutput)) + + contextRefs, err := t.getDirectContextToolRefs(program) if err != nil { return nil, err } @@ -575,7 +591,9 @@ func (t Tool) GetInputFilterTools(program Program) ([]ToolReference, error) { result.Add(inputFilterRef) } - contextRefs, err := t.GetContextTools(program) + result.AddAll(t.getCompletionToolRefs(program, nil, ToolTypeInput)) + + contextRefs, err := t.getDirectContextToolRefs(program) if err != nil { return nil, err } @@ -602,11 +620,28 @@ func (t Tool) GetNextAgentGroup(prg Program, agentGroup []ToolReference, toolID return agentGroup, nil } +func filterRefs(prg Program, refs []ToolReference, types ...ToolType) (result []ToolReference) { + for _, ref := range refs { + if slices.Contains(types, prg.ToolSet[ref.ToolID].Type) { + result = append(result, ref) + } + } + return +} + func (t Tool) GetCompletionTools(prg Program, agentGroup ...ToolReference) (result []CompletionTool, err error) { - refs, err := t.getCompletionToolRefs(prg, agentGroup) + toolSet := &toolRefSet{} + toolSet.AddAll(t.getCompletionToolRefs(prg, agentGroup, ToolTypeDefault, ToolTypeTool)) + + if err := t.addAgents(prg, toolSet); err != nil { + return nil, err + } + + refs, err := toolSet.List() if err != nil { return nil, err } + return toolRefsToCompletionTools(refs, 
prg), nil } @@ -638,26 +673,30 @@ func (t Tool) addReferencedTools(prg Program, result *toolRefSet) error { result.Add(subToolRef) // Get all tools exports - result.AddAll(prg.ToolSet[subToolRef.ToolID].GetExportedTools(prg)) + result.AddAll(prg.ToolSet[subToolRef.ToolID].getExportedTools(prg)) } return nil } func (t Tool) addContextExportedTools(prg Program, result *toolRefSet) error { - contextTools, err := t.GetContextTools(prg) + contextTools, err := t.getDirectContextToolRefs(prg) if err != nil { return err } for _, contextTool := range contextTools { - result.AddAll(prg.ToolSet[contextTool.ToolID].GetExportedTools(prg)) + result.AddAll(prg.ToolSet[contextTool.ToolID].getExportedTools(prg)) } return nil } -func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference) ([]ToolReference, error) { +func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference, types ...ToolType) ([]ToolReference, error) { + if len(types) == 0 { + types = []ToolType{ToolTypeDefault, ToolTypeTool} + } + result := toolRefSet{} if t.Chat { @@ -677,11 +716,8 @@ func (t Tool) getCompletionToolRefs(prg Program, agentGroup []ToolReference) ([] return nil, err } - if err := t.addAgents(prg, &result); err != nil { - return nil, err - } - - return result.List() + refs, err := result.List() + return filterRefs(prg, refs, types...), err } func (t Tool) GetCredentialTools(prg Program, agentGroup []ToolReference) ([]ToolReference, error) { @@ -689,6 +725,8 @@ func (t Tool) GetCredentialTools(prg Program, agentGroup []ToolReference) ([]Too result.AddAll(t.GetToolRefsFromNames(t.Credentials)) + result.AddAll(t.getCompletionToolRefs(prg, nil, ToolTypeCredential)) + toolRefs, err := t.getCompletionToolRefs(prg, agentGroup) if err != nil { return nil, err diff --git a/pkg/types/tool_test.go b/pkg/types/tool_test.go index 43af6cee..a47014a1 100644 --- a/pkg/types/tool_test.go +++ b/pkg/types/tool_test.go @@ -33,6 +33,8 @@ func TestToolDef_String(t *testing.T) { 
ExportInputFilters: []string{"SharedFilter1", "SharedFilter2"}, OutputFilters: []string{"Filter1", "Filter2"}, ExportOutputFilters: []string{"SharedFilter1", "SharedFilter2"}, + ExportCredentials: []string{"ExportCredential1", "ExportCredential2"}, + Type: ToolTypeContext, }, Instructions: "This is a sample instruction", } @@ -41,6 +43,7 @@ func TestToolDef_String(t *testing.T) { Global Tools: GlobalTool1, GlobalTool2 Name: Tool Sample Description: This is a sample tool +Type: Context Agents: Agent1, Agent2 Tools: Tool1, Tool2 Share Tools: Export1, Export2 @@ -60,6 +63,8 @@ Parameter: arg2: desc2 Internal Prompt: true Credential: Credential1 Credential: Credential2 +Share Credential: ExportCredential1 +Share Credential: ExportCredential2 Chat: true This is a sample instruction