From 45f0aba87189371ac6f9ec557b2225e154b33d17 Mon Sep 17 00:00:00 2001 From: evalstate <1936278+evalstate@users.noreply.github.com> Date: Sat, 5 Apr 2025 20:04:08 +0100 Subject: [PATCH 1/3] agent created/edited --- agent/agent.py | 37 +++-- agent/docs/README.md | 45 ++++++ agent/docs/getting-started.md | 140 +++++++++++++++++ agent/fastagent.config.yaml | 4 +- docs/agents/prompting.md | 47 ++++-- docs/get-started/index.md | 221 +++++++++++++++++++++++++++ docs/get-started/interactive.md | 204 +++++++++++++++++++++++++ docs/get-started/mcp-config.md | 261 ++++++++++++++++++++++++++++++++ docs/mcp/aggregator.md | 179 ++++++++++++++++++++++ docs/mcp/index.md | 68 ++++++++- docs/mcp/types.md | 157 +++++++++++++++++-- mkdocs.yml | 21 ++- requirements.txt | 1 + 13 files changed, 1331 insertions(+), 54 deletions(-) create mode 100644 agent/docs/README.md create mode 100644 agent/docs/getting-started.md create mode 100644 docs/get-started/index.md create mode 100644 docs/get-started/interactive.md create mode 100644 docs/get-started/mcp-config.md create mode 100644 docs/mcp/aggregator.md diff --git a/agent/agent.py b/agent/agent.py index c2239d7..e4d430c 100644 --- a/agent/agent.py +++ b/agent/agent.py @@ -1,5 +1,7 @@ import asyncio +from pathlib import Path from mcp_agent.core.fastagent import FastAgent +from mcp_agent.core.prompt import Prompt import subprocess # Create the application @@ -8,20 +10,35 @@ # Define the agent @fast.agent( - instruction="You are a documentation production assistant", servers=["filesystem"] + instruction="You are a documentation production assistant. We are maintaining a documentation site using mkdocs" \ + "with the material theme. Your role is to assist the Human with maintaining, creating and ensuring the veracity of" \ + "the documentation. We can create test/example programs as needed to prove it working", servers=["filesystem"] ) async def main(): - # use the --model command line switch or agent arguments to change model async with fast.run() as agent: - # Execute shell command - result = subprocess.run( - ["repomix", "../fast-agent/", "repo.xml"], capture_output=True, text=True - ) - result = result.stdout # Or use result.stdout + result.stderr if you want both - # You can print or process the result if needed - print(f"Command output: {result}") - # Continue with agent interaction if needed + chunks = {"core": "core", + "agents": "agents", + "mcp":"mcp", + "llm":"llm"} + + for part,file in chunks.items(): + result = subprocess.run( + ["repomix", str(Path.home() / f"source/fast-agent/src/mcp_agent/{part}"), + "--ignore", "**/*.csv,resources/examples","--output",f"{file}.xml"], + cwd=".", + capture_output=True, + text=True + ) + print(f"Command output: {result}") + + repomix = Prompt.user("Here is the content of the repository we are documenting", + Path("core.xml"), + Path("agents.xml"), + Path("mcp.xml"), + str(Path.home() / "/source/fast-agent/README.md"), + "Await further instructions") + await agent(repomix) await agent() diff --git a/agent/docs/README.md b/agent/docs/README.md new file mode 100644 index 0000000..30e1041 --- /dev/null +++ b/agent/docs/README.md @@ -0,0 +1,45 @@ +# FastAgent Documentation + +Welcome to the official documentation for FastAgent (fast-agent-mcp), a Python framework for building effective AI agents using the Model Context Protocol (MCP). 
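If you want to jump straight in, FastAgent installs from PyPI with a single command (the [Getting Started](getting-started.md) guide walks through setup in detail):

```bash
pip install fast-agent-mcp
```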
+ +## Overview + +FastAgent is a framework designed to make it easy to create, compose, and deploy AI agents that can use tools, access resources, and apply techniques from the "Building Effective Agents" paper by Anthropic. The framework provides a simple, declarative API for creating agents while handling all the complexity of interacting with language models and MCP servers. + +## Table of Contents + +- [Getting Started](getting-started.md) +- Architecture + - [Core Concepts](architecture/core-concepts.md) + - [Agent Types](architecture/agent-types.md) + - [MCP Integration](architecture/mcp-integration.md) +- Workflow Patterns + - [Router Pattern](patterns/router.md) + - [Chain Pattern](patterns/chain.md) + - [Evaluator-Optimizer Pattern](patterns/evaluator-optimizer.md) + - [Orchestrator Pattern](patterns/orchestrator.md) + - [Parallel Pattern](patterns/parallel.md) +- Advanced Topics + - [Prompt Management](advanced/prompt-management.md) + - [Resource Handling](advanced/resource-handling.md) + - [Human Input](advanced/human-input.md) + - [Configuration](advanced/configuration.md) + - [Logging and Tracing](advanced/logging.md) + +## Key Features + +- **Agent Composition**: Build complex agents by composing simpler ones +- **Workflow Patterns**: Implement proven patterns for agent effectiveness +- **MCP Integration**: Seamless integration with MCP servers for tools and resources +- **Multiple LLM Support**: Works with OpenAI, Anthropic, DeepSeek, and more +- **Interactive Console**: Rich console UI for agent interaction +- **Prompt Management**: Load and apply prompt templates +- **Resource Handling**: Easily incorporate text, images, and other resources + +## License + +[License information] + +## Contributing + +[Contribution guidelines] \ No newline at end of file diff --git a/agent/docs/getting-started.md b/agent/docs/getting-started.md new file mode 100644 index 0000000..c2bb54a --- /dev/null +++ b/agent/docs/getting-started.md @@ -0,0 +1,140 @@ +# Getting Started with FastAgent + +This guide will help you install and set up FastAgent, create your first agent, and understand the basic workflow. + +## Installation + +You can install FastAgent using pip: + +```bash +pip install fast-agent-mcp +``` + +Or using a virtual environment tool like `uv`: + +```bash +uv pip install fast-agent-mcp +``` + +## Setting Up Your Environment + +FastAgent requires configuration for API keys and MCP servers. You can either set environment variables or use configuration files. 
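For example, the environment-variable route looks like this (the variable names shown match the providers used later in this guide):

```bash
# Set a key for whichever provider(s) you plan to use
export ANTHROPIC_API_KEY=your-anthropic-api-key
export OPENAI_API_KEY=your-openai-api-key
```

Configuration files are usually more convenient for projects, and are covered next.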
+ +### Using the CLI to Create a New Project + +FastAgent comes with a CLI tool that helps you scaffold a new project: + +```bash +fastagent setup +``` + +This will create the following files in your current directory: +- `fastagent.config.yaml`: Configuration for MCP servers and defaults +- `fastagent.secrets.yaml`: Configuration for API keys +- `agent.py`: A simple example agent +- `.gitignore`: Configured to ignore secrets + +### Configuration + +Edit `fastagent.secrets.yaml` to add your API keys: + +```yaml +# FastAgent Secrets Configuration +# WARNING: Keep this file secure and never commit to version control + +anthropic: + api_key: your-anthropic-api-key + +openai: + api_key: your-openai-api-key +``` + +Edit `fastagent.config.yaml` to configure MCP servers: + +```yaml +# Default model configuration +default_model: haiku # Alias for Claude 3.5 Haiku + +# Logging and Console Configuration +logger: + progress_display: true + show_chat: true + show_tools: true + +# MCP Servers +mcp: + servers: + fetch: + command: "uvx" + args: ["mcp-server-fetch"] + filesystem: + command: "npx" + args: ["-y", "@modelcontextprotocol/server-filesystem", "."] +``` + +## Creating Your First Agent + +Here's a simple example of creating an agent using the FastAgent framework: + +```python +import asyncio +from mcp_agent.core.fastagent import FastAgent + +# Create the application +fast = FastAgent("My First Agent") + +# Define an agent +@fast.agent( + instruction="You are a helpful AI assistant with a great sense of humor", + servers=["fetch"] +) +async def main(): + async with fast.run() as agent: + # Start an interactive session with the agent + await agent() + +if __name__ == "__main__": + asyncio.run(main()) +``` + +Save this code as `agent.py` and run it: + +```bash +python agent.py +``` + +This will start an interactive session with your agent that has access to the fetch server for web searches. + +## Using Different Models + +You can specify different LLM models by using the `model` parameter: + +```python +@fast.agent( + instruction="You are a helpful AI assistant", + servers=["fetch"], + model="gpt-4o" # Use OpenAI's GPT-4o model +) +``` + +Or by using the command line: + +```bash +python agent.py --model=gpt-4o +``` + +FastAgent supports various models through aliases: +- `haiku`: Claude 3.5 Haiku +- `sonnet`: Claude 3.5 Sonnet +- `opus`: Claude 3 Opus +- `gpt-4o`: OpenAI's GPT-4o +- `o3-mini`: OpenAI's o3-mini + +## Next Steps + +Now that you have a basic agent running, explore these next steps: + +1. Learn how to [compose agents using workflow patterns](patterns/router.md) +2. Add [resource handling](advanced/resource-handling.md) capabilities +3. Integrate [human input](advanced/human-input.md) for interactive agents +4. Create [prompts](advanced/prompt-management.md) for consistent behavior diff --git a/agent/fastagent.config.yaml b/agent/fastagent.config.yaml index c93360d..31b0bdb 100644 --- a/agent/fastagent.config.yaml +++ b/agent/fastagent.config.yaml @@ -10,7 +10,7 @@ # If not specified, defaults to "haiku". # Can be overriden with a command line switch --model=, or within the Agent constructor. 
-default_model: haiku
+default_model: sonnet

# Logging and Console Configuration:
logger:
@@ -37,5 +37,5 @@ mcp:
        [
          "-y",
          "@modelcontextprotocol/server-filesystem",
-          "~/source/docs-build/",
+          "~/source/fast-agent-docs/",
        ]
diff --git a/docs/agents/prompting.md b/docs/agents/prompting.md
index 0564604..e73d3ef 100644
--- a/docs/agents/prompting.md
+++ b/docs/agents/prompting.md
@@ -12,7 +12,7 @@ The simplest way of sending a message to an agent is the `send` method:
response: str = await agent.send("how are you?")
```

-To attach files, use the `Prompt.user()` convenience method:
+To attach files, use `Prompt.user()`:

```python
from mcp_agent.core.prompt import Prompt
@@ -28,9 +28,37 @@ plans: str = await agent.send(

Attached files are converted to the appropriate MCP type (e.g. ImageContent for images, EmbeddedResource for PDF and TextResource).

> Note there is also `Prompt.assistant()`, which produces messages for the `assistant` role.

### MCP Prompts

Apply a prompt from an MCP server to the agent with:

```python
response: str = await agent.apply_prompt(
    "setup_sizing",
    arguments={"units": "metric"}
)
```

You can list and get prompts from attached MCP servers:

```python
from mcp.types import GetPromptResult, PromptMessage

prompt: GetPromptResult = await agent.get_prompt("setup_sizing")
first_message: PromptMessage = prompt.messages[0]
```

and send the native MCP `PromptMessage` to the agent with:

```python
response: str = await agent.send(first_message)
```

> If the last message in the conversation is from the `assistant`, that content is returned as the response.

### MCP Resources

-`Prompt.user` also works with MCP Resources:
+You can use `Prompt.user` to work with MCP Resources:

```python
from mcp.types import ReadResourceResult
@@ -54,22 +82,9 @@ response: str = agent.with_resource(

```

-### MCP Prompts
-
-You can also use the MCP `PromptMessage` type directly:
-
-```python
-from mcp.types import PromptMessage, TextContent
-
-mcp_prompt: PromptMessage = PromptMessage(
-    role="user", content=TextContent(type="text", text="how are you?")
-)
-result: str = await agent.send(mcp_prompt)
-```
+## Structured Outputs

-> Note there is also `Prompt.assistant()` which produces a `PromptMessageMultipart` for the assistant role.
-## Structured Outputs

## Multiturn Conversations

diff --git a/docs/get-started/index.md b/docs/get-started/index.md
new file mode 100644
index 0000000..ef8daff
--- /dev/null
+++ b/docs/get-started/index.md
@@ -0,0 +1,221 @@
---
title: Getting Started with FastAgent
---

# Getting Started with FastAgent

Welcome to FastAgent! This guide will help you get up and running quickly with the core features of the framework. FastAgent makes it easy to create, compose, and deploy AI agents that can use tools, access resources, and understand complex prompts.

## Installation

Install FastAgent using pip:

```bash
pip install fast-agent-mcp
```

For development, you might want to install the package in editable mode:

```bash
pip install -e .
```

## Creating Your First Agent

FastAgent uses a simple, decorator-based API for creating agents.
Here's a minimal example: + +```python +from fastagent import FastAgent + +# Create an application +fast = FastAgent("my-first-app") + +# Define an agent using the decorator +@fast.agent(name="assistant", instruction="You are a helpful AI assistant.") +def main(): + pass + +# Run the application +if __name__ == "__main__": + import asyncio + + async def run(): + async with fast.run() as agent_app: + # Send a message to the agent + response = await agent_app.assistant.send("Hello, who are you?") + print(response) + + asyncio.run(run()) +``` + +## Using the Interactive Console + +FastAgent includes a rich interactive console for working with agents. This is great for testing and development: + +```python +from fastagent import FastAgent + +fast = FastAgent("interactive-demo") + +@fast.agent(name="assistant", instruction="You are a helpful AI assistant.") +def main(): + pass + +if __name__ == "__main__": + import asyncio + + async def run(): + async with fast.run() as agent_app: + # Start an interactive session with the agent + await agent_app.interactive(agent_name="assistant") + + asyncio.run(run()) +``` + +The interactive console supports: + +- Multi-line input (toggle with `Ctrl+T`) +- Command history (navigate with up/down arrows) +- Command completion +- Agent switching with `@agent_name` +- Special commands: + - `/help` - Show available commands + - `/clear` - Clear the screen + - `/agents` - List available agents + - `/prompts` - List and select MCP prompts + - `/prompt ` - Apply a specific prompt by name + +## Configuring Models + +You can specify which model to use when creating an agent: + +```python +@fast.agent( + name="assistant", + instruction="You are a helpful AI assistant.", + model="gpt-4o" # Use OpenAI's GPT-4o model +) +def main(): + pass +``` + +FastAgent supports multiple LLM providers including: + +- OpenAI (gpt-4o, gpt-3.5-turbo, etc.) +- Anthropic (claude-3-5-sonnet, claude-3-opus, etc.) +- Open source models through Ollama (llama3, mistral, etc.) + +## Working with MCP Servers + +### Configuration + +Create a `fastagent.config.yaml` file in your project directory: + +```yaml +# MCP servers configuration +mcp: + servers: + prompt_server: + transport: stdio + command: python + args: ["-m", "mcp_agent.mcp.prompts.prompt_server", "prompts/*.txt"] +``` + +This configuration sets up an MCP server that can serve prompt templates from text files. + +### Using Prompts + +Once you've configured an MCP server for prompts, you can use them in your agents: + +```python +async def run(): + async with fast.run() as agent_app: + # List available prompts + prompts = await agent_app.assistant.list_prompts() + print(prompts) + + # Apply a prompt template + result = await agent_app.assistant.apply_prompt( + "analyze_code", # Prompt name + {"language": "Python", "code": "print('Hello World')"} # Template variables + ) + print(result) +``` + +### Accessing Resources + +MCP servers can also provide resources like images, documents, or other data: + +```python +async def run(): + async with fast.run() as agent_app: + # List available resources + resources = await agent_app.assistant.list_resources() + print(resources) + + # Use a resource in a prompt + response = await agent_app.assistant.with_resource( + "Please analyze this image:", # Text prompt + "resource://image_server/example.jpg" # Resource URI + ) + print(response) +``` + +## Agent Composition + +FastAgent shines when composing multiple agents into workflows. 
Here's a simple example with two agents: + +```python +from fastagent import FastAgent + +fast = FastAgent("composition-demo") + +@fast.agent(name="researcher", instruction="You research facts and provide accurate information.") +def researcher(): + pass + +@fast.agent(name="writer", instruction="You write engaging content based on information.") +def writer(): + pass + +async def run(): + async with fast.run() as agent_app: + # First get research from the researcher + research = await agent_app.researcher.send("Provide facts about quantum computing") + + # Then have the writer create content based on the research + content = await agent_app.writer.send(f"Write a blog post based on this research: {research}") + + print(content) +``` + +## Next Steps + +Now that you have a basic understanding of FastAgent, you can explore: + +- [Advanced Agent Types](../agents/architecture.md) - Learn about router, chain, orchestrator patterns +- [MCP Integration](../mcp/index.md) - Dive deeper into working with MCP servers +- [Model Configuration](../models/index.md) - Learn more about configuring different LLM providers + +## Troubleshooting + +### Common Issues + +1. **Missing API Keys**: Ensure you've set the appropriate environment variables for your LLM provider: + ```bash + export OPENAI_API_KEY=your_key_here + # Or for Anthropic + export ANTHROPIC_API_KEY=your_key_here + ``` + +2. **MCP Server Connection**: If you're having trouble connecting to an MCP server, check: + - Server configuration in `fastagent.config.yaml` + - Server logs for errors + - Network connectivity if using remote servers + +3. **Model Availability**: Ensure you're using a model that's available to you. If you don't have access to a specific model, try using an alternative: + ```python + @fast.agent(name="assistant", model="gpt-3.5-turbo") # Fallback to a more accessible model + ``` + +For more help, check the [documentation](https://fast-agent.ai) or raise an issue on our [GitHub repository](https://github.com/evalstate/fast-agent). \ No newline at end of file diff --git a/docs/get-started/interactive.md b/docs/get-started/interactive.md new file mode 100644 index 0000000..8929b00 --- /dev/null +++ b/docs/get-started/interactive.md @@ -0,0 +1,204 @@ +--- +title: Interactive Mode +--- + +# Using Interactive Mode + +FastAgent's interactive mode provides a powerful command-line interface for interacting with your agents in real-time. This is especially useful during development and testing. + +## Starting Interactive Mode + +You can start interactive mode using the `agent_app.interactive()` method: + +```python +from fastagent import FastAgent + +fast = FastAgent("interactive-demo") + +@fast.agent(name="assistant", instruction="You are a helpful AI assistant.") +def main(): + pass + +if __name__ == "__main__": + import asyncio + + async def run(): + async with fast.run() as agent_app: + # Start an interactive session + await agent_app.interactive(agent_name="assistant") + + asyncio.run(run()) +``` + +This will launch an interactive console session where you can chat with your agent. + +## Interactive Console Features + +### Basic Chat + +Simply type your messages and press Enter to send them to the agent: + +``` +assistant > What can you tell me about FastAgent? + +FastAgent is a Python framework for building effective AI agents using the Model Context Protocol (MCP). 
It's designed to make it easy to create, compose, and deploy AI agents that can use tools, access resources, and apply techniques from the "Building Effective Agents" paper by Anthropic. + +Key features include: +... +``` + +### Multi-line Input + +For longer messages, you can toggle multi-line mode with `Ctrl+T`: + +1. Press `Ctrl+T` to enter multi-line mode +2. Type your message, using Enter for new lines +3. Press `Ctrl+J` (or `Ctrl+Enter` depending on your terminal) to send +4. Press `Ctrl+T` again to return to single-line mode + +The toolbar at the bottom of the terminal shows your current mode. + +### Command History + +Use up and down arrow keys to navigate through your previous messages. + +### Special Commands + +FastAgent's interactive mode supports several special commands: + +- `/help` - Show available commands +- `/clear` - Clear the screen +- `/agents` - List available agents in your application +- `/prompts` - Browse and select MCP prompts to apply +- `/prompt ` - Apply a specific prompt by name +- `STOP` - Stop the current prompting session +- `EXIT` - Exit fast-agent completely + +### Agent Switching + +If your application has multiple agents, you can switch between them using the `@agent_name` syntax: + +``` +assistant > @researcher + +researcher > What's the capital of France? + +The capital of France is Paris. +``` + +## Working with Prompts in Interactive Mode + +The interactive console makes it easy to work with MCP prompts: + +### Listing Available Prompts + +Use the `/prompts` command to see all available prompts: + +``` +assistant > /prompts + +Fetching prompts for agent assistant... + +prompt_server: + analyze_text + customer_service + explain_code + ... +``` + +### Selecting a Prompt + +You can apply a prompt interactively by selecting it from the menu: + +``` +assistant > /prompts + +Available MCP Prompts +┌────┬──────────────┬────────────────┬────────────────────────────┬──────┐ +│ # │ Server │ Prompt Name │ Description │ Args │ +├────┼──────────────┼────────────────┼────────────────────────────┼──────┤ +│ 1 │ prompt_server│ analyze_text │ Analyze text for sentiment │ 1 │ +│ 2 │ prompt_server│ explain_code │ Explain code in detail │ 2 │ +└────┴──────────────┴────────────────┴────────────────────────────┴──────┘ + +Enter prompt number to select: 1 + +Enter value for text (required): The service was excellent and I really enjoyed my experience. +``` + +### Applying a Prompt Directly + +You can also apply a prompt directly using the `/prompt` command: + +``` +assistant > /prompt analyze_text + +Enter value for text (required): The service was excellent and I really enjoyed my experience. + +Applying prompt prompt_server-analyze_text... + +Sentiment Analysis: +- Overall Sentiment: Positive +- Key Positive Terms: "excellent", "enjoyed" +... +``` + +## Advanced Usage + +### Setting Default Prompts + +You can provide a default prompt when starting interactive mode: + +```python +await agent_app.interactive(agent_name="assistant", default_prompt="Hello, how can I help you today?") +``` + +### Human Input Integration + +If your agent is configured with `human_input=True`, it can request input from the user during processing: + +```python +@fast.agent(name="assistant", human_input=True) +def main(): + pass +``` + +In your interactive session, the agent might ask: + +``` +assistant > I need to verify some information. 
+ +[AGENT REQUESTING INPUT]: Please confirm your date of birth in YYYY-MM-DD format: +``` + +This is useful for verification or when the agent needs additional context to complete a task. + +### Integration with MCP Resources + +You can use resources in interactive mode by applying prompts that include them: + +``` +assistant > /prompt analyze_image + +Enter value for image_url (required): https://example.com/image.jpg + +Applying prompt prompt_server-analyze_image... + +Image Analysis: +- Content: The image shows a mountain landscape with a lake +... +``` + +## Tips for Effective Interactive Sessions + +1. **Use Multi-line Mode for Complex Prompts**: Toggle with `Ctrl+T` when you need to format your input carefully. + +2. **Leverage Prompt Templates**: Use `/prompts` to discover and apply well-crafted prompts rather than creating complex prompts from scratch. + +3. **Switch Between Agents**: Use `@agent_name` to leverage specialized agents for different tasks. + +4. **Save Important Responses**: Copy valuable responses to a separate document since the interactive session isn't automatically saved. + +5. **Clear Regularly**: Use `/clear` to keep your terminal uncluttered during extended sessions. + +With these features, the interactive console becomes a powerful tool for developing, testing, and using your FastAgent applications. \ No newline at end of file diff --git a/docs/get-started/mcp-config.md b/docs/get-started/mcp-config.md new file mode 100644 index 0000000..c71e16a --- /dev/null +++ b/docs/get-started/mcp-config.md @@ -0,0 +1,261 @@ +--- +title: Configuring MCP Servers +--- + +# Configuring MCP Servers + +Model Context Protocol (MCP) servers provide additional capabilities to your agents, such as access to tools, resources, and prompt templates. This guide will help you configure and use MCP servers with FastAgent. + +## Understanding MCP Servers + +MCP servers provide several capabilities: + +- **Prompts**: Reusable prompt templates +- **Tools**: Functions that can be called by your agents +- **Resources**: External data like images, documents, or databases +- **Sampling**: Text generation services + +FastAgent can connect to multiple MCP servers simultaneously, giving your agents access to a wide range of capabilities. 
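As a sketch of where this guide is heading, an agent opts in to specific servers by name in its decorator (the server names below are the example servers configured in the next section):

```python
# An agent sees only the MCP servers it lists; fast-agent
# aggregates the prompts, tools and resources they expose.
@fast.agent(
    name="assistant",
    instruction="You are a helpful AI assistant.",
    servers=["prompt_server", "math_server", "resource_server"],
)
def main():
    pass
```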
+ +## Configuration File + +MCP servers are configured in the `fastagent.config.yaml` file in your project directory: + +```yaml +# Basic configuration for FastAgent +default_model: "gpt-4o" # Default model if not specified on agent + +# MCP servers configuration +mcp: + servers: + # Prompt server - serves prompt templates from text files + prompt_server: + transport: stdio # Communication method (stdio or sse) + command: python # Command to run the server + args: ["-m", "mcp_agent.mcp.prompts.prompt_server", "prompts/*.txt"] + + # Tool server - provides mathematical tools + math_server: + transport: stdio + command: python + args: ["-m", "math_tools.server"] + + # Resource server - provides access to files and images + resource_server: + transport: sse # Server-Sent Events over HTTP + url: "http://localhost:8000" # URL for SSE connection +``` + +### Server Configuration Options + +Each server configuration can include: + +- `transport`: Communication method (`stdio` or `sse`) +- `command`: Command to run the server (for `stdio`) +- `args`: Arguments for the command (for `stdio`) +- `url`: URL for SSE connection (for `sse`) +- `env`: Environment variables for the server process +- `sampling`: Configuration for sampling capabilities +- `read_timeout_seconds`: Timeout for server responses +- `roots`: Resource root configurations + +## Built-in MCP Servers + +FastAgent includes several built-in MCP servers: + +### Prompt Server + +The prompt server serves prompt templates from text files: + +```yaml +prompt_server: + transport: stdio + command: python + args: ["-m", "mcp_agent.mcp.prompts.prompt_server", "prompts/*.txt"] +``` + +This server loads all `.txt` files in the `prompts` directory as prompt templates. + +#### Creating Prompt Templates + +Prompt templates use a simple delimiter format: + +``` +---USER +I need you to analyze the following {{language}} code: + +{{code}} +---ASSISTANT +Here's my analysis of the {{language}} code: + +1. Purpose: +2. Structure: +3. Potential issues: +``` + +The template contains variables in double curly braces (`{{variable}}`) that can be filled in when the prompt is applied. + +## Connecting to MCP Servers + +FastAgent automatically connects to all configured MCP servers when your application starts. 
You specify which servers an agent can access in its decorator: + +```python +@fast.agent( + name="coder", + instruction="You are a coding assistant.", + servers=["prompt_server", "resource_server"] # Servers this agent can access +) +def code_agent(): + pass +``` + +## Using MCP Features in Your Agents + +### Working with Prompts + +```python +# List available prompts +prompts = await agent.list_prompts() +print(prompts) + +# Apply a prompt template +result = await agent.apply_prompt( + "analyze_code", # Prompt name + { + "language": "Python", + "code": "print('Hello World')" + } # Template variables +) +``` + +### Working with Resources + +```python +# List available resources +resources = await agent.list_resources() +print(resources) + +# Use a resource in a prompt +response = await agent.with_resource( + "Please analyze this image:", # Text prompt + "resource://image_server/example.jpg" # Resource URI +) +``` + +### Calling Tools + +```python +# List available tools +tools = await agent.list_tools() +print(tools) + +# Call a tool +result = await agent.call_tool( + "math_server-calculate", # Tool name with server prefix + {"expression": "2 + 2 * 3"} # Tool arguments +) +``` + +## Creating a Basic Prompt Server + +The simplest way to add capabilities to your agents is through a prompt server. Here's how to set one up: + +1. Create a `prompts` directory in your project +2. Add prompt template files (`.txt`) to this directory +3. Configure the prompt server in your config file: + +```yaml +mcp: + servers: + prompt_server: + transport: stdio + command: python + args: ["-m", "mcp_agent.mcp.prompts.prompt_server", "prompts/*.txt"] +``` + +4. Access prompts in your agent: + +```python +@fast.agent(name="assistant", servers=["prompt_server"]) +def main(): + pass + +async def run(): + async with fast.run() as agent_app: + # Apply a prompt template + result = await agent_app.assistant.apply_prompt( + "explain_topic", + {"topic": "quantum computing"} + ) + print(result) +``` + +## Advanced: Creating Custom MCP Servers + +You can create custom MCP servers to provide specialized tools or resources: + +1. Create a new Python file for your server: + +```python +# math_tools.py +from mcp.server.fastmcp import FastMCP + +app = FastMCP("Math Tools Server") + +@app.tool +async def calculate(expression: str) -> float: + """Calculate the result of a mathematical expression.""" + return eval(expression) + +if __name__ == "__main__": + import asyncio + asyncio.run(app.run_stdio_async()) +``` + +2. Configure your server in `fastagent.config.yaml`: + +```yaml +mcp: + servers: + math_server: + transport: stdio + command: python + args: ["math_tools.py"] +``` + +3. Use the tool in your agent: + +```python +@fast.agent(name="calculator", servers=["math_server"]) +def main(): + pass + +async def run(): + async with fast.run() as agent_app: + result = await agent_app.calculator.call_tool( + "math_server-calculate", + {"expression": "2 + 2 * 3"} + ) + print(result) # Output: 8.0 +``` + +## Troubleshooting MCP Servers + +If you encounter issues with MCP servers: + +1. **Check Configuration**: Ensure your `fastagent.config.yaml` is correctly formatted. + +2. **Verify File Paths**: Make sure the paths to your prompt files or server scripts are correct. + +3. **Check Permissions**: Ensure the command has permission to execute. + +4. **Monitor Server Output**: FastAgent logs server output to help with debugging. + +5. 
**Test Servers Independently**: You can run MCP servers directly to test them: + ```bash + python -m mcp_agent.mcp.prompts.prompt_server prompts/*.txt + ``` + +6. **Check for Port Conflicts**: If using SSE transport, ensure the specified port is available. + +By properly configuring MCP servers, you can significantly enhance your agents' capabilities with tools, prompts, and resources. \ No newline at end of file diff --git a/docs/mcp/aggregator.md b/docs/mcp/aggregator.md new file mode 100644 index 0000000..a64aeca --- /dev/null +++ b/docs/mcp/aggregator.md @@ -0,0 +1,179 @@ +# MCP Aggregator and Connection Management + +The MCP Aggregator system provides a unified interface to multiple MCP servers, allowing agents to seamlessly access tools, resources, and prompts across distributed servers. + +## MCPAggregator + +The `MCPAggregator` class is the core component that enables multi-server access: + +```python +from mcp_agent.mcp.mcp_aggregator import MCPAggregator + +# Create an aggregator with multiple servers +aggregator = MCPAggregator( + server_names=["server1", "server2", "server3"], + connection_persistence=True +) + +# Initialize the aggregator +await aggregator.__aenter__() + +# Use the aggregator to access tools, prompts, etc. +tools = await aggregator.list_tools() +``` + +### Key Features + +- **Server Discovery**: Automatically discovers tools, prompts, and resources from each server +- **Unified Access**: Provides a single interface for accessing multiple servers +- **Namespaced Resources**: Uses namespacing to avoid conflicts between servers +- **Persistent Connections**: Maintains persistent connections to servers for better performance +- **Error Handling**: Gracefully handles server errors and disconnections + +### Common Operations + +#### Working with Tools + +```python +# List all tools across all servers +tools_result = await aggregator.list_tools() + +# Call a tool by name - automatically routes to the right server +result = await aggregator.call_tool( + name="server1-tool_name", # Namespaced tool name + arguments={"param1": "value1"} +) + +# Call a tool without namespace - searches all servers +result = await aggregator.call_tool( + name="tool_name", + arguments={"param1": "value1"} +) +``` + +#### Working with Prompts + +```python +# List all prompts from all servers +prompts_map = await aggregator.list_prompts() + +# Get a prompt by name from a specific server +prompt_result = await aggregator.get_prompt( + prompt_name="my_prompt", + server_name="server1" +) + +# Apply template variables to a prompt +prompt_result = await aggregator.get_prompt( + prompt_name="server1-template_prompt", + arguments={"variable": "value"} +) +``` + +#### Working with Resources + +```python +# List resources from all servers +resources_map = await aggregator.list_resources() + +# Get a resource by URI +resource_result = await aggregator.get_resource( + resource_uri="resource://fast-agent/example.txt", + server_name="server1" # Optional - will search all servers if not provided +) +``` + +## MCPConnectionManager + +The `MCPConnectionManager` handles the lifecycle of connections to MCP servers: + +```python +from mcp_agent.mcp.mcp_connection_manager import MCPConnectionManager + +# Create a connection manager +connection_manager = MCPConnectionManager(server_registry) + +# Get a server connection (launches the server if needed) +server_conn = await connection_manager.get_server( + server_name="server1", + client_session_factory=MCPAgentClientSession +) + +# Use the server connection 
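# (the connection wraps a standard MCP client session, so the usual
# client calls - list_tools, call_tool, get_prompt - are available on it)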
+session = server_conn.session +result = await session.list_tools() + +# Disconnect when done +await connection_manager.disconnect_server("server1") +``` + +### Connection Lifecycle + +The connection manager handles: + +1. **Server Launch**: Starts the server process if needed +2. **Connection Setup**: Establishes communication channels +3. **Session Creation**: Creates an MCP client session +4. **Error Recovery**: Handles disconnections and errors +5. **Clean Shutdown**: Properly terminates servers when done + +### Server Capabilities + +You can check a server's capabilities to determine what features it supports: + +```python +# Get server capabilities +capabilities = await connection_manager.get_server_capabilities("server1") + +# Check if server supports prompts +if capabilities and capabilities.prompts: + # Server supports prompts + pass + +# Check if server supports tools +if capabilities and capabilities.tools: + # Server supports tools + pass +``` + +## Working with Server Connections + +For most use cases, the `MCPAggregator` provides the simplest interface. However, for more control over server connections, you can use the connection manager directly: + +```python +# Launch a specific server +server_conn = await connection_manager.launch_server( + server_name="server1", + client_session_factory=MCPAgentClientSession +) + +# Wait for the server to be fully initialized +await server_conn.wait_for_initialized() + +# Check if the server is healthy +if server_conn.is_healthy(): + # Server is ready to use + pass + +# Shut down the server when done +await connection_manager.disconnect_server("server1") +``` + +## CompoundServer + +For advanced use cases, FastAgent also provides a `MCPCompoundServer` that aggregates multiple MCP servers and presents them as a single MCP server: + +```python +from mcp_agent.mcp.mcp_aggregator import MCPCompoundServer + +# Create a compound server +compound_server = MCPCompoundServer( + server_names=["server1", "server2", "server3"], + name="CompoundServer" +) + +# Run the server +await compound_server.run_stdio_async() +``` + +This allows you to create a unified MCP server that aggregates tools, prompts, and resources from multiple underlying servers. \ No newline at end of file diff --git a/docs/mcp/index.md b/docs/mcp/index.md index 88d704d..02daf9e 100644 --- a/docs/mcp/index.md +++ b/docs/mcp/index.md @@ -1,17 +1,69 @@ --- -title: MCP Features +title: MCP Integration --- -# MCP Features +# MCP Integration in FastAgent -## Tools +FastAgent integrates deeply with the Model Context Protocol (MCP) to provide a powerful, flexible framework for working with language models. This integration allows agents to access various MCP capabilities such as tools, resources, and prompt templates across multiple servers. 
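Most of this machinery stays behind the scenes. Here is a rough sketch of the developer-facing surface (a minimal example; the prompt name and argument are placeholders, and each method is covered in the sections below):

```python
# Declare which servers an agent may use; the aggregator handles the rest
@fast.agent(name="assistant", servers=["prompt_server"])
async def main():
    async with fast.run() as agent_app:
        tools = await agent_app.assistant.list_tools()    # capability discovery
        result = await agent_app.assistant.apply_prompt(  # apply a server-side template
            "analyze_text", {"text": "hello"}
        )
```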
-## Roots +## Core MCP Features -## Sampling +FastAgent supports all major MCP features: -## Prompts +- **Tools**: Execute specialized functions on MCP servers +- **Resources**: Access and utilize external data sources +- **Prompts**: Manage and apply reusable prompt templates +- **Sampling**: Generate text completions from language models +- **Instructions**: Use system instructions for guiding model behavior +- **Multi-Server Support**: Connect to and aggregate services from multiple MCP servers -## Resources +## Architecture -## Instructions +The MCP integration in FastAgent is built around several key components: + +- **MCPAggregator**: Manages connections to multiple MCP servers and provides a unified interface +- **MCPConnectionManager**: Handles server connections lifecycle +- **PromptMessageMultipart**: Extended message type that supports multiple content parts +- **Agent Interface**: Provides methods for accessing MCP features through agents + +```mermaid +graph TD + A[Agent] -->|Uses| B[MCPAggregator] + B -->|Manages| C[MCP Servers] + B -->|Creates| D[MCPConnectionManager] + D -->|Connects to| C + A -->|Sends/Receives| E[PromptMessageMultipart] + E -->|Converted to| F[MCP Types] + F -->|Sent to| C +``` + +## Core Components + +### MCPAggregator + +The `MCPAggregator` class serves as a bridge between your agents and multiple MCP servers. It: + +- Manages connections to multiple servers +- Discovers and indexes available tools and prompts +- Routes requests to the appropriate server +- Provides unified access to distributed capabilities + +### PromptMessageMultipart + +The `PromptMessageMultipart` class extends MCP's native message format to support: + +- Multiple content parts in a single message +- Mixed content types (text, images, resources) +- Convenient methods for accessing and manipulating content + +### MCPConnectionManager + +Handles low-level connection management: + +- Creates and maintains persistent connections to servers +- Handles reconnection and error recovery +- Manages server lifecycle (startup, shutdown) + +## Usage Examples + +See the [detailed examples](./examples.md) for more information on how to use MCP features in your agents. \ No newline at end of file diff --git a/docs/mcp/types.md b/docs/mcp/types.md index f5a35fc..ceaa15a 100644 --- a/docs/mcp/types.md +++ b/docs/mcp/types.md @@ -1,25 +1,150 @@ -# Integration with MCP Types +# MCP Types and Interfaces -## MCP Type Compatibility +FastAgent extends and enhances the MCP type system to provide a more flexible and powerful interface for working with language models. -FastAgent is built to seamlessly integrate with the MCP SDK type system: +## PromptMessageMultipart -Conversations with assistants are based on `PromptMessageMultipart` - an extension the the mcp `PromptMessage` type, with support for multiple content sections. This type is expected to become native in a future version of MCP: https://github.com/modelcontextprotocol/specification/pull/198 +The `PromptMessageMultipart` class extends MCP's native `PromptMessage` type to support multiple content parts within a single message. This allows for more complex interactions, such as messages containing both text and images. 
+ +```python +from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart +from mcp.types import TextContent, ImageContent + +# Create a message with multiple content parts +message = PromptMessageMultipart( + role="user", + content=[ + TextContent(type="text", text="Analyze this image:"), + ImageContent(type="image", data="base64_encoded_data", mimeType="image/png") + ] +) + +# Extract text content +text = message.first_text() # Gets the first text content +all_text = message.all_text() # Combines all text content +``` + +### Converting Between Types + +PromptMessageMultipart provides methods for converting between different message formats: + +```python +from mcp.types import PromptMessage +from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart + +# Convert a list of standard PromptMessages to PromptMessageMultipart objects +standard_messages = [...] # List of PromptMessage objects +multipart_messages = PromptMessageMultipart.to_multipart(standard_messages) + +# Convert back to standard PromptMessages +multipart_message = PromptMessageMultipart(role="user", content=[...]) +standard_messages = multipart_message.from_multipart() +``` + +### From Server Responses + +When working with MCP server responses, you can convert them directly: + +```python +# Convert GetPromptResult to PromptMessageMultipart objects +prompt_result = await agent.get_prompt("my-prompt") +multipart_messages = PromptMessageMultipart.from_get_prompt_result(prompt_result) +``` + +## Protocol Interfaces + +FastAgent defines several protocol interfaces to enable flexible implementation and testing: + +### AgentProtocol + +The `AgentProtocol` defines the standard interface that all agent implementations must follow: + +```python +class AgentProtocol(AugmentedLLMProtocol, Protocol): + """Protocol defining the standard agent interface""" + + name: str + + async def send(self, message: Union[str, PromptMessage, PromptMessageMultipart]) -> str: + """Send a message to the agent and get a response""" + ... + + async def apply_prompt(self, prompt_name: str, arguments: Dict[str, str] | None = None) -> str: + """Apply an MCP prompt template by name""" + ... + + async def list_prompts(self, server_name: str | None = None) -> Mapping[str, List[Prompt]]: + """List available prompts from all servers or a specific server""" + ... + + # Additional methods... +``` + +### AugmentedLLMProtocol + +The `AugmentedLLMProtocol` defines the core LLM interaction interface: + +```python +class AugmentedLLMProtocol(Protocol): + """Protocol defining the interface for augmented LLMs""" + + async def structured( + self, + prompt: List[PromptMessageMultipart], + model: Type[ModelT], + request_params: RequestParams | None = None, + ) -> Tuple[ModelT | None, PromptMessageMultipart]: + """Apply the prompt and return the result as a Pydantic model""" + ... + + async def generate( + self, + multipart_messages: List[PromptMessageMultipart], + request_params: RequestParams | None = None, + ) -> PromptMessageMultipart: + """Apply messages to the LLM and get a response""" + ... + + @property + def message_history(self) -> List[PromptMessageMultipart]: + """Get the LLM's message history""" + ... 
+``` ## Message History Transfer -FastAgent makes it easy to transfer conversation history between agents: +One of the key benefits of the unified type system is the ability to transfer conversation history between different agents or LLMs: + +```python +# Start conversation with Claude +response = await claude_agent.send("Tell me about quantum computing") + +# Transfer the entire conversation history to GPT-4 +await gpt4_agent.generate(claude_agent.message_history) + +# Continue conversation with GPT-4 +response = await gpt4_agent.send("Can you elaborate on quantum entanglement?") +``` + +This capability enables seamless handoffs between different agents, allowing for specialized agents to handle different parts of a conversation or task. + +## Content Helpers + +The `content_helpers` module provides utility functions for working with various content types: -```python title="history_transfer.py" -@fast.agent(name="haiku", model="haiku") -@fast.agent(name="openai", model="o3-mini.medium") +```python +from mcp_agent.mcp.helpers.content_helpers import get_text, is_image_content -async def main() -> None: - async with fast.run() as agent: - # Start an interactive session with "haiku" - await agent.prompt(agent_name="haiku") - # Transfer the message history top "openai" (using PromptMessageMultipart) - await agent.openai.generate(agent.haiku.message_history) - # Continue the conversation - await agent.prompt(agent_name="openai") +# Extract text from a content object +text = get_text(content) + +# Check content type +if is_text_content(content): + # Handle text content +elif is_image_content(content): + # Handle image content +elif is_resource_content(content): + # Handle embedded resource ``` + +These helpers simplify working with the different content types in MCP messages. 
\ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index d699202..8c5e629 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -51,9 +51,18 @@ markdown_extensions: - pymdownx.emoji: emoji_index: !!python/name:material.extensions.emoji.twemoji emoji_generator: !!python/name:material.extensions.emoji.to_svg - + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format plugins: - social + - llmstxt: + files: + - output: llms.txt + inputs: + - agents/index.md extra: social: - icon: fontawesome/brands/x-twitter @@ -64,9 +73,17 @@ copyright: © 2025 llmindset.co.uk nav: - fast-agent: index.md + - Get Started: + - get-started/index.md + - get-started/interactive.md + - get-started/mcp-config.md - Agents: + - agents/index.md - agents/prompting.md - - Models: models/ + - agents/architecture.md + - Models: + - models/index.md - MCP: - mcp/index.md - mcp/types.md + - mcp/aggregator.md diff --git a/requirements.txt b/requirements.txt index c242bfb..7c4654c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,6 +11,7 @@ MarkupSafe==3.0.2 mergedeep==1.3.4 mkdocs==1.6.1 mkdocs-get-deps==0.2.0 +mkdocs-llmstxt==0.1.0 mkdocs-material==9.6.11 mkdocs-material-extensions==1.3.1 mkdocs-minify-plugin==0.8.0 From 87a93936a2902cbd49b215c26b836e2e0d04bc6d Mon Sep 17 00:00:00 2001 From: evalstate <1936278+evalstate@users.noreply.github.com> Date: Sun, 6 Apr 2025 17:23:20 +0100 Subject: [PATCH 2/3] drafts --- agent/agent.py | 4 +- docs/get-started/index.md | 113 +++++++++++++++++++++++++----- docs/models/index.md | 144 +++++++++++++++++++++++++++++++++++++- 3 files changed, 239 insertions(+), 22 deletions(-) diff --git a/agent/agent.py b/agent/agent.py index e4d430c..133e913 100644 --- a/agent/agent.py +++ b/agent/agent.py @@ -32,12 +32,12 @@ async def main(): ) print(f"Command output: {result}") - repomix = Prompt.user("Here is the content of the repository we are documenting", + repomix = Prompt.user("The following content contains the repo we are documenting. AWAIT FURTHER INSTRUCTIONS before taking action", Path("core.xml"), Path("agents.xml"), Path("mcp.xml"), str(Path.home() / "/source/fast-agent/README.md"), - "Await further instructions") + "AWAIT FURTHER INSTRUCTIONS - ASK FOR THE NEXT STEP") await agent(repomix) await agent() diff --git a/docs/get-started/index.md b/docs/get-started/index.md index ef8daff..4abe06a 100644 --- a/docs/get-started/index.md +++ b/docs/get-started/index.md @@ -1,23 +1,100 @@ --- -title: Getting Started with FastAgent +title: Installation --- -# Getting Started with FastAgent +# Getting Started -Welcome to FastAgent! This guide will help you get up and running quickly with the core features of the framework. FastAgent makes it easy to create, compose, and deploy AI agents that can use tools, access resources, and understand complex prompts. +This guide will help you get up and running quickly with the core features of **fast-agent**. ## Installation -Install FastAgent using pip: +Start by installing the [uv package manager](https://docs.astral.sh/uv/) for Python. Then: ```bash -pip install fast-agent-mcp +uv pip install fast-agent-mcp # install fast-agent! + +fast-agent setup # create an example agent and config files +uv run agent.py # run your first agent +``` + +After running setup, either edit the `fastagent.secrets.yaml` file to enter your LLM API keys, or set the appropriate environment variables. Read more about model configuration [here](/models/). 
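The generated secrets file has this shape (keys shown for the two most common providers):

```yaml
# fastagent.secrets.yaml - keep this file out of version control
anthropic:
  api_key: your-anthropic-api-key
openai:
  api_key: your-openai-api-key
```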
+ + + +### Basic Agents + +Defining an agent is as simple as: + +```python +@fast.agent( + instruction="Given an object, respond only with an estimate of its size." +) +``` + +We can then send messages to the Agent: + +```python +async with fast.run() as agent: + moon_size = await agent("the moon") + print(moon_size) +``` + +Or start an interactive chat with the Agent: + +```python +async with fast.run() as agent: + await agent() +``` + +Here is the complete `sizer.py` Agent application, with boilerplate code: + +```python +import asyncio +from mcp_agent.core.fastagent import FastAgent + +# Create the application +fast = FastAgent("Agent Example") + +@fast.agent( + instruction="Given an object, respond only with an estimate of its size." +) +async def main(): + async with fast.run() as agent: + await agent() + +if __name__ == "__main__": + asyncio.run(main()) ``` -For development, you might want to install the package in editable mode: + +!!! note + + Windows Users - there are a couple of configuration changes needed for the Filesystem and Docker MCP Servers - necessary changes are detailed within the configuration files. + + + +The Agent can then be run with `uv run sizer.py`. + +Specify a model with the `--model` switch - for example `uv run sizer.py --model sonnet`. + +Start by installing the [uv package manager](https://docs.astral.sh/uv/) for Python. + +Then from you shell, install [**fast-agent**](https://pypi.org/project/fast-agent-mcp/) from PyPi with: ```bash -pip install -e . +uv pip install fast-agent-mcp # install fast-agent! +``` + +## Running your first agent + +Once installed, run `fast-agent setup`. This will create a configuration file, secrets file and + +````bash +fast-agent setup # create an example agent and config files +uv run agent.py # run your first agent ``` ## Creating Your First Agent @@ -38,15 +115,15 @@ def main(): # Run the application if __name__ == "__main__": import asyncio - + async def run(): async with fast.run() as agent_app: # Send a message to the agent response = await agent_app.assistant.send("Hello, who are you?") print(response) - + asyncio.run(run()) -``` +```` ## Using the Interactive Console @@ -63,12 +140,12 @@ def main(): if __name__ == "__main__": import asyncio - + async def run(): async with fast.run() as agent_app: # Start an interactive session with the agent await agent_app.interactive(agent_name="assistant") - + asyncio.run(run()) ``` @@ -133,7 +210,7 @@ async def run(): # List available prompts prompts = await agent_app.assistant.list_prompts() print(prompts) - + # Apply a prompt template result = await agent_app.assistant.apply_prompt( "analyze_code", # Prompt name @@ -152,7 +229,7 @@ async def run(): # List available resources resources = await agent_app.assistant.list_resources() print(resources) - + # Use a resource in a prompt response = await agent_app.assistant.with_resource( "Please analyze this image:", # Text prompt @@ -182,10 +259,10 @@ async def run(): async with fast.run() as agent_app: # First get research from the researcher research = await agent_app.researcher.send("Provide facts about quantum computing") - + # Then have the writer create content based on the research content = await agent_app.writer.send(f"Write a blog post based on this research: {research}") - + print(content) ``` @@ -202,6 +279,7 @@ Now that you have a basic understanding of FastAgent, you can explore: ### Common Issues 1. 
**Missing API Keys**: Ensure you've set the appropriate environment variables for your LLM provider: + ```bash export OPENAI_API_KEY=your_key_here # Or for Anthropic @@ -209,6 +287,7 @@ Now that you have a basic understanding of FastAgent, you can explore: ``` 2. **MCP Server Connection**: If you're having trouble connecting to an MCP server, check: + - Server configuration in `fastagent.config.yaml` - Server logs for errors - Network connectivity if using remote servers @@ -218,4 +297,4 @@ Now that you have a basic understanding of FastAgent, you can explore: @fast.agent(name="assistant", model="gpt-3.5-turbo") # Fallback to a more accessible model ``` -For more help, check the [documentation](https://fast-agent.ai) or raise an issue on our [GitHub repository](https://github.com/evalstate/fast-agent). \ No newline at end of file +For more help, check the [documentation](https://fast-agent.ai) or raise an issue on our [GitHub repository](https://github.com/evalstate/fast-agent). diff --git a/docs/models/index.md b/docs/models/index.md index 542221b..7a2d65b 100644 --- a/docs/models/index.md +++ b/docs/models/index.md @@ -6,22 +6,124 @@ Models in fast-agent are specified using a model string, that takes the format ` ### Precedence +Model specifications in fast-agent follow this precedence order (highest to lowest): + +1. Command line arguments with `--model` flag +2. Explicitly set in agent decorators +3. Configuration in `fastagent.config.yaml` +4. Default model configured in the application + ### Format -### Reasoning +Model strings follow this format: `provider.model_name.reasoning_effort` + +- **provider**: The LLM provider (e.g., `anthropic`, `openai`, `deepseek`, `generic`) +- **model_name**: The specific model to use +- **reasoning_effort** (optional): Controls the reasoning effort (`low`, `high`) for supported models + +Examples: +- `anthropic.claude-3-5-sonnet` +- `openai.gpt-4o` +- `openai.o3-mini.high` +- `generic.llama3:latest` + +### Reasoning Effort + +For models that support it, you can specify a reasoning effort: + +- **high**: More thorough reasoning, typically resulting in more detailed, accurate responses +- **low**: Less thorough reasoning, typically faster but potentially less detailed + +Example: `openai.o3-mini.high` ## Parameters +For each model provider, you can configure parameters either through environment variables or in your `fastagent.config.yaml` file. + +### Common Configuration Format + +In your `fastagent.config.yaml`: + +```yaml +models: + : + api_key: "your_api_key" # Override with API_KEY env var + base_url: "https://api.example.com" # Base URL for API calls + timeout_seconds: 60 # Request timeout in seconds +``` + ## Providers ### Anthropic +Anthropic's Claude models provide strong reasoning and instruction-following capabilities. + +**YAML Configuration:** +```yaml +models: + anthropic: + api_key: "your_anthropic_key" + base_url: "https://api.anthropic.com" # Default, rarely needs changing + timeout_seconds: 60 +``` + +**Environment Variables:** +- `ANTHROPIC_API_KEY`: Your Anthropic API key +- `ANTHROPIC_BASE_URL`: Override the API endpoint + +**Model Name Aliases:** +- `claude`: Maps to `claude-3-5-sonnet-20240620` +- `claude-3-haiku`: Maps to `claude-3-haiku-20240307` +- `claude-3-opus`: Maps to `claude-3-opus-20240229` +- `claude-3-sonnet`: Maps to `claude-3-sonnet-20240229` +- `claude-3-5-sonnet`: Maps to `claude-3-5-sonnet-20240620` + ### OpenAI -fast-agent supports OpenAI gpt-4o and o1/o3 series models. 
+fast-agent supports OpenAI's GPT-4o and o1/o3 series models with full tool calling capabilities. + +**YAML Configuration:** +```yaml +models: + openai: + api_key: "your_openai_key" + base_url: "https://api.openai.com/v1" # Default OpenAI endpoint + timeout_seconds: 60 +``` + +**Environment Variables:** +- `OPENAI_API_KEY`: Your OpenAI API key +- `OPENAI_BASE_URL`: Override the API endpoint + +**Model Name Aliases:** +- `gpt-4`: Maps to `gpt-4-turbo-preview` +- `gpt-4o`: Maps to `gpt-4o-2024-05-13` +- `o1`: Maps to `o1-preview` +- `o1-mini`: Maps to `o1-mini-2024-07-18` +- `o3`: Maps to `o3` +- `o3-mini`: Maps to `o3-mini` ### DeepSeek +DeepSeek offers cost-effective models with strong capabilities. + +**YAML Configuration:** +```yaml +models: + deepseek: + api_key: "your_deepseek_key" + base_url: "https://api.deepseek.com/v1" + timeout_seconds: 60 +``` + +**Environment Variables:** +- `DEEPSEEK_API_KEY`: Your DeepSeek API key +- `DEEPSEEK_BASE_URL`: Override the API endpoint + +**Model Names:** +- `deepseek-chat` +- `deepseek-coder` + ### Generic OpenAI LLM !!! warning @@ -29,4 +131,40 @@ fast-agent supports OpenAI gpt-4o and o1/o3 series models. Use the Generic Provider to connect to OpenAI compatible models (including Ollama). Tool Calling and other modalities for generic models are not included in the e2e test suite, and should be used at your own risk. -Models prefixed with `generic` will use a generic OpenAI endpoint, with the defaults configured to work with Ollama. For example, to run with Llama 3.2 latest you can specify `generic.llama3.2:latest`. As with other models `base_url` can be overridden. The associated API key environment variable is `GENERIC_API_KEY`, with `ollama` used as the default. +Models prefixed with `generic` will use a generic OpenAI endpoint, with the defaults configured to work with Ollama. For example, to run with Llama 3.2 latest you can specify `generic.llama3.2:latest`. + +**YAML Configuration:** +```yaml +models: + generic: + api_key: "ollama" # Default for Ollama, change as needed + base_url: "http://localhost:11434/v1" # Default for Ollama + timeout_seconds: 60 +``` + +**Environment Variables:** +- `GENERIC_API_KEY`: Your API key (defaults to `ollama` for Ollama) +- `GENERIC_BASE_URL`: Override the API endpoint + +**Usage with Ollama:** +If you're using `ollama serve`, you can use any model available in Ollama by specifying: +``` +generic.model_name +``` + +For example: `generic.llama3:latest` or `generic.mistral:latest` + +**Usage with other OpenAI API compatible providers:** +By configuring the `base_url` and appropriate `api_key`, you can connect to any OpenAI API-compatible provider, such as: + +- Self-hosted models (LM Studio, vLLM, etc.) +- Azure OpenAI Services +- Other providers with OpenAI-compatible APIs + +## Default Configuration + +You can set a default model for your application in your `fastagent.config.yaml`: + +```yaml +default_model: "openai.gpt-4o" # Default model for all agents +``` From 05033532e8c5e5196c2d17750718a0fd176918ac Mon Sep 17 00:00:00 2001 From: evalstate <1936278+evalstate@users.noreply.github.com> Date: Sun, 6 Apr 2025 17:23:50 +0100 Subject: [PATCH 3/3] prettierignore --- .prettierignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 .prettierignore diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000..5c457d7 --- /dev/null +++ b/.prettierignore @@ -0,0 +1 @@ +docs \ No newline at end of file