Diffstat (limited to 'packages/multillm-agentwrap')
-rw-r--r--  packages/multillm-agentwrap/README.md                            349
-rw-r--r--  packages/multillm-agentwrap/pyproject.toml                        16
-rw-r--r--  packages/multillm-agentwrap/src/multillm_agentwrap/__init__.py    20
-rw-r--r--  packages/multillm-agentwrap/src/multillm_agentwrap/provider.py   281
4 files changed, 666 insertions, 0 deletions
diff --git a/packages/multillm-agentwrap/README.md b/packages/multillm-agentwrap/README.md
new file mode 100644
index 0000000..2e0c27c
--- /dev/null
+++ b/packages/multillm-agentwrap/README.md
@@ -0,0 +1,349 @@
+# multillm-agentwrap
+
+Agent wrapper provider for multillm - wraps chat providers with agentic capabilities.
+
+## Overview
+
+The `agentwrap` provider allows you to use any chat provider (OpenAI, Google, Anthropic, etc.) with agentic capabilities including:
+
+- **Tool execution loop**: Automatically executes tools and sends results back
+- **Conversation history management**: Maintains context across tool calls
+- **Multi-turn interactions**: Continues until the task is complete or the maximum number of turns is reached
+
+## Installation
+
+```bash
+pip install multillm-agentwrap
+```
+
+Or with uv in a workspace:
+
+```bash
+uv add multillm-agentwrap
+```
+
+## Usage
+
+### Basic Usage
+
+```python
+import asyncio
+import multillm
+
+async def main():
+ client = multillm.Client()
+
+ # Wrap any chat model with agentic capabilities
+ async for msg in client.run("agentwrap/openai/gpt-4", "Hello!"):
+ if msg.type == "text":
+ print(msg.content)
+
+asyncio.run(main())
+```
+
+### With Tools
+
+```python
+import asyncio
+import multillm
+
+# Define a custom tool
+calculate_tool = multillm.Tool(
+ name="calculate",
+ description="Perform a calculation",
+ parameters={
+ "type": "object",
+ "properties": {
+ "expression": {"type": "string", "description": "Math expression"}
+ },
+ "required": ["expression"]
+ },
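+    # NOTE: eval() executes arbitrary code; fine for a demo, unsafe for untrusted input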
+ handler=lambda args: {"result": eval(args["expression"])}
+)
+
+async def main():
+ client = multillm.Client()
+
+ # Use with tools
+ async for msg in client.run(
+ "agentwrap/google/gemini-pro",
+ "What's 25 * 4?",
+ tools=[calculate_tool]
+ ):
+ if msg.type == "text":
+ print(msg.content)
+ elif msg.type == "tool_use":
+ print(f" → Using tool: {msg.tool_name}")
+ elif msg.type == "tool_result":
+ print(f" ← Result: {msg.tool_result}")
+
+asyncio.run(main())
+```
+
+### With Options
+
+```python
+from multillm import AgentOptions
+
+async def main():
+ client = multillm.Client()
+
+ options = AgentOptions(
+ max_turns=5,
+ system_prompt="You are a helpful assistant.",
+ temperature=0.7
+ )
+
+ async for msg in client.run(
+ "agentwrap/anthropic/claude-3-5-sonnet-20241022",
+ "Explain quantum computing",
+ options=options
+ ):
+ if msg.type == "text":
+ print(msg.content)
+
+asyncio.run(main())
+```
+
+## Supported Chat Providers
+
+Any chat provider supported by multillm can be wrapped:
+
+- `agentwrap/openai/gpt-4` - OpenAI GPT-4
+- `agentwrap/openai/gpt-4-turbo` - OpenAI GPT-4 Turbo
+- `agentwrap/openai/gpt-3.5-turbo` - OpenAI GPT-3.5 Turbo
+- `agentwrap/google/gemini-pro` - Google Gemini Pro
+- `agentwrap/google/gemini-1.5-pro` - Google Gemini 1.5 Pro
+- `agentwrap/anthropic/claude-3-5-sonnet-20241022` - Anthropic Claude 3.5 Sonnet
+- `agentwrap/openrouter/...` - Any OpenRouter model
+
+## Model Format
+
+The model string follows the format:
+
+```
+agentwrap/<chat-provider>/<model-name>
+```
+
+Where:
+- `agentwrap` - The agent wrapper provider
+- `<chat-provider>` - The chat provider to wrap (openai, google, anthropic, openrouter)
+- `<model-name>` - The specific model from that provider
+
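+For example, the `agentwrap/` prefix selects this provider and the remainder is handed through as the wrapped chat model, conceptually:
+
+```python
+# "agentwrap/openai/gpt-4" -> provider "agentwrap", wrapped model "openai/gpt-4"
+provider, _, wrapped_model = "agentwrap/openai/gpt-4".partition("/")
+```
+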
+## How It Works
+
+1. **Receives prompt**: User sends the initial message
+2. **Calls chat API**: Uses the wrapped chat provider via `chat_complete()`
+3. **Returns response**: If there are no tool calls, yields the text and stops
+4. **Executes tools**: If tool calls are present, executes them with the provided handlers
+5. **Continues loop**: Sends tool results back and gets the next response
+6. **Repeats**: Steps 3-5 repeat until there are no more tool calls or the max turn count is reached (see the sketch below)
+
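+The loop in `provider.py` boils down to this sketch (simplified: message yielding, error handling, and JSON decoding of tool arguments are omitted, and `execute_handler` stands in for the provider's internal tool dispatch):
+
+```python
+# Simplified sketch of the agentwrap tool loop (illustrative, not the exact code)
+messages = [{"role": "user", "content": prompt}]
+for _ in range(max_turns):
+    response = await client.chat_complete(wrapped_model, messages, tools=tools)
+    message = response.choices[0].message
+    messages.append({"role": "assistant", "content": message.content})
+    if not message.tool_calls:
+        break  # no tool calls: the model has produced its final answer
+    for call in message.tool_calls:
+        result = execute_handler(call, tools)  # run the matching Tool.handler
+        messages.append({
+            "role": "tool",
+            "tool_call_id": call["id"],
+            "content": str(result),
+        })
+```
+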
+## Configuration
+
+Configure the wrapped provider via multillm config:
+
+```python
+config = {
+ "openai": {"api_key": "sk-..."},
+ "google": {"api_key": "..."},
+ "agentwrap": {
+ "max_turns": 10 # Default max turns if not specified in options
+ }
+}
+
+client = multillm.Client(config)
+```
+
+## Agent Options
+
+All `AgentOptions` are supported:
+
+```python
+from multillm import AgentOptions
+
+options = AgentOptions(
+ system_prompt="Custom system prompt",
+ max_turns=15, # Max tool execution iterations
+ temperature=0.8, # Sampling temperature
+ max_tokens=2000, # Max tokens to generate
+)
+```
+
+## Message Types
+
+The agent yields different message types during execution:
+
+### System Message
+```python
+AgentMessage(
+ type="system",
+ content="Agentic session started",
+)
+```
+
+### Text Message
+```python
+AgentMessage(
+ type="text",
+ content="The answer is 42",
+ raw=<original response object>
+)
+```
+
+### Tool Use Message
+```python
+AgentMessage(
+ type="tool_use",
+ tool_name="calculate",
+ tool_input={"expression": "6*7"},
+ raw=<tool call object>
+)
+```
+
+### Tool Result Message
+```python
+AgentMessage(
+ type="tool_result",
+ tool_name="calculate",
+ tool_result="42",
+ raw=<result dict>
+)
+```
+
+### Result Message
+```python
+AgentMessage(
+ type="result",
+ content="Final answer",
+)
+```
+
+## Comparison with Native Agent Providers
+
+### AgentWrap (This Provider)
+- ✅ Works with any chat provider
+- ✅ Simple tool execution loop
+- ✅ Full control over chat API settings
+- ❌ No built-in tools (must provide custom tools)
+- ❌ No file system access
+- ❌ More basic agentic capabilities
+
+### Native Agent Providers (e.g., Claude)
+- ✅ Advanced agentic capabilities
+- ✅ Built-in tools (Bash, Read, Write, etc.)
+- ✅ File system access
+- ✅ Plan mode, interactive sessions
+- ❌ Limited to specific providers
+
+## Use Cases
+
+### When to Use AgentWrap
+
+- **Different models**: Want agentic behavior with OpenAI, Google, or other chat models
+- **Custom tools**: Need specific tool implementations
+- **Simple workflows**: Basic tool calling without file system access
+- **Cost optimization**: Use cheaper chat models with agentic capabilities
+
+### When to Use Native Agents
+
+- **File operations**: Need to read/write files, run commands
+- **Complex workflows**: Multi-step tasks requiring planning
+- **Built-in tools**: Want Bash, Read, Write, Grep, etc.
+- **Claude-specific**: Need Claude's advanced agentic features
+
+## Limitations
+
+1. **No built-in tools**: You must provide all tools yourself (unlike the Claude agent, which ships with Bash, Read, Write, etc.)
+2. **No file system access**: Can't read or write files unless you implement those tools (see the file-reading sketch under Examples)
+3. **No interactive mode**: Single-shot sessions only (`run_interactive` raises `NotImplementedError`)
+4. **Tool handlers required**: Every tool must have a Python handler function
+
+## Examples
+
+### Calculator Agent
+
+```python
+import asyncio
+import multillm
+
+calculate = multillm.Tool(
+ name="calculate",
+ description="Evaluate a mathematical expression",
+ parameters={
+ "type": "object",
+ "properties": {
+ "expression": {"type": "string"}
+ },
+ "required": ["expression"]
+ },
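+    # NOTE: eval() executes arbitrary code; fine for a demo, unsafe for untrusted input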
+ handler=lambda args: {"result": eval(args["expression"])}
+)
+
+async def main():
+ client = multillm.Client()
+
+ async for msg in client.run(
+ "agentwrap/openai/gpt-4",
+ "What's (125 + 75) * 3?",
+ tools=[calculate]
+ ):
+ if msg.type == "text":
+ print(msg.content)
+
+asyncio.run(main())
+```
+
+### Multi-Tool Agent
+
+```python
+import asyncio
+import multillm
+from datetime import datetime
+
+get_time = multillm.Tool(
+ name="get_current_time",
+ description="Get the current time",
+ parameters={"type": "object", "properties": {}},
+ handler=lambda args: {"time": datetime.now().isoformat()}
+)
+
+get_weather = multillm.Tool(
+ name="get_weather",
+ description="Get weather for a location",
+ parameters={
+ "type": "object",
+ "properties": {
+ "location": {"type": "string"}
+ },
+ "required": ["location"]
+ },
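+    # Stub handler returning fixed data; a real tool would call a weather API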
+ handler=lambda args: {"temp": 72, "condition": "sunny"}
+)
+
+async def main():
+ client = multillm.Client()
+
+ async for msg in client.run(
+ "agentwrap/google/gemini-pro",
+ "What time is it and what's the weather in Tokyo?",
+ tools=[get_time, get_weather]
+ ):
+ if msg.type == "text":
+ print(msg.content)
+
+asyncio.run(main())
+```
+
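+### File-Reading Tool (Sketch)
+
+agentwrap has no built-in file tools (see Limitations), but you can supply your own. This is a minimal sketch; the `read_file` name and handler are illustrative, not part of the package:
+
+```python
+import asyncio
+from pathlib import Path
+
+import multillm
+
+read_file = multillm.Tool(
+    name="read_file",
+    description="Read a UTF-8 text file and return its contents",
+    parameters={
+        "type": "object",
+        "properties": {
+            "path": {"type": "string", "description": "Path to the file"}
+        },
+        "required": ["path"]
+    },
+    # Handler runs locally with this process's permissions; restrict paths
+    # before exposing it to untrusted prompts
+    handler=lambda args: {"content": Path(args["path"]).read_text(encoding="utf-8")}
+)
+
+async def main():
+    client = multillm.Client()
+
+    async for msg in client.run(
+        "agentwrap/openai/gpt-4",
+        "Summarize README.md in one paragraph",
+        tools=[read_file]
+    ):
+        if msg.type == "text":
+            print(msg.content)
+
+asyncio.run(main())
+```
+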
+## License
+
+MIT
+
+## Contributing
+
+Contributions welcome! Please see the main multillm repository for guidelines.
+
+## See Also
+
+- [multillm](https://github.com/yourusername/multillm) - Main library
+- [multillm-claude](https://github.com/yourusername/multillm-claude) - Claude agent provider
diff --git a/packages/multillm-agentwrap/pyproject.toml b/packages/multillm-agentwrap/pyproject.toml
new file mode 100644
index 0000000..3713db2
--- /dev/null
+++ b/packages/multillm-agentwrap/pyproject.toml
@@ -0,0 +1,16 @@
+[project]
+name = "multillm-agentwrap"
+version = "0.1.0"
+description = "Agent wrapper provider for multillm - wraps chat providers with agentic capabilities"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+ "multillm>=0.1.0",
+]
+
+[project.entry-points."multillm.providers"]
+agentwrap = "multillm_agentwrap:AgentWrapProvider"
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
diff --git a/packages/multillm-agentwrap/src/multillm_agentwrap/__init__.py b/packages/multillm-agentwrap/src/multillm_agentwrap/__init__.py
new file mode 100644
index 0000000..64e7203
--- /dev/null
+++ b/packages/multillm-agentwrap/src/multillm_agentwrap/__init__.py
@@ -0,0 +1,20 @@
+"""
+Agent wrapper provider for multillm.
+
+Wraps chat providers with agentic capabilities including:
+- Tool execution loop
+- Conversation history management
+- Multi-turn interactions
+
+Usage:
+ # Wrap any chat provider with agentic capabilities
+ client = multillm.Client()
+
+ # Use agentwrap/ prefix to make any chat model agentic
+ async for msg in client.run("agentwrap/google/gemini", "Hello"):
+ print(msg)
+"""
+
+from .provider import AgentWrapProvider
+
+# Bind both names: the `Provider` alias, and `AgentWrapProvider` itself so the
+# pyproject entry point "multillm_agentwrap:AgentWrapProvider" resolves and
+# __all__ matches what the module actually exports.
+Provider = AgentWrapProvider
+
+__all__ = ["Provider", "AgentWrapProvider"]
diff --git a/packages/multillm-agentwrap/src/multillm_agentwrap/provider.py b/packages/multillm-agentwrap/src/multillm_agentwrap/provider.py
new file mode 100644
index 0000000..52f9ff7
--- /dev/null
+++ b/packages/multillm-agentwrap/src/multillm_agentwrap/provider.py
@@ -0,0 +1,281 @@
+"""
+Agent wrapper provider implementation.
+
+Wraps chat providers to provide agentic capabilities.
+"""
+
+import json
+import sys
+from typing import Any, AsyncIterator
+
+from multillm import (
+ BaseAgentProvider,
+ AgentMessage,
+ AgentOptions,
+ Tool,
+ ProviderError,
+ load_provider_config,
+ merge_config,
+)
+
+
+class AgentWrapProvider(BaseAgentProvider):
+ """
+ Agent wrapper provider that wraps chat providers with agentic capabilities.
+
+ The model parameter should be the chat provider and model to wrap.
+ For example, when using "agentwrap/google/gemini":
+ - Provider: "agentwrap"
+ - Model: "google/gemini" (passed to this provider)
+
+ This provider will:
+ 1. Use the specified chat provider internally via chat_complete()
+ 2. Implement tool execution loop
+ 3. Manage conversation history
+ 4. Provide agentic multi-turn interactions
+
+ Usage:
+ # Via client
+ client = multillm.Client()
+        async for msg in client.run("agentwrap/google/gemini", "Hello"):
+ print(msg)
+
+ # With tools
+        async for msg in client.run(
+ "agentwrap/openai/gpt-4",
+ "What's 2+2?",
+ tools=[calculate_tool],
+ ):
+ print(msg)
+ """
+
+ PROVIDER_NAME = "agentwrap"
+
+ def __init__(self, config: dict[str, Any] | None = None):
+ super().__init__(config)
+ self._client = None
+
+ def _get_client(self):
+ """Get or create client instance for making chat API calls."""
+ if self._client is None:
+ # Import here to avoid circular dependency
+ from multillm import Client
+ self._client = Client()
+ return self._client
+
+ def _build_options(self, options: AgentOptions | None) -> dict[str, Any]:
+ """Build options dict for wrapped provider."""
+ if options is None:
+ return {}
+
+ opts = {}
+ if options.system_prompt:
+ opts["system_prompt"] = options.system_prompt
+
+ # Merge with extra options (temperature, max_tokens, etc.)
+ if options.extra:
+ opts.update(options.extra)
+
+ return opts
+
+ async def _execute_tool(
+ self,
+ tool_call: dict,
+ tools: list[Tool] | None,
+ ) -> dict:
+ """
+ Execute a tool call and return the result.
+
+ Args:
+ tool_call: Tool call from chat response (OpenAI format)
+ tools: List of available tools with handlers
+
+ Returns:
+ Tool result dict with 'content' key
+ """
+        function_name = tool_call["function"]["name"]
+        function_args = tool_call["function"].get("arguments", {})
+        # OpenAI-format tool calls encode arguments as a JSON string; decode so
+        # handlers always receive a dict
+        if isinstance(function_args, str):
+            function_args = json.loads(function_args or "{}")
+
+ # Find the tool with matching name
+ if tools:
+ for tool in tools:
+ if tool.name == function_name:
+ # Execute the tool handler
+ try:
+ result = tool.handler(function_args)
+ # Handle async handlers
+ if hasattr(result, "__await__"):
+ result = await result
+
+ # Return formatted result
+ return {"content": str(result)}
+
+ except Exception as e:
+ return {
+ "content": f"Error executing tool: {e}",
+ "is_error": True
+ }
+
+ # Tool not found or no handlers
+ return {
+ "content": f"Tool '{function_name}' not found",
+ "is_error": True
+ }
+
+ async def run(
+ self,
+ prompt: str,
+ options: AgentOptions | None = None,
+ tools: list[Tool] | None = None,
+ ) -> AsyncIterator[AgentMessage]:
+ """
+ Run agentic workflow with the wrapped chat provider.
+
+ Args:
+ prompt: User message to send
+ options: Agent options (max_turns, system_prompt, etc.)
+ tools: Optional tools the agent can use
+
+ Yields:
+ AgentMessage objects representing the agent's actions and responses
+ """
+ # Yield session start message
+ yield AgentMessage(
+ type="system",
+ content="Agentic session started",
+ raw=None,
+ )
+
+        # Get wrapped model from config: when the client routes
+        # "agentwrap/google/gemini", it supplies "google/gemini" via the
+        # 'wrapped_model' config key
+ file_config = load_provider_config(self.PROVIDER_NAME)
+ merged_config = merge_config(file_config, self.config, {})
+ wrapped_model = merged_config.get("wrapped_model")
+
+ if not wrapped_model:
+ raise ProviderError(
+ "AgentWrap provider requires 'wrapped_model' in config. "
+ "When using via client, the model should be specified as 'agentwrap/provider/model'."
+ )
+
+ # Build options for chat API
+ chat_options = self._build_options(options)
+
+        # Get max turns: options take precedence, then provider config, then 10
+        if options and options.max_turns:
+            max_turns = options.max_turns
+        else:
+            max_turns = merged_config.get("max_turns", 10)
+
+ # Initialize conversation history
+ messages = []
+
+ # Add system prompt if provided
+ if options and options.system_prompt:
+ messages.append({
+ "role": "system",
+ "content": options.system_prompt
+ })
+
+ # Add user message
+ messages.append({
+ "role": "user",
+ "content": prompt
+ })
+
+ # Get client
+ client = self._get_client()
+
+ # Tool execution loop
+ final_text = ""
+ for turn in range(max_turns):
+ try:
+ # Call chat_complete with wrapped model
+ response = await client.chat_complete(
+ wrapped_model,
+ messages,
+ tools=tools,
+ **chat_options
+ )
+
+ # Get text from response
+ text = response.choices[0].message.content or ""
+ tool_calls = response.choices[0].message.tool_calls or []
+
+                # Add assistant message to history; omit the tool_calls key when
+                # empty, since a null tool_calls field can be rejected downstream
+                assistant_message: dict[str, Any] = {
+                    "role": "assistant",
+                    "content": text,
+                }
+                if tool_calls:
+                    assistant_message["tool_calls"] = tool_calls
+                messages.append(assistant_message)
+
+ # Yield text message if present
+ if text:
+ final_text = text
+ yield AgentMessage(
+ type="text",
+ content=text,
+ raw=response,
+ )
+
+ # Check if we're done (no tool calls)
+ if not tool_calls:
+ break
+
+ # Process tool calls
+ for tool_call in tool_calls:
+ # Yield tool use message
+ yield AgentMessage(
+ type="tool_use",
+ tool_name=tool_call["function"]["name"],
+ tool_input=tool_call["function"].get("arguments", {}),
+ raw=tool_call,
+ )
+
+ # Execute tool if handler available
+ tool_result = await self._execute_tool(tool_call, tools)
+
+ # Yield tool result message
+ yield AgentMessage(
+ type="tool_result",
+ tool_name=tool_call["function"]["name"],
+ tool_result=tool_result["content"],
+ raw=tool_result,
+ )
+
+ # Add tool result to message history
+ messages.append({
+ "role": "tool",
+ "tool_call_id": tool_call["id"],
+ "name": tool_call["function"]["name"],
+ "content": tool_result["content"]
+ })
+
+ except Exception as e:
+ # Yield error and stop
+ error_msg = f"Error in agentic loop: {e}"
+ print(f"\n{error_msg}", file=sys.stderr)
+ yield AgentMessage(
+ type="error",
+ content=error_msg,
+ raw=e,
+ )
+ raise ProviderError(error_msg) from e
+
+ # Yield final result
+ yield AgentMessage(
+ type="result",
+ content=final_text,
+ raw=None,
+ )
+
+ async def run_interactive(
+ self,
+ options: AgentOptions | None = None,
+ tools: list[Tool] | None = None,
+ ):
+ """
+ Interactive sessions not yet implemented for agentwrap.
+
+ Use multiple calls to run() instead.
+ """
+ raise NotImplementedError(
+ "Interactive sessions not yet implemented for agentwrap provider. "
+ "Use multiple calls to run() for multi-turn conversations."
+ )