about summary refs log tree commit diff stats
path: root/packages/multillm
diff options
context:
space:
mode:
author: Louis Burda <dev@sinitax.com> 2026-02-02 08:10:56 +0100
committer: Louis Burda <dev@sinitax.com> 2026-02-02 08:11:17 +0100
commit: d69c5b355c450e2c79b62b8a1a7946f375ac207d (patch)
tree: a20cc4b977e400b2cd08b25f5ea9581156524356 /packages/multillm
parent: 43ddca6e4de9ed2b8615dedd9a31ee42881fdcb5 (diff)
download: multillm-main.tar.gz
download: multillm-main.zip
Add agentwrap provider and allow tools for single (HEAD, main)
Diffstat (limited to 'packages/multillm')
-rw-r--r-- packages/multillm/src/multillm/client.py | 89
1 file changed, 74 insertions(+), 15 deletions(-)
diff --git a/packages/multillm/src/multillm/client.py b/packages/multillm/src/multillm/client.py
index 4a108d7..12eb651 100644
--- a/packages/multillm/src/multillm/client.py
+++ b/packages/multillm/src/multillm/client.py
@@ -1,4 +1,5 @@
import importlib
+import warnings
from typing import Any, AsyncIterator
from .base import BaseProvider, Response, SingleResponse
@@ -6,7 +7,7 @@ from .agent import BaseAgentProvider, AgentMessage, AgentOptions, Tool
from .exceptions import ProviderNotFoundError, InvalidModelFormatError
CHAT_PROVIDERS = ["anthropic", "openai", "gemini", "openrouter"]
-AGENT_PROVIDERS = ["claude"]
+AGENT_PROVIDERS = ["claude", "agentwrap"]
SUPPORTED_PROVIDERS = CHAT_PROVIDERS + AGENT_PROVIDERS
@@ -119,10 +120,19 @@ class Client:
self._chat_providers[provider_name] = provider
return provider
- def _get_agent_provider(self, provider_name: str) -> BaseAgentProvider:
- """Get or create an agent provider instance."""
- if provider_name in self._agent_providers:
- return self._agent_providers[provider_name]
+ def _get_agent_provider(self, provider_name: str, wrapped_model: str | None = None) -> BaseAgentProvider:
+ """
+ Get or create an agent provider instance.
+
+ Args:
+ provider_name: Name of the provider
+ wrapped_model: For agentwrap, the model to wrap (e.g., "google/gemini")
+ """
+ # For agentwrap, use a unique key per wrapped model
+ cache_key = f"{provider_name}:{wrapped_model}" if provider_name == "agentwrap" and wrapped_model else provider_name
+
+ if cache_key in self._agent_providers:
+ return self._agent_providers[cache_key]
if provider_name not in AGENT_PROVIDERS:
raise ProviderNotFoundError(provider_name)
@@ -134,8 +144,12 @@ class Client:
**self.config.get(provider_name, {}),
}
+ # For agentwrap, inject the wrapped_model into config
+ if provider_name == "agentwrap" and wrapped_model:
+ provider_config["wrapped_model"] = wrapped_model
+
provider = module.Provider(provider_config)
- self._agent_providers[provider_name] = provider
+ self._agent_providers[cache_key] = provider
return provider
async def single(
@@ -148,13 +162,40 @@ class Client:
"""
Send a single message and get a response.
+ .. deprecated:: 0.2.0
+ Use :meth:`run` with ``agentwrap/<provider>/<model>`` instead for unified agentic API.
+ The single() method will be removed in version 1.0.0.
+
+ Migration examples:
+
+ Instead of::
+
+ result = await client.single("openai/gpt-4", "Hello")
+ print(result.text)
+
+ Use::
+
+ async for msg in client.run("agentwrap/openai/gpt-4", "Hello"):
+ if msg.type == "text":
+ print(msg.content)
+
+ For tool calling::
+
+ # Old way
+ result = await client.single("openai/gpt-4", "Calculate 5+3", tools=tools)
+
+ # New way
+ async for msg in client.run("agentwrap/openai/gpt-4", "Calculate 5+3", tools=tools):
+ if msg.type == "text":
+ print(msg.content)
+
This interface allows using both chat and agent providers for single-turn
interactions. It returns both the text response and any tool calls made.
Interface concepts:
- chat_complete(): Takes full conversation history, returns completion
- agent API: Maintains history internally, takes only newest user message
- - single(): Unified interface for both, handles single message/response
+ - single(): DEPRECATED - Unified interface for both, handles single message/response
Args:
model: Model identifier (e.g., "openai/gpt-4o", "claude/sonnet")
@@ -171,6 +212,14 @@ class Client:
- text: The text response
- tool_calls: List of tool calls made (if any)
"""
+ warnings.warn(
+ "single() is deprecated and will be removed in version 1.0.0. "
+ "Use run() with 'agentwrap/<provider>/<model>' instead for unified agentic API. "
+ "Example: client.run('agentwrap/openai/gpt-4', prompt)",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
provider_name, model_name = self._parse_model(model)
if self._is_agent_provider(provider_name):
@@ -298,20 +347,22 @@ class Client:
prompt: str,
options: AgentOptions | None = None,
tools: list[Tool] | None = None,
+ wrapped_model: str | None = None,
) -> AsyncIterator[AgentMessage]:
"""
Run an agent with the given prompt.
Args:
- provider: Provider name (e.g., "claude")
+ provider: Provider name (e.g., "claude", "agentwrap")
prompt: The task or query for the agent
options: Agent execution options
tools: Custom tools available to the agent
+ wrapped_model: For agentwrap, the chat model to wrap
Yields:
AgentMessage objects as the agent works
"""
- agent = self._get_agent_provider(provider)
+ agent = self._get_agent_provider(provider, wrapped_model=wrapped_model)
async for msg in agent.run(prompt, options, tools):
yield msg
@@ -326,7 +377,9 @@ class Client:
Run an agent using model string format.
Args:
- model: Model identifier (e.g., "claude/sonnet", "claude/default")
+ model: Model identifier
+ - Agent: "claude/sonnet", "claude/default"
+ - Agentwrap: "agentwrap/openai/gpt-4", "agentwrap/google/gemini"
prompt: The task or query for the agent
options: Agent execution options (model from string takes precedence)
tools: Custom tools available to the agent
@@ -341,10 +394,16 @@ class Client:
f"'{provider_name}' is a chat provider. Use chat_complete() instead."
)
- if options is None:
- options = self._build_agent_options(model_name)
- elif model_name and model_name != "default":
- options.extra["model"] = model_name
+ # For agentwrap, pass model_name as wrapped_model
+ wrapped_model = None
+ if provider_name == "agentwrap":
+ wrapped_model = model_name
+ else:
+ # For other agent providers, add model to options
+ if options is None:
+ options = self._build_agent_options(model_name)
+ elif model_name and model_name != "default":
+ options.extra["model"] = model_name
- async for msg in self.agent_run(provider_name, prompt, options, tools):
+ async for msg in self.agent_run(provider_name, prompt, options, tools, wrapped_model=wrapped_model):
yield msg