path: root/packages/multillm-cli/src/multillm_cli/main.py
author    Louis Burda <dev@sinitax.com>    2026-02-02 08:10:56 +0100
committer Louis Burda <dev@sinitax.com>    2026-02-02 08:11:17 +0100
commit    d69c5b355c450e2c79b62b8a1a7946f375ac207d (patch)
tree      a20cc4b977e400b2cd08b25f5ea9581156524356 /packages/multillm-cli/src/multillm_cli/main.py
parent    43ddca6e4de9ed2b8615dedd9a31ee42881fdcb5 (diff)
Add agentwrap provider and allow tools for single (HEAD, main)
Diffstat (limited to 'packages/multillm-cli/src/multillm_cli/main.py')
-rw-r--r--  packages/multillm-cli/src/multillm_cli/main.py | 252
1 file changed, 209 insertions(+), 43 deletions(-)
diff --git a/packages/multillm-cli/src/multillm_cli/main.py b/packages/multillm-cli/src/multillm_cli/main.py
index b450b71..a38ebe6 100644
--- a/packages/multillm-cli/src/multillm_cli/main.py
+++ b/packages/multillm-cli/src/multillm_cli/main.py
@@ -65,6 +65,28 @@ BUILTIN_TOOLS = {
"required": ["location"]
}
}
+ },
+ "ask_user": {
+ "type": "function",
+ "function": {
+ "name": "ask_user",
+ "description": "Ask the user a question and get their response. Use this when you need user input or clarification.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "question": {
+ "type": "string",
+ "description": "The question to ask the user"
+ },
+ "options": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "Optional list of suggested answers (user can still provide their own)"
+ }
+ },
+ "required": ["question"]
+ }
+ }
}
}
@@ -118,10 +140,59 @@ def get_weather(location: str, unit: str = "celsius") -> dict:
}
+def ask_user(question: str, options: list[str] | None = None) -> dict:
+ """
+ Ask the user a question and collect their response.
+
+ This is an interactive tool that displays a question to the user
+ and waits for their input.
+ """
+ print("\n" + "=" * 70, file=sys.stderr)
+ print("❓ QUESTION FROM ASSISTANT", file=sys.stderr)
+ print("=" * 70, file=sys.stderr)
+ print(f"\n{question}\n", file=sys.stderr)
+
+ if options:
+ print("Suggested options:", file=sys.stderr)
+ for i, opt in enumerate(options, 1):
+ print(f" {i}. {opt}", file=sys.stderr)
+ print("\nYou can select a number or provide your own answer.", file=sys.stderr)
+
+ print("\nYour answer: ", file=sys.stderr, end="", flush=True)
+
+ try:
+ # Read from stdin
+ answer = input()
+
+ # If user entered a number and we have options, use that option
+ if options and answer.strip().isdigit():
+ idx = int(answer.strip()) - 1
+ if 0 <= idx < len(options):
+ answer = options[idx]
+
+ print("=" * 70 + "\n", file=sys.stderr)
+
+ return {
+ "question": question,
+ "answer": answer,
+ "selected_from_options": answer in options if options else False
+ }
+
+ except (EOFError, KeyboardInterrupt):
+ print("\n", file=sys.stderr)
+ print("=" * 70 + "\n", file=sys.stderr)
+ return {
+ "question": question,
+ "answer": None,
+ "error": "User cancelled input"
+ }
+
+
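+# Round-trip sketch: ask_user("Pick a color", ["red", "blue"]) prints the prompt,
+# reads e.g. "1" from stdin, and returns
+# {"question": "Pick a color", "answer": "red", "selected_from_options": True}.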
TOOL_FUNCTIONS = {
"get_current_time": get_current_time,
"calculate": calculate,
"get_weather": get_weather,
+ "ask_user": ask_user,
}
@@ -220,10 +291,78 @@ async def run_with_tools(
return "Maximum tool calling iterations reached"
-async def run_single(model: str, prompt: str, **kwargs) -> multillm.SingleResponse:
- """Run a single query against the specified model."""
+async def run_agentic(
+ model: str,
+ prompt: str,
+ tools: list[multillm.Tool] | None = None,
+ options: multillm.AgentOptions | None = None,
+ verbose: bool = False
+) -> str:
+ """
+ Run a query using the agentic API.
+
+ Uses agentwrap for chat providers and the native agent API for agent providers.
+ """
client = multillm.Client()
- return await client.single(model, prompt, **kwargs)
+
+ # For Claude, if AskUserQuestion is requested, provide a custom ask_user tool instead
+ provider_name = model.split("/")[0]
+ if provider_name == "claude" and options and options.allowed_tools:
+ if "AskUserQuestion" in options.allowed_tools:
+ # Remove AskUserQuestion (SDK built-in doesn't work interactively)
+ options.allowed_tools = [t for t in options.allowed_tools if t != "AskUserQuestion"]
+
+ # Add our custom ask_user tool
+ if not tools:
+ tools = []
+
+ # Create ask_user tool for Claude
+ ask_user_claude = multillm.Tool(
+ name="ask_user",
+ description="Ask the user a question and get their response. Use this when you need user input or clarification.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "question": {
+ "type": "string",
+ "description": "The question to ask the user"
+ },
+ "options": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "Optional suggested answers"
+ }
+ },
+ "required": ["question"]
+ },
+ handler=ask_user # Use the same handler as chat providers
+ )
+ tools.append(ask_user_claude)
+
+ print("ℹ️ Using custom 'ask_user' tool instead of AskUserQuestion for interactive prompting", file=sys.stderr)
+
+ # Collect text responses
+ text_parts = []
+ tool_uses = []
+
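+ # The stream yields typed messages: "text" chunks accumulate into the answer,
+ # while "tool_use"/"tool_result" are only echoed to stderr for visibility.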
+ async for msg in client.run(model, prompt, options=options, tools=tools):
+ if msg.type == "text":
+ text_parts.append(msg.content)
+ elif msg.type == "tool_use":
+ tool_uses.append(msg)
+ if verbose:
+ print(f" → {msg.tool_name}({json.dumps(msg.tool_input)})", file=sys.stderr)
+ else:
+ print(f" → {msg.tool_name}", file=sys.stderr)
+ elif msg.type == "tool_result":
+ if verbose:
+ print(f" ← {msg.tool_result}", file=sys.stderr)
+
+ # Show tool usage summary if any tools were used
+ if tool_uses and not verbose:
+ print(f"\n[Used {len(tool_uses)} tool(s)]\n", file=sys.stderr)
+
+ return " ".join(text_parts)
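+
+# Invocation sketch (mirrors main() below; assumes unset AgentOptions fields default to None):
+#   asyncio.run(run_agentic("claude/default", "List files",
+#                           options=multillm.AgentOptions(allowed_tools=["Bash"])))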
async def run_with_chat_tools(
@@ -232,19 +371,34 @@ async def run_with_chat_tools(
enabled_tools: list[str],
verbose: bool = False
) -> str:
- """Run with chat provider tools."""
- client = multillm.Client()
-
- # Build tool list from enabled tools
- tools = [BUILTIN_TOOLS[name] for name in enabled_tools if name in BUILTIN_TOOLS]
-
- if not tools:
- # No valid tools, just run normally
- result = await run_single(model, prompt)
- return result.text
+ """
+ Run with chat provider tools using agentwrap.
- # Run with tool loop
- return await run_with_tools(client, model, prompt, tools, verbose)
+ Converts built-in tools to Tool objects and uses agentwrap for execution.
+ """
+ # Build Tool objects from enabled tools
+ tool_objects = []
+ for name in enabled_tools:
+ if name in BUILTIN_TOOLS:
+ tool_def = BUILTIN_TOOLS[name]
+ tool_objects.append(multillm.Tool(
+ name=tool_def["function"]["name"],
+ description=tool_def["function"]["description"],
+ parameters=tool_def["function"]["parameters"],
+ handler=TOOL_FUNCTIONS[name]
+ ))
+
+ if not tool_objects:
+ # No valid tools, run without tools
+ return await run_agentic(f"agentwrap/{model}", prompt, verbose=verbose)
+
+ # Run with agentwrap and tools
+ return await run_agentic(
+ f"agentwrap/{model}",
+ prompt,
+ tools=tool_objects,
+ verbose=verbose
+ )
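+
+# Usage sketch: run_with_chat_tools("openai/gpt-4o", "What time is it?",
+# ["get_current_time"], verbose=True) resolves to an agentwrap/openai/gpt-4o
+# run with the get_current_time handler attached as a Tool object.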
def main():
@@ -254,18 +408,21 @@ def main():
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
- # Chat providers (simple queries)
+ # Chat providers (simple queries) - uses agentwrap internally
multillm -m openai/gpt-4o -p "What is 2+2?"
multillm -m anthropic/claude-sonnet-4-20250514 -p "Explain async/await"
multillm -m gemini/gemini-2.0-flash-exp -p "What is Python?"
- # With built-in tools (for chat providers)
+ # With built-in tools (for chat providers) - uses agentwrap with tool execution
multillm -m openai/gpt-4o -p "What time is it?" --use-tools get_current_time
multillm -m openai/gpt-4o -p "Calculate 15 * 23" --use-tools calculate
multillm -m openai/gpt-4o -p "What's the weather in Tokyo?" --use-tools get_weather
multillm -m openai/gpt-4o -p "What's 5+5 and the current time?" --use-tools calculate get_current_time
- # Agent providers (with tools)
+ # Interactive tools (ask user questions)
+ multillm -m openai/gpt-4o -p "Ask me about my preferences and create a summary" --use-tools ask_user
+
+ # Native agent providers (Claude with built-in tools)
multillm -m claude/default -p "What Python version?" --allowed-tools Bash
multillm -m claude/default -p "List files" --allowed-tools Bash Glob --max-turns 5
multillm -m claude/default -p "Read README.md" --allowed-tools Read
@@ -273,16 +430,23 @@ Examples:
# With stdin
cat file.txt | multillm -m openai/gpt-4o -p "Summarize:" --with-stdin
- # Permission modes
+ # Permission modes (for native agents)
multillm -m claude/default -p "Create hello.py" --allowed-tools Write --permission-mode acceptEdits
# Verbose mode
multillm -m openai/gpt-4o -p "Calculate 5*5" --use-tools calculate --verbose
+Note:
+ - Chat providers (openai, gemini, anthropic, etc.) are automatically wrapped with
+ agentic capabilities using the 'agentwrap' provider
+ - Native agent providers (claude) use their built-in agentic features
+ - Use --use-tools for chat providers, --allowed-tools for native agents
+
Available Built-in Tools (for chat providers with --use-tools):
get_current_time Get current date and time
calculate Perform mathematical calculations
get_weather Get weather information (mock data)
+ ask_user Ask the user a question and get their response (interactive)
Available Tools (for agent providers with --allowed-tools):
Read, Write, Edit, Bash, Glob, Grep, Task, WebFetch, WebSearch,
@@ -346,37 +510,39 @@ Available Tools (for agent providers with --allowed-tools):
prompt = f"{prompt}\n--- USER STDIN BEGIN ---\n{stdin_content}"
try:
- # Check if this is a chat provider with tools
+ # Determine if this is a chat or agent provider
+ provider_name = args.model.split("/")[0]
+ is_agent_provider = provider_name in ["claude"] # Native agent providers
+
if args.use_tools:
- # Use tool calling workflow for chat providers
+ # Use tool calling workflow for chat providers with agentwrap
result_text = asyncio.run(
run_with_chat_tools(args.model, prompt, args.use_tools, args.verbose)
)
print(result_text)
else:
- # Build kwargs for agent options
- kwargs = {}
- if args.max_turns is not None:
- kwargs["max_turns"] = args.max_turns
- if args.allowed_tools:
- kwargs["allowed_tools"] = args.allowed_tools
- if args.permission_mode:
- kwargs["permission_mode"] = args.permission_mode
-
- # Use single() for normal queries or agent providers
- result = asyncio.run(run_single(args.model, prompt, **kwargs))
-
- # Show tool usage for agent providers (inline)
- if result.tool_calls:
- print(f"\n[Agent used {len(result.tool_calls)} tool(s)]")
- for tc in result.tool_calls:
- if args.verbose:
- print(f" → {tc['function']['name']}({json.dumps(tc['function'].get('arguments', {}))})")
- else:
- print(f" → {tc['function']['name']}")
- print()
+ # Build agent options
+ options = None
+ if args.max_turns is not None or args.allowed_tools or args.permission_mode:
+ options = multillm.AgentOptions(
+ max_turns=args.max_turns,
+ allowed_tools=args.allowed_tools,
+ permission_mode=args.permission_mode,
+ )
+
+ # Determine which model string to use
+ if is_agent_provider:
+ # Use agent provider directly (claude)
+ model_to_use = args.model
+ else:
+ # Use agentwrap for chat providers
+ model_to_use = f"agentwrap/{args.model}"
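+ # e.g. "openai/gpt-4o" becomes "agentwrap/openai/gpt-4o"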
- print(result.text)
+ # Run with agentic API
+ result_text = asyncio.run(
+ run_agentic(model_to_use, prompt, options=options, verbose=args.verbose)
+ )
+ print(result_text)
except Exception as e:
print(f"Error: {e}", file=sys.stderr)