aboutsummaryrefslogtreecommitdiffstats
path: root/src/gemini/cli.py
diff options
context:
space:
mode:
authorClaude <claude@anthropic.com>2026-03-04 19:14:55 +0100
committerClaude <claude@anthropic.com>2026-03-04 19:14:55 +0100
commit171c5b86ef05974426ba5c5d8547c8025977d1a2 (patch)
tree2a1193e2bb81a6341e55d0b883a3fc33f77f8be1 /src/gemini/cli.py
parent9f14edf2b97286e02830d528038b32d5b31aaa0a (diff)
parent0278c87f062a9ae7d617b92be22b175558a05086 (diff)
downloadgemini-py-main.tar.gz
gemini-py-main.zip
Add initial versionHEADmain
Diffstat (limited to 'src/gemini/cli.py')
-rw-r--r--src/gemini/cli.py60
1 files changed, 60 insertions, 0 deletions
diff --git a/src/gemini/cli.py b/src/gemini/cli.py
new file mode 100644
index 0000000..75e22a7
--- /dev/null
+++ b/src/gemini/cli.py
@@ -0,0 +1,60 @@
+import argparse
+import asyncio
+import sys
+
+from .client import GeminiClient
+from .models import list_models
+from .types import GeminiOptions
+
+
def parse_args(argv=None):
    """Parse command-line arguments for the gemini CLI.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            in which case argparse falls back to ``sys.argv[1:]`` —
            identical to the previous behavior. Accepting an explicit
            list makes the parser unit-testable.

    Returns:
        argparse.Namespace with attributes ``prompt``, ``model``,
        ``credentials``, ``no_stream``, ``thinking`` and ``list_models``.
    """
    p = argparse.ArgumentParser(prog="gemini", description="Query Gemini via CLI")
    p.add_argument("prompt", nargs="?", help="Prompt to send (reads stdin if omitted)")
    p.add_argument("-m", "--model", default="gemini-2.5-pro", help="Model name")
    p.add_argument("-c", "--credentials", default=None, help="Path to oauth_creds.json")
    p.add_argument("--no-stream", action="store_true", help="Non-streaming mode")
    p.add_argument(
        "--thinking",
        type=int,
        default=None,
        metavar="BUDGET",
        help="Enable thinking mode with given token budget",
    )
    p.add_argument("--list-models", action="store_true", help="List available models and exit")
    return p.parse_args(argv)
+
+
async def run(args):
    """Send one prompt to Gemini and print the reply.

    The prompt comes from the positional CLI argument, falling back to
    stdin when omitted. Exits the process with status 1 if neither
    yields any text. Streams output chunk-by-chunk unless the user
    passed --no-stream.
    """
    text = args.prompt if args.prompt else sys.stdin.read().strip()
    if not text:
        print("Error: no prompt provided", file=sys.stderr)
        sys.exit(1)

    options = GeminiOptions(
        model=args.model,
        thinking_budget=args.thinking,
    )

    async with GeminiClient(options=options, credentials_path=args.credentials) as client:
        if not args.no_stream:
            # Streaming path: emit each text delta as it arrives,
            # then terminate the output with a newline.
            async for piece in client.send_message_stream(text):
                if piece.text_delta:
                    print(piece.text_delta, end="", flush=True)
            print()
        else:
            reply = await client.send_message(text)
            print(reply.text)
+
+
def main():
    """CLI entry point: list models, or run a single query."""
    args = parse_args()
    if not args.list_models:
        asyncio.run(run(args))
        return
    # --list-models short-circuits: print each model with its status tag.
    for model in list_models():
        if model.is_default:
            suffix = " [default]"
        elif model.is_preview:
            suffix = " [preview]"
        else:
            suffix = ""
        print(f"{model.name}{suffix}")
+
+
# Allow direct execution (`python cli.py` / `python -m gemini.cli`).
if __name__ == "__main__":
    main()