aboutsummaryrefslogtreecommitdiffstats
path: root/src/gemini/types.py
diff options
context:
space:
mode:
authorClaude <claude@anthropic.com>2026-03-04 19:14:55 +0100
committerClaude <claude@anthropic.com>2026-03-04 19:14:55 +0100
commit171c5b86ef05974426ba5c5d8547c8025977d1a2 (patch)
tree2a1193e2bb81a6341e55d0b883a3fc33f77f8be1 /src/gemini/types.py
parent9f14edf2b97286e02830d528038b32d5b31aaa0a (diff)
parent0278c87f062a9ae7d617b92be22b175558a05086 (diff)
downloadgemini-py-main.tar.gz
gemini-py-main.zip
Add initial versionHEADmain
Diffstat (limited to 'src/gemini/types.py')
-rw-r--r--src/gemini/types.py175
1 files changed, 175 insertions, 0 deletions
diff --git a/src/gemini/types.py b/src/gemini/types.py
new file mode 100644
index 0000000..7e00e2a
--- /dev/null
+++ b/src/gemini/types.py
@@ -0,0 +1,175 @@
from typing import Any

from pydantic import BaseModel
from pydantic.alias_generators import to_camel
+
+
class Part(BaseModel):
    """One content part of a Gemini message (text, thought, or function traffic).

    The wire format uses camelCase keys (``thoughtSignature``,
    ``functionCall``, ``functionResponse``); ``alias_generator=to_camel``
    makes those keys populate the snake_case fields below, and
    ``populate_by_name`` keeps snake_case construction working.  Without the
    alias generator, ``populate_by_name`` was a no-op and camelCase payload
    keys fell into ``extra`` instead of the declared fields.
    """

    text: str | None = None
    # True marks this part as model "thinking" rather than visible output.
    thought: bool | None = None
    thought_signature: str | None = None
    # Raw functionCall / functionResponse payloads, kept as dicts.
    function_call: dict[str, Any] | None = None
    function_response: dict[str, Any] | None = None

    model_config = {
        "populate_by_name": True,
        "extra": "allow",
        "alias_generator": to_camel,
    }
+
+
class Content(BaseModel):
    """A role-tagged message in a Gemini conversation."""

    # Author role string, passed through as-is on the wire.
    role: str
    # Parts are kept as raw wire-format dicts (camelCase keys such as
    # "text", "thought", "functionCall") — the GenerateContentResponse
    # properties below read them with dict.get / "in" checks, not as Part
    # models.
    parts: list[dict[str, Any]]

    model_config = {"extra": "allow"}
+
+
class GenerationConfig(BaseModel):
    """Sampling / decoding settings for a generateContent request.

    Fields are optional; :meth:`to_api` emits only the ones that were set,
    translated to the camelCase keys the API expects.
    """

    temperature: float | None = None
    top_p: float | None = None
    top_k: int | None = None
    candidate_count: int | None = None
    max_output_tokens: int | None = None
    stop_sequences: list[str] | None = None
    response_mime_type: str | None = None
    thinking_config: dict[str, Any] | None = None

    model_config = {"populate_by_name": True, "extra": "allow"}

    def to_api(self) -> dict[str, Any]:
        """Return the wire-format dict, omitting every unset (None) field."""
        # (attribute, camelCase wire key) pairs, in emission order.
        field_map = (
            ("temperature", "temperature"),
            ("top_p", "topP"),
            ("top_k", "topK"),
            ("candidate_count", "candidateCount"),
            ("max_output_tokens", "maxOutputTokens"),
            ("stop_sequences", "stopSequences"),
            ("response_mime_type", "responseMimeType"),
            ("thinking_config", "thinkingConfig"),
        )
        payload: dict[str, Any] = {}
        for attr, wire_key in field_map:
            value = getattr(self, attr)
            if value is not None:
                payload[wire_key] = value
        return payload
+
+
class UsageMetadata(BaseModel):
    """Token accounting attached to a response.

    The API sends camelCase keys (``promptTokenCount`` etc.);
    ``alias_generator=to_camel`` maps them onto the snake_case fields —
    without it, ``populate_by_name`` alone did nothing and every count
    landed in ``extra``, leaving all fields ``None``.
    """

    prompt_token_count: int | None = None
    candidates_token_count: int | None = None
    total_token_count: int | None = None
    # Tokens spent on "thought" parts, when the model reports them.
    thoughts_token_count: int | None = None

    model_config = {
        "populate_by_name": True,
        "extra": "allow",
        "alias_generator": to_camel,
    }
+
+
class Candidate(BaseModel):
    """One generated answer within a response.

    ``alias_generator=to_camel`` lets the wire key ``finishReason``
    populate ``finish_reason`` (previously it was silently dropped into
    ``extra`` because ``populate_by_name`` had no aliases to work with).
    """

    content: Content | None = None
    # e.g. the API's finishReason string; None while streaming mid-turn.
    finish_reason: str | None = None
    index: int | None = None

    model_config = {
        "populate_by_name": True,
        "extra": "allow",
        "alias_generator": to_camel,
    }
+
+
class GenerateContentResponse(BaseModel):
    """A full (or streamed partial) generateContent response.

    ``alias_generator=to_camel`` maps the wire keys ``usageMetadata``,
    ``modelVersion`` and ``responseId`` onto the snake_case fields; without
    it those keys were swallowed by ``extra`` and the fields stayed ``None``.
    Convenience properties aggregate text / tool calls / thinking across all
    candidates, reading ``Content.parts`` as raw wire-format dicts.
    """

    candidates: list[Candidate] | None = None
    usage_metadata: UsageMetadata | None = None
    model_version: str | None = None
    response_id: str | None = None

    model_config = {
        "populate_by_name": True,
        "extra": "allow",
        "alias_generator": to_camel,
    }

    @property
    def text(self) -> str:
        """Concatenated visible text of all candidates, thoughts excluded."""
        if not self.candidates:
            return ""
        parts: list[str] = []
        for candidate in self.candidates:
            if candidate.content and candidate.content.parts:
                for part in candidate.content.parts:
                    if isinstance(part, dict):
                        if part.get("thought"):
                            continue  # thought parts surface via .thinking
                        t = part.get("text")
                        if t:
                            parts.append(t)
        return "".join(parts)

    @property
    def tool_calls(self) -> "list[ToolCall]":
        """Every functionCall part across candidates, as ToolCall objects."""
        calls: list[ToolCall] = []
        if not self.candidates:
            return calls
        for c in self.candidates:
            if c.content and c.content.parts:
                for part in c.content.parts:
                    if isinstance(part, dict) and "functionCall" in part:
                        fc = part["functionCall"]
                        # args may be absent on the wire; default to {}.
                        calls.append(ToolCall(name=fc["name"], args=fc.get("args", {})))
        return calls

    @property
    def thinking(self) -> str:
        """Concatenated text of all thought-flagged parts."""
        if not self.candidates:
            return ""
        parts: list[str] = []
        for candidate in self.candidates:
            if candidate.content and candidate.content.parts:
                for part in candidate.content.parts:
                    if isinstance(part, dict) and part.get("thought"):
                        t = part.get("text", "")
                        if t:
                            parts.append(t)
        return "".join(parts)
+
+
class StreamChunk(BaseModel):
    """One parsed chunk of a streaming generateContent response.

    ``raw`` keeps the undecoded payload; pydantic deep-copies the ``{}``
    default per instance, so it is not shared across chunks.
    """

    response: GenerateContentResponse | None = None
    trace_id: str | None = None
    raw: dict[str, Any] = {}

    @property
    def text_delta(self) -> str:
        """Text carried by this chunk; '' when it holds no response."""
        return self.response.text if self.response is not None else ""

    @property
    def tool_calls(self) -> "list[ToolCall]":
        """Tool calls carried by this chunk; [] when it holds no response."""
        return self.response.tool_calls if self.response is not None else []
+
+
class ToolCall(BaseModel):
    """A function invocation requested by the model.

    Built from a ``functionCall`` part by
    ``GenerateContentResponse.tool_calls``.
    """

    # Name of the function the model wants invoked.
    name: str
    # Argument mapping; pydantic deep-copies this default per instance,
    # so the shared-mutable-default pitfall does not apply here.
    args: dict[str, Any] = {}
+
+
class FunctionDeclaration(BaseModel):
    """Declares one callable tool that the model may request."""

    name: str
    description: str = ""
    # JSON-schema-style parameter spec; None means "no parameters block".
    parameters: dict[str, Any] | None = None

    model_config = {"extra": "allow"}

    def to_api(self) -> dict[str, Any]:
        """Serialize for the API: always name, plus non-empty description
        and non-None parameters."""
        payload: dict[str, Any] = {"name": self.name}
        optional = (
            ("description", self.description, bool(self.description)),
            ("parameters", self.parameters, self.parameters is not None),
        )
        for key, value, include in optional:
            if include:
                payload[key] = value
        return payload
+
+
class GeminiOptions(BaseModel):
    """Client-side configuration bundle for issuing Gemini requests.

    Pure settings container; how each knob is applied lives in the
    request-building code elsewhere in the package.
    """

    # Model identifier sent to the API.
    model: str = "gemini-2.5-pro"
    # Response length cap — presumably forwarded as
    # GenerationConfig.max_output_tokens; confirm in the client code.
    max_output_tokens: int = 32768
    temperature: float = 1.0
    top_p: float = 0.95
    top_k: int = 64
    # None = leave the API's thinking budget at its default.
    thinking_budget: int | None = None
    # Whether to use the streaming endpoint.
    stream: bool = True
    system_prompt: str | None = None
    session_id: str | None = None
    # Filesystem path to credentials; None = use the environment default.
    credentials_path: str | None = None
    # Tool declarations to expose to the model; None = no tools.
    tools: list[FunctionDeclaration] | None = None