diff --git a/maubot_llmplus/thrid_platform.py b/maubot_llmplus/thrid_platform.py
index f51589c..68a0653 100644
--- a/maubot_llmplus/thrid_platform.py
+++ b/maubot_llmplus/thrid_platform.py
@@ -39,6 +39,37 @@ async def _read_openai_sse(response):
             pass
 
 
+async def _read_gemini_sse(response):
+    """Read a Gemini SSE stream (?alt=sse format), yielding each text chunk.
+    Gemini has no [DONE] sentinel; end of stream is detected via finishReason."""
+    while True:
+        try:
+            line_bytes = await asyncio.wait_for(response.content.readline(), timeout=60.0)
+        except asyncio.TimeoutError:
+            break
+        if not line_bytes:
+            break
+        line = line_bytes.decode("utf-8").strip()
+        if not line.startswith("data: "):
+            continue
+        data_str = line[6:]
+        try:
+            data = json.loads(data_str)
+            candidates = data.get("candidates", [])
+            if candidates:
+                candidate = candidates[0]
+                parts = candidate.get("content", {}).get("parts", [])
+                for part in parts:
+                    text = part.get("text", "")
+                    if text:
+                        yield text
+                # A present finishReason marks the end of the stream; exit
+                # proactively (equivalent to OpenAI's [DONE] sentinel).
+                if candidate.get("finishReason"):
+                    break
+        except json.JSONDecodeError:
+            pass
+
+
 class Deepseek(Platform):
 
     def __init__(self, config: BaseProxyConfig, http: ClientSession):
@@ -385,28 +416,8 @@ class Gemini(Platform):
         async with self.http.post(endpoint, headers=headers, data=json.dumps(request_body)) as response:
             if response.status != 200:
                 raise ValueError(f"Error: {await response.text()}")
-            while True:
-                try:
-                    line_bytes = await asyncio.wait_for(response.content.readline(), timeout=60.0)
-                except asyncio.TimeoutError:
-                    break
-                if not line_bytes:
-                    break
-                line = line_bytes.decode("utf-8").strip()
-                if not line.startswith("data: "):
-                    continue
-                data_str = line[6:]
-                try:
-                    data = json.loads(data_str)
-                    candidates = data.get("candidates", [])
-                    if candidates:
-                        parts = candidates[0].get("content", {}).get("parts", [])
-                        for part in parts:
-                            text = part.get("text", "")
-                            if text:
-                                yield text
-                except json.JSONDecodeError:
-                    pass
+            async for chunk in _read_gemini_sse(response):
+                yield chunk
 
     async def list_models(self) -> List[str]:
         full_url = f"{self.url}/v1beta/models"