add: 增加lmstudio chat api接口逻辑

This commit is contained in:
taylor
2024-10-14 15:52:40 +08:00
parent 0082ff55af
commit 3ce7b4efe7
3 changed files with 80 additions and 15 deletions

View File

@@ -8,7 +8,7 @@ reply_in_thread: true
enable_multi_user: true
system_prompt: ""
system_prompt: "response in chinese"
platforms:
local_ai:
@@ -16,6 +16,8 @@ platforms:
url: http://192.168.32.162:11434
api_key:
model: llama3.2
temperature: 1
max_tokens: 2000
max_words: 1000
max_context_messages: 20
openai:
@@ -27,14 +29,15 @@ platforms:
max_context_messages: 20
temperature: 1
anthropic:
url:
url: https://api.anthropic.com
api_key:
max_tokens:
model:
max_words:
model: claude-3-5-sonnet-20240620
max_words: 1000
max_tokens: 2000
max_context_messages: 20
additional_prompt:
- role: user
content: xxx
content: "What model is currently in use?"
- role: system
content: xxx
content: "you can response text contain user name"

View File

@@ -1,3 +1,4 @@
import json
from typing import List
@@ -13,11 +14,9 @@ from maubot_llmplus.plugin import AbsExtraConfigPlugin
class Ollama(Platform):
chat_api: str
def __init__(self, config: BaseProxyConfig, http: ClientSession) -> None:
super().__init__(config, http)
self.chat_api = '/api/chat'
async def create_chat_completion(self, plugin: AbsExtraConfigPlugin, evt: MessageEvent) -> ChatCompletion:
full_context = []
@@ -55,10 +54,46 @@ class Ollama(Platform):
class LmStudio(Platform):
    """LM Studio platform, spoken to over its OpenAI-compatible /v1 HTTP API."""

    # Sampling temperature forwarded verbatim to the chat completion endpoint.
    # NOTE(review): annotated float — LM Studio accepts fractional values;
    # the config may still supply an int (e.g. 1).
    temperature: float

    def __init__(self, config: BaseProxyConfig, http: ClientSession) -> None:
        super().__init__(config, http)
        self.temperature = self.config['temperature']

    async def create_chat_completion(self, plugin: AbsExtraConfigPlugin, evt: MessageEvent) -> ChatCompletion:
        """Send the accumulated conversation context to LM Studio and return its reply.

        On a non-200 response, returns a ChatCompletion with an empty message
        and the error text in finish_reason instead of raising.
        """
        full_context = []
        context = await maubot_llmplus.platforms.get_context(plugin, self, evt)
        full_context.extend(list(context))
        endpoint = f"{self.url}/v1/chat/completions"
        headers = {"content-type": "application/json"}
        # Keys must be string literals (bare names are NameErrors), and the
        # OpenAI-compatible API expects "messages" (plural), not "message".
        req_body = {
            "model": self.model,
            "messages": full_context,
            "temperature": self.temperature,
            "stream": False,
        }
        async with self.http.post(
                endpoint, headers=headers, data=json.dumps(req_body)
        ) as response:
            if response.status != 200:
                return ChatCompletion(
                    message={},
                    finish_reason=f"Error: {await response.text()}",
                    model=None
                )
            response_json = await response.json()
            choice = response_json["choices"][0]
            # NOTE(review): OpenAI-style responses usually carry "model" at the
            # top level, not inside the choice — .get() keeps this safe either way.
            return ChatCompletion(
                message=choice["message"],
                finish_reason=choice["finish_reason"],
                model=choice.get("model", None)
            )

    async def list_models(self) -> List[str]:
        """Return the loaded model ids from /v1/models as markdown bullet lines.

        Returns an empty list on any non-200 response.
        """
        full_url = f"{self.url}/v1/models"
        async with self.http.get(full_url) as response:
            if response.status != 200:
                return []
            response_data = await response.json()
            return [f"- {m['id']}" for m in response_data["data"]]

    def get_type(self) -> str:
        """Key of the config section under `platforms:` that this class serves."""
        return "local_ai"

View File

@@ -63,27 +63,54 @@ class OpenAi(Platform):
async def list_models(self) -> List[str]:
    """Fetch the available model ids from the OpenAI /v1/models endpoint.

    Returns the ids as markdown bullet lines ("- <id>") for direct display,
    or an empty list on any non-200 response.
    """
    full_url = f"{self.url}/v1/models"
    # OpenAI requires the key as a Bearer token, not a bare header value.
    headers = {'Authorization': f"Bearer {self.api_key}"}
    async with self.http.get(full_url, headers=headers) as response:
        if response.status != 200:
            return []
        response_data = await response.json()
        return [f"- {m['id']}" for m in response_data["data"]]
def get_type(self) -> str:
    """Name of the `platforms:` config entry handled by this class."""
    platform_key = "openai"
    return platform_key
class Anthropic(Platform):
    """Anthropic platform, spoken to via the Messages API (/v1/messages)."""

    # Upper bound on generated tokens, read from the platform config.
    max_tokens: int

    def __init__(self, config: BaseProxyConfig, http: ClientSession) -> None:
        super().__init__(config, http)
        self.max_tokens = self.config['max_tokens']

    async def create_chat_completion(self, plugin: AbsExtraConfigPlugin, evt: MessageEvent) -> ChatCompletion:
        """Send the conversation context to Anthropic and return its reply.

        The system prompt and any extra role prompts (role: user / role: system)
        are expected to already be part of the context supplied by get_context.
        On a non-200 response, returns a ChatCompletion with an empty message
        and the error text in finish_reason instead of raising.
        """
        full_context = []
        context = await maubot_llmplus.platforms.get_context(plugin, self, evt)
        full_context.extend(list(context))
        endpoint = f"{self.url}/v1/messages"
        headers = {
            "x-api-key": self.api_key,
            # Required API-version header for the Anthropic Messages API.
            "anthropic-version": "2023-06-01",
            "content-type": "application/json",
        }
        req_body = {"model": self.model, "max_tokens": self.max_tokens, "messages": full_context}
        async with self.http.post(endpoint, headers=headers, data=json.dumps(req_body)) as response:
            if response.status != 200:
                return ChatCompletion(
                    message={},
                    finish_reason=f"Error: {await response.text()}",
                    model=None
                )
            response_json = await response.json()
            # The response body carries a list of content blocks; join their
            # text fields into a single assistant message.
            text = "\n\n".join(c["text"] for c in response_json["content"])
            return ChatCompletion(
                message=dict(role="assistant", content=text),
                finish_reason=response_json['stop_reason'],
                model=response_json['model']
            )

    async def list_models(self) -> List[str]:
        """Return a hard-coded markdown list of supported Claude models.

        Anthropic exposes no list-models endpoint at this API version, so the
        names are maintained in code.
        """
        models = [
            "Claude 3.5 Opus",
            "Claude 3.5 Sonnet",
            "Claude 3.5 Haiku",
            "Claude 3 Opus",
            "Claude 3 Sonnet",  # fixed: original literal had a trailing space
            "Claude 3 Haiku",
        ]
        return [f"- {m}" for m in models]

    def get_type(self) -> str:
        """Key of the config section under `platforms:` that this class serves."""
        return "anthropic"