add: implement the LM Studio chat API logic
@@ -8,7 +8,7 @@ reply_in_thread: true
 enable_multi_user: true

-system_prompt: ""
+system_prompt: "response in chinese"

 platforms:
   local_ai:
@@ -16,6 +16,8 @@ platforms:
     url: http://192.168.32.162:11434
     api_key:
     model: llama3.2
+    temperature: 1
+    max_tokens: 2000
     max_words: 1000
     max_context_messages: 20
   openai:
@@ -27,14 +29,15 @@ platforms:
     max_context_messages: 20
     temperature: 1
   anthropic:
-    url:
+    url: https://api.anthropic.com
     api_key:
-    max_tokens:
-    model:
-    max_words:
+    model: claude-3-5-sonnet-20240620
+    max_words: 1000
+    max_tokens: 2000
+    max_context_messages: 20

 additional_prompt:
   - role: user
-    content: xxx
+    content: "What model is currently in use?"
   - role: system
-    content: xxx
+    content: "you can response text contain user name"
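
For a quick sanity check, the updated settings can be read outside the plugin. A minimal sketch, assuming PyYAML is installed and the config above is saved as base-config.yaml (the file name is a placeholder, not part of the commit):

    import yaml

    # Load the config shown in the diff above and read the newly added keys
    with open("base-config.yaml") as f:
        config = yaml.safe_load(f)

    local_ai = config["platforms"]["local_ai"]
    print(local_ai["temperature"], local_ai["max_tokens"])        # -> 1 2000

    anthropic = config["platforms"]["anthropic"]
    print(anthropic["model"], anthropic["max_context_messages"])  # -> claude-3-5-sonnet-20240620 20
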
@@ -1,3 +1,4 @@
+import json

 from typing import List

@@ -13,11 +14,9 @@ from maubot_llmplus.plugin import AbsExtraConfigPlugin


 class Ollama(Platform):
-    chat_api: str

     def __init__(self, config: BaseProxyConfig, http: ClientSession) -> None:
         super().__init__(config, http)
-        self.chat_api = '/api/chat'

     async def create_chat_completion(self, plugin: AbsExtraConfigPlugin, evt: MessageEvent) -> ChatCompletion:
         full_context = []
@@ -55,10 +54,46 @@ class Ollama(Platform):


 class LmStudio(Platform):
+    temperature: int

     def __init__(self, config: BaseProxyConfig, http: ClientSession) -> None:
         super().__init__(config, http)
+        self.temperature = self.config['temperature']
         pass

     async def create_chat_completion(self, plugin: AbsExtraConfigPlugin, evt: MessageEvent) -> ChatCompletion:
-        pass
+        full_context = []
+        context = await maubot_llmplus.platforms.get_context(plugin, self, evt)
+        full_context.extend(list(context))
+
+        endpoint = f"{self.url}/v1/chat/completions"
+        headers = {"content-type": "application/json"}
+        req_body = {"model": self.model, "messages": full_context, "temperature": self.temperature, "stream": False}
+        async with self.http.post(
+            endpoint, headers=headers, data=json.dumps(req_body)
+        ) as response:
+            # plugin.log.debug(f"Response: {response.status}, {await response.json()}")
+            if response.status != 200:
+                return ChatCompletion(
+                    message={},
+                    finish_reason=f"Error: {await response.text()}",
+                    model=None
+                )
+            response_json = await response.json()
+            choice = response_json["choices"][0]
+            return ChatCompletion(
+                message=choice["message"],
+                finish_reason=choice["finish_reason"],
+                model=response_json.get("model")
+            )
+
+    async def list_models(self) -> List[str]:
+        full_url = f"{self.url}/v1/models"
+        async with self.http.get(full_url) as response:
+            if response.status != 200:
+                return []
+            response_data = await response.json()
+            return [f"- {m['id']}" for m in response_data["data"]]
+
+    def get_type(self) -> str:
+        return "local_ai"
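
The request that the new LmStudio.create_chat_completion builds can be exercised on its own. A minimal standalone sketch, assuming aiohttp and an LM Studio server listening on http://localhost:1234 (host, port, and model name are placeholders, not values from the commit):

    import asyncio
    import json

    import aiohttp


    async def main() -> None:
        # Same request shape as LmStudio.create_chat_completion above
        req_body = {
            "model": "llama3.2",  # placeholder model name
            "messages": [{"role": "user", "content": "What model is currently in use?"}],
            "temperature": 1,
            "stream": False,
        }
        async with aiohttp.ClientSession() as http:
            async with http.post(
                "http://localhost:1234/v1/chat/completions",
                headers={"content-type": "application/json"},
                data=json.dumps(req_body),
            ) as response:
                if response.status != 200:
                    print("Error:", await response.text())
                    return
                response_json = await response.json()
                choice = response_json["choices"][0]
                print(choice["message"]["content"], choice["finish_reason"])


    asyncio.run(main())
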
@@ -63,27 +63,54 @@ class OpenAi(Platform):
     async def list_models(self) -> List[str]:
         # Call the OpenAI API to fetch the list of models
         full_url = f"{self.url}/v1/models"
-        headers = {'Authorization': self.api_key}
+        headers = {'Authorization': f"Bearer {self.api_key}"}
         async with self.http.get(full_url, headers=headers) as response:
             if response.status != 200:
                 return []
             response_data = await response.json()
-            return [m["id"] for m in response_data["data"]]
+            return [f"- {m['id']}" for m in response_data["data"]]

     def get_type(self) -> str:
         return "openai"


 class Anthropic(Platform):
+    max_tokens: int

     def __init__(self, config: BaseProxyConfig, http: ClientSession) -> None:
         super().__init__(config, http)
+        self.max_tokens = self.config['max_tokens']

     async def create_chat_completion(self, plugin: AbsExtraConfigPlugin, evt: MessageEvent) -> ChatCompletion:
-        # Get the system prompt
-        # Get the extra prompts for the other roles: role: user, role: system
+        full_context = []
+        context = await maubot_llmplus.platforms.get_context(plugin, self, evt)
+        full_context.extend(list(context))
+
+        endpoint = f"{self.url}/v1/messages"
+        headers = {"x-api-key": self.api_key, "anthropic-version": "2023-06-01", "content-type": "application/json"}
+        req_body = {"model": self.model, "max_tokens": self.max_tokens, "messages": full_context}
+
+        async with self.http.post(endpoint, headers=headers, data=json.dumps(req_body)) as response:
+            # plugin.log.debug(f"Response: {response.status}, {await response.json()}")
+            if response.status != 200:
+                return ChatCompletion(
+                    message={},
+                    finish_reason=f"Error: {await response.text()}",
+                    model=None
+                )
+            response_json = await response.json()
+            text = "\n\n".join(c["text"] for c in response_json["content"])
+            return ChatCompletion(
+                message=dict(role="assistant", content=text),
+                finish_reason=response_json['stop_reason'],
+                model=response_json['model']
+            )
         pass

+    async def list_models(self) -> List[str]:
+        # There is no API that lists the supported models, so they are hard-coded here
+        models = ["Claude 3.5 Opus", "Claude 3.5 Sonnet", "Claude 3.5 Haiku", "Claude 3 Opus", "Claude 3 Sonnet", "Claude 3 Haiku"]
+        return [f"- {m}" for m in models]

     def get_type(self) -> str:
         return "anthropic"
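
The Anthropic call differs from the OpenAI-style endpoints in the ways the new code reflects: the key travels in an x-api-key header rather than Authorization: Bearer, an anthropic-version header is mandatory, and the reply text arrives as a list of content blocks rather than choices. A minimal standalone sketch of the same call, assuming aiohttp (the API key is a placeholder):

    import asyncio
    import json

    import aiohttp


    async def main() -> None:
        headers = {
            "x-api-key": "sk-ant-...",  # placeholder API key
            "anthropic-version": "2023-06-01",
            "content-type": "application/json",
        }
        req_body = {
            "model": "claude-3-5-sonnet-20240620",
            "max_tokens": 2000,
            "messages": [{"role": "user", "content": "What model is currently in use?"}],
        }
        async with aiohttp.ClientSession() as http:
            async with http.post(
                "https://api.anthropic.com/v1/messages",
                headers=headers,
                data=json.dumps(req_body),
            ) as response:
                if response.status != 200:
                    print("Error:", await response.text())
                    return
                response_json = await response.json()
                # Reply text is a list of content blocks, joined the same way as in Anthropic.create_chat_completion
                print("\n\n".join(c["text"] for c in response_json["content"]))
                print("stop_reason:", response_json["stop_reason"])


    asyncio.run(main())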