add Qwen (DashScope) platform support
@@ -31,12 +31,33 @@ platforms:
     max_tokens: 2000
     max_words: 1000
     max_context_messages: 20
+  qwen:
+    # Mainland China: https://dashscope.aliyuncs.com
+    # International: https://dashscope-intl.aliyuncs.com
+    url: https://dashscope.aliyuncs.com
+    api_key:
+    model: qwen-plus
+    temperature: 0.7
+    top_p: 0.8
+    max_tokens: 2000
+    max_words: 1000
+    max_context_messages: 20
+    # whether to enable deep-thinking mode (only the qwq series supports it)
+    enable_thinking: false
   deepseek:
     url: https://api.deepseek.com
     api_key:
     model:
     max_words: 1000
     max_context_messages: 20
+  gemini:
+    url: https://generativelanguage.googleapis.com
+    api_key:
+    model: gemini-2.0-flash
+    temperature: 1
+    max_tokens: 2000
+    max_words: 1000
+    max_context_messages: 20
   openai:
     url: https://api.openai.com
     api_key:
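Note: the qwen section above feeds the DashScope text-generation call added later in this commit. A sketch of the request body these defaults produce (message content is illustrative; the field layout follows the Qwen.create_chat_completion hunk below):

request_body = {
    "model": "qwen-plus",
    "input": {"messages": [{"role": "user", "content": "hello"}]},
    "parameters": {
        "result_format": "message",
        "max_tokens": 2000,   # from max_tokens
        "temperature": 0.7,   # from temperature
        "top_p": 0.8,         # from top_p
        # enable_thinking is omitted here: the code only sends it when true
    },
}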
@@ -60,14 +81,7 @@ platforms:
     max_tokens: 1000
     max_words: 2000
     max_context_messages: 20
-  gemini:
-    url: https://generativelanguage.googleapis.com
-    api_key:
-    model: gemini-2.0-flash
-    temperature: 1
-    max_tokens: 2000
-    max_words: 1000
-    max_context_messages: 20
 
 
 # additional prompt
 additional_prompt:
@@ -10,7 +10,7 @@ from mautrix.util.config import BaseProxyConfig, ConfigUpdateHelper
 from maubot_llmplus.local_paltform import Ollama, LmStudio
 from maubot_llmplus.platforms import Platform
 from maubot_llmplus.plugin import AbsExtraConfigPlugin, Config
-from maubot_llmplus.thrid_platform import OpenAi, Anthropic, XAi, Deepseek, Gemini
+from maubot_llmplus.thrid_platform import OpenAi, Anthropic, XAi, Deepseek, Gemini, Qwen
 
 
 class AiBotPlugin(AbsExtraConfigPlugin):
@@ -162,6 +162,8 @@ class AiBotPlugin(AbsExtraConfigPlugin):
             return Deepseek(self.config, self.http)
         if use_platform == 'gemini':
             return Gemini(self.config, self.http)
+        if use_platform == 'qwen':
+            return Qwen(self.config, self.http)
         if use_platform == 'local_ai#ollama':
             return Ollama(self.config, self.http)
         if use_platform == 'local_ai#lmstudio':
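This dispatch grows by two lines per platform; an equivalent dict-based lookup is sketched below. This is an illustrative refactor, not part of the commit; the class names match the imports above, and resolve_platform is a hypothetical name for the method this hunk lives in.

PLATFORM_CLASSES = {
    'openai': OpenAi, 'anthropic': Anthropic, 'xai': XAi,
    'deepseek': Deepseek, 'gemini': Gemini, 'qwen': Qwen,
    'local_ai#ollama': Ollama, 'local_ai#lmstudio': LmStudio,
}

def resolve_platform(self, use_platform: str) -> Platform:
    # Look up the platform class and construct it the same way the if-chain does.
    cls = PLATFORM_CLASSES.get(use_platform)
    return cls(self.config, self.http) if cls is not None else None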
@@ -300,7 +302,7 @@ class AiBotPlugin(AbsExtraConfigPlugin):
             self.config.cur_model = self.config['platforms'][argus.split("#")[0]]['model']
             await event.react("✅")
         # if the platform is openai or claude
-        elif argus == 'openai' or argus == 'anthropic' or argus == 'xai' or argus == 'deepseek' or argus == 'gemini':
+        elif argus == 'openai' or argus == 'anthropic' or argus == 'xai' or argus == 'deepseek' or argus == 'gemini' or argus == 'qwen':
             if argus == self.config.cur_platform:
                 await event.reply(f"current ai platform is already {argus}")
                 pass
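The extended elif could equally be a membership test; a behavior-preserving sketch of just the changed line:

elif argus in ('openai', 'anthropic', 'xai', 'deepseek', 'gemini', 'qwen'):
    ...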
@@ -323,4 +323,80 @@ class XAi(Platform):
         pass
 
     def get_type(self) -> str:
         return "xai"
+
+
+class Qwen(Platform):
+    max_tokens: int
+    temperature: float
+    top_p: float
+    enable_thinking: bool
+
+    def __init__(self, config: BaseProxyConfig, http: ClientSession) -> None:
+        super().__init__(config, http)
+        self.max_tokens = self.config['max_tokens']
+        self.temperature = self.config['temperature']
+        self.top_p = self.config['top_p']
+        self.enable_thinking = self.config['enable_thinking']
+
+    async def create_chat_completion(self, plugin: AbsExtraConfigPlugin, evt: MessageEvent) -> ChatCompletion:
+        full_context = []
+        context = await maubot_llmplus.platforms.get_context(plugin, self, evt)
+        full_context.extend(list(context))
+
+        parameters = {
+            "result_format": "message"
+        }
+        if self.max_tokens:
+            parameters["max_tokens"] = self.max_tokens
+        if self.temperature is not None:
+            parameters["temperature"] = self.temperature
+        if self.top_p is not None:
+            parameters["top_p"] = self.top_p
+        if self.enable_thinking:
+            parameters["enable_thinking"] = True
+
+        request_body = {
+            "model": self.model,
+            "input": {
+                "messages": full_context
+            },
+            "parameters": parameters
+        }
+
+        endpoint = f"{self.url}/api/v1/services/aigc/text-generation/generation"
+        headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}"
+        }
+
+        async with self.http.post(endpoint, headers=headers, data=json.dumps(request_body)) as response:
+            if response.status != 200:
+                return ChatCompletion(
+                    result=False,
+                    message={},
+                    finish_reason=f"Error: {await response.text()}",
+                    model=None
+                )
+            response_json = await response.json()
+            choice = response_json["output"]["choices"][0]
+            return ChatCompletion(
+                result=True,
+                message=choice["message"],
+                finish_reason=choice.get("finish_reason", "stop"),
+                model=response_json.get("model", self.model)
+            )
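For reference, the parsing above assumes a successful DashScope response shaped roughly like the sketch below (abridged; keys are taken from the code, values are illustrative). A top-level "model" key may be absent, which is why the code falls back to self.model.

response_json = {
    "output": {
        "choices": [
            {
                "message": {"role": "assistant", "content": "..."},
                "finish_reason": "stop",
            }
        ]
    },
    "model": "qwen-plus",
}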
+
+    async def list_models(self) -> List[str]:
+        models = [
+            "qwen-max", "qwen-max-latest",
+            "qwen-plus", "qwen-plus-latest",
+            "qwen-turbo", "qwen-turbo-latest",
+            "qwen-long",
+            "qwen3-235b-a22b", "qwen3-30b-a3b",
+            "qwq-plus", "qwq-plus-latest",
+        ]
+        return [f"- {m}" for m in models]
+
+    def get_type(self) -> str:
+        return "qwen"
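A hypothetical call site, mirroring how the other platforms in this commit are constructed and used (self.config, self.http, and evt come from the plugin class; error handling elided):

qwen = Qwen(self.config, self.http)
completion = await qwen.create_chat_completion(self, evt)
if completion.result:
    # message is the DashScope message dict, so the reply text is under "content"
    await evt.reply(completion.message["content"])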