Skip to content

Commit

Permalink
feat: support qwen (#429)
Browse files Browse the repository at this point in the history
Signed-off-by: yihong0618 <[email protected]>
  • Loading branch information
yihong0618 authored Jan 17, 2024
1 parent 2258b88 commit f25d458
Show file tree
Hide file tree
Showing 8 changed files with 147 additions and 3 deletions.
9 changes: 8 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ Play ChatGPT and other LLM with Xiaomi AI Speaker
- [ChatGLM](http://open.bigmodel.cn/)
- [Gemini](https://makersuite.google.com/app/apikey)
- [Bard](https://github.com/dsdanielpark/Bard-API)
- [通义千问](https://help.aliyun.com/zh/dashscope/developer-reference/api-details)

## Windows 获取小米音响DID

Expand Down Expand Up @@ -56,6 +57,7 @@ Play ChatGPT and other LLM with Xiaomi AI Speaker
- 如果你遇到了墙需要用 Cloudflare Workers 替换 api_base 请使用 `--api_base ${url}` 来替换。 **请注意,此处你输入的api应该是'`https://xxxx/v1`'的字样,域名需要用引号包裹**
- 可以跟小爱说 `开始持续对话` 自动进入持续对话状态,`结束持续对话` 结束持续对话状态。
- 可以使用 `--tts edge` 来获取更好的 tts 能力
- 可以使用 `--tts openai` 来获取 openai tts 能力
- 可以使用 `--use_langchain` 替代 `--use_chatgpt_api` 来调用 LangChain(默认 chatgpt)服务,实现上网检索、数学运算..

e.g.
Expand All @@ -76,6 +78,8 @@ export OPENAI_API_KEY=${your_api_key}
xiaogpt --hardware LX06 --mute_xiaoai --use_gpt3
# 如果你想使用 google 的 gemini
xiaogpt --hardware LX06 --mute_xiaoai --use_gemini --gemini_key ${gemini_key}
# 如果你想使用阿里的通义千问
xiaogpt --hardware LX06 --mute_xiaoai --use_qwen --qwen_key ${qwen_key}
# 如果你想用 edge-tts
xiaogpt --hardware LX06 --cookie ${cookie} --use_chatgpt_api --tts edge
# 如果你想使用 LangChain + SerpApi 实现上网检索或其他本地服务(目前仅支持 stream 模式)
Expand Down Expand Up @@ -106,6 +110,8 @@ python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_glm --glm_key ${glm_key}
python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_bard --bard_token ${bard_token}
# 如果你想使用 google 的 gemini
python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_gemini --gemini_key ${gemini_key}
# 如果你想使用阿里的通义千问
python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_qwen --qwen_key ${qwen_key}
# 如果你想使用 LangChain+SerpApi 实现上网检索或其他本地服务(目前仅支持 stream 模式)
export OPENAI_API_KEY=${your_api_key}
export SERPAPI_API_KEY=${your_serpapi_key}
Expand Down Expand Up @@ -158,7 +164,8 @@ Bard-API [参考](https://github.com/dsdanielpark/Bard-API)
| openai_key | openai的apikey | |
| serpapi_api_key | serpapi的key 参考 [SerpAPI](https://serpapi.com/) | |
| glm_key | chatglm 的 apikey | |
| gemini_key | gemini 的 apikey [参与](https://makersuite.google.com/app/apikey) | |
| gemini_key | gemini 的 apikey [参考](https://makersuite.google.com/app/apikey) | |
| qwen_key | qwen 的 apikey [参考](https://help.aliyun.com/zh/dashscope/developer-reference/api-details) | |
| bard_token | bard 的 token 参考 [Bard-API](https://github.com/dsdanielpark/Bard-API) | |
| cookie | 小爱账户cookie (如果用上面密码登录可以不填) | |
| mi_did | 设备did | |
Expand Down
16 changes: 15 additions & 1 deletion pdm.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ dependencies = [
"google-search-results>=2.4.2",
"google-generativeai",
"numexpr>=2.8.6",
"dashscope==1.10.0",
]
license = {text = "MIT"}
dynamic = ["version"]
Expand Down
1 change: 1 addition & 0 deletions xiao_config.json.example
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
"openai_key": "",
"glm_key": "",
"gemini_key": "",
"qwen_key": "",
"bard_token": "",
"serpapi_api_key": "",
"cookie": "",
Expand Down
3 changes: 3 additions & 0 deletions xiaogpt/bot/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from xiaogpt.bot.glm_bot import GLMBot
from xiaogpt.bot.bard_bot import BardBot
from xiaogpt.bot.gemini_bot import GeminiBot
from xiaogpt.bot.qwen_bot import QwenBot
from xiaogpt.bot.langchain_bot import LangChainBot
from xiaogpt.config import Config

Expand All @@ -17,6 +18,7 @@
"glm": GLMBot,
"bard": BardBot,
"gemini": GeminiBot,
"qwen": QwenBot,
"langchain": LangChainBot,
}

Expand All @@ -35,6 +37,7 @@ def get_bot(config: Config) -> BaseBot:
"GLMBot",
"BardBot",
"GeminiBot",
"QwenBot",
"get_bot",
"LangChainBot",
]
94 changes: 94 additions & 0 deletions xiaogpt/bot/qwen_bot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
"""ChatGLM bot"""
from __future__ import annotations

from typing import Any

from http import HTTPStatus
import dashscope
from dashscope import Generation
from dashscope.api_entities.dashscope_response import Role
from rich import print

from xiaogpt.bot.base_bot import BaseBot, ChatHistoryMixin


class QwenBot(ChatHistoryMixin, BaseBot):
    """Bot backed by Alibaba DashScope's Qwen (通义千问) chat models.

    API reference:
    https://help.aliyun.com/zh/dashscope/developer-reference/api-details
    """

    name = "Qian Wen"

    def __init__(self, qwen_key: str) -> None:
        # Seed the conversation with a system prompt; this first entry is
        # preserved by _trim_history while older turns are discarded.
        self.history = [
            {"role": Role.SYSTEM, "content": "You are a helpful assistant."}
        ]
        dashscope.api_key = qwen_key

    @classmethod
    def from_config(cls, config):
        """Build a QwenBot from the global xiaogpt Config."""
        return cls(qwen_key=config.qwen_key)

    def _trim_history(self) -> None:
        # Keep the system prompt plus at most the five most recent messages.
        system_prompt = self.history.pop(0)
        self.history = [system_prompt] + self.history[-5:]

    @staticmethod
    def _format_error(response) -> str:
        # DashScope reports failures on the response object itself
        # (no exception is raised); surface every detail it gives us.
        return (
            "Request id: %s, Status code: %s, error code: %s, error message: %s"
            % (
                response.request_id,
                response.status_code,
                response.code,
                response.message,
            )
        )

    async def ask(self, query, **options):
        """Send *query* with conversation history and return the full reply.

        Returns the model's answer on success, or the literal string
        "没有返回" when the API call fails.
        """
        # from https://help.aliyun.com/zh/dashscope/developer-reference/api-details
        self.history.append({"role": Role.USER, "content": query})

        response = Generation.call(
            Generation.Models.qwen_turbo,
            messages=self.history,
            result_format="message",  # set the result to be "message" format.
        )
        if response.status_code != HTTPStatus.OK:
            print(self._format_error(response))
            return "没有返回"
        # Append the assistant's reply so the next turn has full context.
        message = response.output.choices[0]["message"]
        content = message["content"]
        self.history.append({"role": message["role"], "content": content})
        self._trim_history()
        print(content)
        return content

    async def ask_stream(self, query: str, **options: Any):
        """Yield the reply incrementally as chunks stream from the API."""
        self.history.append({"role": Role.USER, "content": query})
        responses = Generation.call(
            Generation.Models.qwen_turbo,
            messages=self.history,
            result_format="message",  # set the result to be "message" format.
            stream=True,
            incremental_output=True,  # get streaming output incrementally
        )
        full_content = ""  # with incremental output we must merge the chunks.
        role = None
        for response in responses:
            if response.status_code == HTTPStatus.OK:
                content = response.output.choices[0]["message"]["content"]
                full_content += content
                if not role:
                    role = response.output.choices[0]["message"]["role"]
                print(content, end="")
                yield content
            else:
                print(self._format_error(response))
        # Bug fix: only record the assistant turn when at least one chunk
        # arrived. The original appended {"role": None, ...} on total
        # failure, which corrupts the message list for later API calls.
        if role is not None:
            self.history.append({"role": role, "content": full_content})
        self._trim_history()
23 changes: 22 additions & 1 deletion xiaogpt/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,11 @@ def main():
dest="gemini_key",
help="gemini api key",
)
parser.add_argument(
"--qwen_key",
dest="qwen_key",
help="Alibaba Qwen api key",
)
parser.add_argument(
"--bard_token",
dest="bard_token",
Expand Down Expand Up @@ -137,6 +142,13 @@ def main():
const="bard",
help="if use bard",
)
bot_group.add_argument(
"--use_qwen",
dest="bot",
action="store_const",
const="qwen",
help="if use qwen",
)
bot_group.add_argument(
"--use_gemini",
dest="bot",
Expand All @@ -153,7 +165,16 @@ def main():
"--bot",
dest="bot",
help="bot type",
choices=["gpt3", "chatgptapi", "newbing", "glm", "bard", "gemini", "langchain"],
choices=[
"gpt3",
"chatgptapi",
"newbing",
"glm",
"bard",
"gemini",
"langchain",
"qwen",
],
)
parser.add_argument(
"--config",
Expand Down
3 changes: 3 additions & 0 deletions xiaogpt/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ class Config:
openai_key: str = os.getenv("OPENAI_API_KEY", "")
glm_key: str = os.getenv("CHATGLM_KEY", "")
gemini_key: str = os.getenv("GEMINI_KEY", "") # keep the old rule
qwen_key: str = os.getenv("DASHSCOPE_API_KEY", "") # keep the old rule
bard_token: str = os.getenv("BARD_TOKEN", "")
serpapi_api_key: str = os.getenv("SERPAPI_API_KEY", "")
proxy: str | None = None
Expand Down Expand Up @@ -150,6 +151,8 @@ def read_from_file(cls, config_path: str) -> dict:
key, value = "bot", "glm"
elif key == "use_gemini":
key, value = "bot", "gemini"
elif key == "use_qwen":
key, value = "bot", "qwen"
elif key == "use_bard":
key, value = "bot", "bard"
elif key == "use_langchain":
Expand Down

0 comments on commit f25d458

Please sign in to comment.