Merge pull request #23 from GaiZhenbiao/main
Merge
bentwnghk authored Oct 21, 2023
2 parents 3819128 + f9abb09 commit de244c3
Showing 10 changed files with 84 additions and 10 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -70,7 +70,7 @@
| [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) | | [LLaMA](https://github.com/facebookresearch/llama) | 支持 Lora 模型 
| [Google PaLM](https://developers.generativeai.google/products/palm) | 不支持流式传输 | [StableLM](https://github.com/Stability-AI/StableLM)
| [讯飞星火认知大模型](https://xinghuo.xfyun.cn) | | [MOSS](https://github.com/OpenLMLab/MOSS)
-| [Inspur Yuan 1.0](https://air.inspur.com/home) | |
+| [Inspur Yuan 1.0](https://air.inspur.com/home) | | [通义千问](https://github.com/QwenLM/Qwen/tree/main)
| [MiniMax](https://api.minimax.chat/) |
| [XMChat](https://github.com/MILVLG/xmchat) | 不支持流式传输
| [Midjourney](https://www.midjourney.com/) | 不支持流式传输
57 changes: 57 additions & 0 deletions modules/models/Qwen.py
@@ -0,0 +1,57 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import logging
import colorama
from .base_model import BaseLLMModel
from ..presets import MODEL_METADATA


class Qwen_Client(BaseLLMModel):
def __init__(self, model_name, user_name="") -> None:
super().__init__(model_name=model_name, user=user_name)
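        # Qwen ships its chat()/chat_stream() helpers as custom code inside the
        # checkpoint repo, hence trust_remote_code=True on both loads below.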
self.tokenizer = AutoTokenizer.from_pretrained(MODEL_METADATA[model_name]["repo_id"], trust_remote_code=True, resume_download=True)
self.model = AutoModelForCausalLM.from_pretrained(MODEL_METADATA[model_name]["repo_id"], device_map="auto", trust_remote_code=True, resume_download=True).eval()

def generation_config(self):
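        # Build a fresh GenerationConfig from the current UI settings; 151643 is
        # Qwen's "<|endoftext|>" id, reused here for both EOS and padding.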
return GenerationConfig.from_dict({
"chat_format": "chatml",
"do_sample": True,
"eos_token_id": 151643,
"max_length": self.token_upper_limit,
"max_new_tokens": 512,
"max_window_size": 6144,
"pad_token_id": 151643,
"top_k": 0,
"top_p": self.top_p,
"transformers_version": "4.33.2",
"trust_remote_code": True,
"temperature": self.temperature,
})

def _get_glm_style_input(self):
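        # Regroup the flat message history into ChatGLM-style [user, assistant]
        # pairs and split off the newest user message as the query.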
history = [x["content"] for x in self.history]
query = history.pop()
logging.debug(colorama.Fore.YELLOW +
f"{history}" + colorama.Fore.RESET)
assert (
len(history) % 2 == 0
), f"History should be even length. current history is: {history}"
history = [[history[i], history[i + 1]]
for i in range(0, len(history), 2)]
return history, query

def get_answer_at_once(self):
history, query = self._get_glm_style_input()
self.model.generation_config = self.generation_config()
response, history = self.model.chat(self.tokenizer, query, history=history)
return response, len(response)

def get_answer_stream_iter(self):
history, query = self._get_glm_style_input()
self.model.generation_config = self.generation_config()
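        # chat_stream() yields the cumulative response so far on each step, so
        # the caller can simply overwrite the displayed text with each yield.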
for response in self.model.chat_stream(
self.tokenizer,
query,
history,
):
yield response
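
For context, here is a minimal standalone sketch of the remote-code chat API that Qwen_Client wraps. This is an illustration under stated assumptions, not part of the commit: chat() and chat_stream() are custom methods the Qwen checkpoints ship via trust_remote_code (they are not part of the stock transformers API), and the repo id comes from the MODEL_METADATA entries added below in modules/presets.py.

from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "Qwen/Qwen-7B-Chat-Int4"  # assumed checkpoint, per presets.py below
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo, device_map="auto", trust_remote_code=True
).eval()

# One-shot answer, as in get_answer_at_once():
response, history = model.chat(tokenizer, "Hello!", history=None)

# Streaming, as in get_answer_stream_iter(); each yield is the cumulative
# response so far, not a delta:
for partial in model.chat_stream(tokenizer, "Tell me a joke.", history=history):
    print(partial)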
12 changes: 7 additions & 5 deletions modules/models/base_model.py
@@ -146,6 +146,7 @@ class ModelType(Enum):
    Spark = 12
    OpenAIInstruct = 13
    Claude = 14
+    Qwen = 15

    @classmethod
    def get_type(cls, model_name: str):
@@ -181,7 +182,9 @@ def get_type(cls, model_name: str):
        elif "星火大模型" in model_name_lower:
            model_type = ModelType.Spark
        elif "claude" in model_name_lower:
-            model_type = ModelType.Claude
+            model_type = ModelType.Claude
+        elif "qwen" in model_name_lower:
+            model_type = ModelType.Qwen
        else:
            model_type = ModelType.LLaMA
        return model_type
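
A quick sanity check of the dispatch above (hypothetical snippet, assuming the package is importable as modules.models.base_model): get_type() lowercases the display name and matches substrings, so both Qwen presets resolve to the new enum member, while unrecognized names still fall through to LLaMA.

from modules.models.base_model import ModelType

assert ModelType.get_type("Qwen 7B") == ModelType.Qwen
assert ModelType.get_type("Qwen 14B") == ModelType.Qwen
assert ModelType.get_type("some-unknown-model") == ModelType.LLaMA  # fallback branch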
@@ -656,14 +659,13 @@ def delete_first_conversation(self):
    def delete_last_conversation(self, chatbot):
        if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
            msg = "由于包含报错信息,只删除chatbot记录"
-            chatbot.pop()
+            chatbot = chatbot[:-1]
            return chatbot, self.history
        if len(self.history) > 0:
-            self.history.pop()
-            self.history.pop()
+            self.history = self.history[:-2]
        if len(chatbot) > 0:
            msg = "删除了一组chatbot对话"
-            chatbot.pop()
+            chatbot = chatbot[:-1]
        if len(self.all_token_counts) > 0:
            msg = "删除了一组对话的token计数记录"
            self.all_token_counts.pop()
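A note on the pop() to slicing rewrites above (the motivation is an assumption on my part; the commit does not state it): pop() mutates the list object in place, while slicing rebinds the name to a new list and leaves the original object untouched, which matters when the caller (for example Gradio component state) still holds a reference to it.

# Hypothetical illustration, independent of the app:
history = ["q1", "a1", "q2", "a2"]
alias = history
alias.pop(); alias.pop()            # old style: mutates the shared object
assert history == ["q1", "a1"]      # the caller's view changed too

history = ["q1", "a1", "q2", "a2"]
alias = history
history = history[:-2]              # new style: rebinding, no mutation
assert alias == ["q1", "a1", "q2", "a2"] and history == ["q1", "a1"]
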
3 changes: 3 additions & 0 deletions modules/models/models.py
Expand Up @@ -119,6 +119,9 @@ def get_model(
elif model_type == ModelType.Claude:
from .Claude import Claude_Client
model = Claude_Client(model_name="claude-2", api_secret=os.getenv("CLAUDE_API_SECRET"))
elif model_type == ModelType.Qwen:
from .Qwen import Qwen_Client
model = Qwen_Client(model_name, user_name=user_name)
elif model_type == ModelType.Unknown:
raise ValueError(f"未知模型: {model_name}")
logging.info(msg)
8 changes: 8 additions & 0 deletions modules/presets.py
@@ -90,6 +90,8 @@
    "StableLM",
    "MOSS",
    "Llama-2-7B-Chat",
+    "Qwen 7B",
+    "Qwen 14B"
]

# Additional metadata for local models
@@ -101,6 +103,12 @@
    "Llama-2-7B-Chat":{
        "repo_id": "TheBloke/Llama-2-7b-Chat-GGUF",
        "filelist": ["llama-2-7b-chat.Q6_K.gguf"],
    },
+    "Qwen 7B": {
+        "repo_id": "Qwen/Qwen-7B-Chat-Int4",
+    },
+    "Qwen 14B": {
+        "repo_id": "Qwen/Qwen-14B-Chat-Int4",
+    }
}
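
For reference, a sketch of how these entries are consumed (see Qwen_Client above, which performs this lookup in its constructor):

from modules.presets import MODEL_METADATA

repo_id = MODEL_METADATA["Qwen 7B"]["repo_id"]
print(repo_id)  # "Qwen/Qwen-7B-Chat-Int4"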

2 changes: 1 addition & 1 deletion modules/repo.py
@@ -152,7 +152,7 @@ def version_time():
        )
        commit_time = commit_datetime.strftime("%Y-%m-%dT%H:%M:%SZ")

-        logging.info(f"commit time: {commit_time}")
+        # logging.info(f"commit time: {commit_time}")
    except Exception:
        commit_time = "unknown"
    return commit_time
2 changes: 1 addition & 1 deletion readme/README_en.md
@@ -66,7 +66,7 @@
| [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) | | [LLaMA](https://github.com/facebookresearch/llama) | Support Lora models
| [Google PaLM](https://developers.generativeai.google/products/palm) | Not support streaming | [StableLM](https://github.com/Stability-AI/StableLM)
| [iFlytek Starfire Cognition Large Model](https://xinghuo.xfyun.cn) | | [MOSS](https://github.com/OpenLMLab/MOSS)
-| [Inspur Yuan 1.0](https://air.inspur.com/home) | |
+| [Inspur Yuan 1.0](https://air.inspur.com/home) | | [Qwen](https://github.com/QwenLM/Qwen/tree/main)
| [MiniMax](https://api.minimax.chat/) |
| [XMChat](https://github.com/MILVLG/xmchat) | Not support streaming
| [Midjourney](https://www.midjourney.com/) | Not support streaming
2 changes: 1 addition & 1 deletion readme/README_ja.md
@@ -65,7 +65,7 @@
| [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) | | [LLaMA](https://github.com/facebookresearch/llama) | Loraモデルのサポートあり 
| [Google PaLM](https://developers.generativeai.google/products/palm) | ストリーミング転送はサポートされていません | [StableLM](https://github.com/Stability-AI/StableLM)
| [讯飞星火认知大模型](https://xinghuo.xfyun.cn) | | [MOSS](https://github.com/OpenLMLab/MOSS)
-| [Inspur Yuan 1.0](https://air.inspur.com/home) | |
+| [Inspur Yuan 1.0](https://air.inspur.com/home) | | [Qwen](https://github.com/QwenLM/Qwen/tree/main)
| [MiniMax](https://api.minimax.chat/) |
| [XMChat](https://github.com/MILVLG/xmchat) | ストリーミング転送はサポートされていません
| [Midjourney](https://www.midjourney.com/) | ストリーミング転送はサポートされていません
2 changes: 1 addition & 1 deletion readme/README_ru.md
@@ -65,7 +65,7 @@
| [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) | | [LLaMA](https://github.com/facebookresearch/llama) | Поддерживает модель Lora 
| [Google PaLM](https://developers.generativeai.google/products/palm) | Не поддерживает потоковую передачу данных | [StableLM](https://github.com/Stability-AI/StableLM)
| [Xunfei Xinghuo Cognitive Model](https://xinghuo.xfyun.cn) | | [MOSS](https://github.com/OpenLMLab/MOSS)
-| [Inspur Yuan 1.0](https://air.inspur.com/home) | |
+| [Inspur Yuan 1.0](https://air.inspur.com/home) | | [Qwen](https://github.com/QwenLM/Qwen/tree/main)
| [MiniMax](https://api.minimax.chat/) |
| [XMChat](https://github.com/MILVLG/xmchat) | Не поддерживает потоковую передачу данных
| [Midjourney](https://www.midjourney.com/) | Не поддерживает потоковую передачу данных
4 changes: 4 additions & 0 deletions requirements_advanced.txt
@@ -6,3 +6,7 @@ sentence_transformers
accelerate
sentencepiece
llama-cpp-python
+transformers_stream_generator
+einops
+optimum
+auto-gptq
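
A note on these additions (inferred from the Qwen documentation, not stated in the commit): einops and transformers_stream_generator are imported by the checkpoints' remote code (the latter specifically powers chat_stream()), while optimum and auto-gptq are required to load the GPTQ-quantized *-Int4 checkpoints referenced in modules/presets.py.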
