Commit 2b917ed

Fix local model compatibility on Windows (修复本地模型在windows上的兼容性)

binary-sky committed Nov 11, 2023
1 parent fcf0455 commit 2b917ed
Showing 7 changed files with 10 additions and 15 deletions.
3 changes: 1 addition & 2 deletions request_llms/bridge_chatglm.py
@@ -4,14 +4,13 @@

from transformers import AutoModel, AutoTokenizer
from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns



# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
class GetGLM2Handle(LocalLLMHandle):

def load_model_info(self):
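Note: each bridge_* module makes the same two-line change — the SingletonLocalLLM import is dropped and the @SingletonLocalLLM class decorator is removed — and the singleton wrapping is instead applied where the handle is instantiated (SingletonLocalLLM(LLMSingletonClass)() in local_llm_class.py, shown further down). The wrapper's actual implementation is not part of this diff; the following is only a minimal sketch of the lazy-singleton idea, with illustrative names (SingletonLocalLLM_sketch, DummyHandle):

_handles = {}

def SingletonLocalLLM_sketch(cls):
    # Hypothetical stand-in for SingletonLocalLLM: instantiate `cls` at most once.
    def get_instance(*args, **kwargs):
        if cls not in _handles:
            _handles[cls] = cls(*args, **kwargs)
        return _handles[cls]
    return get_instance

class DummyHandle:
    # Stand-in for a LocalLLMHandle subclass such as GetGLM2Handle.
    def __init__(self):
        print("loading model once")

# Old style: used as a class decorator in every bridge_* module.
# New style (this commit): applied at the call site in local_llm_class.py.
h1 = SingletonLocalLLM_sketch(DummyHandle)()
h2 = SingletonLocalLLM_sketch(DummyHandle)()
assert h1 is h2   # the handle (and the model it loads) is created only once

Leaving the module-level classes undecorated keeps GetGLM2Handle and friends plain LocalLLMHandle subclasses, which is friendlier to Windows' spawn-based multiprocessing (objects sent to a child process must be importable and picklable) — plausibly the point of this part of the commit.
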
3 changes: 1 addition & 2 deletions request_llms/bridge_chatglm3.py
@@ -4,14 +4,13 @@

from transformers import AutoModel, AutoTokenizer
from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns



# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
class GetGLM3Handle(LocalLLMHandle):

def load_model_info(self):
3 changes: 1 addition & 2 deletions request_llms/bridge_chatglmonnx.py
@@ -8,7 +8,7 @@
import importlib
from toolbox import update_ui, get_conf
from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns

from .chatglmoonx import ChatGLMModel, chat_template

@@ -17,7 +17,6 @@
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
class GetONNXGLMHandle(LocalLLMHandle):

def load_model_info(self):
3 changes: 1 addition & 2 deletions request_llms/bridge_internlm.py
@@ -7,7 +7,7 @@
import importlib
from toolbox import update_ui, get_conf
from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns


# ------------------------------------------------------------------------------------------------------------------------
@@ -34,7 +34,6 @@ def combine_history(prompt, hist):
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
class GetInternlmHandle(LocalLLMHandle):

def load_model_info(self):
3 changes: 1 addition & 2 deletions request_llms/bridge_llama2.py
@@ -5,14 +5,13 @@
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from toolbox import update_ui, get_conf, ProxyNetworkActivate
from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
from threading import Thread


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
class GetONNXGLMHandle(LocalLLMHandle):

def load_model_info(self):
3 changes: 1 addition & 2 deletions request_llms/bridge_qwen.py
@@ -8,14 +8,13 @@
import importlib
from toolbox import update_ui, get_conf
from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns



# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
class GetONNXGLMHandle(LocalLLMHandle):

def load_model_info(self):
7 changes: 4 additions & 3 deletions request_llms/local_llm_class.py
@@ -76,7 +76,6 @@ def __init__(self):
self.parent_state, self.child_state = create_queue_pipe()
# allow redirect_stdout
self.std_tag = "[Subprocess Message] "
-self.child.write = lambda x: self.child.send(self.std_tag + x)
self.running = True
self._model = None
self._tokenizer = None
@@ -137,6 +136,8 @@ def check_dependency(self):
def run(self):
# 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process
# First run: load the model parameters
+self.child.flush = lambda *args: None
+self.child.write = lambda x: self.child.send(self.std_tag + x)
reset_tqdm_output()
self.set_state("`尝试加载模型`")
try:
@@ -220,7 +221,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
"""
refer to request_llms/bridge_all.py
"""
-_llm_handle = LLMSingletonClass()
+_llm_handle = SingletonLocalLLM(LLMSingletonClass)()
if len(observe_window) >= 1:
observe_window[0] = load_message + "\n\n" + _llm_handle.get_state()
if not _llm_handle.running:
@@ -268,7 +269,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
"""
chatbot.append((inputs, ""))

-_llm_handle = LLMSingletonClass()
+_llm_handle = SingletonLocalLLM(LLMSingletonClass)()
chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state())
yield from update_ui(chatbot=chatbot, history=[])
if not _llm_handle.running:
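The local_llm_class.py hunks are where the Windows compatibility fix lands: the write shim on the child end of the pipe is no longer installed in __init__ (which runs in the parent process) but inside run() (which runs in the child), and a no-op flush is added so callers that treat the redirected stream as a file-like object (print, tqdm) do not fail. This matters on Windows because multiprocessing there uses the spawn start method: the handle is re-created in the child by pickling and re-import rather than inherited by fork, so a lambda attached to the pipe connection in the parent does not survive into the child (and lambdas cannot be pickled in the first place). A minimal, self-contained sketch of the pattern — names such as child_main, STD_TAG and the "[Finish]" sentinel are illustrative, not the repository's:

import sys
from multiprocessing import Process, Pipe

STD_TAG = "[Subprocess Message] "

def child_main(conn):
    # Install the file-like shims here, in the child process, not in the
    # parent's __init__ — this is what the commit changes.
    conn.write = lambda x: conn.send(STD_TAG + x)   # forward stdout text over the pipe
    conn.flush = lambda *args: None                 # print()/tqdm may call flush()
    sys.stdout = conn
    print("loading model ...")                      # each write() becomes one pipe message
    conn.send("[Finish]")

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = Process(target=child_main, args=(child_conn,))
    p.start()
    msg = parent_conn.recv()
    while msg != "[Finish]":
        print(msg)   # e.g. "[Subprocess Message] loading model ..."
        msg = parent_conn.recv()
    p.join()

The SingletonLocalLLM(LLMSingletonClass)() change in predict_no_ui_long_connection and predict follows the same logic: the singleton wrapper is applied at the moment the handle is needed instead of being baked into each bridge module's class definition.
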
