
Commit 925e83c

docs

sudoskys committed Feb 19, 2023
1 parent 17c8e49
Showing 2 changed files with 59 additions and 21 deletions.
67 changes: 49 additions & 18 deletions App/Event.py
@@ -43,31 +43,59 @@
 REDIS_CONF = _service["redis"]
 TTS_CONF = _service["tts"]
 PLUGIN_TABLE = _service["plugin"]
 
 # End
 PLUGIN_TABLE.pop("search", None)
 PLUGIN_TABLE.pop("duckgo", None)
 
+BACKEND_CONF = _service["backend"]
 PROXY_CONF = ProxyConfig(**_service["proxy"])
 HARM_TYPE = _service["moderation_type"]
 HARM_TYPE = list(set(HARM_TYPE))
 
 # Backend
-MODEL_NAME = BACKEND_CONF.get("model")
-MODEL_TOKEN_LIMIT = BACKEND_CONF.get("token_limit")
-SimilarityInit = BACKEND_CONF.get("similarity_init")
-BACKEND_CONF = _service["backend"]
 CHAT_OPTIMIZER = Optimizer.SinglePoint
 
-# Limit
-MODEL_TOKEN_LIMIT = MODEL_TOKEN_LIMIT if MODEL_TOKEN_LIMIT else 2000
-if not MODEL_NAME:
-    logger.warning("Model Conf Not Found")
+if not BACKEND_CONF.get("type"):
+    logger.warning("Model Type Not Set: Service.json")
 
 # Proxy
 if PROXY_CONF.status:
     llm_kira.setting.proxyUrl = PROXY_CONF.url
 
+CHATGPT_CONF = BACKEND_CONF.get("chatgpt")
+OPENAI_CONF = BACKEND_CONF.get("openai")
+
+global LLM_TYPE
+global LLM_MODEL_PARAM
+global MODEL_TOKEN_LIMIT
+global LLM_CLIENT
+MODEL_TOKEN_LIMIT = OPENAI_CONF.get("token_limit") if OPENAI_CONF.get("token_limit") else 3000
+
+
+def CreateLLM():
+    global LLM_TYPE
+    global LLM_MODEL_PARAM
+    global MODEL_TOKEN_LIMIT
+    global LLM_CLIENT
+    if BACKEND_CONF.get("type") == "openai":
+        logger.info("Using Openai Api")
+        MODEL_NAME = OPENAI_CONF.get("model")
+        MODEL_TOKEN_LIMIT = OPENAI_CONF.get("token_limit") if OPENAI_CONF.get("token_limit") else 3000
+        LLM_MODEL_PARAM = llm_kira.client.llms.OpenAiParam(model=MODEL_NAME)
+        LLM_CLIENT = llm_kira.client.llms.OpenAi
+    elif BACKEND_CONF.get("type") == "chatgpt":
+        logger.info("Using ChatGPT Server")
+        if not CHATGPT_CONF.get("agree"):
+            logger.warning("Please note: your account will be authorized to the reverse-proxy server from https://github.com/bytemate/chatapi-single")
+        MODEL_TOKEN_LIMIT = 4500
+        CHATGPT_API = CHATGPT_CONF.get("api")
+        LLM_MODEL_PARAM = llm_kira.client.llms.ChatGptParam(api=CHATGPT_API)
+        LLM_CLIENT = llm_kira.client.llms.ChatGpt
+
+
+CreateLLM()
+
 llm_kira.setting.redisSetting = llm_kira.setting.RedisConfig(**REDIS_CONF)
 llm_kira.setting.llmRetryTime = 2
 llm_kira.setting.llmRetryTimeMax = 30
@@ -217,14 +245,16 @@ async def Forget(user_id: int, chat_id: int):
 
 
 class Reply(object):
-    def __init__(self, user, group, api_key):
+    def __init__(self, user, group, api_key=None):
         # Usage check
         self.user = user
         self.group = group
         self.api_key = api_key
         self._UsageManager = Usage(uid=self.user)
 
     async def openai_moderation(self, prompt: str) -> bool:
+        if not self.api_key:
+            return False
         # Content moderation
         try:
             _harm = False
@@ -292,10 +322,10 @@ async def load_response(self,
         try:
             # Dispatch by type
             if method == "write":
+                llm_param = LLM_MODEL_PARAM
                 response = await llm_model.run(prompt=str(_prompt.text),
                                                predict_tokens=int(_csonfig["token_limit"]),
-                                               llm_param=OpenAiParam(model=MODEL_NAME, temperature=0.2,
-                                                                     frequency_penalty=1)
+                                               llm_param=llm_param
                                                )
                 _deal = response.reply[0]
                 _usage = response.usage
@@ -304,8 +334,9 @@ async def load_response(self,
                 )
             elif method == "catch":
                 chat_client = llm_kira.client.ChatBot(profile=profile, llm_model=llm_model)
+                llm_param = LLM_MODEL_PARAM
                 response = await chat_client.predict(
-                    llm_param=OpenAiParam(model_name=MODEL_NAME),
+                    llm_param=llm_param,
                     prompt=prompt,
                     predict_tokens=150
                 )
@@ -343,13 +374,14 @@ async def load_response(self,
             prompt: PromptEngine
             if _head:
                 prompt.description += _head[:400]
+            llm_param = LLM_MODEL_PARAM
+            if isinstance(llm_param, OpenAiParam):
+                llm_param.logit_bias = _style
+                llm_param.presence_penalty = 0.5
             response = await chat_client.predict(
                 prompt=prompt,
                 predict_tokens=int(_csonfig["token_limit"]),
-                llm_param=OpenAiParam(model_name=MODEL_NAME,
-                                      logit_bias=_style,
-                                      presence_penalty=0.5,
-                                      frequency_penalty=0),
+                llm_param=llm_param,
                 rank_name=False
             )
             prompt.clean(clean_prompt=True)
@@ -623,7 +655,7 @@ async def Group(Message: User_Message, bot_profile: ProfileReturn, config) -> PublicReturn:
         return PublicReturn(status=False, msg=f"No Match Type", trace="PromptPreprocess")
 
     # LLM
-    llm_model = llm_kira.client.llms.OpenAi(
+    llm_model = LLM_CLIENT(
        profile=conversation,
        api_key=OPENAI_API_KEY_MANAGER.get_key(),
        call_func=OPENAI_API_KEY_MANAGER.check_api_key,
@@ -763,7 +795,6 @@ async def Friends(Message: User_Message, bot_profile: ProfileReturn, config) -> PublicReturn:
     # Conversation layer
     if not _prompt_type.status:
         return PublicReturn(status=False, msg=f"No Match Type", trace="PromptPreprocess")
-
     # LLM
     llm_model = llm_kira.client.llms.OpenAi(
         profile=conversation,
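The net effect in App/Event.py: hard-coded llm_kira.client.llms.OpenAi / OpenAiParam call sites are replaced by module globals (LLM_CLIENT, LLM_MODEL_PARAM, MODEL_TOKEN_LIMIT) that CreateLLM() selects once from backend.type in service.json. Below is a minimal standalone sketch of that select-once, reuse-everywhere pattern; the *Backend dataclasses are illustrative stand-ins, not llm_kira's real API — only the selection logic mirrors the diff.

# Standalone sketch of the dispatch pattern this commit introduces.
# OpenAiBackend / ChatGptBackend stand in for llm_kira.client.llms.OpenAi / ChatGpt.
from dataclasses import dataclass
from typing import Optional, Type

@dataclass
class OpenAiBackend:
    model: str

@dataclass
class ChatGptBackend:
    api: Optional[str] = None

LLM_CLIENT: Optional[Type] = None       # client class, chosen once at startup
LLM_MODEL_PARAM: Optional[dict] = None  # default params paired with that class
MODEL_TOKEN_LIMIT = 3000

def create_llm(backend_conf: dict) -> None:
    # Mirror of CreateLLM(): read backend.type, set the three globals.
    global LLM_CLIENT, LLM_MODEL_PARAM, MODEL_TOKEN_LIMIT
    if backend_conf.get("type") == "openai":
        conf = backend_conf.get("openai", {})
        LLM_CLIENT = OpenAiBackend
        LLM_MODEL_PARAM = {"model": conf.get("model", "text-davinci-003")}
        MODEL_TOKEN_LIMIT = conf.get("token_limit") or 3000
    elif backend_conf.get("type") == "chatgpt":
        conf = backend_conf.get("chatgpt", {})
        LLM_CLIENT = ChatGptBackend
        LLM_MODEL_PARAM = {"api": conf.get("api")}
        MODEL_TOKEN_LIMIT = 4500

create_llm({"type": "openai", "openai": {"model": "text-davinci-003", "token_limit": 4000}})
assert LLM_CLIENT is OpenAiBackend and MODEL_TOKEN_LIMIT == 4000
llm = LLM_CLIENT(**LLM_MODEL_PARAM)  # call sites no longer name a concrete backend
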
13 changes: 10 additions & 3 deletions utils/Data.py
@@ -282,9 +282,16 @@ def defaultService():
         "plugin": {
         },
         "backend": {
-            "model": "text-davinci-003",
-            "similarity_init": True,
-            "token_limit": 4000
+            "type": "openai",
+            "openai": {
+                "model": "text-davinci-003",
+                "similarity_init": True,
+                "token_limit": 4000
+            },
+            "chatgpt": {
+                "api": None,
+                "agree": False
+            },
         },
         "media": {
             "blip": {
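For configuration, the new defaultService() shape means a user switching to the ChatGPT backend would set the backend block roughly as below. This is a sketch mirroring the keys added above: the api URL is a placeholder (not a real endpoint), and agree=True acknowledges the reverse-proxy authorization warning logged in Event.py.

# Hypothetical "backend" block, written as the Python dict defaultService() returns.
backend = {
    "type": "chatgpt",  # routes CreateLLM() to the ChatGpt client
    "openai": {
        "model": "text-davinci-003",
        "similarity_init": True,
        "token_limit": 4000
    },
    "chatgpt": {
        "api": "http://127.0.0.1:8080",  # placeholder chatapi-single endpoint
        "agree": True  # acknowledge the reverse-proxy authorization warning
    },
}
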
