# codefuse-chatbot/configs/model_config.py.example

import os
import sys
import logging
import torch
import openai
import base64
from .utils import is_running_in_docker
# Log format
LOG_FORMAT = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.basicConfig(format=LOG_FORMAT)
# os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
os.environ["API_BASE_URL"] = "http://openai.com/v1/chat/completions"
os.environ["OPENAI_API_KEY"] = ""
os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
os.environ["BAIDU_OCR_API_KEY"] = ""
os.environ["BAIDU_OCR_SECRET_KEY"] = ""
import platform
system_name = platform.system()
# Modify the values in the dict below to point to local embedding model locations,
# e.g. change "text2vec": "GanymedeNil/text2vec-large-chinese" to "text2vec": "User/Downloads/text2vec-large-chinese".
# Absolute paths are required here.
embedding_model_dict = {
    "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
    "ernie-base": "nghuyong/ernie-3.0-base-zh",
    "text2vec-base": "shibing624/text2vec-base-chinese",
    "text2vec": "GanymedeNil/text2vec-large-chinese",
    "text2vec-paraphrase": "shibing624/text2vec-base-chinese-paraphrase",
    "text2vec-sentence": "shibing624/text2vec-base-chinese-sentence",
    "text2vec-multilingual": "shibing624/text2vec-base-multilingual",
    "m3e-small": "moka-ai/m3e-small",
    "m3e-base": "moka-ai/m3e-base",
    "m3e-large": "moka-ai/m3e-large",
    "bge-small-zh": "BAAI/bge-small-zh",
    "bge-base-zh": "BAAI/bge-base-zh",
    "bge-large-zh": "BAAI/bge-large-zh"
}
LOCAL_MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "embedding_models")
embedding_model_dict = {
    k: f"/home/user/chatbot/embedding_models/{v}" if is_running_in_docker() else f"{LOCAL_MODEL_DIR}/{v}"
    for k, v in embedding_model_dict.items()
}
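
# A minimal sketch of what the is_running_in_docker helper imported above
# might check (the real implementation lives in configs/utils.py; this local
# copy is illustrative only and is not used below). A common approach looks
# for the /.dockerenv marker file or a docker entry in the process cgroup info.
def _is_running_in_docker_sketch() -> bool:
    if os.path.exists("/.dockerenv"):
        return True
    try:
        with open("/proc/self/cgroup") as f:
            return "docker" in f.read()
    except OSError:
        return False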
# Embedding engine to use
EMBEDDING_ENGINE = 'openai'
EMBEDDING_MODEL = "text2vec-base"
# Device on which the embedding model runs
EMBEDDING_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
ONLINE_LLM_MODEL = {
    # Online models. Assign a different port to each online API in server_config.
    "openai-api": {
        "model_name": "gpt-3.5-turbo",
        "api_base_url": "https://api.openai.com/v1",
        "api_key": "",
        "openai_proxy": "",
    },
    "example": {
        "version": "gpt-3.5",  # uses the openai interface as an example
        "api_base_url": "https://api.openai.com/v1",
        "api_key": "",
        "provider": "ExampleWorker",
    },
}
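# Minimal usage sketch for the "openai-api" entry above, assuming the
# pre-1.0 openai SDK imported at the top of this file (illustrative only,
# not executed at import time):
#
#     conf = ONLINE_LLM_MODEL["openai-api"]
#     openai.api_base = conf["api_base_url"]
#     openai.api_key = conf["api_key"]
#     resp = openai.ChatCompletion.create(
#         model=conf["model_name"],
#         messages=[{"role": "user", "content": "hello"}],
#     )
#     print(resp["choices"][0]["message"]["content"])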
# Chat models are recommended; base models cannot produce correct output.
llm_model_dict = {
    "chatglm-6b": {
        "local_model_path": "THUDM/chatglm-6b",
        "api_base_url": "http://localhost:8888/v1",  # set this to the "api_base_url" of the fastchat service
        "api_key": "EMPTY"
    },
    # The following models have been tested and can be connected; configure them like the entry above:
    # 'codellama_34b', 'Baichuan2-13B-Base', 'Baichuan2-13B-Chat', 'baichuan2-7b-base', 'baichuan2-7b-chat',
    # 'internlm-7b-base', 'internlm-chat-7b', 'chatglm2-6b', 'qwen-14b-base', 'qwen-14b-chat', 'qwen-1-8B-Chat',
    # 'Qwen-7B', 'Qwen-7B-Chat', 'qwen-7b-base-v1.1', 'qwen-7b-chat-v1.1', 'chatglm3-6b', 'chatglm3-6b-32k',
    # 'chatglm3-6b-base', 'Qwen-72B-Chat-Int4'
    # If calling chatgpt raises urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='api.openai.com', port=443):
    # Max retries exceeded with url: /v1/chat/completions,
    # downgrade urllib3 to version 1.25.11.
    # If urllib3.exceptions.MaxRetryError: HTTPSConnectionPool is still raised, change https to http.
    # See https://zhuanlan.zhihu.com/p/350015032
    # If it raises raise NewConnectionError(
    # urllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPSConnection object at 0x000001FE4BDB85E0>:
    # Failed to establish a new connection: [WinError 10060],
    # it is because OPENAI has blocked mainland China and Hong Kong IPs; switch to an IP in Japan, Singapore, etc.
    "gpt-3.5-turbo": {
        "local_model_path": "gpt-3.5-turbo",
        "api_base_url": os.environ.get("API_BASE_URL"),
        "api_key": os.environ.get("OPENAI_API_KEY")
    },
    "gpt-3.5-turbo-16k": {
        "local_model_path": "gpt-3.5-turbo-16k",
        "api_base_url": os.environ.get("API_BASE_URL"),
        "api_key": os.environ.get("OPENAI_API_KEY")
    },
}
# Chat models are recommended; base models cannot produce correct output.
VLLM_MODEL_DICT = {
    'chatglm2-6b': "THUDM/chatglm-6b",
}
# The following models have been tested and can be connected; configure them like the entry above:
# 'codellama_34b', 'Baichuan2-13B-Base', 'Baichuan2-13B-Chat', 'baichuan2-7b-base', 'baichuan2-7b-chat',
# 'internlm-7b-base', 'internlm-chat-7b', 'chatglm2-6b', 'qwen-14b-base', 'qwen-14b-chat', 'qwen-1-8B-Chat',
# 'Qwen-7B', 'Qwen-7B-Chat', 'qwen-7b-base-v1.1', 'qwen-7b-chat-v1.1', 'chatglm3-6b', 'chatglm3-6b-32k',
# 'chatglm3-6b-base', 'Qwen-72B-Chat-Int4'
LOCAL_LLM_MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "llm_models")
# If you do not want the model paths rewritten, remove the path rewriting below.
llm_model_dict_c = {}
for k, v in llm_model_dict.items():
    v_c = {}
    for kk, vv in v.items():
        if kk == "local_model_path":
            v_c[kk] = f"/home/user/chatbot/llm_models/{vv}" if is_running_in_docker() else f"{LOCAL_LLM_MODEL_DIR}/{vv}"
        else:
            v_c[kk] = vv
    llm_model_dict_c[k] = v_c
llm_model_dict = llm_model_dict_c
# If you do not want the model paths rewritten, remove the path rewriting below.
VLLM_MODEL_DICT_c = {}
for k, v in VLLM_MODEL_DICT.items():
    VLLM_MODEL_DICT_c[k] = f"/home/user/chatbot/llm_models/{v}" if is_running_in_docker() else f"{LOCAL_LLM_MODEL_DIR}/{v}"
VLLM_MODEL_DICT = VLLM_MODEL_DICT_c
# LLM name
# EMBEDDING_ENGINE = 'openai'
EMBEDDING_ENGINE = 'model'
EMBEDDING_MODEL = "text2vec-base"
# LLM_MODEL = "gpt-4"
LLM_MODEL = "gpt-3.5-turbo-16k"
LLM_MODELs = ["gpt-3.5-turbo-16k"]
USE_FASTCHAT = "gpt" not in LLM_MODEL  # whether to serve the model through fastchat
# Device on which the LLM runs
LLM_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
# Log storage path
LOG_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "logs")
if not os.path.exists(LOG_PATH):
    os.mkdir(LOG_PATH)
# Default source file storage path
SOURCE_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "sources")
# Default knowledge base storage path
KB_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "knowledge_base")
# Default code base storage path
CB_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "code_base")
# nltk model storage path
NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "nltk_data")
# Code storage path (jupyter working directory)
JUPYTER_WORK_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "jupyter_work")
# WEB_CRAWL storage path
WEB_CRAWL_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "knowledge_base")
# NEBULA_DATA storage path
NELUBA_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data/neluba_data")
for _path in [LOG_PATH, SOURCE_PATH, KB_ROOT_PATH, NLTK_DATA_PATH, JUPYTER_WORK_PATH, WEB_CRAWL_PATH, NELUBA_PATH]:
    if not os.path.exists(_path):
        os.makedirs(_path, exist_ok=True)
# Default database storage path.
# For sqlite, modify DB_ROOT_PATH directly; for other databases, modify SQLALCHEMY_DATABASE_URI directly.
DB_ROOT_PATH = os.path.join(KB_ROOT_PATH, "info.db")
SQLALCHEMY_DATABASE_URI = f"sqlite:///{DB_ROOT_PATH}"
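# Illustrative sketch (not executed here): the URI above plugs straight
# into SQLAlchemy's standard engine factory.
#
#     from sqlalchemy import create_engine
#     engine = create_engine(SQLALCHEMY_DATABASE_URI)
#     with engine.connect() as conn:
#         pass  # run queries against info.db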
# Available vector store types and their configurations
kbs_config = {
    "faiss": {
    },
    # "milvus": {
    #     "host": "127.0.0.1",
    #     "port": "19530",
    #     "user": "",
    #     "password": "",
    #     "secure": False,
    # },
    # "pg": {
    #     "connection_uri": "postgresql://postgres:postgres@127.0.0.1:5432/langchain_chatchat",
    # }
}
# Default vector store type. Options: faiss, milvus, pg.
DEFAULT_VS_TYPE = "faiss"
# Number of cached vector stores
CACHED_VS_NUM = 1
# Length of a single text chunk in the knowledge base
CHUNK_SIZE = 500
# Overlap length between adjacent text chunks in the knowledge base
OVERLAP_SIZE = 50
# Number of matched vectors returned from the knowledge base
VECTOR_SEARCH_TOP_K = 5
# Knowledge base relevance threshold, in the range 0-1. A lower SCORE means higher
# relevance; 1 is equivalent to no filtering. Around 0.5 is recommended.
# Mac may be unable to use normalized_L2, so SCORE_THRESHOLD is adjusted to the 0~1100 range.
FAISS_NORMALIZE_L2 = system_name in ["Linux", "Windows"]
SCORE_THRESHOLD = 1 if system_name in ["Linux", "Windows"] else 1100
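# Minimal sketch of how FAISS_NORMALIZE_L2 and SCORE_THRESHOLD interact,
# assuming float32 numpy vectors and the faiss package (illustrative only,
# not executed at import time): with L2-normalized vectors, IndexFlatL2
# returns squared-L2 distances in [0, 4], so SCORE_THRESHOLD = 1 keeps only
# fairly close hits.
#
#     import faiss, numpy as np
#     xb = np.random.rand(100, 768).astype("float32")  # corpus embeddings
#     xq = np.random.rand(1, 768).astype("float32")    # query embedding
#     if FAISS_NORMALIZE_L2:
#         faiss.normalize_L2(xb)
#         faiss.normalize_L2(xq)
#     index = faiss.IndexFlatL2(xb.shape[1])
#     index.add(xb)
#     D, I = index.search(xq, VECTOR_SEARCH_TOP_K)
#     hits = [i for d, i in zip(D[0], I[0]) if d <= SCORE_THRESHOLD]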
# Number of matched search engine results
SEARCH_ENGINE_TOP_K = 5
# Number of matched code engine results
CODE_SEARCH_TOP_K = 1
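# Illustrative sketch of where CHUNK_SIZE and OVERLAP_SIZE apply, assuming
# langchain's text splitter (an assumption about how the chunks are cut;
# not executed at import time):
#
#     from langchain.text_splitter import RecursiveCharacterTextSplitter
#     splitter = RecursiveCharacterTextSplitter(
#         chunk_size=CHUNK_SIZE, chunk_overlap=OVERLAP_SIZE)
#     chunks = splitter.split_text(open("some_doc.txt").read())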
2023-09-28 10:58:58 +08:00
# Prompt template for QA over local knowledge
PROMPT_TEMPLATE = """【Instruction】Answer the question concisely and professionally based on the known information. If no answer can be derived from it, say "The question cannot be answered from the known information." Do not add fabricated content to the answer. Please answer in Chinese.
【Known information】{context}
【Question】{question}"""
# Prompt template for QA over local code knowledge
CODE_PROMPT_TEMPLATE = """【Instruction】Answer the question based on the known information.
【Known information】{context}
【Question】{question}"""
# Code explanation template
CODE_INTERPERT_TEMPLATE = '''{code}
Explain this code.'''
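# Usage sketch: all three templates are plain Python format strings, e.g.
#
#     prompt = PROMPT_TEMPLATE.format(context="...", question="...")
#     explain = CODE_INTERPERT_TEMPLATE.format(code="print(1)")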
# Whether the API allows cross-origin (CORS) requests; defaults to False, set to True to enable.
OPEN_CROSS_DOMAIN = False
# Required variables for Bing search
# Bing search requires a Bing Subscription Key; apply for a Bing Search trial in the Azure portal.
# For details on how to apply, see
# https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/create-bing-search-service-resource
# For creating a Bing API search instance with python, see:
# https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/quickstarts/rest/python
BING_SEARCH_URL = "https://api.bing.microsoft.com/v7.0/search"
# Note: this is not the Bing Webmaster Tools API key.
# Also, if a server reports "Failed to establish a new connection: [Errno 110] Connection timed out",
# the server is behind a firewall; ask the admin to whitelist the address (if it is a company server, forget it, GG).
BING_SUBSCRIPTION_KEY = ""
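# Minimal REST sketch following the quickstart linked above (requires the
# requests package; illustrative only, not executed at import time):
#
#     import requests
#     resp = requests.get(
#         BING_SEARCH_URL,
#         headers={"Ocp-Apim-Subscription-Key": BING_SUBSCRIPTION_KEY},
#         params={"q": "codefuse", "count": SEARCH_ENGINE_TOP_K},
#     )
#     resp.raise_for_status()
#     results = resp.json()["webPages"]["value"]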
# Whether to enable Chinese title enhancement, plus its related configuration.
# Title detection determines which text segments are titles and marks them in metadata;
# each segment is then concatenated with the title one level above it to enrich the text
# (see the illustrative sketch below).
ZH_TITLE_ENHANCE = False
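# Illustrative sketch of the idea (hypothetical helper and field names; the
# real logic lives elsewhere in the project): chunks flagged as titles in
# metadata are prepended to the body chunks that follow them.
#
#     def enhance_with_title(docs):
#         current_title = None
#         for doc in docs:
#             if doc.metadata.get("is_title"):
#                 current_title = doc.page_content
#             elif current_title:
#                 doc.page_content = f"{current_title}\n{doc.page_content}"
#         return docs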
log_verbose = False