update fastchat's doc and demo
parent b60a7ac4bb
commit 3556553ce0

@@ -14,6 +14,7 @@
## 🔔 Updates

+- [2023.12.26] Opened up integration of open-source private LLMs and LLM APIs via FastChat
- [2023.12.14] Feature coverage by the QbitAI WeChat official account: [article link](https://mp.weixin.qq.com/s/MuPfayYTk9ZW6lcqgMpqKA)
- [2023.12.01] Multi-Agent and codebase retrieval features released
- [2023.11.15] Added a Q&A enhancement mode based on the local codebase
@@ -41,7 +42,7 @@
- **🔧 Domain-Specific Knowledge:** A knowledge base dedicated to the DevOps domain, with convenient one-click self-service construction of domain knowledge bases.
- **🤖 Domain Model Compatibility:** Compatibility with smaller DevOps-domain models, guaranteeing integration with DevOps-related platforms and promoting a unified technical ecosystem.

-🌍 Relying on open-source LLM and Embedding models, this project enables offline private deployment based on open-source models. It also supports calling the OpenAI API.
+🌍 Relying on open-source LLM and Embedding models, this project enables offline private deployment based on open-source models. It also supports calling the OpenAI API. [Integration demo](sources/readme_docs/fastchat.md)

👥 The core development team has long focused on research in the AIOps + NLP field. We launched the Codefuse-ai project in the hope that everyone will contribute high-quality development and operations documents, improving this solution together to achieve the goal of "making development easy for everyone".
@@ -114,6 +114,7 @@ VLLM_MODEL_DICT = {
LOCAL_LLM_MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "llm_models")
# If you do not want the model paths rewritten, remove the path-reset settings below
llm_model_dict_c = {}
for k, v in llm_model_dict.items():
    v_c = {}
@@ -125,6 +126,11 @@ for k, v in llm_model_dict.items():
    llm_model_dict_c[k] = v_c

llm_model_dict = llm_model_dict_c
+# If you do not want the model paths rewritten, remove the path-reset settings below
+VLLM_MODEL_DICT_c = {}
+for k, v in VLLM_MODEL_DICT.items():
+    VLLM_MODEL_DICT_c[k] = f"/home/user/chatbot/llm_models/{v}" if is_running_in_docker() else f"{LOCAL_LLM_MODEL_DIR}/{v}"
+VLLM_MODEL_DICT = VLLM_MODEL_DICT_c

# LLM name
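The rewrite above calls is_running_in_docker(), whose definition lies outside this hunk. A minimal sketch of such a check, assuming the usual Docker markers (an illustration, not the repository's actual implementation):

```python
import os

def is_running_in_docker() -> bool:
    # Heuristic: Docker creates /.dockerenv inside containers, and
    # /proc/self/cgroup typically mentions "docker" when containerized.
    if os.path.exists("/.dockerenv"):
        return True
    try:
        with open("/proc/self/cgroup") as f:
            return "docker" in f.read()
    except OSError:
        return False
```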
@@ -1,59 +0,0 @@
import os, sys, requests

src_dir = os.path.join(
    os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
sys.path.append(src_dir)

from dev_opsgpt.tools import (
    toLangchainTools, get_tool_schema, DDGSTool, DocRetrieval,
    TOOL_DICT, TOOL_SETS
)

from configs.model_config import *
from dev_opsgpt.connector.phase import BasePhase
from dev_opsgpt.connector.agents import BaseAgent
from dev_opsgpt.connector.chains import BaseChain
from dev_opsgpt.connector.schema import (
    Message, Memory, load_role_configs, load_phase_configs, load_chain_configs
)
from dev_opsgpt.connector.configs import AGETN_CONFIGS, CHAIN_CONFIGS, PHASE_CONFIGS
import importlib

tools = toLangchainTools([TOOL_DICT[i] for i in TOOL_SETS if i in TOOL_DICT])


role_configs = load_role_configs(AGETN_CONFIGS)
chain_configs = load_chain_configs(CHAIN_CONFIGS)
phase_configs = load_phase_configs(PHASE_CONFIGS)

agent_module = importlib.import_module("dev_opsgpt.connector.agents")


phase_name = "codeChatPhase"
phase = BasePhase(phase_name,
                  task=None,
                  phase_config=PHASE_CONFIGS,
                  chain_config=CHAIN_CONFIGS,
                  role_config=AGETN_CONFIGS,
                  do_summary=False,
                  do_code_retrieval=True,
                  do_doc_retrieval=False,
                  do_search=False,
                  )

# How many classes are there in the code? => cypher-based
# Which functions are in the codebase? Returning 5 is enough => cypher-based
# What does the `remove` function do? => tag-based
# Is there a function that already removes a given substring from a string, and how would I use it? Write some Java code => description-based
# Develop a Java method for the following requirement: take a string as input, delete the ".java" substring from it, and return the new string => description-based

# round-1
query_content = "代码一共有多少类"  # "How many classes are there in the code?"
query = Message(
    role_name="user", role_type="human",
    role_content=query_content, input_query=query_content, origin_query=query_content,
    code_engine_name="client", score_threshold=1.0, top_k=3, cb_search_type="cypher"
)

output_message1, _ = phase.step(query)
@@ -0,0 +1,223 @@
# Local Private Model / LLM API Integration

Relying on open-source LLM and Embedding models, this project enables offline private deployment based on open-source models. It also supports calling the OpenAI API.

## 📜 Table of Contents
- [Local Private Model Integration](#local-private-model-integration)
- [Public LLM API Integration](#public-llm-api-integration)
- [Starting the LLM Service](#starting-the-llm-service)
## Local Private Model Integration

<br>Example model path configuration; modify the settings in model_config.py
```python
# Recommendation: integrate via Hugging Face and prefer chat models; base models cannot produce correct output
# Note: when llm_model_dict and VLLM_MODEL_DICT both contain a model, the configuration in VLLM_MODEL_DICT is started first

# Example llm_model_dict configuration:
llm_model_dict = {
    "chatglm-6b": {
        "local_model_path": "THUDM/chatglm-6b",
        "api_base_url": "http://localhost:8888/v1",  # set to the "api_base_url" of the fastchat service
        "api_key": "EMPTY"
    }
}

# Example VLLM_MODEL_DICT configuration:
VLLM_MODEL_DICT = {
    'chatglm2-6b': "THUDM/chatglm-6b",
}
```
<br>Example model path entries
```python
# 1. If you place the model under ~/codefuse-chatbot/llm_models
# and the model path is:
#     model_dir: ~/codefuse-chatbot/llm_models/THUDM/chatglm-6b

# the reference configuration is:
llm_model_dict = {
    "chatglm-6b": {
        "local_model_path": "THUDM/chatglm-6b",
        "api_base_url": "http://localhost:8888/v1",  # set to the "api_base_url" of the fastchat service
        "api_key": "EMPTY"
    }
}

VLLM_MODEL_DICT = {
    'chatglm2-6b': "THUDM/chatglm-6b",
}

# or, if the model path is:
#     model_dir: ~/codefuse-chatbot/llm_models/chatglm-6b
llm_model_dict = {
    "chatglm-6b": {
        "local_model_path": "chatglm-6b",
        "api_base_url": "http://localhost:8888/v1",  # set to the "api_base_url" of the fastchat service
        "api_key": "EMPTY"
    }
}

VLLM_MODEL_DICT = {
    'chatglm2-6b': "chatglm-6b",
}

# 2. If you do not want to move the models into ~/codefuse-chatbot/llm_models,
# also delete the code below the "model path reset" section; see model_config.py for details
# If the model path is:
#     model_dir: ~/THUDM/chatglm-6b
# the reference configuration is:
llm_model_dict = {
    "chatglm-6b": {
        "local_model_path": "~/THUDM/chatglm-6b",
        "api_base_url": "http://localhost:8888/v1",  # set to the "api_base_url" of the fastchat service
        "api_key": "EMPTY"
    }
}

VLLM_MODEL_DICT = {
    'chatglm2-6b': "~/THUDM/chatglm-6b",
}
```
```python
# 3. Specify the model service to start; keep the two settings consistent
LLM_MODEL = "gpt-3.5-turbo-16k"
LLM_MODELs = ["gpt-3.5-turbo-16k"]
```
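To serve the local model configured earlier instead of an OpenAI model, the same two settings would presumably name that dict key instead. A sketch, assuming the llm_model_dict key is the model name the fastchat service exposes:

```python
# Hypothetical: point both settings at the locally configured model key
LLM_MODEL = "chatglm-6b"
LLM_MODELs = ["chatglm-6b"]
```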
```python
# Modify server_config.py; if LLM_MODELS does not configure multiple models, no extra setup is needed
# Update the FSCHAT_MODEL_WORKERS configuration in server_config.py
"model_name": {'host': DEFAULT_BIND_HOST, 'port': 20057}
```
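Extrapolating from the single line above, a multi-model FSCHAT_MODEL_WORKERS section might look like the sketch below. The dict wrapper, the second entry, and port 20058 are assumptions, and DEFAULT_BIND_HOST is taken to be defined in server_config.py:

```python
DEFAULT_BIND_HOST = "127.0.0.1"  # assumed; normally already defined in server_config.py

# One worker entry per model served, each on its own port (assumed layout)
FSCHAT_MODEL_WORKERS = {
    "chatglm-6b": {"host": DEFAULT_BIND_HOST, "port": 20057},
    "gpt-3.5-turbo-16k": {"host": DEFAULT_BIND_HOST, "port": 20058},
}
```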
<br>Quantized model integration
```bash
# To support the codellama-34b-int4 model, patch fastchat:
cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py

# To support the qwen-72b-int4 model, apply the same fastchat patch:
cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
# Quantization also requires a configuration change in llm_api.py:
# in dev_opsgpt/service/llm_api.py#559, uncomment kwargs["gptq_wbits"] = 4
```
## Public LLM API Integration
```python
# Modify model_config.py
# ONLINE_LLM_MODEL
# The other API integrations come from the langchain-chatchat project; lacking the relevant accounts, they are untested

# Specify the model service to start; keep the two settings consistent
LLM_MODEL = "gpt-3.5-turbo-16k"
LLM_MODELs = ["gpt-3.5-turbo-16k"]
```
An example of integrating an external LLM API:
```python
# 1. Implement a new model integration class
# See ~/dev_opsgpt/service/model_workers/openai.py#ExampleWorker for reference
# Implementing the do_chat function is enough to use the LLM's capabilities

class XXWorker(ApiModelWorker):
    def __init__(
        self,
        *,
        controller_addr: str = None,
        worker_addr: str = None,
        model_names: List[str] = ["gpt-3.5-turbo"],
        version: str = "gpt-3.5",
        **kwargs,
    ):
        kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr)
        kwargs.setdefault("context_len", 16384)  # TODO: 16K models need this set to 16384
        super().__init__(**kwargs)
        self.version = version

    def do_chat(self, params: ApiChatParams) -> Dict:
        '''
        Executes the chat; by default uses the chat function within this module.
        :params.messages : [
            {"role": "user", "content": "hello"},
            {"role": "assistant", "content": "hello"}
        ]
        :params.xx: see ApiChatParams for details
        Required return format: {"error_code": int, "text": str}
        '''
        return {"error_code": 500, "text": f"{self.model_names[0]} has not implemented the chat capability"}


# Finally, register it in ~/dev_opsgpt/service/model_workers/__init__.py
# from .xx import XXWorker

# 2. Integrate through an existing model integration class
# Or directly use an existing LLM worker class (untested for lack of accounts; PRs after testing are welcome)
```
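To make the contract concrete, here is a hedged sketch of a do_chat body that forwards params.messages to an OpenAI-compatible endpoint and adapts the reply to the required {"error_code": int, "text": str} shape. The params.api_base_url and params.api_key attribute names are assumptions; check ApiChatParams for the real fields:

```python
import requests

# Sketch only, inside XXWorker: attribute names on `params` are assumed
def do_chat(self, params: "ApiChatParams") -> dict:
    try:
        resp = requests.post(
            f"{params.api_base_url}/chat/completions",           # assumed field
            headers={"Authorization": f"Bearer {params.api_key}"},  # assumed field
            json={"model": self.model_names[0], "messages": params.messages},
            timeout=60,
        )
        resp.raise_for_status()
        data = resp.json()
        return {"error_code": 0, "text": data["choices"][0]["message"]["content"]}
    except Exception as e:  # surface any failure in the required format
        return {"error_code": 500, "text": str(e)}
```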
```python
# Modify model_config.py#ONLINE_LLM_MODEL
# Fill in the dedicated model's version, api_base_url, api_key, and provider (matching the class name above)
ONLINE_LLM_MODEL = {
    # Online models. Assign a different port to each online API in server_config

    "openai-api": {
        "model_name": "gpt-3.5-turbo",
        "api_base_url": "https://api.openai.com/v1",
        "api_key": "",
        "openai_proxy": "",
    },
    "example": {
        "version": "gpt-3.5",  # using the openai API as the example
        "api_base_url": "https://api.openai.com/v1",
        "api_key": "",
        "provider": "ExampleWorker",
    },
}
```
## Starting the LLM Service
```bash
# start llm-service (optional): start the LLM service on its own
python dev_opsgpt/service/llm_api.py
```
```python
# startup test
import openai
# openai.api_key = "EMPTY" # Not supported yet
openai.api_base = "http://127.0.0.1:8888/v1"

# choose the model you started
model = "example"

# create a chat completion
completion = openai.ChatCompletion.create(
    model=model,
    messages=[{"role": "user", "content": "Hello! What is your name? "}],
    max_tokens=100,
)
# print the completion
print(completion.choices[0].message.content)

# a correct response confirms the LLM is properly connected
```
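Note: the test above uses the legacy openai-python interface; openai.api_base and openai.ChatCompletion were removed in openai>=1.0, so run it with an older client (for example openai==0.28) if the call fails.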
or
```bash
# model_config.py#USE_FASTCHAT determines whether local models are served through fastchat
USE_FASTCHAT = "gpt" not in LLM_MODEL
python start.py  # start.py#224 automatically runs python service/llm_api.py
```
@@ -8,14 +8,14 @@
Full roadmap
-- [ ] Sandbox environment
+- [x] Sandbox environment
  - [x] Environment-isolated sandbox with code execution
-  - [ ] Upload and download files
+  - [x] Upload and download files
  - [ ] Java execution environment support
- [ ] Vector Database & Retrieval
  - [ ] llama-index compatible vector management
-  - [ ] task retrieval
-  - [ ] tool retrieval
+  - [x] task retrieval
+  - [x] tool retrieval
- [ ] Prompt Control
  - [ ] prompt flow
  - [ ] Manage different tasks with different prompts
@@ -27,10 +27,10 @@
- [ ] Fully automated pipeline
- [x] LLM integration via fastchat
- [x] Text Embedding integration via sentencebert
-  - [ ] Faster vector loading
-- [ ] Connector
-  - [ ] langchain-based ReAct mode
-  - [ ] Tool retrieval via langchain
+  - [x] Faster vector loading
+- [x] Connector
+  - [x] langchain-based ReAct mode
+  - [x] Tool retrieval via langchain
- [ ] General web crawling capability
  - [x] Technical documents: Zhihu, CSDN, Alibaba Cloud developer forum, Tencent Cloud developer forum, etc.
  - [ ] issue documents
@@ -48,11 +48,11 @@
<br>
- v0.1
  - [x] Sandbox environment: upload and download files
-  - [ ] Vector Database & Retrieval
-    - [ ] task retrieval
-    - [ ] tool retrieval
-  - [ ] Connector
-    - [ ] langchain-based ReAct mode
+  - [x] Vector Database & Retrieval
+    - [x] task retrieval
+    - [x] tool retrieval
+  - [x] Connector
+    - [x] langchain-based ReAct mode
  - [x] Text Embedding integration via sentencebert: faster vector loading
<br>