[feature](webui)<add config_webui for starting app>
Commit 2d726185f8 (parent fef3e85061)

.gitignore (2 changes)
@@ -15,3 +15,5 @@ tests
 *egg-info
 build
 dist
+package.sh
+local_config.json
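`local_config.json` is newly ignored because the WebUI config page writes per-machine overrides into it, and the updated `model_config.py` / `server_config.py` hunks below read it at import time. A minimal, hypothetical sketch of such a file (the keys are a subset of those the configs look up; the values are placeholders):

```bash
# Hypothetical local_config.json; values are placeholders, keys mirror the config hunks below.
cat > local_config.json <<'EOF'
{
  "API_BASE_URL": "https://api.openai.com/v1",
  "OPENAI_API_KEY": "sk-xxx",
  "EMBEDDING_ENGINE": "model",
  "EMBEDDING_MODEL": "text2vec-base",
  "DOCKER_SERVICE": true,
  "SANDBOX_DO_REMOTE": true
}
EOF
```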
							
								
								
									
README.md (63 changes)
@@ -123,60 +123,23 @@ cd codefuse-chatbot
 pip install -r requirements.txt
 ```
 
-2、基础配置
-
-```bash
-# 修改服务启动的基础配置
-cd configs
-cp model_config.py.example model_config.py
-cp server_config.py.example server_config.py
-
-# model_config#11~12 若需要使用openai接口,openai接口key
-os.environ["OPENAI_API_KEY"] = "sk-xxx"
-# 可自行替换自己需要的api_base_url
-os.environ["API_BASE_URL"] = "https://api.openai.com/v1"
-
-# vi model_config#LLM_MODEL 你需要选择的语言模型
-LLM_MODEL = "gpt-3.5-turbo"
-LLM_MODELs = ["gpt-3.5-turbo"]
-
-# vi model_config#EMBEDDING_MODEL 你需要选择的私有化向量模型
-EMBEDDING_ENGINE = 'model'
-EMBEDDING_MODEL = "text2vec-base"
-
-# vi model_config#embedding_model_dict 修改成你的本地路径,如果能直接连接huggingface则无需修改
-# 若模型地址为:
-model_dir: ~/codefuse-chatbot/embedding_models/shibing624/text2vec-base-chinese
-# 配置如下
-"text2vec-base": "shibing624/text2vec-base-chinese",
-
-# vi server_config#8~14, 推荐采用容器启动服务
-DOCKER_SERVICE = True
-# 是否采用容器沙箱
-SANDBOX_DO_REMOTE = True
-# 是否采用api服务来进行
-NO_REMOTE_API = True
-```
-
-3、启动服务
-
-默认只启动webui相关服务,未启动fastchat(可选)。
-```bash
-# 若需要支撑codellama-34b-int4模型,需要给fastchat打一个补丁
-# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
-# examples/llm_api.py#258 修改为 kwargs={"gptq_wbits": 4},
-
-# start llm-service(可选)
-python examples/llm_api.py
-```
-更多LLM接入方法见[更多细节...](sources/readme_docs/fastchat.md)
-<br>
-
+2、启动服务
 ```bash
 # 完成server_config.py配置后,可一键启动
 cd examples
-python start.py
+bash start.sh
+# 开始在页面进行配置即可
 ```
+<div align=center>
+  <img src="sources/docs_imgs/webui_config.png" alt="图片">
+</div>
+
+
+或者通过`start.py`进行启动[老版启动方式](sources/readme_docs/start.md)
+更多LLM接入方法见[更多细节...](sources/readme_docs/fastchat.md)
+<br>
+
+
 ## 贡献指南
 非常感谢您对 Codefuse 项目感兴趣,我们非常欢迎您对 Codefuse 项目的各种建议、意见(包括批评)、评论和贡献。
 
							
								
								
									
README_en.md (56 changes)
@@ -146,57 +146,23 @@ git lfs clone https://huggingface.co/THUDM/chatglm2-6b
 git lfs clone https://huggingface.co/shibing624/text2vec-base-chinese
 ```
 
-4. Basic Configuration
-
-```bash
-# Modify the basic configuration for service startup
-cd configs
-cp model_config.py.example model_config.py
-cp server_config.py.example server_config.py
-
-# model_config#11~12 If you need to use the openai interface, openai interface key
-os.environ["OPENAI_API_KEY"] = "sk-xxx"
-# You can replace the api_base_url yourself
-os.environ["API_BASE_URL"] = "https://api.openai.com/v1"
-
-# vi model_config#105 You need to choose the language model
-LLM_MODEL = "gpt-3.5-turbo"
-
-# vi model_config#43 You need to choose the vector model
-EMBEDDING_MODEL = "text2vec-base"
-
-# vi model_config#25 Modify to your local path, if you can directly connect to huggingface, no modification is needed
-"text2vec-base": "shibing624/text2vec-base-chinese",
-
-# vi server_config#8~14, it is recommended to start the service using containers.
-DOCKER_SERVICE = True
-# Whether to use container sandboxing is up to your specific requirements and preferences
-SANDBOX_DO_REMOTE = True
-# Whether to use api-service to use chatbot
-NO_REMOTE_API = True
-```
-
-5. Start the Service
-
-By default, only webui related services are started, and fastchat is not started (optional).
-```bash
-# if use codellama-34b-int4, you should replace fastchat's gptq.py
-# cp examples/gptq.py ~/site-packages/fastchat/modules/gptq.py
-# examples/llm_api.py#258 => kwargs={"gptq_wbits": 4},
-
-# start llm-service (optional)
-python examples/llm_api.py
-```
-More details about accessing LLM Moldes[More Details...](sources/readme_docs/fastchat.md)
-<br>
 
+4. Start the Service
 ```bash
 # After configuring server_config.py, you can start with just one click.
 cd examples
-bash start_webui.sh
+bash start.sh
+# you can configure your llm model and embedding model
 ```
+<div align=center>
+  <img src="sources/docs_imgs/webui_config.png" alt="图片">
+</div>
 
-## 贡献指南
+Or start with `python start.py` ([old startup method](sources/readme_docs/start-en.md))
+More details about accessing LLM models: [More Details...](sources/readme_docs/fastchat.md)
+<br>
+
+## Contribution
 Thank you for your interest in the Codefuse project. We warmly welcome any suggestions, opinions (including criticisms), comments, and contributions to the Codefuse project.
 
 Your suggestions, opinions, and comments on Codefuse can be directly submitted through GitHub Issues.
@@ -1,5 +1,6 @@
 import os
 import platform
+from loguru import logger
 
 system_name = platform.system()
 executable_path = os.getcwd()
@@ -7,8 +8,8 @@ executable_path = os.getcwd()
 # 日志存储路径
 LOG_PATH = os.environ.get("LOG_PATH", None) or os.path.join(executable_path, "logs")
 
-# 知识库默认存储路径
-SOURCE_PATH = os.environ.get("SOURCE_PATH", None) or os.path.join(executable_path, "sources")
+# # 知识库默认存储路径
+# SOURCE_PATH = os.environ.get("SOURCE_PATH", None) or os.path.join(executable_path, "sources")
 
 # 知识库默认存储路径
 KB_ROOT_PATH = os.environ.get("KB_ROOT_PATH", None) or os.path.join(executable_path, "knowledge_base")
@@ -16,8 +17,8 @@ KB_ROOT_PATH = os.environ.get("KB_ROOT_PATH", None) or os.path.join(executable_p
 # 代码库默认存储路径
 CB_ROOT_PATH = os.environ.get("CB_ROOT_PATH", None) or os.path.join(executable_path, "code_base")
 
-# nltk 模型存储路径
-NLTK_DATA_PATH = os.environ.get("NLTK_DATA_PATH", None) or os.path.join(executable_path, "nltk_data")
+# # nltk 模型存储路径
+# NLTK_DATA_PATH = os.environ.get("NLTK_DATA_PATH", None) or os.path.join(executable_path, "nltk_data")
 
 # 代码存储路径
 JUPYTER_WORK_PATH = os.environ.get("JUPYTER_WORK_PATH", None) or os.path.join(executable_path, "jupyter_work")
@@ -31,8 +32,8 @@ NEBULA_PATH = os.environ.get("NEBULA_PATH", None) or os.path.join(executable_pat
 # CHROMA 存储路径
 CHROMA_PERSISTENT_PATH = os.environ.get("CHROMA_PERSISTENT_PATH", None) or os.path.join(executable_path, "data/chroma_data")
 
-for _path in [LOG_PATH, SOURCE_PATH, KB_ROOT_PATH, CB_ROOT_PATH, NLTK_DATA_PATH, JUPYTER_WORK_PATH, WEB_CRAWL_PATH, NEBULA_PATH, CHROMA_PERSISTENT_PATH]:
-    if not os.path.exists(_path):
+for _path in [LOG_PATH, KB_ROOT_PATH, CB_ROOT_PATH, JUPYTER_WORK_PATH, WEB_CRAWL_PATH, NEBULA_PATH, CHROMA_PERSISTENT_PATH]:
+    if not os.path.exists(_path) and int(os.environ.get("do_create_dir", True)):
         os.makedirs(_path, exist_ok=True)
 
 # 数据库默认存储路径。
@@ -101,6 +101,7 @@ class CodeBaseHandler:
 
         # get KG info
         if self.nh:
+            time.sleep(10)  # avoid querying nebula before its status update completes
             stat = self.nh.get_stat()
             vertices_num, edges_num = stat['vertices'], stat['edges']
         else:
@@ -310,7 +310,8 @@ class LocalMemoryManager(BaseMemoryManager):
         # 
         save_to_json_file(memory_messages, file_path)
 
-    def load(self, load_dir: str = "./") -> Memory:
+    def load(self, load_dir: str = None) -> Memory:
+        load_dir = load_dir or self.kb_root_path
         file_path = os.path.join(load_dir, f"{self.user_name}/{self.unique_name}/{self.memory_type}/converation.jsonl")
         uuid_name = "_".join([self.user_name, self.unique_name, self.memory_type])
 
@@ -398,6 +399,7 @@ class LocalMemoryManager(BaseMemoryManager):
     def embedding_retrieval(self, text: str, top_k=1, score_threshold=1.0, user_name: str = "default", **kwargs) -> List[Message]:
         if text is None: return []
         vb_name = f"{user_name}/{self.unique_name}/{self.memory_type}"
+        # logger.debug(f"vb_name={vb_name}")
         vb = KBServiceFactory.get_service(vb_name, "faiss", self.embed_config, self.kb_root_path)
         docs = vb.search_docs(text, top_k=top_k, score_threshold=score_threshold)
         return [Message(**doc.metadata) for doc, score in docs]
@@ -405,11 +407,13 @@ class LocalMemoryManager(BaseMemoryManager):
     def text_retrieval(self, text: str, user_name: str = "default", **kwargs) -> List[Message]:
         if text is None: return []
         uuid_name = "_".join([user_name, self.unique_name, self.memory_type])
+        # logger.debug(f"uuid_name={uuid_name}")
         return self._text_retrieval_from_cache(self.recall_memory_dict[uuid_name].messages, text, score_threshold=0.3, topK=5, **kwargs)
 
     def datetime_retrieval(self, datetime: str, text: str = None, n: int = 5, user_name: str = "default", **kwargs) -> List[Message]:
         if datetime is None: return []
         uuid_name = "_".join([user_name, self.unique_name, self.memory_type])
+        # logger.debug(f"uuid_name={uuid_name}")
         return self._datetime_retrieval_from_cache(self.recall_memory_dict[uuid_name].messages, datetime, text, n, **kwargs)
 
     def _text_retrieval_from_cache(self, messages: List[Message], text: str = None, score_threshold=0.3, topK=5, tag_topK=5, **kwargs) -> List[Message]:
@@ -7,7 +7,7 @@ from websocket import create_connection
 from websockets.client import WebSocketClientProtocol, ClientConnection
 from websockets.exceptions import ConnectionClosedError
 
-# from configs.model_config import JUPYTER_WORK_PATH
+from coagent.base_configs.env_config import JUPYTER_WORK_PATH
 from .basebox import BaseBox, CodeBoxResponse, CodeBoxStatus
 
 
@@ -21,7 +21,7 @@ class PyCodeBox(BaseBox):
             remote_ip: str = "http://127.0.0.1",
             remote_port: str = "5050",
             token: str = "mytoken",
-            jupyter_work_path: str = "",
+            jupyter_work_path: str = JUPYTER_WORK_PATH,
             do_code_exe: bool = False,
             do_remote: bool = False,
             do_check_net: bool = True,
@@ -30,7 +30,6 @@ class PyCodeBox(BaseBox):
         super().__init__(remote_url, remote_ip, remote_port, token, do_code_exe, do_remote)
         self.enter_status = True
         self.do_check_net = do_check_net
-        self.use_stop = use_stop
         self.jupyter_work_path = jupyter_work_path
         # asyncio.run(self.astart())
         self.start()
@@ -70,7 +70,8 @@ def encode2md(data, md_format):
     return md_dict
 
 
-method_text_md = '''> {function_name}
+method_text_md = '''
+> {function_name}
 
 | Column Name | Content |
 |-----------------|-----------------|
@@ -79,7 +80,8 @@ method_text_md = '''> {function_name}
 | Return type   | {ReturnType} |
 '''
 
-class_text_md = '''> {code_path}
+class_text_md = '''
+> {code_path}
 
 Bases: {ClassBase}
 
@@ -22,11 +22,14 @@ JUPYTER_WORK_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath
 WEB_CRAWL_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "knowledge_base")
 # NEBULA_DATA存储路径
 NEBULA_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data/nebula_data")
-
+# 语言模型存储路径
+LOCAL_LLM_MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "llm_models")
+# 向量模型存储路径
+LOCAL_EM_MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "embedding_models")
 # CHROMA 存储路径
 CHROMA_PERSISTENT_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data/chroma_data")
 
-for _path in [LOG_PATH, SOURCE_PATH, KB_ROOT_PATH, CB_ROOT_PATH, NLTK_DATA_PATH, JUPYTER_WORK_PATH, WEB_CRAWL_PATH, NEBULA_PATH, CHROMA_PERSISTENT_PATH]:
+for _path in [LOG_PATH, SOURCE_PATH, KB_ROOT_PATH, CB_ROOT_PATH, NLTK_DATA_PATH, JUPYTER_WORK_PATH, WEB_CRAWL_PATH, NEBULA_PATH, CHROMA_PERSISTENT_PATH, LOCAL_LLM_MODEL_DIR, LOCAL_EM_MODEL_DIR]:
     if not os.path.exists(_path):
         os.makedirs(_path, exist_ok=True)
 
@@ -4,6 +4,7 @@ import logging
 import torch
 import openai
 import base64
+import json
 from .utils import is_running_in_docker
 from .default_config import *
 # 日志格式
@@ -29,26 +30,35 @@ try:
     client.visit_domain = os.environ.get("visit_domain")
     client.visit_biz = os.environ.get("visit_biz")
     client.visit_biz_line = os.environ.get("visit_biz_line")
-except:
+except Exception as e:
+    OPENAI_API_BASE = "https://api.openai.com/v1"
+    logger.error(e)
     pass
 
+
+try:
+    with open("./local_config.json", "r") as f:
+        update_config = json.load(f)
+except:
+    update_config = {}
+
+
 # add your openai key
-OPENAI_API_BASE = "https://api.openai.com/v1"
-os.environ["API_BASE_URL"] = OPENAI_API_BASE
-os.environ["OPENAI_API_KEY"] = "sk-xx"
-openai.api_key = "sk-xx"
+os.environ["API_BASE_URL"] = os.environ.get("API_BASE_URL") or update_config.get("API_BASE_URL") or OPENAI_API_BASE
+os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY") or update_config.get("OPENAI_API_KEY") or "sk-xx"
+openai.api_key = os.environ["OPENAI_API_KEY"]
 # os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:13659"
-os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or "socks5://127.0.0.1:13659"
+os.environ["DUCKDUCKGO_PROXY"] = os.environ.get("DUCKDUCKGO_PROXY") or update_config.get("DUCKDUCKGO_PROXY") or "socks5h://127.0.0.1:13659"
 # ignore if you dont's use baidu_ocr_api
 os.environ["BAIDU_OCR_API_KEY"] = "xx"
 os.environ["BAIDU_OCR_SECRET_KEY"] = "xx"
 
 os.environ["log_verbose"] = "2"
 # LLM 名称
-EMBEDDING_ENGINE = 'model'  # openai or model
-EMBEDDING_MODEL = "text2vec-base"
-LLM_MODEL = "gpt-3.5-turbo"
-LLM_MODELs = ["gpt-3.5-turbo"]
+EMBEDDING_ENGINE = os.environ.get("EMBEDDING_ENGINE") or update_config.get("EMBEDDING_ENGINE") or 'model'  # openai or model
+EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL") or update_config.get("EMBEDDING_MODEL") or "text2vec-base"
+LLM_MODEL = os.environ.get("LLM_MODEL") or "gpt-3.5-turbo"
+LLM_MODELs = [LLM_MODEL]
 USE_FASTCHAT = "gpt" not in LLM_MODEL # 判断是否进行fastchat
 
 # LLM 运行设备
@@ -57,10 +67,12 @@ LLM_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mp
 # 在以下字典中修改属性值,以指定本地embedding模型存储位置
 # 如将 "text2vec": "GanymedeNil/text2vec-large-chinese" 修改为 "text2vec": "User/Downloads/text2vec-large-chinese"
 # 此处请写绝对路径
-embedding_model_dict = {
+embedding_model_dict = json.loads(os.environ.get("embedding_model_dict")) if os.environ.get("embedding_model_dict") else {}
+embedding_model_dict = embedding_model_dict or update_config.get("EMBEDDING_MODEL")
+embedding_model_dict = embedding_model_dict or {
     "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
     "ernie-base": "nghuyong/ernie-3.0-base-zh",
-    "text2vec-base": "shibing624/text2vec-base-chinese",
+    "text2vec-base": "text2vec-base-chinese",
     "text2vec": "GanymedeNil/text2vec-large-chinese",
     "text2vec-paraphrase": "shibing624/text2vec-base-chinese-paraphrase",
     "text2vec-sentence": "shibing624/text2vec-base-chinese-sentence",
@@ -74,31 +86,35 @@ embedding_model_dict = {
 }
 
 
-LOCAL_MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "embedding_models")
-embedding_model_dict = {k: f"/home/user/chatbot/embedding_models/{v}" if is_running_in_docker() else f"{LOCAL_MODEL_DIR}/{v}" for k, v in embedding_model_dict.items()}
+# LOCAL_MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "embedding_models")
+# embedding_model_dict = {k: f"/home/user/chatbot/embedding_models/{v}" if is_running_in_docker() else f"{LOCAL_MODEL_DIR}/{v}" for k, v in embedding_model_dict.items()}
 
 # Embedding 模型运行设备
 EMBEDDING_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
 
-ONLINE_LLM_MODEL = {
+ONLINE_LLM_MODEL = json.loads(os.environ.get("ONLINE_LLM_MODEL")) if os.environ.get("ONLINE_LLM_MODEL") else {}
+ONLINE_LLM_MODEL = ONLINE_LLM_MODEL or update_config.get("ONLINE_LLM_MODEL")
+ONLINE_LLM_MODEL = ONLINE_LLM_MODEL or {
     # 线上模型。请在server_config中为每个在线API设置不同的端口
 
     "openai-api": {
         "model_name": "gpt-3.5-turbo",
-        "api_base_url": "https://api.openai.com/v1",
+        "api_base_url": OPENAI_API_BASE, # "https://api.openai.com/v1",
         "api_key": "",
         "openai_proxy": "",
     },
     "example": {
-        "version": "gpt-3.5",  # 采用openai接口做示例
-        "api_base_url": "https://api.openai.com/v1",
+        "version": "gpt-3.5-turbo",  # 采用openai接口做示例
+        "api_base_url": OPENAI_API_BASE, # "https://api.openai.com/v1",
        "api_key": "",
         "provider": "ExampleWorker",
     },
 }
 
 # 建议使用chat模型,不要使用base,无法获取正确输出
-llm_model_dict = {
+llm_model_dict = json.loads(os.environ.get("llm_model_dict")) if os.environ.get("llm_model_dict") else {}
+llm_model_dict = llm_model_dict or update_config.get("llm_model_dict")
+llm_model_dict = llm_model_dict or {
     "chatglm-6b": {
         "local_model_path": "THUDM/chatglm-6b",
         "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
@@ -147,7 +163,9 @@ llm_model_dict = {
 }
 
 # 建议使用chat模型,不要使用base,无法获取正确输出
-VLLM_MODEL_DICT = {
+VLLM_MODEL_DICT = json.loads(os.environ.get("VLLM_MODEL_DICT")) if os.environ.get("VLLM_MODEL_DICT") else {}
+VLLM_MODEL_DICT = VLLM_MODEL_DICT or update_config.get("VLLM_MODEL_DICT")
+VLLM_MODEL_DICT = VLLM_MODEL_DICT or {
  'chatglm2-6b':  "THUDM/chatglm-6b",
  }
 # 以下模型经过测试可接入,配置仿照上述即可
@@ -157,21 +175,21 @@ VLLM_MODEL_DICT = {
 # 'chatglm3-6b-base', 'Qwen-72B-Chat-Int4'
 
 
-LOCAL_LLM_MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "llm_models")
-# 模型路径重置
-llm_model_dict_c = {}
-for k, v in llm_model_dict.items():
-    v_c = {}
-    for kk, vv in v.items():
-        if k=="local_model_path":
-            v_c[kk] = f"/home/user/chatbot/llm_models/{vv}" if is_running_in_docker() else f"{LOCAL_LLM_MODEL_DIR}/{vv}" 
-        else:
-            v_c[kk] = vv
-    llm_model_dict_c[k] = v_c
+# LOCAL_LLM_MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "llm_models")
+# # 模型路径重置
+# llm_model_dict_c = {}
+# for k, v in llm_model_dict.items():
+#     v_c = {}
+#     for kk, vv in v.items():
+#         if k=="local_model_path":
+#             v_c[kk] = f"/home/user/chatbot/llm_models/{vv}" if is_running_in_docker() else f"{LOCAL_LLM_MODEL_DIR}/{vv}" 
+#         else:
+#             v_c[kk] = vv
+#     llm_model_dict_c[k] = v_c
 
-llm_model_dict = llm_model_dict_c
-# 
-VLLM_MODEL_DICT_c = {}
-for k, v in VLLM_MODEL_DICT.items():
-    VLLM_MODEL_DICT_c[k] = f"/home/user/chatbot/llm_models/{v}" if is_running_in_docker() else f"{LOCAL_LLM_MODEL_DIR}/{v}" 
-VLLM_MODEL_DICT = VLLM_MODEL_DICT_c
+# llm_model_dict = llm_model_dict_c
+# # 
+# VLLM_MODEL_DICT_c = {}
+# for k, v in VLLM_MODEL_DICT.items():
+#     VLLM_MODEL_DICT_c[k] = f"/home/user/chatbot/llm_models/{v}" if is_running_in_docker() else f"{LOCAL_LLM_MODEL_DIR}/{v}" 
+# VLLM_MODEL_DICT = VLLM_MODEL_DICT_c
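With this change, most model settings can be injected at runtime instead of editing the file. A minimal sketch using environment variables (placeholder values; dict-valued settings are passed as JSON strings, mirroring the `json.loads` calls in the hunk above):

```bash
# Hypothetical overrides, read by model_config.py at import time; values are placeholders.
export OPENAI_API_KEY="sk-xxx"
export API_BASE_URL="https://api.openai.com/v1"
export LLM_MODEL="gpt-3.5-turbo"
export EMBEDDING_ENGINE="model"
export EMBEDDING_MODEL="text2vec-base"
# dict-valued settings are JSON strings
export embedding_model_dict='{"text2vec-base": "/path/to/text2vec-base-chinese"}'
```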
@@ -1,13 +1,25 @@
 from .model_config import LLM_MODEL, LLM_DEVICE
-import os
+import os, json
+
+try:
+    with open("./local_config.json", "r") as f:
+        update_config = json.load(f)
+except:
+    update_config = {}
 
 # API 是否开启跨域,默认为False,如果需要开启,请设置为True
 # is open cross domain
 OPEN_CROSS_DOMAIN = False
 # 是否用容器来启动服务
-DOCKER_SERVICE = True
+try:
+    DOCKER_SERVICE = json.loads(os.environ["DOCKER_SERVICE"]) or update_config.get("DOCKER_SERVICE") or False
+except:
+    DOCKER_SERVICE = True
 # 是否采用容器沙箱
-SANDBOX_DO_REMOTE = True
+try:
+    SANDBOX_DO_REMOTE = json.loads(os.environ["SANDBOX_DO_REMOTE"]) or update_config.get("SANDBOX_DO_REMOTE") or False
+except:
+    SANDBOX_DO_REMOTE = True
 # 是否采用api服务来进行
 NO_REMOTE_API = True
 # 各服务器默认绑定host
@@ -61,7 +73,7 @@ NEBULA_GRAPH_SERVER = {
 # sandbox api server
 SANDBOX_CONTRAINER_NAME = "devopsgpt_sandbox"
 SANDBOX_IMAGE_NAME = "devopsgpt:py39"
-SANDBOX_HOST = os.environ.get("SANDBOX_HOST") or DEFAULT_BIND_HOST # "172.25.0.3"
+SANDBOX_HOST = os.environ.get("SANDBOX_HOST") or update_config.get("SANDBOX_HOST") or DEFAULT_BIND_HOST # "172.25.0.3"
 SANDBOX_SERVER = {
     "host": f"http://{SANDBOX_HOST}",
     "port": 5050,
@@ -73,7 +85,10 @@ SANDBOX_SERVER = {
 # fastchat model_worker server
 # 这些模型必须是在model_config.llm_model_dict中正确配置的。
 # 在启动startup.py时,可用通过`--model-worker --model-name xxxx`指定模型,不指定则为LLM_MODEL
-FSCHAT_MODEL_WORKERS = {
+# 建议使用chat模型,不要使用base,无法获取正确输出
+FSCHAT_MODEL_WORKERS = json.loads(os.environ.get("FSCHAT_MODEL_WORKERS")) if os.environ.get("FSCHAT_MODEL_WORKERS") else {}
+FSCHAT_MODEL_WORKERS = FSCHAT_MODEL_WORKERS or update_config.get("FSCHAT_MODEL_WORKERS")
+FSCHAT_MODEL_WORKERS = FSCHAT_MODEL_WORKERS or {
     "default": {
         "host": DEFAULT_BIND_HOST,
         "port": 20002,
@@ -117,7 +132,9 @@ FSCHAT_MODEL_WORKERS = {
     'chatglm3-6b-32k': {'host': DEFAULT_BIND_HOST, 'port': 20018},
     'chatglm3-6b-base': {'host': DEFAULT_BIND_HOST, 'port': 20019},
     'Qwen-72B-Chat-Int4': {'host': DEFAULT_BIND_HOST, 'port': 20020},
-    'gpt-3.5-turbo': {'host': DEFAULT_BIND_HOST, 'port': 20021}
+    'gpt-3.5-turbo': {'host': DEFAULT_BIND_HOST, 'port': 20021},
+    'example': {'host': DEFAULT_BIND_HOST, 'port': 20022},
+    'openai-api': {'host': DEFAULT_BIND_HOST, 'port': 20023}
 }
 # fastchat multi model worker server
 FSCHAT_MULTI_MODEL_WORKERS = {
@@ -41,24 +41,16 @@ embed_config = EmbedConfig(
 
 
 # delete codebase
-codebase_name = 'client_local'
+codebase_name = 'client_nebula'
 code_path = '/Users/bingxu/Desktop/工作/大模型/chatbot/test_code_repo/client'
 code_path = "D://chromeDownloads/devopschat-bot/client_v2/client"
 use_nh = True
-# cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH,
-#                       llm_config=llm_config, embed_config=embed_config)
+do_interpret = False
 cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH,
                       llm_config=llm_config, embed_config=embed_config)
 cbh.delete_codebase(codebase_name=codebase_name)
 
-
 # initialize codebase
-codebase_name = 'client_local'
-code_path = '/Users/bingxu/Desktop/工作/大模型/chatbot/test_code_repo/client'
-code_path = "D://chromeDownloads/devopschat-bot/client_v2/client"
-code_path = "/home/user/client"
-use_nh = True
-do_interpret = True
 cbh = CodeBaseHandler(codebase_name, code_path, crawl_type='dir', use_nh=use_nh, local_graph_path=CB_ROOT_PATH,
                       llm_config=llm_config, embed_config=embed_config)
 cbh.import_code(do_interpret=do_interpret)
@@ -78,25 +70,25 @@ phase = BasePhase(
 
 ## 需要启动容器中的nebula,采用use_nh=True来构建代码库,是可以通过cypher来查询
 # round-1
-# query_content = "代码一共有多少类"
-# query = Message(
-#     role_name="human", role_type="user",
-#     role_content=query_content, input_query=query_content, origin_query=query_content,
-#     code_engine_name="client_1", score_threshold=1.0, top_k=3, cb_search_type="cypher"
-#     )
-#
-# output_message1, _ = phase.step(query)
-# print(output_message1)
+query_content = "代码一共有多少类"
+query = Message(
+    role_name="human", role_type="user",
+    role_content=query_content, input_query=query_content, origin_query=query_content,
+    code_engine_name="client_1", score_threshold=1.0, top_k=3, cb_search_type="cypher"
+    )
+
+output_message1, _ = phase.step(query)
+print(output_message1)
 
 # round-2
-# query_content = "代码库里有哪些函数,返回5个就行"
-# query = Message(
-#     role_name="human", role_type="user",
-#     role_content=query_content, input_query=query_content, origin_query=query_content,
-#     code_engine_name="client_1", score_threshold=1.0, top_k=3, cb_search_type="cypher"
-#     )
-# output_message2, _ = phase.step(query)
-# print(output_message2)
+query_content = "代码库里有哪些函数,返回5个就行"
+query = Message(
+    role_name="human", role_type="user",
+    role_content=query_content, input_query=query_content, origin_query=query_content,
+    code_engine_name="client_1", score_threshold=1.0, top_k=3, cb_search_type="cypher"
+    )
+output_message2, _ = phase.step(query)
+print(output_message2)
 
 
 # round-3
@@ -7,7 +7,6 @@ sys.path.append(src_dir)
 
 from configs.model_config import KB_ROOT_PATH, JUPYTER_WORK_PATH
 from configs.server_config import SANDBOX_SERVER
-from coagent.tools import toLangchainTools, TOOL_DICT, TOOL_SETS
 from coagent.llm_models.llm_config import EmbedConfig, LLMConfig
 
 from coagent.connector.phase import BasePhase
@@ -16,3 +16,12 @@ from .baichuan import BaiChuanWorker
 from .azure import AzureWorker
 from .tiangong import TianGongWorker
 from .openai import ExampleWorker
+
+
+IMPORT_MODEL_WORKERS = [
+    ChatGLMWorker, MiniMaxWorker, XingHuoWorker, QianFanWorker, FangZhouWorker,
+    QwenWorker, BaiChuanWorker, AzureWorker, TianGongWorker, ExampleWorker
+]
+
+MODEL_WORKER_SETS = [tool.__name__ for tool in IMPORT_MODEL_WORKERS]
+
@@ -1,6 +1,5 @@
 from fastchat.conversation import Conversation
-from configs.model_config import LOG_PATH
-# from coagent.base_configs.env_config import LOG_PATH
+from configs.default_config import LOG_PATH
 import fastchat.constants
 fastchat.constants.LOGDIR = LOG_PATH
 from fastchat.serve.base_model_worker import BaseModelWorker
@@ -1,4 +1,4 @@
-import docker, sys, os, time, requests, psutil
+import docker, sys, os, time, requests, psutil, json
 import subprocess
 from docker.types import Mount, DeviceRequest
 from loguru import logger
@@ -25,9 +25,6 @@ def check_process(content: str, lang: str = None, do_stop=False):
     '''process-not-exist is true, process-exist is false'''
     for process in psutil.process_iter(["pid", "name", "cmdline"]):
         # check process name contains "jupyter" and port=xx
-
-        # if f"port={SANDBOX_SERVER['port']}" in str(process.info["cmdline"]).lower() and \
-        #     "jupyter" in process.info['name'].lower():
         if content in str(process.info["cmdline"]).lower():
             logger.info(f"content, {process.info}")
             # 关闭进程
@@ -106,7 +103,7 @@ def start_sandbox_service(network_name ='my_network'):
         )
     mounts = [mount]
     # 沙盒的启动与服务的启动是独立的
-    if SANDBOX_SERVER["do_remote"]:
+    if SANDBOX_DO_REMOTE:
         client = docker.from_env()
         networks = client.networks.list()
         if any([network_name==i.attrs["Name"] for i in networks]):
@@ -159,18 +156,6 @@ def start_api_service(sandbox_host=DEFAULT_BIND_HOST):
             target='/home/user/chatbot/',
             read_only=False  # 如果需要只读访问,将此选项设置为True
         )
-        # mount_database = Mount(
-        #     type='bind',
-        #     source=os.path.join(src_dir, "knowledge_base"),
-        #     target='/home/user/knowledge_base/',
-        #     read_only=False  # 如果需要只读访问,将此选项设置为True
-        # )
-        # mount_code_database = Mount(
-        #     type='bind',
-        #     source=os.path.join(src_dir, "code_base"),
-        #     target='/home/user/code_base/',
-        #     read_only=False  # 如果需要只读访问,将此选项设置为True
-        # )
         ports={
                 f"{API_SERVER['docker_port']}/tcp": f"{API_SERVER['port']}/tcp", 
                 f"{WEBUI_SERVER['docker_port']}/tcp": f"{WEBUI_SERVER['port']}/tcp",
@@ -208,6 +193,8 @@ def start_api_service(sandbox_host=DEFAULT_BIND_HOST):
         if check_docker(client, CONTRAINER_NAME, do_stop=True):
             container = start_docker(client, script_shs, ports, IMAGE_NAME, CONTRAINER_NAME, mounts, network=network_name)
 
+        logger.info("You can open http://localhost:8501 to use the chatbot!")
+
     else:
         logger.info("start local service")
         # 关闭之前启动的docker 服务
@@ -234,12 +221,17 @@ def start_api_service(sandbox_host=DEFAULT_BIND_HOST):
         
         subprocess.Popen(webui_sh, shell=True)
 
+        logger.info("You can open http://localhost:8501 to use the chatbot!")
 
 
-if __name__ == "__main__":
+def start_main():
+    global SANDBOX_DO_REMOTE, DOCKER_SERVICE
+    SANDBOX_DO_REMOTE = SANDBOX_DO_REMOTE if os.environ.get("SANDBOX_DO_REMOTE") is None else json.loads(os.environ.get("SANDBOX_DO_REMOTE"))
+    DOCKER_SERVICE = DOCKER_SERVICE if os.environ.get("DOCKER_SERVICE") is None else json.loads(os.environ.get("DOCKER_SERVICE"))
+
     start_sandbox_service()
     sandbox_host = DEFAULT_BIND_HOST
-    if SANDBOX_SERVER["do_remote"]:
+    if SANDBOX_DO_REMOTE:
         client = docker.from_env()
         containers = client.containers.list(all=True)
 
@@ -252,3 +244,5 @@
     start_api_service(sandbox_host)
 
 
+if __name__ == "__main__":
+    start_main()
							
								
								
									
examples/start.sh (new file, 7 lines)
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+
+cp ../configs/model_config.py.example ../configs/model_config.py
+cp ../configs/server_config.py.example ../configs/server_config.py
+
+streamlit run webui_config.py --server.port 8510
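A typical invocation of the new script (the 8510 port comes from the script itself; the exact URL to open depends on where streamlit is serving):

```bash
cd examples
bash start.sh
# then open the configuration page served by streamlit, e.g. http://localhost:8510
```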
@@ -17,14 +17,20 @@ try:
 except:
     client = None
 
-# 
-check_docker(client, SANDBOX_CONTRAINER_NAME, do_stop=True, )
-check_process(f"port={SANDBOX_SERVER['port']}", do_stop=True)
-check_process(f"port=5050", do_stop=True)
 
-# 
-check_docker(client, CONTRAINER_NAME, do_stop=True, )
-check_process("api.py", do_stop=True)
-check_process("sdfile_api.py", do_stop=True)
-check_process("llm_api.py", do_stop=True)
-check_process("webui.py", do_stop=True)
+def stop_main():
+    # 
+    check_docker(client, SANDBOX_CONTRAINER_NAME, do_stop=True, )
+    check_process(f"port={SANDBOX_SERVER['port']}", do_stop=True)
+    check_process(f"port=5050", do_stop=True)
+
+    # 
+    check_docker(client, CONTRAINER_NAME, do_stop=True, )
+    check_process("api.py", do_stop=True)
+    check_process("sdfile_api.py", do_stop=True)
+    check_process("llm_api.py", do_stop=True)
+    check_process("webui.py", do_stop=True)
+
+
+if __name__ == "__main__":
+    stop_main()
@@ -357,7 +357,7 @@ def knowledge_page(
                 empty.progress(0.0, "")
                 for d in api.recreate_vector_store(
                     kb, vs_type=default_vs_type, embed_model=embedding_model, embedding_device=EMBEDDING_DEVICE,
-                      embed_model_path=embedding_model_dict[EMBEDDING_MODEL], embed_engine=EMBEDDING_ENGINE,
+                      embed_model_path=embedding_model_dict[embedding_model], embed_engine=EMBEDDING_ENGINE,
                       api_key=llm_model_dict[LLM_MODEL]["api_key"],
                       api_base_url=llm_model_dict[LLM_MODEL]["api_base_url"],
                     ):
							
								
								
									
examples/webui_config.py (new file, 208 lines)
							| @ -0,0 +1,208 @@ | |||||||
|  | import streamlit as st | ||||||
|  | import docker | ||||||
|  | import torch, os, sys, json | ||||||
|  | from loguru import logger  | ||||||
|  | 
 | ||||||
|  | src_dir = os.path.join( | ||||||
|  |     os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | ||||||
|  | ) | ||||||
|  | sys.path.append(src_dir) | ||||||
|  | from configs.default_config import * | ||||||
|  | 
 | ||||||
|  | import platform | ||||||
|  | system_name = platform.system() | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | VERSION = "v0.1.0" | ||||||
|  | 
 | ||||||
|  | MODEL_WORKER_SETS = [ | ||||||
|  |     "ChatGLMWorker", "MiniMaxWorker", "XingHuoWorker", "QianFanWorker", "FangZhouWorker", | ||||||
|  |     "QwenWorker", "BaiChuanWorker", "AzureWorker", "TianGongWorker", "ExampleWorker" | ||||||
|  | ] | ||||||
|  | 
 | ||||||
|  | openai_models = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-4"] | ||||||
|  | embedding_models = ["openai"] | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | st.write("启动配置页面!") | ||||||
|  | 
 | ||||||
|  | st.write("如果你要使用语言模型,请将LLM放到 ~/Codefuse-chatbot/llm_models") | ||||||
|  | 
 | ||||||
|  | st.write("如果你要使用向量模型,请将向量模型放到 ~/Codefuse-chatbot/embedding_models") | ||||||
|  | 
 | ||||||
|  | with st.container(): | ||||||
|  | 
 | ||||||
|  |     col1, col2 = st.columns(2) | ||||||
|  |     with col1.container(): | ||||||
|  |         llm_model_name = st.selectbox('LLM Model Name', openai_models + [i for i in os.listdir(LOCAL_LLM_MODEL_DIR) if os.path.isdir(os.path.join(LOCAL_LLM_MODEL_DIR, i))]) | ||||||
|  | 
 | ||||||
|  |         llm_apikey = st.text_input('填写 LLM API KEY', 'EMPTY') | ||||||
|  |         llm_apiurl = st.text_input('填写 LLM API URL', 'http://localhost:8888/v1') | ||||||
|  | 
 | ||||||
|  |         llm_engine = st.selectbox('选择哪个llm引擎', ["online", "fastchat", "fastchat-vllm"]) | ||||||
|  |         llm_model_port = st.text_input('LLM Model Port,非fastchat模式可无视', '20006') | ||||||
|  |         llm_provider_option = st.selectbox('选择哪个online模型加载器,非online可无视', ["openai"] + MODEL_WORKER_SETS) | ||||||
|  | 
 | ||||||
|  |         if llm_engine == "online" and llm_provider_option == "openai": | ||||||
|  |             try: | ||||||
|  |                 from zdatafront import OPENAI_API_BASE | ||||||
|  |             except: | ||||||
|  |                 OPENAI_API_BASE = "https://api.openai.com/v1" | ||||||
|  |             llm_apiurl = OPENAI_API_BASE | ||||||
|  | 
 | ||||||
|  |         device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu" | ||||||
|  | 
 | ||||||
|  |         FSCHAT_MODEL_WORKERS = { | ||||||
|  |             llm_model_name: { | ||||||
|  |                 'host': "127.0.0.1", 'port': llm_model_port, | ||||||
|  |                 "device": device, | ||||||
|  |                 # todo: 多卡加载需要配置的参数 | ||||||
|  |                 "gpus": None, | ||||||
|  |                 "numgpus": 1,}, | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |         ONLINE_LLM_MODEL, llm_model_dict, VLLM_MODEL_DICT = {}, {}, {} | ||||||
|  |         if llm_engine == "online": | ||||||
|  |             ONLINE_LLM_MODEL = { | ||||||
|  |                 llm_model_name: { | ||||||
|  |                     "model_name": llm_model_name, | ||||||
|  |                     "version": llm_model_name, | ||||||
|  |                     "api_base_url": llm_apiurl, # "https://api.openai.com/v1", | ||||||
|  |                     "api_key": llm_apikey, | ||||||
|  |                     "openai_proxy": "", | ||||||
|  |                     "provider": llm_provider_option | ||||||
|  |                 }, | ||||||
|  |             } | ||||||
|  |          | ||||||
|  |         if llm_engine == "fastchat": | ||||||
|  |             llm_model_dict = { | ||||||
|  |                 llm_model_name: { | ||||||
|  |                     "local_model_path": llm_model_name, | ||||||
|  |                     "api_base_url": llm_apiurl,  # "name"修改为fastchat服务中的"api_base_url" | ||||||
|  |                     "api_key": llm_apikey | ||||||
|  |                     }} | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |         if llm_engine == "fastchat-vllm": | ||||||
|  |             VLLM_MODEL_DICT = { | ||||||
|  |                 llm_model_name: { | ||||||
|  |                     "local_model_path": llm_model_name, | ||||||
|  |                     "api_base_url": llm_apiurl,  # "name"修改为fastchat服务中的"api_base_url" | ||||||
|  |                     "api_key": llm_apikey | ||||||
|  |                     } | ||||||
|  |             } | ||||||
|  |             llm_model_dict = { | ||||||
|  |                 llm_model_name: { | ||||||
|  |                     "local_model_path": llm_model_name, | ||||||
|  |                     "api_base_url": llm_apiurl,  # "name"修改为fastchat服务中的"api_base_url" | ||||||
|  |                     "api_key": llm_apikey | ||||||
|  |                     }} | ||||||
|  |              | ||||||
|  | 
 | ||||||
|  |     with col2.container(): | ||||||
|  |         em_model_name = st.selectbox('Embedding Model Name', [i for i in os.listdir(LOCAL_EM_MODEL_DIR) if os.path.isdir(os.path.join(LOCAL_EM_MODEL_DIR, i))] + embedding_models) | ||||||
|  |         em_engine = st.selectbox('选择哪个embedding引擎', ["model", "openai"]) | ||||||
|  |         device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu" | ||||||
|  |         embedding_model_dict = {em_model_name: em_model_name} | ||||||
|  |         # em_apikey = st.text_input('Embedding API KEY', '') | ||||||
|  |         # em_apiurl = st.text_input('Embedding API URL', '') | ||||||
|  | 
 | ||||||
|  | #  | ||||||
|  | try: | ||||||
|  |     client = docker.from_env() | ||||||
|  |     has_docker = True | ||||||
|  | except: | ||||||
|  |     has_docker = False | ||||||
|  | 
 | ||||||
|  | if has_docker: | ||||||
|  |     with st.container(): | ||||||
|  |         DOCKER_SERVICE = st.toggle('DOCKER_SERVICE', True) | ||||||
|  |         SANDBOX_DO_REMOTE = st.toggle('SANDBOX_DO_REMOTE', True) | ||||||
|  | else: | ||||||
|  |     DOCKER_SERVICE = False | ||||||
|  |     SANDBOX_DO_REMOTE = False | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | with st.container(): | ||||||
|  |     cols = st.columns(3) | ||||||
|  | 
 | ||||||
|  |     if cols[0].button( | ||||||
|  |         "重启服务,按前配置生效", | ||||||
|  |         use_container_width=True, | ||||||
|  |     ): | ||||||
|  |         from start import start_main | ||||||
|  |         from stop import stop_main | ||||||
|  |         stop_main() | ||||||
|  |         start_main() | ||||||
|  |     if cols[1].button( | ||||||
|  |         "停止服务", | ||||||
|  |         use_container_width=True, | ||||||
|  |     ): | ||||||
|  |         from stop import stop_main | ||||||
|  |         stop_main() | ||||||
|  | 
 | ||||||
|  |     if cols[2].button( | ||||||
|  |         "启动对话服务", | ||||||
|  |         use_container_width=True | ||||||
|  |     ): | ||||||
|  |          | ||||||
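|  |         # pass the chosen settings to the service processes through environment variables | ||||||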
|  |         os.environ["API_BASE_URL"] = llm_apiurl | ||||||
|  |         os.environ["OPENAI_API_KEY"] = llm_apikey | ||||||
|  | 
 | ||||||
|  |         os.environ["EMBEDDING_ENGINE"] = em_engine | ||||||
|  |         os.environ["EMBEDDING_MODEL"] = em_model_name | ||||||
|  |         os.environ["LLM_MODEL"] = llm_model_name | ||||||
|  | 
 | ||||||
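|  |         # rewrite embedding model paths for the Docker container or for the local model directory | ||||||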
|  |         embedding_model_dict = {k: f"/home/user/chatbot/embedding_models/{v}" if DOCKER_SERVICE else f"{LOCAL_EM_MODEL_DIR}/{v}" for k, v in embedding_model_dict.items()} | ||||||
|  |         os.environ["embedding_model_dict"] = json.dumps(embedding_model_dict) | ||||||
|  | 
 | ||||||
|  |         os.environ["ONLINE_LLM_MODEL"] = json.dumps(ONLINE_LLM_MODEL) | ||||||
|  | 
 | ||||||
|  |         # remap model paths: container path when DOCKER_SERVICE is on, local path otherwise | ||||||
|  |         llm_model_dict_c = {} | ||||||
|  |         for k, v in llm_model_dict.items(): | ||||||
|  |             v_c = {} | ||||||
|  |             for kk, vv in v.items(): | ||||||
|  |                 if kk == "local_model_path": | ||||||
|  |                     v_c[kk] = f"/home/user/chatbot/llm_models/{vv}" if DOCKER_SERVICE else f"{LOCAL_LLM_MODEL_DIR}/{vv}"  | ||||||
|  |                 else: | ||||||
|  |                     v_c[kk] = vv | ||||||
|  |             llm_model_dict_c[k] = v_c | ||||||
|  | 
 | ||||||
|  |         llm_model_dict = llm_model_dict_c | ||||||
|  |         os.environ["llm_model_dict"] = json.dumps(llm_model_dict) | ||||||
|  |         # remap vLLM model paths the same way; the VLLM_MODEL_DICT built above holds config dicts, | ||||||
|  |         # so use their "local_model_path" instead of formatting the whole dict into the path | ||||||
|  |         VLLM_MODEL_DICT_c = {} | ||||||
|  |         for k, v in VLLM_MODEL_DICT.items(): | ||||||
|  |             path = v["local_model_path"] if isinstance(v, dict) else v | ||||||
|  |             VLLM_MODEL_DICT_c[k] = f"/home/user/chatbot/llm_models/{path}" if DOCKER_SERVICE else f"{LOCAL_LLM_MODEL_DIR}/{path}" | ||||||
|  |         VLLM_MODEL_DICT = VLLM_MODEL_DICT_c | ||||||
|  |         os.environ["VLLM_MODEL_DICT"] = json.dumps(VLLM_MODEL_DICT) | ||||||
|  | 
 | ||||||
|  |         # server config | ||||||
|  |         os.environ["DOCKER_SERVICE"] = json.dumps(DOCKER_SERVICE) | ||||||
|  |         os.environ["SANDBOX_DO_REMOTE"] = json.dumps(SANDBOX_DO_REMOTE) | ||||||
|  |         os.environ["FSCHAT_MODEL_WORKERS"] = json.dumps(FSCHAT_MODEL_WORKERS) | ||||||
|  | 
 | ||||||
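|  |         # persist the same settings to configs/local_config.json so a later restart reuses them | ||||||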
|  |         update_json = { | ||||||
|  |             "API_BASE_URL": llm_apiurl, | ||||||
|  |             "OPENAI_API_KEY": llm_apikey, | ||||||
|  |             "EMBEDDING_ENGINE": em_engine, | ||||||
|  |             "EMBEDDING_MODEL": em_model_name, | ||||||
|  |             "LLM_MODEL": llm_model_name, | ||||||
|  |             "embedding_model_dict": json.dumps(embedding_model_dict), | ||||||
|  |             "llm_model_dict": json.dumps(llm_model_dict), | ||||||
|  |             "ONLINE_LLM_MODEL": json.dumps(ONLINE_LLM_MODEL), | ||||||
|  |             "VLLM_MODEL_DICT": json.dumps(VLLM_MODEL_DICT), | ||||||
|  |             "DOCKER_SERVICE": json.dumps(DOCKER_SERVICE), | ||||||
|  |             "SANDBOX_DO_REMOTE": json.dumps(SANDBOX_DO_REMOTE), | ||||||
|  |             "FSCHAT_MODEL_WORKERS": json.dumps(FSCHAT_MODEL_WORKERS) | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         with open(os.path.join(src_dir, "configs/local_config.json"), "w") as f: | ||||||
|  |             json.dump(update_json, f) | ||||||
|  | 
 | ||||||
|  |         from start import start_main | ||||||
|  |         from stop import stop_main | ||||||
|  |         stop_main() | ||||||
|  |         start_main() | ||||||
							
								
								
									
										
BIN  sources/docs_imgs/webui_config.png  (new file, 55 KiB; binary file not shown)
| @ -79,7 +79,7 @@ print(src_dir) | |||||||
| 
 | 
 | ||||||
| # chain test | # chain test | ||||||
| llm_config = LLMConfig( | llm_config = LLMConfig( | ||||||
|     model_name="gpt-3.5-turbo", model_device="cpu",api_key=os.environ["OPENAI_API_KEY"],  |     model_name="gpt-3.5-turbo", api_key=os.environ["OPENAI_API_KEY"],  | ||||||
|     api_base_url=os.environ["API_BASE_URL"], temperature=0.3 |     api_base_url=os.environ["API_BASE_URL"], temperature=0.3 | ||||||
|     ) |     ) | ||||||
| embed_config = EmbedConfig( | embed_config = EmbedConfig( | ||||||
|  | |||||||
| @ -5,7 +5,7 @@ src_dir = os.path.join( | |||||||
| ) | ) | ||||||
| sys.path.append(src_dir) | sys.path.append(src_dir) | ||||||
| 
 | 
 | ||||||
| from configs import llm_model_dict, LLM_MODEL | from configs.model_config import llm_model_dict, LLM_MODEL | ||||||
| import openai | import openai | ||||||
| # os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:7890" | # os.environ["OPENAI_PROXY"] = "socks5h://127.0.0.1:7890" | ||||||
| # os.environ["OPENAI_PROXY"] = "http://127.0.0.1:7890" | # os.environ["OPENAI_PROXY"] = "http://127.0.0.1:7890" | ||||||
| @ -22,30 +22,32 @@ if __name__ == "__main__": | |||||||
|     # chat = ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo") |     # chat = ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo") | ||||||
|     # print(chat.predict("hi!")) |     # print(chat.predict("hi!")) | ||||||
| 
 | 
 | ||||||
|     # print(LLM_MODEL, llm_model_dict[LLM_MODEL]["api_key"], llm_model_dict[LLM_MODEL]["api_base_url"]) |     print(LLM_MODEL, llm_model_dict[LLM_MODEL]["api_key"], llm_model_dict[LLM_MODEL]["api_base_url"]) | ||||||
|     # model = ChatOpenAI( |     from langchain.chat_models import ChatOpenAI | ||||||
|     #     streaming=True, |     model = ChatOpenAI( | ||||||
|     #     verbose=True, |         streaming=True, | ||||||
|     #     openai_api_key=llm_model_dict[LLM_MODEL]["api_key"], |         verbose=True, | ||||||
|     #     openai_api_base=llm_model_dict[LLM_MODEL]["api_base_url"], |         openai_api_key="dsdadas", | ||||||
|     #     model_name=LLM_MODEL |         openai_api_base=llm_model_dict[LLM_MODEL]["api_base_url"], | ||||||
|     # ) |         model_name=LLM_MODEL | ||||||
|  |     ) | ||||||
|  |     print(model.predict("hi!")) | ||||||
|     # chat_prompt = ChatPromptTemplate.from_messages([("human", "{input}")]) |     # chat_prompt = ChatPromptTemplate.from_messages([("human", "{input}")]) | ||||||
|     # chain = LLMChain(prompt=chat_prompt, llm=model) |     # chain = LLMChain(prompt=chat_prompt, llm=model) | ||||||
|     # content = chain({"input": "hello"}) |     # content = chain({"input": "hello"}) | ||||||
|     # print(content) |     # print(content) | ||||||
| 
 | 
 | ||||||
|     import openai |     # import openai | ||||||
|     # openai.api_key = "EMPTY" # Not support yet |     # # openai.api_key = "EMPTY" # Not support yet | ||||||
|     openai.api_base = "http://127.0.0.1:8888/v1" |     # openai.api_base = "http://127.0.0.1:8888/v1" | ||||||
| 
 | 
 | ||||||
|     model = "example" |     # model = "example" | ||||||
| 
 | 
 | ||||||
|     # create a chat completion |     # # create a chat completion | ||||||
|     completion = openai.ChatCompletion.create( |     # completion = openai.ChatCompletion.create( | ||||||
|     model=model, |     # model=model, | ||||||
|     messages=[{"role": "user", "content": "Hello! What is your name? "}], |     # messages=[{"role": "user", "content": "Hello! What is your name? "}], | ||||||
|     max_tokens=100, |     # max_tokens=100, | ||||||
|     ) |     # ) | ||||||
|     # print the completion |     # # print the completion | ||||||
|     print(completion.choices[0].message.content) |     # print(completion.choices[0].message.content) | ||||||
| @ -86,7 +86,8 @@ pycodebox = PyCodeBox(remote_url="http://localhost:5050", | |||||||
| reuslt = pycodebox.chat("```import os\nos.getcwd()```", do_code_exe=True) | reuslt = pycodebox.chat("```import os\nos.getcwd()```", do_code_exe=True) | ||||||
| print(reuslt) | print(reuslt) | ||||||
| 
 | 
 | ||||||
| reuslt = pycodebox.chat("print('hello world!')", do_code_exe=False) | # reuslt = pycodebox.chat("```print('hello world!')```", do_code_exe=True) | ||||||
|  | reuslt = pycodebox.chat("print('hello world!')", do_code_exe=True) | ||||||
| print(reuslt) | print(reuslt) | ||||||
| 
 | 
 | ||||||
|      |      | ||||||
|  | |||||||