Mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git
Synced 2026-01-30 02:35:29 +08:00
Commit 670131c886
@@ -193,7 +193,7 @@ $ python startup.py -a
[](https://t.me/+RjliQ3jnJ1YyN2E9)

### Project Discussion Group
-<img src="img/qr_code_88.jpg" alt="二维码" width="300" />
+<img src="img/qr_code_89.jpg" alt="二维码" width="300" />

🎉 Langchain-Chatchat project WeChat group: if you are also interested in this project, you are welcome to join the group chat and take part in the discussion.
@@ -192,7 +192,7 @@ please refer to the [Wiki](https://github.com/chatchat-space/Langchain-Chatchat/
### WeChat Group

-<img src="img/qr_code_88.jpg" alt="二维码" width="300" height="300" />
+<img src="img/qr_code_89.jpg" alt="二维码" width="300" height="300" />

### WeChat Official Account
@@ -185,7 +185,7 @@ $ python startup.py -a
### WeChat Group

-<img src="img/qr_code_88.jpg" alt="二维码" width="300" height="300" />
+<img src="img/qr_code_89.jpg" alt="二维码" width="300" height="300" />

### WeChat Official Account
@@ -150,12 +150,16 @@ MODEL_PATH = {
    "m3e-small": "moka-ai/m3e-small",
    "m3e-base": "moka-ai/m3e-base",
    "m3e-large": "moka-ai/m3e-large",

    "bge-small-zh": "BAAI/bge-small-zh",
    "bge-base-zh": "BAAI/bge-base-zh",
    "bge-large-zh": "BAAI/bge-large-zh",
    "bge-large-zh-noinstruct": "BAAI/bge-large-zh-noinstruct",
    "bge-base-zh-v1.5": "BAAI/bge-base-zh-v1.5",
    "bge-large-zh-v1.5": "BAAI/bge-large-zh-v1.5",

    "bge-m3": "BAAI/bge-m3",

    "piccolo-base-zh": "sensenova/piccolo-base-zh",
    "piccolo-large-zh": "sensenova/piccolo-large-zh",
    "nlp_gte_sentence-embedding_chinese-large": "damo/nlp_gte_sentence-embedding_chinese-large",
@@ -181,6 +185,14 @@ MODEL_PATH = {
    "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
    "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat",

    # Qwen1.5 models may run into problems with vLLM
    "Qwen1.5-0.5B-Chat": "Qwen/Qwen1.5-0.5B-Chat",
    "Qwen1.5-1.8B-Chat": "Qwen/Qwen1.5-1.8B-Chat",
    "Qwen1.5-4B-Chat": "Qwen/Qwen1.5-4B-Chat",
    "Qwen1.5-7B-Chat": "Qwen/Qwen1.5-7B-Chat",
    "Qwen1.5-14B-Chat": "Qwen/Qwen1.5-14B-Chat",
    "Qwen1.5-72B-Chat": "Qwen/Qwen1.5-72B-Chat",

    "baichuan-7b-chat": "baichuan-inc/Baichuan-7B-Chat",
    "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat",
    "baichuan2-7b-chat": "baichuan-inc/Baichuan2-7B-Chat",
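The entries above (and the embedding entries in the previous hunk) map a short model name to a Hugging Face repo id. As a point of reference, here is a minimal sketch of how such a mapping can be resolved to a local checkout or, failing that, to the hub id; MODEL_ROOT_PATH, the section layout, and resolve_model_path are illustrative assumptions, not code from this commit:

import os

MODEL_ROOT_PATH = "/data/models"  # assumed root directory for locally downloaded models

MODEL_PATH = {
    "llm_model": {
        "Qwen1.5-7B-Chat": "Qwen/Qwen1.5-7B-Chat",
        "Qwen1.5-72B-Chat": "Qwen/Qwen1.5-72B-Chat",
    },
    "embed_model": {
        "bge-m3": "BAAI/bge-m3",
    },
}


def resolve_model_path(name: str) -> str:
    """Return a local directory if one exists under MODEL_ROOT_PATH, else the repo id."""
    for section in MODEL_PATH.values():
        repo_id = section.get(name)
        if repo_id is None:
            continue
        local_dir = os.path.join(MODEL_ROOT_PATH, os.path.basename(repo_id))
        return local_dir if os.path.isdir(local_dir) else repo_id
    raise KeyError(f"unknown model name: {name}")


print(resolve_model_path("bge-m3"))  # 'BAAI/bge-m3' unless /data/models/bge-m3 exists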
@@ -90,13 +90,12 @@ FSCHAT_MODEL_WORKERS = {
        # 'disable_log_requests': False

    },
    "Qwen-1_8B-Chat": {
        "device": "cpu",
    },
    "chatglm3-6b": {
        "device": "cuda",
    },

    "Qwen1.5-0.5B-Chat": {
        "device": "cuda",
    },
    # The entries below usually need no changes; the models to launch are set in model_config
    "zhipu-api": {
        "port": 21001,
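The blocks above only override a single field per model. A minimal sketch of how per-model overrides like these are commonly layered over shared worker defaults; the "default" block and the get_worker_config helper are assumptions for illustration, not part of this commit:

FSCHAT_MODEL_WORKERS = {
    "default": {"host": "127.0.0.1", "port": 20002, "device": "auto"},  # assumed shared defaults
    "chatglm3-6b": {"device": "cuda"},
    "Qwen1.5-0.5B-Chat": {"device": "cuda"},
}


def get_worker_config(model_name: str) -> dict:
    """Shallow-merge a model's overrides onto the shared defaults."""
    config = dict(FSCHAT_MODEL_WORKERS["default"])
    config.update(FSCHAT_MODEL_WORKERS.get(model_name, {}))
    return config


print(get_worker_config("Qwen1.5-0.5B-Chat"))
# {'host': '127.0.0.1', 'port': 20002, 'device': 'cuda'}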
Binary file not shown (before: 195 KiB).
Binary file not shown (before: 318 KiB).
Binary file not shown (before: 234 KiB).
BIN  img/qr_code_89.jpg  (new file; binary not shown, after: 231 KiB)
@@ -2,7 +2,7 @@ torch==2.1.2
torchvision==0.16.2
torchaudio==2.1.2
xformers==0.0.23.post1
-transformers==4.37.1
+transformers==4.37.2
sentence_transformers==2.2.2
langchain==0.0.354
langchain-experimental==0.0.47
@@ -2,7 +2,7 @@ torch~=2.1.2
torchvision~=0.16.2
torchaudio~=2.1.2
xformers>=0.0.23.post1
-transformers==4.37.1
+transformers==4.37.2
sentence_transformers==2.2.2
langchain==0.0.354
langchain-experimental==0.0.47
@@ -50,7 +50,7 @@ class MilvusKBService(KBService):

    def _load_milvus(self):
        self.milvus = Milvus(embedding_function=EmbeddingsFunAdapter(self.embed_model),
                             collection_name=self.kb_name,
                             connection_args=kbs_config.get("milvus"),
                             index_params=kbs_config.get("milvus_kwargs")["index_params"],
                             search_params=kbs_config.get("milvus_kwargs")["search_params"]
@@ -89,6 +89,14 @@ class MilvusKBService(KBService):
        if self.milvus.col:
            self.milvus.col.delete(expr=f'pk in {id_list}')

        # Issue 2846, for windows
        # if self.milvus.col:
        #     file_path = kb_file.filepath.replace("\\", "\\\\")
        #     file_name = os.path.basename(file_path)
        #     id_list = [item.get("pk") for item in
        #                self.milvus.col.query(expr=f'source == "{file_name}"', output_fields=["pk"])]
        #     self.milvus.col.delete(expr=f'pk in {id_list}')

    def do_clear_vs(self):
        if self.milvus.col:
            self.do_drop_kb()
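The commented-out block above is the Windows workaround referenced in Issue 2846: backslashes in the file path are escaped, the primary keys of all vectors whose source matches the file name are queried, and those entities are deleted. A standalone sketch of the same logic, assuming col is an already-loaded pymilvus Collection and filepath comes from the knowledge-base file record:

import os


def delete_doc_by_source(col, filepath: str) -> None:
    """Delete every Milvus entity whose 'source' field matches the file's base name.

    Escaping backslashes first keeps Windows paths from breaking the boolean
    expression string passed to Milvus, which is the point of Issue 2846.
    """
    file_path = filepath.replace("\\", "\\\\")
    file_name = os.path.basename(file_path)
    id_list = [item.get("pk") for item in
               col.query(expr=f'source == "{file_name}"', output_fields=["pk"])]
    if id_list:
        col.delete(expr=f'pk in {id_list}')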
@@ -13,7 +13,6 @@ async def request(appid, api_key, api_secret, Spark_url, domain, question, temperature, max_token):
    wsParam = SparkApi.Ws_Param(appid, api_key, api_secret, Spark_url)
    wsUrl = wsParam.create_url()
    data = SparkApi.gen_params(appid, domain, question, temperature, max_token)
    print(data)
    async with websockets.connect(wsUrl) as ws:
        await ws.send(json.dumps(data, ensure_ascii=False))
        finish = False
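The hunk stops right after finish = False. For orientation only, here is one plausible shape of the loop that follows, reading frames until the service reports completion; the frame field names (header.status, payload.choices.text) follow the published Spark API format and are assumptions to check against SparkApi, not code from this commit:

import asyncio
import json

import websockets


async def read_spark_frames(ws_url: str, payload: dict):
    """Send one request and yield answer fragments until Spark reports completion.

    header.status == 2 meaning "final frame" and payload.choices.text holding the
    incremental content are assumptions based on the documented Spark response format.
    """
    async with websockets.connect(ws_url) as ws:
        await ws.send(json.dumps(payload, ensure_ascii=False))
        finish = False
        while not finish:
            frame = json.loads(await ws.recv())
            if frame.get("header", {}).get("status") == 2:
                finish = True
            for piece in frame.get("payload", {}).get("choices", {}).get("text", []):
                yield piece.get("content", "")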
@@ -1,13 +1,21 @@
from contextlib import contextmanager

import httpx
from fastchat.conversation import Conversation
from httpx_sse import EventSource

from server.model_workers.base import *
from fastchat import conversation as conv
import sys
from typing import List, Dict, Iterator, Literal
from configs import logger, log_verbose
import requests
from typing import List, Dict, Iterator, Literal, Any
import jwt
import time
import json


@contextmanager
def connect_sse(client: httpx.Client, method: str, url: str, **kwargs: Any):
    with client.stream(method, url, **kwargs) as response:
        yield EventSource(response)


def generate_token(apikey: str, exp_seconds: int):
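The body of generate_token is not shown in this hunk. As orientation only, a sketch of the JWT pattern that the new jwt and time imports point at, following Zhipu's published api-key scheme (a key of the form "<id>.<secret>", HS256, millisecond timestamps); the exact payload fields and the sign_type header are assumptions to verify against the Zhipu docs:

import time

import jwt


def generate_token(apikey: str, exp_seconds: int) -> str:
    # Zhipu API keys look like "<id>.<secret>"; the secret part signs the token.
    try:
        key_id, secret = apikey.split(".")
    except ValueError as e:
        raise ValueError("invalid apikey, expected '<id>.<secret>'") from e

    now_ms = int(round(time.time() * 1000))  # millisecond timestamps (assumed)
    payload = {
        "api_key": key_id,
        "exp": now_ms + exp_seconds * 1000,
        "timestamp": now_ms,
    }
    # sign_type header per Zhipu's documented scheme (assumed)
    return jwt.encode(payload, secret, algorithm="HS256",
                      headers={"alg": "HS256", "sign_type": "SIGN"})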
@@ -37,7 +45,7 @@ class ChatGLMWorker(ApiModelWorker):
        model_names: List[str] = ["zhipu-api"],
        controller_addr: str = None,
        worker_addr: str = None,
-       version: Literal["chatglm_turbo"] = "chatglm_turbo",
+       version: Literal["glm-4"] = "glm-4",
        **kwargs,
    ):
        kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr)
@@ -59,25 +67,25 @@ class ChatGLMWorker(ApiModelWorker):
            "temperature": params.temperature,
            "stream": False
        }

        url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
-       response = requests.post(url, headers=headers, json=data)
-       # for chunk in response.iter_lines():
-       #     if chunk:
-       #         chunk_str = chunk.decode('utf-8')
-       #         json_start_pos = chunk_str.find('{"id"')
-       #         if json_start_pos != -1:
-       #             json_str = chunk_str[json_start_pos:]
-       #             json_data = json.loads(json_str)
-       #             for choice in json_data.get('choices', []):
-       #                 delta = choice.get('delta', {})
-       #                 content = delta.get('content', '')
-       #                 yield {"error_code": 0, "text": content}
-       ans = response.json()
-       content = ans["choices"][0]["message"]["content"]
-       yield {"error_code": 0, "text": content}
+       with httpx.Client(headers=headers) as client:
+           response = client.post(url, json=data)
+           response.raise_for_status()
+           chunk = response.json()
+           print(chunk)
+           yield {"error_code": 0, "text": chunk["choices"][0]["message"]["content"]}
+
+       # with connect_sse(client, "POST", url, json=data) as event_source:
+       #     for sse in event_source.iter_sse():
+       #         chunk = json.loads(sse.data)
+       #         if len(chunk["choices"]) != 0:
+       #             text += chunk["choices"][0]["delta"]["content"]
+       #             yield {"error_code": 0, "text": text}

    def get_embeddings(self, params):
        # Temporary workaround: embedding is not supported
        print("embedding")
        print(params)
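The committed do_chat makes one non-streaming POST and yields a single chunk; the commented-out block above is the streaming path via connect_sse. A self-contained sketch of that streaming variant; the bare Authorization header, the "stream": True flag, and the [DONE] sentinel are assumptions, not values taken from this commit:

import json
from contextlib import contextmanager

import httpx
from httpx_sse import EventSource


@contextmanager
def connect_sse(client: httpx.Client, method: str, url: str, **kwargs):
    with client.stream(method, url, **kwargs) as response:
        yield EventSource(response)


def stream_chat(token: str, messages: list):
    url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
    headers = {"Authorization": token}  # token as produced by generate_token() (assumed)
    data = {"model": "glm-4", "messages": messages, "stream": True}  # stream=True assumed

    text = ""
    with httpx.Client(headers=headers) as client:
        with connect_sse(client, "POST", url, json=data) as event_source:
            for sse in event_source.iter_sse():
                if sse.data == "[DONE]":  # end-of-stream sentinel (assumed)
                    break
                chunk = json.loads(sse.data)
                if chunk["choices"]:
                    text += chunk["choices"][0]["delta"].get("content", "")
                    yield {"error_code": 0, "text": text}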