Mirror of https://github.com/RYDE-WORK/Langchain-Chatchat.git
commit 77c3fa65c2 (parent d0846f88cc)

Remove the llama-index dependency; fix the /v1/models error
@@ -19,31 +19,6 @@ rapidocr_onnxruntime~=1.3.8
 requests~=2.31.0
 pathlib~=1.0.1
 pytest~=7.4.3
-llama-index==0.9.35
-
-# jq==1.6.0
-# beautifulsoup4==4.12.2
-# pysrt==1.1.2
-# dashscope==1.13.6 # qwen
-# volcengine==1.0.119 # fangzhou
-# uncomment libs if you want to use corresponding vector store
-# pymilvus==2.3.6
-# psycopg2==2.9.9
-# pgvector>=0.2.4
-# chromadb==0.4.13
-
-#flash-attn==2.4.2 # For Orion-14B-Chat and Qwen-14B-Chat
-#autoawq==0.1.8 # For Int4
-#rapidocr_paddle[gpu]==1.3.11 # gpu accelleration for ocr of pdf and image files
-
-arxiv==2.1.0
-youtube-search==2.1.2
-duckduckgo-search==3.9.9
-metaphor-python==0.1.23
-
-httpx==0.26.0
-httpx_sse==0.4.0
-watchdog==3.0.0
 pyjwt==2.8.0
 elasticsearch
 numexpr>=2.8.8
@@ -53,7 +28,7 @@ tqdm>=4.66.1
 websockets>=12.0
 numpy>=1.26.3
 pandas~=2.1.4
-pydantic<2
+pydantic==1.10.14
 httpx[brotli,http2,socks]>=0.25.2
 
 # optional document loaders
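The two requirements hunks above remove llama-index==0.9.35 together with a block of commented-out optional extras and several other pins (arxiv, youtube-search, duckduckgo-search, metaphor-python, httpx, httpx_sse, watchdog), and tighten the loose pydantic<2 constraint to an exact pydantic==1.10.14. Below is a minimal sanity-check sketch for a freshly reinstalled environment; it assumes Python 3.8+ and is illustrative only, check_environment is not a helper from this repository.

# Hypothetical sanity check (not part of the commit): verify that the active
# environment matches the new pins after reinstalling the requirements.
from importlib.metadata import version, PackageNotFoundError

def check_environment() -> None:
    try:
        found = version("llama-index")
        print(f"warning: llama-index {found} is still installed but no longer required")
    except PackageNotFoundError:
        print("ok: llama-index is not installed")

    pyd = version("pydantic")
    status = "ok" if pyd == "1.10.14" else "warning"
    print(f"{status}: pydantic {pyd} (requirements now pin pydantic==1.10.14)")

if __name__ == "__main__":
    check_environment()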
@@ -74,7 +74,7 @@ async def list_models() -> Dict:
     try:
         client = get_OpenAIClient(name, is_async=True)
         models = await client.models.list()
-        models = models.dict(exclude=["data", "object"])
+        models = models.dict(exclude={"data":..., "object":...})
         for x in models:
             models[x]["platform_name"] = name
         return models
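This hunk is the /v1/models fix named in the commit message. Under pydantic v1 (which the requirements now pin), BaseModel.dict() accepts exclude as a set or a mapping; a plain list is rejected with a TypeError at call time, which is presumably the /v1/models error being fixed. A minimal reproduction sketch, assuming pydantic 1.10.x; ModelsPage is a made-up stand-in for the object returned by client.models.list(), not code from this repository.

# Minimal sketch, assuming pydantic 1.10.x. "ModelsPage" is a made-up stand-in
# for the models-list response object; it is not from the repository.
from typing import Dict, List
from pydantic import BaseModel

class ModelsPage(BaseModel):
    object: str = "list"
    data: List[Dict] = []
    extra: Dict = {}

page = ModelsPage()

try:
    page.dict(exclude=["data", "object"])          # old code: a list is rejected
except TypeError as e:
    print("fails:", e)                             # Unexpected type of exclude value <class 'list'>

print(page.dict(exclude={"data": ..., "object": ...}))  # new code: mapping form works
print(page.dict(exclude={"data", "object"}))            # a plain set works as well

A plain set such as {"data", "object"} behaves the same as the mapping form used in the commit: pydantic v1 treats an Ellipsis value as "exclude the whole field".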
@@ -8,8 +8,7 @@ from typing import Optional, Sequence
 from langchain_core.documents import Document
 from langchain.callbacks.manager import Callbacks
 from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
-from llama_index.bridge.pydantic import Field, PrivateAttr
-
+from pydantic import Field, PrivateAttr
 
 class LangchainReranker(BaseDocumentCompressor):
     """Document compressor that uses `Cohere Rerank API`."""
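The last hunk is the same cleanup on the reranker side: in llama-index 0.9.x, llama_index.bridge.pydantic essentially re-exports pydantic v1 names, so with pydantic pinned to 1.10.14 the Field and PrivateAttr imports can come straight from pydantic and llama-index is no longer needed at all. A minimal sketch of how these two names behave under pydantic v1, using an illustrative TinyReranker rather than the repository's LangchainReranker so it runs without langchain installed.

# Minimal sketch, assuming pydantic 1.10.x. "TinyReranker" is an illustrative
# stand-in, not the repository's LangchainReranker.
from typing import Any
from pydantic import BaseModel, Field, PrivateAttr

class TinyReranker(BaseModel):
    # Public, validated configuration field.
    top_n: int = Field(default=3, gt=0)
    # Private attribute: skipped by validation and by .dict()/.json();
    # the real class presumably stores its loaded model object this way.
    _model: Any = PrivateAttr(default=None)

    def load(self, model: Any) -> None:
        self._model = model

r = TinyReranker(top_n=5)
r.load("dummy-reranker-model")
print(r.dict())    # {'top_n': 5} -- _model never appears in the output
print(r._model)    # dummy-reranker-model

Since the project stays on pydantic 1.x, langchain's BaseDocumentCompressor is also a pydantic v1 model, so the direct imports plug into LangchainReranker the same way the bridged ones did.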