mirror of
https://github.com/RYDE-WORK/Langchain-Chatchat.git
synced 2026-01-26 08:43:23 +08:00
77 lines
1.6 KiB
Plaintext
77 lines
1.6 KiB
Plaintext
# API requirements
|
|
|
|
# Torch requirements, install the cuda version manually from https://pytorch.org/
|
|
torch>=2.1.2
|
|
torchvision>=0.16.2
|
|
torchaudio>=2.1.2
|
|
|
|
# Langchain 0.1.x requirements
|
|
langchain>=0.1.0
|
|
langchain_openai>=0.0.2
|
|
langchain-community>=0.0.11
|
|
langchainhub>=0.1.14
|
|
|
|
pydantic==1.10.13
|
|
fschat==0.2.35
|
|
openai==1.9.0
|
|
fastapi==0.109.0
|
|
sse_starlette==1.8.2
|
|
nltk==3.8.1
|
|
uvicorn>=0.27.0.post1
|
|
starlette==0.35.0
|
|
unstructured[all-docs] # ==0.11.8
|
|
python-magic-bin; sys_platform == 'win32'
|
|
SQLAlchemy==2.0.25
|
|
faiss-cpu==1.7.4
|
|
accelerate==0.24.1
|
|
spacy==3.7.2
|
|
PyMuPDF==1.23.16
|
|
rapidocr_onnxruntime==1.3.8
|
|
requests==2.31.0
|
|
pathlib==1.0.1
|
|
pytest==7.4.3
|
|
numexpr==2.8.6
|
|
strsimpy==0.2.1
|
|
markdownify==0.11.6
|
|
tiktoken==0.5.2
|
|
tqdm==4.66.1
|
|
websockets==12.0
|
|
numpy==1.24.4
|
|
pandas==2.0.3
|
|
einops==0.7.0
|
|
transformers_stream_generator==0.0.4
|
|
vllm==0.2.7; sys_platform == "linux"
|
|
llama-index==0.9.35
|
|
|
|
# jq==1.6.0
|
|
# beautifulsoup4==4.12.2
|
|
# pysrt==1.1.2
|
|
# dashscope==1.13.6 # qwen
|
|
# volcengine==1.0.119 # fangzhou
|
|
# uncomment libs if you want to use corresponding vector store
|
|
# pymilvus==2.3.6
|
|
# psycopg2==2.9.9
|
|
# pgvector>=0.2.4
|
|
# chromadb==0.4.13
|
|
|
|
#flash-attn==2.4.2 # For Orion-14B-Chat and Qwen-14B-Chat
|
|
#autoawq==0.1.8 # For Int4
|
|
#rapidocr_paddle[gpu]==1.3.11 # gpu acceleration for ocr of pdf and image files
|
|
|
|
arxiv==2.1.0
|
|
youtube-search==2.1.2
|
|
duckduckgo-search==3.9.9
|
|
metaphor-python==0.1.23
|
|
|
|
streamlit==1.30.0
|
|
streamlit-option-menu==0.3.12
|
|
streamlit-antd-components==0.3.1
|
|
streamlit-chatbox==1.1.11
|
|
streamlit-modal==0.1.0
|
|
streamlit-aggrid==0.3.4.post3
|
|
|
|
httpx==0.26.0
|
|
httpx_sse==0.4.0
|
|
watchdog==3.0.0
|
|
pyjwt==2.8.0
|