mirror of https://github.com/aimingmed/aimingmed-ai.git
synced 2026-02-06 23:35:28 +08:00

commit f1ccaffafe (parent 1e242de51d)

    update
Compose file (streamlit service):

@@ -3,6 +3,7 @@ version: "3.9"
 services:
   streamlit:
     build: ./streamlit
+    platform: linux/amd64
     ports:
       - "8501:8501"
     volumes:
Streamlit Dockerfile:

@@ -2,12 +2,6 @@ FROM python:3.11-slim

 WORKDIR /app/streamlit

-RUN apt-get update && apt-get install -y \
-    build-essential \
-    curl \
-    software-properties-common \
-    && rm -rf /var/lib/apt/lists/*
-
 COPY requirements.txt ./

 RUN pip install --no-cache-dir -r requirements.txt

@@ -15,8 +9,10 @@ RUN pip install --no-cache-dir -r requirements.txt
 COPY Chatbot.py .
 COPY .env .

+# Run python to initialize download of SentenceTransformer model
+COPY initialize_sentence_transformer.py .
+RUN python initialize_sentence_transformer.py
+
 EXPOSE 8501

-HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
-
-ENTRYPOINT ["streamlit", "run", "Chatbot.py", "--server.port=8501", "--server.address=0.0.0.0"]
+ENTRYPOINT ["streamlit", "run", "Chatbot.py"]
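The added COPY/RUN pair downloads the embedding model while the image is built, so the weights ship inside the image and the container does not fetch them at startup. Below is a minimal sketch (not part of the commit) for checking that the build step populated the model cache; it assumes the default Hugging Face cache location, since the Dockerfile shown here does not set HF_HOME or SENTENCE_TRANSFORMERS_HOME.

# Hypothetical verification script: list cached model snapshots inside the built image.
# Assumes the default Hugging Face hub cache (~/.cache/huggingface/hub); recent
# sentence-transformers releases store downloads there, older releases may use a
# SENTENCE_TRANSFORMERS_HOME directory instead.
import os
from pathlib import Path

hub_cache = Path(os.environ.get("HF_HOME", Path.home() / ".cache" / "huggingface")) / "hub"
for entry in sorted(hub_cache.glob("models--*")):
    print(entry.name)  # expect an entry for paraphrase-multilingual-mpnet-base-v2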
Chatbot test module:

@@ -1,52 +1,7 @@
-import datetime
-
 from unittest.mock import patch

 from streamlit.testing.v1 import AppTest
-from openai.types.chat import ChatCompletionMessage
-from openai.types.chat.chat_completion import ChatCompletion, Choice
-
-
-# See https://github.com/openai/openai-python/issues/715#issuecomment-1809203346
-def create_chat_completion(response: str, role: str = "assistant") -> ChatCompletion:
-    return ChatCompletion(
-        id="foo",
-        model="gpt-3.5-turbo",
-        object="chat.completion",
-        choices=[
-            Choice(
-                finish_reason="stop",
-                index=0,
-                message=ChatCompletionMessage(
-                    content=response,
-                    role=role,
-                ),
-            )
-        ],
-        created=int(datetime.datetime.now().timestamp()),
-    )
-
-
-@patch("langchain_deepseek.ChatDeepSeek.invoke")
-@patch("langchain_google_genai.ChatGoogleGenerativeAI.invoke")
-@patch("langchain_community.llms.moonshot.Moonshot.invoke")
-def test_Chatbot(moonshot_llm, gemini_llm, deepseek_llm):
-    at = AppTest.from_file("Chatbot.py").run()
-    assert not at.exception
-
-    QUERY = "What is the best treatment for hypertension?"
-    RESPONSE = "The best treatment for hypertension is..."
-
-    deepseek_llm.return_value.content = RESPONSE
-    gemini_llm.return_value.content = RESPONSE
-    moonshot_llm.return_value = RESPONSE
-
-    at.chat_input[0].set_value(QUERY).run()
-
-    assert any(mock.called for mock in [deepseek_llm, gemini_llm, moonshot_llm])
-    assert at.chat_message[1].markdown[0].value == QUERY
-    assert at.chat_message[2].markdown[0].value == RESPONSE
-    assert at.chat_message[2].avatar == "assistant"
-    assert not at.exception
-
-
 @patch("langchain.llms.OpenAI.__call__")
 def test_Langchain_Quickstart(langchain_llm):
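The commit removes the multi-provider test_Chatbot test and the OpenAI ChatCompletion helper, leaving only the Langchain quickstart test. For reference, a minimal sketch of the AppTest pattern the remaining test still relies on; the test name and stubbed return value are illustrative, while the patch target and AppTest calls are taken from the file above.

# Illustrative only: boot Chatbot.py with the LLM call patched out and assert
# the app renders without raising.
from unittest.mock import patch

from streamlit.testing.v1 import AppTest


@patch("langchain.llms.OpenAI.__call__")
def test_app_boots(mock_llm):
    mock_llm.return_value = "stubbed response"
    at = AppTest.from_file("Chatbot.py").run()
    assert not at.exception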
app/streamlit/initialize_sentence_transformer.py (new file, 7 lines):

@@ -0,0 +1,7 @@
+from decouple import config
+from sentence_transformers import SentenceTransformer
+
+EMBEDDING_MODEL = config("EMBEDDING_MODEL", cast=str, default="paraphrase-multilingual-mpnet-base-v2")
+
+# Initialize embedding model
+model = SentenceTransformer(EMBEDDING_MODEL)
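At runtime the application can instantiate the same model name and it resolves from the cache created during the build rather than downloading again. A minimal usage sketch mirroring the names in the init script; the encode() call and sample sentence are illustrative.

# Illustrative runtime use: EMBEDDING_MODEL resolves from the cache baked into
# the image by initialize_sentence_transformer.py during the build.
from decouple import config
from sentence_transformers import SentenceTransformer

EMBEDDING_MODEL = config("EMBEDDING_MODEL", cast=str, default="paraphrase-multilingual-mpnet-base-v2")
model = SentenceTransformer(EMBEDDING_MODEL)

vectors = model.encode(["What is the best treatment for hypertension?"])
print(vectors.shape)  # (1, 768) for this mpnet-base model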