diff --git a/app/docker-compose.yml b/app/docker-compose.yml
index fb7640a..76a4bbc 100644
--- a/app/docker-compose.yml
+++ b/app/docker-compose.yml
@@ -7,4 +7,4 @@ services:
     ports:
       - "8501:8501"
     volumes:
-      - ./llmops/src/rag_cot/chroma_db:/app/llmops/src/rag_cot/chroma_db
+      - ./llmops/src/rag_cot_evaluation/chroma_db:/app/llmops/src/rag_cot_evaluation/chroma_db
diff --git a/app/llmops/components/test_rag_cot/run.py b/app/llmops/components/test_rag_cot/run.py
index b18e6aa..6e404a3 100644
--- a/app/llmops/components/test_rag_cot/run.py
+++ b/app/llmops/components/test_rag_cot/run.py
@@ -11,6 +11,7 @@ from langchain_deepseek import ChatDeepSeek
 from langchain_community.llms.moonshot import Moonshot
 import sys
 
+
 logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
 logger = logging.getLogger()
 
diff --git a/app/llmops/main.py b/app/llmops/main.py
index 670a7fa..4bd19ce 100644
--- a/app/llmops/main.py
+++ b/app/llmops/main.py
@@ -8,7 +8,8 @@ _steps = [
     "get_documents",
     "etl_chromadb_pdf",
     "etl_chromadb_scanned_pdf", # the performance for scanned pdf may not be good
-    "rag_cot",
+    "rag_cot_evaluation",
+    "test_rag_cot"
 ]
 
 
@@ -100,7 +101,7 @@ def go(config: DictConfig):
             "embedding_model": config["etl"]["embedding_model"]
         },
     )
-    if "rag_cot" in active_steps:
+    if "rag_cot_evaluation" in active_steps:
 
         if config["prompt_engineering"]["run_id_chromadb"] == "None":
             # Look for run_id that has artifact logged as documents
@@ -120,7 +121,7 @@
             run_id = config["prompt_engineering"]["run_id_chromadb"]
 
         _ = mlflow.run(
-            os.path.join(hydra.utils.get_original_cwd(), "src", "rag_cot"),
+            os.path.join(hydra.utils.get_original_cwd(), "src", "rag_cot_evaluation"),
             "main",
             parameters={
                 "query": config["prompt_engineering"]["query"],
@@ -138,7 +139,7 @@
             "main",
             parameters={
                 "query": config["prompt_engineering"]["query"],
-                "input_chromadb_local": os.path.join(hydra.utils.get_original_cwd(), "src", "rag_cot", "chroma_db"),
+                "input_chromadb_local": os.path.join(hydra.utils.get_original_cwd(), "src", "rag_cot_evaluation", "chroma_db"),
                 "embedding_model": config["etl"]["embedding_model"],
                 "chat_model_provider": config["prompt_engineering"]["chat_model_provider"]
             },
diff --git a/app/llmops/src/rag_cot/MLproject b/app/llmops/src/rag_cot_evaluation/MLproject
similarity index 100%
rename from app/llmops/src/rag_cot/MLproject
rename to app/llmops/src/rag_cot_evaluation/MLproject
diff --git a/app/llmops/src/rag_cot/python_env.yml b/app/llmops/src/rag_cot_evaluation/python_env.yml
similarity index 100%
rename from app/llmops/src/rag_cot/python_env.yml
rename to app/llmops/src/rag_cot_evaluation/python_env.yml
diff --git a/app/llmops/src/rag_cot/run.py b/app/llmops/src/rag_cot_evaluation/run.py
similarity index 100%
rename from app/llmops/src/rag_cot/run.py
rename to app/llmops/src/rag_cot_evaluation/run.py
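Review note on the rename: `main.py` launches MLflow sub-projects by matching names from the config against `_steps`, so `rag_cot` had to become `rag_cot_evaluation` in the list, in the `active_steps` guard, and in the `src/rag_cot_evaluation` path handed to `mlflow.run` (plus the directory renames above). A minimal sketch of that selection pattern, assuming the usual Hydra `main.steps` override; only `_steps` itself appears in the diff:

```python
# Sketch of the step-selection idiom main.py appears to use (assumed, not
# shown in the diff): "all" runs every step, otherwise a comma-separated
# subset. A stale override like steps=rag_cot would now match nothing, silently.
_steps = [
    "get_documents",
    "etl_chromadb_pdf",
    "etl_chromadb_scanned_pdf",
    "rag_cot_evaluation",
    "test_rag_cot",
]

def resolve_active_steps(steps_param: str) -> list[str]:
    return _steps if steps_param == "all" else steps_param.split(",")

assert "rag_cot_evaluation" in resolve_active_steps("all")
assert resolve_active_steps("rag_cot_evaluation,test_rag_cot") == [
    "rag_cot_evaluation",
    "test_rag_cot",
]
```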
diff --git a/app/streamlit/Chatbot.py b/app/streamlit/Chatbot.py
index 16266d4..ce971e5 100644
--- a/app/streamlit/Chatbot.py
+++ b/app/streamlit/Chatbot.py
@@ -8,14 +8,21 @@ from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_deepseek import ChatDeepSeek
 from langchain_community.llms.moonshot import Moonshot
 
+import torch
+torch.classes.__path__ = [os.path.join(torch.__path__[0], torch.classes.__file__)]
+
+# or simply:
+# torch.classes.__path__ = []
+
+
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
-GEMINI_API_KEY = config("GOOGLE_API_KEY", cast=str)
-DEEKSEEK_API_KEY = config("DEEKSEEK_API_KEY", cast=str)
-MOONSHOT_API_KEY = config("MOONSHOT_API_KEY", cast=str)
-CHAT_MODEL_PROVIDER = config("CHAT_MODEL_PROVIDER", cast=str)
-INPUT_CHROMADB_LOCAL = config("INPUT_CHROMADB_LOCAL", cast=str)
-EMBEDDING_MODEL = config("EMBEDDING_MODEL", cast=str)
-COLLECTION_NAME = config("COLLECTION_NAME", cast=str)
+GEMINI_API_KEY = config("GOOGLE_API_KEY", cast=str, default="123456")
+DEEKSEEK_API_KEY = config("DEEKSEEK_API_KEY", cast=str, default="123456")
+MOONSHOT_API_KEY = config("MOONSHOT_API_KEY", cast=str, default="123456")
+CHAT_MODEL_PROVIDER = config("CHAT_MODEL_PROVIDER", cast=str, default="gemini")
+INPUT_CHROMADB_LOCAL = config("INPUT_CHROMADB_LOCAL", cast=str, default="../llmops/src/rag_cot_evaluation/chroma_db")
+EMBEDDING_MODEL = config("EMBEDDING_MODEL", cast=str, default="paraphrase-multilingual-mpnet-base-v2")
+COLLECTION_NAME = config("COLLECTION_NAME", cast=str, default="rag_experiment")
 
 st.title("💬 RAG AI for Medical Guideline")
 st.caption(f"🚀 A RAG AI for Medical Guideline powered by {CHAT_MODEL_PROVIDER}")
@@ -24,12 +31,15 @@ if "messages" not in st.session_state:
 for msg in st.session_state.messages:
     st.chat_message(msg["role"]).write(msg["content"])
+print('i am here1')
 
 # Load data from ChromaDB
 chroma_client = chromadb.PersistentClient(path=INPUT_CHROMADB_LOCAL)
 collection = chroma_client.get_collection(name=COLLECTION_NAME)
+print('i am here2')
 
 # Initialize embedding model
 model = SentenceTransformer(EMBEDDING_MODEL)
+print('i am here3')
 
 if CHAT_MODEL_PROVIDER == "deepseek":
     # Initialize DeepSeek model
@@ -78,6 +88,7 @@ Provide the answer with language that is similar to the question asked.
 """
 answer_prompt = PromptTemplate(template=answer_template, input_variables=["cot", "question"])
 answer_chain = answer_prompt | llm
+print('i am here4')
 
 
 if prompt := st.chat_input():
diff --git a/app/streamlit/Dockerfile b/app/streamlit/Dockerfile
index b335644..1c55f0c 100644
--- a/app/streamlit/Dockerfile
+++ b/app/streamlit/Dockerfile
@@ -9,6 +9,10 @@ RUN pip install --no-cache-dir -r requirements.txt
 COPY Chatbot.py .
 COPY .env .
 
+# Run python once at build time to download the SentenceTransformer model
+COPY initialize_sentence_transformer.py .
+RUN python initialize_sentence_transformer.py
+
 EXPOSE 8501
 
 ENTRYPOINT ["streamlit", "run", "Chatbot.py"]
\ No newline at end of file
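Review note on the `torch.classes.__path__` patch in `Chatbot.py`: Streamlit's file watcher walks `__path__` on every imported module, and `torch.classes` exposes a lazy proxy that raises when the watcher touches it, crashing the app at startup. Both lines in the diff are variants of the community workaround; a commented restatement follows, with the caveat that the exact failure depends on the torch and Streamlit versions:

```python
# Keep Streamlit's source watcher away from torch.classes. Its __path__ is a
# lazy proxy that raises when inspected, so replace it with a plain list.
import os
import torch

# Variant used in Chatbot.py: point __path__ at a real filesystem location.
torch.classes.__path__ = [os.path.join(torch.__path__[0], torch.classes.__file__)]

# Equivalent, as the commented-out alternative in the diff notes:
# torch.classes.__path__ = []
```

If patching torch internals feels brittle, setting `fileWatcherType = "none"` under `[server]` in `.streamlit/config.toml` sidesteps the walk entirely, at the cost of losing auto-reload.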
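The `config()` calls also gain `default=` values so the app can boot inside the container without a fully populated `.env`. python-decouple resolves `os.environ` first, then `.env`, then the default, which means the placeholder `"123456"` keys let startup succeed but will fail at the first real provider call; real keys still belong in `.env`. A small sketch of the precedence:

```python
# python-decouple precedence: os.environ beats .env beats the default argument.
import os

from decouple import config

os.environ["CHAT_MODEL_PROVIDER"] = "deepseek"
assert config("CHAT_MODEL_PROVIDER", cast=str, default="gemini") == "deepseek"

del os.environ["CHAT_MODEL_PROVIDER"]
# With the variable absent from the environment and from .env, the default wins.
assert config("CHAT_MODEL_PROVIDER", cast=str, default="gemini") == "gemini"
```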
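The new Dockerfile steps bake the embedding model's weights into the image at build time, so the first chat request does not trigger a large download. A quick way to verify the bake worked, assuming the build used the default `paraphrase-multilingual-mpnet-base-v2` (`HF_HUB_OFFLINE` is Hugging Face Hub's standard no-network switch):

```python
# Run inside the built image: with HF_HUB_OFFLINE=1, SentenceTransformer must
# load from the cache populated by initialize_sentence_transformer.py during
# `docker build`, or raise immediately instead of downloading.
import os

os.environ["HF_HUB_OFFLINE"] = "1"  # set before anything touches the Hub

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("paraphrase-multilingual-mpnet-base-v2")
print(model.encode(["offline smoke test"]).shape)  # (1, 768) for this model
```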
-# RESPONSE = "The best treatment for hypertension is..." - -# deepseek_llm.return_value.content = RESPONSE -# gemini_llm.return_value.content = RESPONSE -# moonshot_llm.return_value = RESPONSE - -# at.chat_input[0].set_value(QUERY).run() - -# assert any(mock.called for mock in [deepseek_llm, gemini_llm, moonshot_llm]) -# assert at.chat_message[1].markdown[0].value == QUERY -# assert at.chat_message[2].markdown[0].value == RESPONSE -# assert at.chat_message[2].avatar == "assistant" -# assert not at.exception - @patch("langchain.llms.OpenAI.__call__") def test_Langchain_Quickstart(langchain_llm): @@ -59,3 +14,4 @@ def test_Langchain_Quickstart(langchain_llm): at.button[0].set_value(True).run() print(at) assert at.info[0].value == RESPONSE + diff --git a/app/streamlit/initialize_sentence_transformer.py b/app/streamlit/initialize_sentence_transformer.py new file mode 100644 index 0000000..3026701 --- /dev/null +++ b/app/streamlit/initialize_sentence_transformer.py @@ -0,0 +1,7 @@ +from decouple import config +from sentence_transformers import SentenceTransformer + +EMBEDDING_MODEL = config("EMBEDDING_MODEL", cast=str, default="paraphrase-multilingual-mpnet-base-v2") + +# Initialize embedding model +model = SentenceTransformer(EMBEDDING_MODEL) \ No newline at end of file