leehk 2025-03-06 17:43:22 +08:00
parent 947f722de9
commit 84ebb64394
8 changed files with 25 additions and 195 deletions


@@ -3,8 +3,8 @@ version: "3.9"
services:
  streamlit:
    build: ./streamlit
    platform: linux/amd64
    ports:
      - "8501:8501"
    volumes:
      - ./llmops/src/rag_cot/chroma_db:/app/llmops/src/rag_cot/chroma_db


@@ -2,9 +2,9 @@ FROM python:3.11-slim
WORKDIR /app/streamlit
COPY Pipfile ./
COPY requirements.txt ./
RUN pip install pipenv && pipenv install --system --deploy
RUN pip install --no-cache-dir -r requirements.txt
COPY Chatbot.py .
COPY .env .


@@ -25,22 +25,27 @@ def create_chat_completion(response: str, role: str = "assistant") -> ChatComple
    )
# @patch("openai.resources.chat.Completions.create")
# def test_Chatbot(openai_create):
#     at = AppTest.from_file("Chatbot.py").run()
#     assert not at.exception
#     at.chat_input[0].set_value("Do you know any jokes?").run()
#     assert at.info[0].value == "Please add your OpenAI API key to continue."
#     JOKE = "Why did the chicken cross the road? To get to the other side."
#     openai_create.return_value = create_chat_completion(JOKE)
#     at.text_input(key="chatbot_api_key").set_value("sk-...")
#     at.chat_input[0].set_value("Do you know any jokes?").run()
#     print(at)
#     assert at.chat_message[1].markdown[0].value == "Do you know any jokes?"
#     assert at.chat_message[2].markdown[0].value == JOKE
#     assert at.chat_message[2].avatar == "assistant"
#     assert not at.exception
@patch("langchain_deepseek.ChatDeepSeek.__call__")
@patch("langchain_google_genai.ChatGoogleGenerativeAI.invoke")
@patch("langchain_community.llms.moonshot.Moonshot.__call__")
def test_Chatbot(moonshot_llm, gemini_llm, deepseek_llm):
    at = AppTest.from_file("Chatbot.py").run()
    assert not at.exception
    QUERY = "What is the best treatment for hypertension?"
    RESPONSE = "The best treatment for hypertension is..."
    deepseek_llm.return_value.content = RESPONSE
    gemini_llm.return_value.content = RESPONSE
    moonshot_llm.return_value = RESPONSE
    at.chat_input[0].set_value(QUERY).run()
    assert any(mock.called for mock in [deepseek_llm, gemini_llm, moonshot_llm])
    assert at.chat_message[1].markdown[0].value == QUERY
    assert at.chat_message[2].markdown[0].value == RESPONSE
    assert at.chat_message[2].avatar == "assistant"
    assert not at.exception

@patch("langchain.llms.OpenAI.__call__")


@@ -1,33 +0,0 @@
import streamlit as st
import anthropic

with st.sidebar:
    anthropic_api_key = st.text_input("Anthropic API Key", key="file_qa_api_key", type="password")
    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/1_File_Q%26A.py)"
    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"

st.title("📝 File Q&A with Anthropic")
uploaded_file = st.file_uploader("Upload an article", type=("txt", "md"))
question = st.text_input(
    "Ask something about the article",
    placeholder="Can you give me a short summary?",
    disabled=not uploaded_file,
)

if uploaded_file and question and not anthropic_api_key:
    st.info("Please add your Anthropic API key to continue.")

if uploaded_file and question and anthropic_api_key:
    article = uploaded_file.read().decode()
    prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n<article>
    {article}\n\n</article>\n\n{question}{anthropic.AI_PROMPT}"""

    client = anthropic.Client(api_key=anthropic_api_key)
    response = client.completions.create(
        prompt=prompt,
        stop_sequences=[anthropic.HUMAN_PROMPT],
        model="claude-v1",  # "claude-2" for Claude 2 model
        max_tokens_to_sample=100,
    )
    st.write("### Answer")
    st.write(response.completion)


@@ -1,48 +0,0 @@
import streamlit as st

from langchain.agents import initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.tools import DuckDuckGoSearchRun

with st.sidebar:
    openai_api_key = st.text_input(
        "OpenAI API Key", key="langchain_search_api_key_openai", type="password"
    )
    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"

st.title("🔎 LangChain - Chat with search")

"""
In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
"""

if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
    ]

for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    if not openai_api_key:
        st.info("Please add your OpenAI API key to continue.")
        st.stop()

    llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True)
    search = DuckDuckGoSearchRun(name="Search")
    search_agent = initialize_agent(
        [search], llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True
    )
    with st.chat_message("assistant"):
        st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
        response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
        st.session_state.messages.append({"role": "assistant", "content": response})
        st.write(response)


@@ -1,29 +0,0 @@
import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

st.title("🦜🔗 Langchain - Blog Outline Generator App")

openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")


def blog_outline(topic):
    # Instantiate LLM model
    llm = OpenAI(model_name="text-davinci-003", openai_api_key=openai_api_key)
    # Prompt
    template = "As an experienced data scientist and technical writer, generate an outline for a blog about {topic}."
    prompt = PromptTemplate(input_variables=["topic"], template=template)
    prompt_query = prompt.format(topic=topic)
    # Run LLM model
    response = llm(prompt_query)
    # Print results
    return st.info(response)


with st.form("myform"):
    topic_text = st.text_input("Enter prompt:", "")
    submitted = st.form_submit_button("Submit")
    if not openai_api_key:
        st.info("Please add your OpenAI API key to continue.")
    elif submitted:
        blog_outline(topic_text)


@@ -1,65 +0,0 @@
from openai import OpenAI
import streamlit as st
from streamlit_feedback import streamlit_feedback
import trubrics

with st.sidebar:
    openai_api_key = st.text_input("OpenAI API Key", key="feedback_api_key", type="password")
    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/5_Chat_with_user_feedback.py)"
    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"

st.title("📝 Chat with feedback (Trubrics)")

"""
In this example, we're using [streamlit-feedback](https://github.com/trubrics/streamlit-feedback) and Trubrics to collect and store feedback
from the user about the LLM responses.
"""

if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "How can I help you? Leave feedback to help me improve!"}
    ]
if "response" not in st.session_state:
    st.session_state["response"] = None

messages = st.session_state.messages
for msg in messages:
    st.chat_message(msg["role"]).write(msg["content"])

if prompt := st.chat_input(placeholder="Tell me a joke about sharks"):
    messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    if not openai_api_key:
        st.info("Please add your OpenAI API key to continue.")
        st.stop()
    client = OpenAI(api_key=openai_api_key)
    response = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
    st.session_state["response"] = response.choices[0].message.content
    with st.chat_message("assistant"):
        messages.append({"role": "assistant", "content": st.session_state["response"]})
        st.write(st.session_state["response"])

if st.session_state["response"]:
    feedback = streamlit_feedback(
        feedback_type="thumbs",
        optional_text_label="[Optional] Please provide an explanation",
        key=f"feedback_{len(messages)}",
    )
    # This app is logging feedback to Trubrics backend, but you can send it anywhere.
    # The return value of streamlit_feedback() is just a dict.
    # Configure your own account at https://trubrics.streamlit.app/
    if feedback and "TRUBRICS_EMAIL" in st.secrets:
        config = trubrics.init(
            email=st.secrets.TRUBRICS_EMAIL,
            password=st.secrets.TRUBRICS_PASSWORD,
        )
        collection = trubrics.collect(
            component_name="default",
            model="gpt",
            response=feedback,
            metadata={"chat": messages},
        )
        trubrics.save(config, collection)
        st.toast("Feedback recorded!", icon="📝")


@@ -11,4 +11,4 @@ python-decouple
langchain_google_genai
langchain-deepseek
sentence_transformers
mlflow
watchdog