llm-template

This commit is contained in:
leehk 2025-02-11 16:33:29 +08:00
parent f5eb0d3cf5
commit 91662eba15
18 changed files with 2708 additions and 0 deletions

9
.gitignore vendored
View File

@ -192,3 +192,12 @@ data/*
**/output/*.csv
!**/output/.gitkeep
!**/data/.gitkeep
# ignore macos files
.DS_Store
# ignore llmops
app/llmops/*
# ignore wandb files
**/wandb/

19
app/llm-template/Pipfile Normal file
View File

@ -0,0 +1,19 @@
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
langchain = "*"
google-genai = "*"
chromadb = "*"
SQLAlchemy = "*"
wandb = "*"
langchain-google-genai = "*"
openai = "*"
pydantic = "*"
[dev-packages]
[requires]
python_version = "3.10"

2578
app/llm-template/Pipfile.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,23 @@
# LLM Project Template
This template provides a starting point for building LLM-powered applications with Langchain, AI Agents, RAG, Summarization, Question-Answering with SQL, and LLMOps.
## Requirements
- Python 3.10+ (the Pipfile pins `python_version = "3.10"`)
- Pipenv
## Installation
```bash
pipenv install
```
## Usage
1. Provide the `OPENAI_API_KEY`, `WANDB_API_KEY`, `GOOGLE_API_KEY`, and `LANGSMITH_API_KEY` credentials used by `config.py` (do not commit real keys to version control).
2. Run the `main.py` file:
```bash
pipenv run python main.py
```

View File

@ -0,0 +1,3 @@
# agent1.py
# Add your agent implementation here

View File

@ -0,0 +1,3 @@
# agent2.py
# Add your agent implementation here

View File

@ -0,0 +1,3 @@
# qa_chain.py
# Add your question-answering chain implementation here

View File

@ -0,0 +1,3 @@
# rag_chain.py
# Add your RAG chain implementation here

View File

@ -0,0 +1,3 @@
# sql_chain.py
# Add your question-answering with SQL chain implementation here

View File

@ -0,0 +1,3 @@
# summarization_chain.py
# Add your summarization chain implementation here

View File

@ -0,0 +1,9 @@
# config.py
# Central place for API credentials and project settings.
#
# SECURITY: never hardcode real keys here — a previous revision committed
# live W&B / Google / LangSmith credentials to version control; any such
# keys must be considered leaked, revoked, and rotated immediately.
# Values are now read from the process environment so this file can be
# committed safely. The placeholder defaults keep imports working in dev.
import os

OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "YOUR_OPENAI_API_KEY")
WANDB_API_KEY = os.environ.get("WANDB_API_KEY", "YOUR_WANDB_API_KEY")
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "YOUR_GOOGLE_API_KEY")
LANGSMITH_API_KEY = os.environ.get("LANGSMITH_API_KEY", "YOUR_LANGSMITH_API_KEY")
# Project name used for both the W&B run and the LangSmith project.
LANGCHAIN_PROJECT = os.environ.get("LANGCHAIN_PROJECT", "llmops-demo")

View File

@ -0,0 +1,3 @@
# deployment.py
# Add your deployment scripts and configurations here

View File

@ -0,0 +1,3 @@
# evaluation.py
# Add your evaluation metrics and scripts here

View File

@ -0,0 +1,3 @@
# monitoring.py
# Add your monitoring and logging implementation here

31
app/llm-template/main.py Normal file
View File

@ -0,0 +1,31 @@
# main.py
# Demo entry point: sends one fixed prompt to Gemini through LangChain,
# tracing the call in LangSmith and logging the run to Weights & Biases.
import os
import wandb
from config import GOOGLE_API_KEY, WANDB_API_KEY, LANGSMITH_API_KEY, LANGCHAIN_PROJECT
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.callbacks import LangChainTracer
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# LangSmith tracing is configured via environment variables, which must be
# set before any traced LangChain object is created.
os.environ["LANGCHAIN_TRACING"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_PROJECT"] = LANGCHAIN_PROJECT
os.environ["LANGCHAIN_API_KEY"] = LANGSMITH_API_KEY

# Open a Weights & Biases run for this demo under the team entity.
wandb.login(key=WANDB_API_KEY)
run = wandb.init(project=LANGCHAIN_PROJECT, entity="aimingmed")

# Build the Gemini-backed chain with the LangSmith tracer attached.
callback_tracer = LangChainTracer()
gemini_llm = ChatGoogleGenerativeAI(
    model="gemini-2.0-flash-001",
    google_api_key=GOOGLE_API_KEY,
    callbacks=[callback_tracer],
)

# Run a single no-variable prompt and print the model's reply.
poem_prompt = PromptTemplate(template="Write a short poem about the sun.", input_variables=[])
poem_chain = LLMChain(llm=gemini_llm, prompt=poem_prompt)
poem = poem_chain.run({})
print(poem)

# Close out the W&B run so it is marked finished in the dashboard.
wandb.finish()

View File

@ -0,0 +1,6 @@
langchain
google-generativeai
chromadb
SQLAlchemy
wandb
langsmith

View File

@ -0,0 +1,3 @@
# data_loader.py
# Add your data loading functions here

View File

@ -0,0 +1,3 @@
# embedding_utils.py
# Add your embedding utilities here