Commit 04608ac

using gpt-4o-mini
1 parent a3f6fef commit 04608ac

8 files changed: +62 / -45 lines

pages/1_💬_basic_chatbot.py (+3 / -4)

@@ -2,7 +2,6 @@
 import streamlit as st
 from streaming import StreamHandler
 
-from langchain_openai import ChatOpenAI
 from langchain.chains import ConversationChain
 
 st.set_page_config(page_title="Chatbot", page_icon="💬")
@@ -13,11 +12,11 @@
 class BasicChatbot:
 
     def __init__(self):
-        self.openai_model = utils.configure_openai()
+        utils.sync_st_session()
+        self.llm = utils.configure_llm()
 
     def setup_chain(self):
-        llm = ChatOpenAI(model_name=self.openai_model, temperature=0, streaming=True)
-        chain = ConversationChain(llm=llm, verbose=True)
+        chain = ConversationChain(llm=self.llm, verbose=True)
         return chain
 
     @utils.enable_chat_history
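
Side note on the refactor above: a minimal usage sketch (not code from the commit) of how a page can drive the shared chain end to end. The `user_query` name and the explicit `messages` initialization are illustrative; in the repo the `@utils.enable_chat_history` decorator takes care of history setup.

import streamlit as st
import utils
from streaming import StreamHandler
from langchain.chains import ConversationChain

utils.sync_st_session()
llm = utils.configure_llm()                        # one shared entry point for every page
chain = ConversationChain(llm=llm, verbose=True)

if "messages" not in st.session_state:             # normally handled by the decorator
    st.session_state["messages"] = []

user_query = st.chat_input(placeholder="Ask me anything!")
if user_query:
    utils.display_msg(user_query, "user")
    with st.chat_message("assistant"):
        st_cb = StreamHandler(st.empty())          # streams tokens into the placeholder
        result = chain.invoke({"input": user_query}, {"callbacks": [st_cb]})
        st.session_state["messages"].append({"role": "assistant", "content": result["response"]})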

pages/2_⭐_context_aware_chatbot.py (+3 / -4)

@@ -2,7 +2,6 @@
 import streamlit as st
 from streaming import StreamHandler
 
-from langchain_openai import ChatOpenAI
 from langchain.chains import ConversationChain
 from langchain.memory import ConversationBufferMemory
 
@@ -14,13 +13,13 @@
 class ContextChatbot:
 
     def __init__(self):
-        self.openai_model = utils.configure_openai()
+        utils.sync_st_session()
+        self.llm = utils.configure_llm()
 
     @st.cache_resource
     def setup_chain(_self):
         memory = ConversationBufferMemory()
-        llm = ChatOpenAI(model_name=_self.openai_model, temperature=0, streaming=True)
-        chain = ConversationChain(llm=llm, memory=memory, verbose=True)
+        chain = ConversationChain(llm=_self.llm, memory=memory, verbose=True)
         return chain
 
     @utils.enable_chat_history
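
An illustrative snippet (not from the commit) of what ConversationBufferMemory adds to this page: the whole transcript is retained and prepended to each new prompt, so follow-ups can refer back to earlier turns. The llama3 model below is an assumption; any chat model works.

from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_models import ChatOllama

llm = ChatOllama(model="llama3")                   # assumes a local Ollama server
chain = ConversationChain(llm=llm, memory=ConversationBufferMemory(), verbose=True)

chain.invoke({"input": "My name is Ada."})
chain.invoke({"input": "What is my name?"})        # answerable only because memory holds turn one
print(chain.memory.buffer)                         # the accumulated Human/AI transcript

The leading underscore in `setup_chain(_self)` is also deliberate: `st.cache_resource` skips hashing parameters whose names start with an underscore, so the cached chain is not tied to, or broken by, the un-hashable instance argument.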

pages/3_🌐_chatbot_with_internet_access.py (+6 / -5)

@@ -2,11 +2,12 @@
 import streamlit as st
 
 from langchain import hub
-from langchain_openai import OpenAI
+from langchain_openai import ChatOpenAI
 from langchain.memory import ConversationBufferMemory
 from langchain_community.tools import DuckDuckGoSearchRun
 from langchain_community.callbacks import StreamlitCallbackHandler
-from langchain.agents import AgentExecutor, Tool, create_react_agent
+from langchain.agents import AgentExecutor, create_react_agent
+from langchain_core.tools import Tool
 
 st.set_page_config(page_title="ChatWeb", page_icon="🌐")
 st.header('Chatbot with Internet Access')
@@ -16,7 +17,8 @@
 class InternetChatbot:
 
     def __init__(self):
-        self.openai_model = utils.configure_openai()
+        utils.sync_st_session()
+        self.llm = utils.configure_llm()
 
     @st.cache_resource(show_spinner='Connecting..')
     def setup_agent(_self):
@@ -34,9 +36,8 @@ def setup_agent(_self):
         prompt = hub.pull("hwchase17/react-chat")
 
         # Setup LLM and Agent
-        llm = OpenAI(temperature=0, streaming=True)
         memory = ConversationBufferMemory(memory_key="chat_history")
-        agent = create_react_agent(llm, tools, prompt)
+        agent = create_react_agent(_self.llm, tools, prompt)
         agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=True)
         return agent_executor, memory
 
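
For context, a hedged sketch of how the ReAct agent around DuckDuckGo search is typically wired from these imports; the tool description and the `handle_parsing_errors` flag are assumptions rather than lines from the page. Swapping the completion-style OpenAI class for a chat model also matches what utils.configure_llm() now returns in every branch.

from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain.memory import ConversationBufferMemory
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_core.tools import Tool
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0, streaming=True)
search = DuckDuckGoSearchRun()
tools = [Tool(name="DuckDuckGoSearch", func=search.run,
              description="Useful for questions about current events.")]

prompt = hub.pull("hwchase17/react-chat")          # the ReAct prompt the page pulls
memory = ConversationBufferMemory(memory_key="chat_history")
agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, memory=memory,
                         verbose=True, handle_parsing_errors=True)
# executor.invoke({"input": "What happened in tech news today?"})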

pages/4_📄_chat_with_your_documents.py (+4 / -4)

@@ -8,8 +8,8 @@
 from langchain.chains import ConversationalRetrievalChain
 from langchain_community.document_loaders import PyPDFLoader
 from langchain_community.embeddings import HuggingFaceEmbeddings
-from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import DocArrayInMemorySearch
+from langchain_text_splitters import RecursiveCharacterTextSplitter
 
 st.set_page_config(page_title="ChatPDF", page_icon="📄")
 st.header('Chat with your documents (Basic RAG)')
@@ -19,7 +19,8 @@
 class CustomDataChatbot:
 
     def __init__(self):
-        self.openai_model = utils.configure_openai()
+        utils.sync_st_session()
+        self.llm = utils.configure_llm()
 
     def save_file(self, file):
         folder = 'tmp'
@@ -65,9 +66,8 @@ def setup_qa_chain(self, uploaded_files):
         )
 
         # Setup LLM and QA chain
-        llm = ChatOpenAI(model_name=self.openai_model, temperature=0, streaming=True)
         qa_chain = ConversationalRetrievalChain.from_llm(
-            llm=llm,
+            llm=self.llm,
             retriever=retriever,
             memory=memory,
             return_source_documents=True,
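
For reference, a rough sketch of the retrieval pipeline that `setup_qa_chain` builds from these imports; the file path, chunk sizes, and embedding defaults below are illustrative guesses, not values taken from the commit.

from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter

docs = PyPDFLoader("tmp/example.pdf").load()                          # hypothetical path
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
vectordb = DocArrayInMemorySearch.from_documents(splitter.split_documents(docs),
                                                 HuggingFaceEmbeddings())
retriever = vectordb.as_retriever()

memory = ConversationBufferMemory(memory_key="chat_history",
                                  output_key="answer", return_messages=True)
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=ChatOpenAI(model_name="gpt-4o-mini", temperature=0, streaming=True),
    retriever=retriever,
    memory=memory,
    return_source_documents=True,
    verbose=True,
)
# qa_chain.invoke({"question": "What is this document about?"})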

pages/5_🛢_chat_with_sql_db.py (+3 / -4)

@@ -17,7 +17,8 @@
 class SqlChatbot:
 
     def __init__(self):
-        self.openai_model = utils.configure_openai()
+        utils.sync_st_session()
+        self.llm = utils.configure_llm()
 
     def setup_db(_self, db_uri):
         if db_uri == 'USE_SAMPLE_DB':
@@ -33,10 +34,8 @@ def setup_db(_self, db_uri):
         return db
 
     def setup_sql_agent(_self, db):
-        llm = ChatOpenAI(model_name=_self.openai_model, temperature=0, streaming=True)
-
         agent = create_sql_agent(
-            llm=llm,
+            llm=_self.llm,
             db=db,
            top_k=10,
            verbose=True,
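
As a side note, a hedged sketch of how `create_sql_agent` is typically assembled around the shared LLM; the SQLite URI and `agent_type` are placeholders and assumptions, not values from the page.

from langchain_community.agent_toolkits import create_sql_agent
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI

db = SQLDatabase.from_uri("sqlite:///Chinook.db")         # any SQLAlchemy URI works here
agent = create_sql_agent(
    llm=ChatOpenAI(model_name="gpt-4o-mini", temperature=0, streaming=True),
    db=db,
    top_k=10,                                             # cap the rows returned per query
    verbose=True,
    agent_type="openai-tools",                            # assumption; the page may use another type
)
# agent.invoke({"input": "How many tables does the database have?"})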

requirements.txt (+11 / -10)

@@ -1,11 +1,12 @@
-langchain==0.1.17
-langchain_community==0.0.36
-langchain_openai==0.1.4
-langchainhub==0.1.15
-streamlit==1.33.0
-openai==1.25.0
-duckduckgo-search==5.3.0
-pypdf==4.2.0
-sentence-transformers==2.7.0
+langchain==0.2.9
+langchain_community==0.2.7
+langchain_core==0.2.21
+langchain_openai==0.1.17
+langchain_text_splitters==0.2.2
+openai==1.35.15
+SQLAlchemy==2.0.31
+streamlit==1.36.0
+duckduckgo-search==6.2.1
+pypdf==4.3.0
+sentence-transformers==3.0.1
 docarray==0.40.0
-SQLAlchemy==2.0.29

streaming.py (+1 / -1)

@@ -1,4 +1,4 @@
-from langchain.callbacks.base import BaseCallbackHandler
+from langchain_core.callbacks import BaseCallbackHandler
 
 class StreamHandler(BaseCallbackHandler):
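
Only the import path changes here; `langchain_core.callbacks` is the canonical home for `BaseCallbackHandler` in LangChain 0.2. For orientation, a handler of this shape (a sketch mirroring the usual StreamHandler pattern; the exact body in streaming.py may differ) appends each generated token to a Streamlit placeholder as it arrives:

from langchain_core.callbacks import BaseCallbackHandler

class StreamHandler(BaseCallbackHandler):
    def __init__(self, container, initial_text=""):
        self.container = container                  # e.g. st.empty() inside st.chat_message()
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.text += token
        self.container.markdown(self.text)           # redraw the partial answer on every token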

utils.py (+31 / -13)

@@ -1,8 +1,9 @@
 import os
 import openai
-import random
 import streamlit as st
 from datetime import datetime
+from langchain_openai import ChatOpenAI
+from langchain_community.chat_models import ChatOllama
 
 #decorator
 def enable_chat_history(func):
@@ -40,39 +41,56 @@ def display_msg(msg, author):
     st.session_state.messages.append({"role": author, "content": msg})
     st.chat_message(author).write(msg)
 
-def configure_openai():
+def choose_custom_openai_key():
     openai_api_key = st.sidebar.text_input(
         label="OpenAI API Key",
         type="password",
-        value=st.session_state['OPENAI_API_KEY'] if 'OPENAI_API_KEY' in st.session_state else '',
-        placeholder="sk-..."
+        placeholder="sk-...",
+        key="SELECTED_OPENAI_API_KEY"
         )
-    if openai_api_key:
-        st.session_state['OPENAI_API_KEY'] = openai_api_key
-        os.environ['OPENAI_API_KEY'] = openai_api_key
-    else:
+    if not openai_api_key:
         st.error("Please add your OpenAI API key to continue.")
         st.info("Obtain your key from this link: https://platform.openai.com/account/api-keys")
         st.stop()
 
-    model = "gpt-3.5-turbo"
+    model = "gpt-4o-mini"
     try:
-        client = openai.OpenAI()
+        client = openai.OpenAI(api_key=openai_api_key)
         available_models = [{"id": i.id, "created":datetime.fromtimestamp(i.created)} for i in client.models.list() if str(i.id).startswith("gpt")]
         available_models = sorted(available_models, key=lambda x: x["created"])
         available_models = [i["id"] for i in available_models]
 
         model = st.sidebar.selectbox(
             label="Model",
             options=available_models,
-            index=available_models.index(st.session_state['OPENAI_MODEL']) if 'OPENAI_MODEL' in st.session_state else 0
+            key="SELECTED_OPENAI_MODEL"
             )
-        st.session_state['OPENAI_MODEL'] = model
     except openai.AuthenticationError as e:
         st.error(e.body["message"])
         st.stop()
     except Exception as e:
         print(e)
         st.error("Something went wrong. Please try again later.")
         st.stop()
-    return model
+    return model, openai_api_key
+
+def configure_llm():
+    available_llms = ["gpt-4o-mini","llama3:8b","use your openai api key"]
+    llm_opt = st.sidebar.radio(
+        label="LLM",
+        options=available_llms,
+        key="SELECTED_LLM"
+        )
+
+    if llm_opt == "llama3:8b":
+        llm = ChatOllama(model="llama3", base_url=st.secrets["OLLAMA_ENDPOINT"])
+    elif llm_opt == "gpt-4o-mini":
+        llm = ChatOpenAI(model_name=llm_opt, temperature=0, streaming=True, api_key=st.secrets["OPENAI_API_KEY"])
+    else:
+        model, openai_api_key = choose_custom_openai_key()
+        llm = ChatOpenAI(model_name=model, temperature=0, streaming=True, api_key=openai_api_key)
+    return llm
+
+def sync_st_session():
+    for k, v in st.session_state.items():
+        st.session_state[k] = v
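
Taken together, every page now shares one setup path; a minimal sketch of the pattern (the class name below is illustrative):

import utils

class AnyChatbotPage:
    def __init__(self):
        utils.sync_st_session()            # re-write each session_state key to itself
        self.llm = utils.configure_llm()   # gpt-4o-mini, llama3:8b via Ollama, or a user-supplied OpenAI key

The self-assignment loop in sync_st_session looks like a no-op, but it is the usual workaround for Streamlit discarding widget-backed keys (such as SELECTED_LLM) when their widget is not rendered on a rerun, so sidebar choices survive switching between pages. Note that the gpt-4o-mini and llama3:8b branches read st.secrets["OPENAI_API_KEY"] and st.secrets["OLLAMA_ENDPOINT"], so those entries must exist in the app's secrets.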
