Commit 06a33cc8 by m0hammadjaan

{
"[python]": {
"editor.defaultFormatter": "ms-python.autopep8"
},
"python.formatting.provider": "none"
}
import streamlit as st
st.set_page_config(
    page_title="Langchain Chatbot",
    page_icon='💬',
    layout='wide'
)
st.header("Chatbot Implementations with OpenAI & Langchain")
st.write("""
Langchain is a powerful framework designed to streamline the development of applications using Language Models (LLMs). It provides a comprehensive integration of various components, simplifying the process of assembling them to create robust applications.
Leveraging the power of Langchain, the creation of chatbots becomes effortless. Here are a few examples of chatbot implementations catering to different use cases:
- **Basic Chatbot**: Engage in interactive conversations with the LLM.
- **Chatbot with Web Browser Access**: An internet-enabled chatbot capable of answering user queries about recent events.
- **Chat with your Documents**: Empower the chatbot with the ability to access custom documents, enabling it to provide answers to user queries based on the referenced information.
- **Chat with Youtube Videos**: Engage in interactive conversations with Youtube videos.
To explore sample usage of each chatbot, please navigate to the corresponding chatbot section.
""")
import utils
import streamlit as st
from streaming import StreamHandler
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
st.set_page_config(page_title="Chatbot", page_icon="💬")
st.header('Basic Chatbot')
st.write('Allows users to interact with the OpenAI LLMs')
class Basic:

    def __init__(self):
        utils.configure_openai_api_key()
        self.openai_model = "gpt-3.5-turbo"

    def setup_chain(self):
        llm = OpenAI(model_name=self.openai_model, temperature=0, streaming=True)
        chain = ConversationChain(llm=llm, verbose=True)
        return chain

    @utils.enable_chat_history
    def main(self):
        chain = self.setup_chain()
        user_query = st.chat_input(placeholder="Ask me anything!")
        if user_query:
            utils.display_msg(user_query, 'user')
            with st.chat_message("assistant"):
                try:
                    # Stream tokens into a placeholder as they are generated
                    st_cb = StreamHandler(st.empty())
                    response = chain.run(user_query, callbacks=[st_cb])
                    st.session_state.messages.append(
                        {"role": "assistant", "content": response})
                except Exception as e:
                    # Show the error in the UI rather than only on the console
                    st.error(e)

if __name__ == "__main__":
    obj = Basic()
    obj.main()
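For reference, the same conversational loop can be exercised outside Streamlit; a minimal sketch, assuming OPENAI_API_KEY is set in the environment (the prompts are illustrative):

# Sketch: ConversationChain keeps a buffer memory by default, so the second
# call can refer back to the first. Not part of the app; illustration only.
from langchain.llms import OpenAI
from langchain.chains import ConversationChain

llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0)
chain = ConversationChain(llm=llm)
print(chain.run("Hi, my name is Ada."))
print(chain.run("What is my name?"))  # answered from conversation memory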
import utils
import streamlit as st
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.tools import DuckDuckGoSearchRun
from langchain.agents import initialize_agent, Tool
from langchain.callbacks import StreamlitCallbackHandler
from langchain.utilities import ArxivAPIWrapper
st.set_page_config(page_title="ChatWeb", page_icon="🌐")
st.header('Chatbot with Web Browser Access')
st.write('Equipped with internet access, enables users to ask questions about recent events')
class ChatbotTools:

    def __init__(self):
        utils.configure_openai_api_key()
        self.openai_model = "gpt-3.5-turbo"

    def setup_agent(self):
        # Define tools
        ddg_search = DuckDuckGoSearchRun()
        arxiv = ArxivAPIWrapper()
        tools = [
            Tool(
                name="DuckDuckGoSearch",
                func=ddg_search.run,
                description="Useful for when you need to answer questions about current events. You should ask targeted questions",
            ),
            Tool(
                name="Arxiv",
                func=arxiv.run,
                description="Useful for when you need to answer questions about research papers, scientific articles, preprints, etc.",
            )
        ]

        # Setup LLM and agent
        llm = ChatOpenAI(model_name=self.openai_model, streaming=True)
        agent = initialize_agent(
            tools=tools,
            llm=llm,
            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            handle_parsing_errors=True,
            verbose=True
        )
        return agent

    @utils.enable_chat_history
    def main(self):
        agent = self.setup_agent()
        user_query = st.chat_input(placeholder="Ask me anything!")
        if user_query:
            utils.display_msg(user_query, 'user')
            with st.chat_message("assistant"):
                try:
                    # Render the agent's intermediate steps (thoughts and tool calls)
                    st_cb = StreamlitCallbackHandler(st.container())
                    response = agent.run(user_query, callbacks=[st_cb])
                    st.session_state.messages.append(
                        {"role": "assistant", "content": response})
                    st.write(response)
                except Exception as e:
                    st.error(e)

if __name__ == "__main__":
    obj = ChatbotTools()
    obj.main()
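The same ReAct agent can be tested without the UI; a rough sketch under the same pins, assuming OPENAI_API_KEY is set (the query is illustrative):

# Sketch: ZERO_SHOT_REACT_DESCRIPTION picks a tool from its description,
# observes the result, and loops until it can answer. Illustration only.
from langchain.agents import AgentType, initialize_agent, Tool
from langchain.chat_models import ChatOpenAI
from langchain.tools import DuckDuckGoSearchRun

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
tools = [Tool(name="DuckDuckGoSearch", func=DuckDuckGoSearchRun().run,
              description="Useful for answering questions about current events")]
agent = initialize_agent(tools=tools, llm=llm,
                         agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
                         handle_parsing_errors=True)
print(agent.run("Who won the most recent Nobel Prize in Physics?"))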
import os
import utils
import streamlit as st
from streaming import StreamHandler
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
st.set_page_config(page_title="ChatPDF", page_icon="📄")
st.header('Chat with your Documents')
st.write('Has access to custom documents and can respond to user queries by referring to the content within those documents')
class CustomDataChatbot:

    def __init__(self):
        utils.configure_openai_api_key()
        self.openai_model = "gpt-3.5-turbo"

    def save_file(self, file):
        # Persist the uploaded file locally so PyPDFLoader can read it from disk
        folder = 'tmp'
        if not os.path.exists(folder):
            os.makedirs(folder)
        file_path = f'./{folder}/{file.name}'
        with open(file_path, 'wb') as f:
            f.write(file.getvalue())
        return file_path

    @st.spinner('Analyzing documents...')
    def setup_qa_chain(self, uploaded_files):
        # Load documents
        docs = []
        for file in uploaded_files:
            file_path = self.save_file(file)
            loader = PyPDFLoader(file_path)
            docs.extend(loader.load())

        # Split documents
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1500,
            chunk_overlap=200
        )
        splits = text_splitter.split_documents(docs)

        # Create embeddings and store in vectordb
        embeddings = OpenAIEmbeddings()
        vectordb = FAISS.from_documents(splits, embeddings)

        # Define retriever
        retriever = vectordb.as_retriever()

        # Setup memory for contextual conversation
        memory = ConversationBufferMemory(
            memory_key='chat_history',
            return_messages=True
        )

        # Setup LLM and QA chain
        llm = ChatOpenAI(model_name=self.openai_model, temperature=0, streaming=True)
        qa_chain = ConversationalRetrievalChain.from_llm(
            llm, retriever=retriever, memory=memory, verbose=True)
        return qa_chain

    @utils.enable_chat_history
    def main(self):
        # User inputs
        uploaded_files = st.sidebar.file_uploader(
            label='Upload PDF files', type=['pdf'], accept_multiple_files=True)
        if not uploaded_files:
            st.error("Please upload PDF documents to continue!")
            st.stop()

        user_query = st.chat_input(placeholder="Ask me anything!")

        if uploaded_files and user_query:
            qa_chain = self.setup_qa_chain(uploaded_files)
            utils.display_msg(user_query, 'user')
            with st.chat_message("assistant"):
                try:
                    st_cb = StreamHandler(st.empty())
                    response = qa_chain.run(user_query, callbacks=[st_cb])
                    st.session_state.messages.append(
                        {"role": "assistant", "content": response})
                except Exception as e:
                    st.error(e)

if __name__ == "__main__":
    obj = CustomDataChatbot()
    obj.main()
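Under the hood, each question is embedded and matched against the stored chunks before the LLM answers. A minimal sketch of just that retrieval step ('sample.pdf' and the query are placeholders):

# Sketch of the retrieval step in isolation: split a PDF, index it in FAISS,
# and inspect which chunks a query pulls back. Illustration only.
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

docs = PyPDFLoader("sample.pdf").load()  # placeholder file name
splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
vectordb = FAISS.from_documents(splitter.split_documents(docs), OpenAIEmbeddings())
for doc in vectordb.as_retriever().get_relevant_documents("What is this document about?"):
    print(doc.page_content[:100])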
import os
import utils
import streamlit as st
from streaming import StreamHandler
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import YoutubeLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
st.set_page_config(page_title="Chatbot", page_icon="🎥")
st.header('YouTube Chatbot')
st.write('Allows users to chat with YouTube videos using the OpenAI LLMs')
class YoutubeChatbot:

    def __init__(self):
        utils.configure_openai_api_key()
        self.openai_model = "gpt-3.5-turbo"

    @st.spinner('Fetching transcript...')
    def setup_qa_chain(self, url):
        # Load documents: fetch the YouTube transcript
        docs = []
        loader = YoutubeLoader.from_youtube_url(url)
        docs.extend(loader.load())

        # Split documents
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1500,
            chunk_overlap=200
        )
        splits = text_splitter.split_documents(docs)

        # Create embeddings and store in vectordb
        embeddings = OpenAIEmbeddings()
        vectordb = FAISS.from_documents(splits, embeddings)

        # Define retriever
        retriever = vectordb.as_retriever()

        # Setup memory for contextual conversation
        memory = ConversationBufferMemory(
            memory_key='chat_history',
            return_messages=True
        )

        # Setup LLM and QA chain
        llm = ChatOpenAI(model_name=self.openai_model, temperature=0, streaming=True)
        qa_chain = ConversationalRetrievalChain.from_llm(
            llm, retriever=retriever, memory=memory, verbose=True)
        return qa_chain

    @utils.enable_chat_history
    def main(self):
        # User inputs
        url = st.sidebar.text_input(label='Enter YouTube URL', value='')
        if not url:
            st.error("Please add a link to continue!")
            st.stop()

        user_query = st.chat_input(placeholder="Ask me anything!")

        if url and user_query:
            qa_chain = self.setup_qa_chain(url)
            utils.display_msg(user_query, 'user')
            with st.chat_message("assistant"):
                try:
                    st_cb = StreamHandler(st.empty())
                    response = qa_chain.run(user_query, callbacks=[st_cb])
                    st.session_state.messages.append(
                        {"role": "assistant", "content": response})
                except Exception as e:
                    st.error(e)

if __name__ == "__main__":
    obj = YoutubeChatbot()
    obj.main()
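YoutubeLoader fetches the video transcript (via youtube-transcript-api) and wraps it as documents; a quick standalone check (the URL is a placeholder):

# Sketch: fetch a transcript directly and peek at it. Illustration only.
from langchain.document_loaders import YoutubeLoader

loader = YoutubeLoader.from_youtube_url("https://www.youtube.com/watch?v=VIDEO_ID")  # placeholder URL
docs = loader.load()
print(docs[0].page_content[:200])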
langchain==0.0.312
PyPDF2==3.0.1
python-dotenv==1.0.0
streamlit==1.27.2
openai==0.27.6
altair<5
Pillow==9.5.0
tiktoken==0.5.1
duckduckgo-search==3.9.3
pypdf==3.16.4
faiss-cpu==1.7.4
chromadb
youtube-transcript-api==0.6.1
arxiv==2.0.0
from langchain.callbacks.base import BaseCallbackHandler
class StreamHandler(BaseCallbackHandler):

    def __init__(self, container, initial_text=""):
        self.container = container
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs):
        # Accumulate tokens and re-render the placeholder as text streams in
        self.text += token
        self.container.markdown(self.text)
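The handler rewrites a single placeholder on every callback, which is what produces the typing effect in the chatbot pages. A usage sketch inside a Streamlit script, assuming OPENAI_API_KEY is set:

# Sketch: each generated token triggers on_llm_new_token, which re-renders
# the st.empty() placeholder with the text so far. Illustration only.
import streamlit as st
from langchain.chat_models import ChatOpenAI
from streaming import StreamHandler

handler = StreamHandler(st.empty())
llm = ChatOpenAI(model_name="gpt-3.5-turbo", streaming=True)
llm.predict("Tell me a one-line joke", callbacks=[handler])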
import os
import streamlit as st
from PIL import Image

# decorator
def enable_chat_history(func):
    if os.environ.get("OPENAI_API_KEY"):

        # to clear chat history after switching chatbots
        current_page = func.__qualname__
        if "current_page" not in st.session_state:
            st.session_state["current_page"] = current_page
        if st.session_state["current_page"] != current_page:
            try:
                st.cache_resource.clear()
                del st.session_state["current_page"]
                del st.session_state["messages"]
            except KeyError:
                pass

        # to show chat history on ui
        if "messages" not in st.session_state:
            st.session_state["messages"] = [
                {"role": "assistant", "content": "How can I help you?"}]
        for msg in st.session_state["messages"]:
            st.chat_message(msg["role"]).write(msg["content"])

    def execute(*args, **kwargs):
        return func(*args, **kwargs)
    return execute
def display_msg(msg, author):
    """Method to display message on the UI

    Args:
        msg (str): message to display
        author (str): author of the message - user/assistant
    """
    st.session_state.messages.append({"role": author, "content": msg})
    st.chat_message(author).write(msg)
def configure_openai_api_key():
    # Display the resized DSD logo in the sidebar
    image = Image.open("DSD logo.png")
    new_image = image.resize((300, 300))
    st.sidebar.image(new_image, width=300)
    st.sidebar.markdown(
        "#### <a href='https://datasciencedojo.com/' style='color:lightblue;'>Powered by Data Science Dojo</a>",
        unsafe_allow_html=True
    )
    openai_api_key = st.sidebar.text_input(
        label="OpenAI API Key",
        type="password",
        value=st.session_state.get('OPENAI_API_KEY', ''),
        placeholder="sk-..."
    )
    if openai_api_key:
        st.session_state['OPENAI_API_KEY'] = openai_api_key
        os.environ['OPENAI_API_KEY'] = openai_api_key
    else:
        st.error("Please add your OpenAI API key to continue.")
        st.stop()
    return openai_api_key