Commit 518fddb1 by zaid

initial commits

parents
.env
\ No newline at end of file
# Social-Content-Generator
\ No newline at end of file
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain import PromptTemplate
import streamlit as st
from PyPDF2 import PdfReader
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.redis import Redis
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from docx import Document
from docx.shared import Inches
import io
from PIL import Image
import requests
from dotenv import load_dotenv
load_dotenv()
# Streamlit page configuration must be the first st.* call of the script run.
st.set_page_config(layout="wide",
                   page_title="Social Content Generator",
                   page_icon=Image.open('assets/dsd_icon.png'))

# Hide Streamlit's default hamburger menu and footer chrome.
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)

# OPENAI_API_KEY is loaded into the environment by load_dotenv() above and read
# by the OpenAI client itself; the previous bare `os.getenv('OPENAI_API_KEY')`
# discarded its return value and was a no-op, so it has been removed.
embeddings = OpenAIEmbeddings()
#Loading the PDF
def load_pdf_text(pdfs):
    """Return the concatenated text of every page across all uploaded PDFs."""
    extracted_pages = []
    for uploaded in pdfs:
        reader = PdfReader(uploaded)
        extracted_pages.extend(page.extract_text() for page in reader.pages)
    return "".join(extracted_pages)
#Splitting the text into chunks
def pdf_text_chunks(text):
    """Split raw PDF text into overlapping chunks suitable for embedding."""
    chunker = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    return chunker.split_text(text)
#Embedding the text
def get_db_from_chunks(text_chunks):
    """Index the text chunks in the local Redis vector store and return it."""
    return Redis.from_texts(text_chunks, embeddings, redis_url="redis://localhost:6379")
# Get the answer to the question
def query_response(db, query):
    """Generate a blog post answering `query`, grounded in chunks retrieved from `db`."""
    # Retrieve the four chunks most similar to the question and merge them into
    # one context string that is injected into the system prompt.
    matches = db.similarity_search(query, k=4)
    context = " ".join(doc.page_content for doc in matches)

    chat_model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)

    # System prompt: blog-writing instructions grounded in the retrieved documents.
    prompt_template = """
Imagine you are a skilled blog writer tasked with creating an blog based on the content and data provided in the {documents}.
Start from the background information provided in the documents and use it to create a well-researched article around it and go in depth on the topic.
Your goal is to use only factual information from the documents to craft a comprehensive and informative blog post.
To enhance readability and length, create headings and subheadings based on the factual content within the documents.
If you encounter a situation where you don't have enough information to answer a question, simply state "I don't know."
Avoid using terms like "transcript" or "embeddings" etc; instead, refer to the source material as 'documents'
Ensure that the resulting blog is both lengthy and factually accurate.
"""
    chat_prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template(prompt_template),
        HumanMessagePromptTemplate.from_template("Answer the following question: {question}"),
    ])

    # Run the chain with the question and the retrieved context filled in.
    chain = LLMChain(llm=chat_model, prompt=chat_prompt)
    return chain.run(question=query, documents=context)
#Loading the model
def load_llm(max_tokens, prompt_template):
    """Build an LLMChain around gpt-3.5-turbo for the given prompt template.

    Args:
        max_tokens: cap on tokens generated per completion. Previously this
            parameter was accepted but silently ignored; it is now forwarded
            to the model.
        prompt_template: template string with a `{user_input}` placeholder.

    Returns:
        A ready-to-call LLMChain.
    """
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9,
                     max_tokens=max_tokens)
    # Leftover debug `print(llm_chain)` removed.
    return LLMChain(
        llm=llm,
        prompt=PromptTemplate.from_template(prompt_template)
    )
def get_src_original_url(query):
    """Return the original-size URL of the first Pexels photo matching `query`.

    Writes a Streamlit message and returns None when no photo is found or the
    API call fails.
    """
    url = 'https://api.pexels.com/v1/search'
    headers = {
        # SECURITY: the Pexels key was previously hard-coded in source. It is
        # kept only as a fallback for backward compatibility — rotate it and
        # supply the key via the PEXELS_API_KEY environment variable instead.
        'Authorization': os.getenv(
            'PEXELS_API_KEY',
            "2I5wwGXzrCpMdfNlwoGLOUvXkgKW2pJxx9Zy4EaadsvtbWLDdFE14ZcR",
        ),
    }
    params = {
        'query': query,
        'per_page': 1,
    }
    # Bounded timeout so a stalled API call cannot hang the Streamlit app.
    response = requests.get(url, headers=headers, params=params, timeout=30)
    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        photos = response.json().get('photos', [])
        if photos:
            return photos[0]['src']['original']
        st.write("No photos found for the given query.")
    else:
        st.write(f"Error: {response.status_code}, {response.text}")
    return None
def create_word_docx(user_input, paragraph, image_input, dojo_img):
    """Assemble a Word document: brand logo, heading, body text, and an image."""
    doc = Document()

    def _append_picture(pil_image):
        # python-docx consumes a file-like object, so round-trip through PNG bytes.
        buffer = io.BytesIO()
        pil_image.save(buffer, format='PNG')
        buffer.seek(0)
        doc.add_picture(buffer, width=Inches(4))

    _append_picture(dojo_img)        # brand logo at the top
    doc.add_heading(user_input, level=1)
    doc.add_paragraph(paragraph)
    _append_picture(image_input)     # illustrative image below the text
    return doc
def main():
    """Streamlit entry point: generate a blog either from the user's own
    indexed PDFs (retrieval over Redis) or directly from ChatGPT, then offer
    the result as a downloadable Word document."""
    if 'db' not in st.session_state:
        st.session_state.db = None
    # Forward-slash path: the original 'assets\dsdojo.png' (literal backslash)
    # only resolved on Windows.
    st.sidebar.image('assets/dsdojo.png')
    st.sidebar.title("Blog Generator")
    selection = st.sidebar.radio("**Select your Approach to generate the Blog**", ("Utilize Your Private Data", "Blog Powered by ChatGPT"))
    if selection == "Utilize Your Private Data":
        uploaded_files = st.sidebar.file_uploader("**Upload your files and press process**", type=["pdf"], accept_multiple_files=True)
        if uploaded_files is not None:
            if st.sidebar.button("Process"):
                with st.spinner("Processing..."):
                    # Index the uploaded PDFs once; keep the handle in session
                    # state so it survives Streamlit reruns.
                    pdf_text = load_pdf_text(uploaded_files)
                    text_chunks = pdf_text_chunks(pdf_text)
                    st.session_state.db = get_db_from_chunks(text_chunks)
        query = st.sidebar.text_input("Please enter the idea/topic for the blog you want to generate using the indexed data!")
        img_input = st.sidebar.text_input("Please enter the topic for the image you want to fetch!")
        if len(query) > 0 and len(img_input) > 0:
            # Guard: asking before pressing "Process" used to crash with db=None.
            if st.session_state.db is None:
                st.error("Please upload and process your documents first!")
                return
            with st.spinner("Generating Content..."):
                response = query_response(st.session_state.db, query)
                if len(response) > 0:
                    # (fixed doubled word in the original message)
                    st.info("Content has been generated successfully!")
                    st.write(response, unsafe_allow_html=True)
                else:
                    st.error("Content couldn't be generated!")
                image_url = get_src_original_url(img_input)
                st.image(image_url)
                st.markdown("**Download the content**")
                doc = create_word_docx(query, response, Image.open(requests.get(image_url, stream=True).raw), Image.open('assets/dsdojo.png'))
                # Serialize the document into memory for the download button.
                doc_buffer = io.BytesIO()
                doc.save(doc_buffer)
                doc_buffer.seek(0)
                st.download_button(
                    label='Download Blog',
                    data=doc_buffer,
                    file_name=query + '.docx',
                    mime='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
                )
    else:
        user_input = st.sidebar.text_input("Please enter the idea/topic for the article you want to generate!")
        image_input = st.sidebar.text_input("Please enter the topic for the image you want to fetch!")
        if len(user_input) > 0 and len(image_input) > 0:
            with st.spinner("Generating Content..."):
                prompt_template = """
Imagine you are a skilled blog writer tasked with creating an article/blog based on the {user_input}.
your task is to create a well-researched article around it.
Use factual information and reliable sources to support your content.
Feel free to structure your blog with headings and subheadings to enhance its readability and organization.
Ensure that the resulting blog is informative, engaging, and factually accurate, providing valuable insights on the chosen idea or topic.
"""
                # Debug prints of the chain object removed.
                llm_call = load_llm(max_tokens=800, prompt_template=prompt_template)
                result = llm_call(user_input)
                st.subheader(user_input)
                if len(result) > 0:
                    st.info("Content has been generated successfully!")
                    st.write(result['text'], unsafe_allow_html=True)
                else:
                    st.error("Content couldn't be generated!")
                image_url = get_src_original_url(image_input)
                st.image(image_url)
                st.markdown("**Download the content**")
                doc = create_word_docx(user_input, result['text'], Image.open(requests.get(image_url, stream=True).raw), Image.open('assets/dsdojo.png'))
                doc_buffer = io.BytesIO()
                doc.save(doc_buffer)
                doc_buffer.seek(0)
                st.download_button(
                    label='Download Blog',
                    data=doc_buffer,
                    file_name=user_input + '.docx',
                    mime='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
                )
        # Dead trailing reassignments (query/user_input = "") removed: they had
        # no effect under Streamlit's rerun model.

if __name__ == "__main__":
    main()
#Ignore env file
.env
\ No newline at end of file
# Youtube-Bot
\ No newline at end of file
# Installs the pinned Python dependencies for the Youtube-Bot app, one package
# at a time with pip. NOTE(review): a failing pip install does not stop the
# loop ($LASTEXITCODE is never checked) — confirm that best-effort behavior is
# intended.
#run using "./req2.ps1" in project folder
# List of Python dependencies
$dependencies = @(
    "aiohttp==3.8.5",
    "aiosignal==1.3.1",
    "altair==4.2.2",
    "async-timeout==4.0.3",
    "attrs==23.1.0",
    "blinker==1.6.2",
    "cachetools==5.3.1",
    "certifi==2023.7.22",
    "charset-normalizer==3.2.0",
    "click==8.1.7",
    "colorama==0.4.6",
    "dataclasses-json==0.5.14",
    "easyocr==1.7.0",
    "entrypoints==0.4",
    "faiss-cpu==1.7.4",
    "filelock==3.12.2",
    "frozenlist==1.4.0",
    "gitdb==4.0.10",
    "GitPython==3.1.32",
    "greenlet==2.0.2",
    "idna==3.4",
    "imageio==2.31.2",
    "importlib-metadata==6.8.0",
    "Jinja2==3.1.2",
    "jsonschema==4.19.0",
    "jsonschema-specifications==2023.7.1",
    "langchain==0.0.177",
    "lazy_loader==0.3",
    "markdown-it-py==3.0.0",
    "MarkupSafe==2.1.3",
    "marshmallow==3.20.1",
    "mdurl==0.1.2",
    "mpmath==1.3.0",
    "multidict==6.0.4",
    "mypy-extensions==1.0.0",
    "networkx==3.1",
    "ninja==1.11.1",
    "numexpr==2.8.5",
    "numpy==1.25.2",
    "openai==0.27.10",
    "openapi-schema-pydantic==1.2.4",
    "opencv-python-headless==4.8.0.76",
    "packaging==23.1",
    "pandas==2.1.0",
    "Pillow==10.0.0",
    "protobuf==3.20.3",
    "pyarrow==13.0.0",
    "pyclipper==1.3.0.post4",
    "pydantic==1.10.12",
    "pydeck==0.8.1b0",
    "Pygments==2.16.1",
    "Pympler==1.0.1",
    "python-bidi==0.4.2",
    "python-dateutil==2.8.2",
    "python-dotenv==1.0.0",
    "pytz==2023.3",
    "PyWavelets==1.4.1",
    "PyYAML==6.0.1",
    "redis==5.0.0",
    "referencing==0.30.2",
    "regex==2023.8.8",
    "requests==2.31.0",
    "rich==13.5.2",
    "rpds-py==0.10.0",
    "scikit-image==0.21.0",
    "scipy==1.11.2",
    "shapely==2.0.1",
    "six==1.16.0",
    "smmap==5.0.0",
    "SQLAlchemy==2.0.20",
    "streamlit==1.22.0",
    "streamlit-chat==0.0.2.2",
    "sympy==1.12",
    "tenacity==8.2.3",
    "tifffile==2023.8.30",
    "tiktoken==0.4.0",
    "toml==0.10.2",
    "toolz==0.12.0",
    "torch==2.0.1",
    "torchvision==0.15.2",
    "tornado==6.3.3",
    "tqdm==4.66.1",
    "typing-inspect==0.8.0",
    "typing_extensions==4.5.0",
    "tzdata==2023.3",
    "tzlocal==5.0.1",
    "urllib3==2.0.4",
    "validators==0.21.2",
    "watchdog==3.0.0",
    "yarl==1.9.2",
    "youtube-transcript-api==0.6.1",
    "zipp==3.16.2"
)
# Loop through each dependency and install it
foreach ($dependency in $dependencies) {
    Write-Host "Installing $dependency"
    pip install $dependency
}
Write-Host "All Python dependencies installed successfully."
from streamlit_chat import message
import streamlit as st
from langchain.vectorstores import FAISS
from langchain import LLMChain
from langchain.document_loaders import YoutubeLoader
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
import os
from dotenv import load_dotenv
from io import StringIO
from PIL import Image
# Load environment variables from .env file
load_dotenv()
# Page chrome: tab title and favicon. Must precede other st.* calls.
st.set_page_config(
    page_title="Youtube Bot",
    page_icon = Image.open('assets/dsd_icon.png'))
# Hide Streamlit's default hamburger menu and footer.
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.image('assets/dsdojo.webp')
# The OpenAI key is collected from the user in the UI (password field) rather
# than only from the .env file loaded above.
OPENAI_API_KEY = st.text_input("Enter your OpenAI API key", type="password")
if OPENAI_API_KEY:
    # Export the key so the OpenAI client libraries pick it up from the env.
    os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
    # Create the embeddings object used by the vector store below.
    embeddings = OpenAIEmbeddings()
# *********************** Utils ***********************
def create_db_from_youtube_video_url(video_urls):
    """Build a FAISS vector store from the transcripts of the given videos."""
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    all_chunks = []
    # Fetch each video's transcript, split it into overlapping chunks, and
    # collect everything into one flat document list.
    for url in video_urls:
        transcript = YoutubeLoader.from_youtube_url(url).load()
        all_chunks.extend(splitter.split_documents(transcript))
    # Embed and index every chunk.
    return FAISS.from_documents(all_chunks, embeddings)
# Get the answer to the question
def get_response_from_query(db, query):
    """Answer `query` using the four transcript chunks closest to it in `db`."""
    # Retrieve the most relevant chunks and join them into one context string.
    top_docs = db.similarity_search(query, k=4)
    context = " ".join(doc.page_content for doc in top_docs)

    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)

    # System prompt: ground the answer in the transcript context only.
    prompt_template = """
You are a helpful assistant that that can answer questions about youtube videos
based on the video's transcript: {documents}
Only use the factual information from the transcript to answer the question.
If you feel like you don't have enough information to answer the question, say "I don't know".
Always when answering, dont mention the word "transcript" say "video" instead.
Your answers should be verbose and detailed
"""
    chat_prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template(prompt_template),
        HumanMessagePromptTemplate.from_template("Answer the following question: {question}"),
    ])

    answer = LLMChain(llm=llm, prompt=chat_prompt).run(question=query, documents=context)
    # Collapse the answer onto a single line, as the chat UI expects.
    return answer.replace("\n", "")
def generate_response(query, urls):
    """Index the given video URLs, then answer `query` against that index."""
    vector_db = create_db_from_youtube_video_url(urls)
    return get_response_from_query(vector_db, query)
# *********************** Streamlit App ***********************
try:
    # Create a button to toggle between "Files" and "Manual" input
    input_option = st.radio("Choose input method:", ("Files", "Manual"))
    links = []
    # Session-scoped list of collected video links (survives Streamlit reruns).
    if 'links' not in st.session_state:
        st.session_state['links'] = links
    if input_option == "Files":
        st.header("File Upload")
        # Upload a text file containing the links of the YouTube videos
        uploaded_file = st.file_uploader("Upload a text file containing the links of the YouTube videos", type="txt")
        if uploaded_file is not None:
            # Convert the uploaded file to a string
            stringio = StringIO(uploaded_file.getvalue().decode('utf-8'))
            # Read the links from the string, one per line
            links = [line.strip() for line in stringio.readlines()]
            # Only add non-empty links not already stored in the session.
            st.session_state['links'].extend(link for link in links if link not in st.session_state['links'] and link != '')
    else:
        st.header("Manual Input")
        # Get the links of the YouTube videos from the user
        links = st.text_area("Enter the links of the YouTube videos (one link per line)", height=200).splitlines()
        st.session_state['links'].extend(link for link in links if link not in st.session_state['links'] and link != '')
    reply_container = st.container()
    container = st.container()
    # Chat history, stored across reruns.
    if 'question' not in st.session_state:
        st.session_state['question'] = []
    if 'answer' not in st.session_state:
        st.session_state['answer'] = []
    # Get the question from the user
    with container:
        question = st.text_input("Question:", placeholder="Ask about your Documents", key='input')
        if question:
            # NOTE(review): this re-downloads and re-indexes every stored video
            # on each question — confirm that cost is acceptable.
            res = generate_response(question, st.session_state['links'])
            st.session_state['question'].append(question)
            st.session_state['answer'].append(res)
    # Replay the whole Q/A history with unique widget keys per message.
    if st.session_state['answer']:
        with reply_container:
            for i in range(len(st.session_state['answer'])):
                user_message_key = str(i) + '_user'
                answer_message_key = str(i) + '_answer'
                message(st.session_state['question'][i], is_user=True, key=user_message_key)
                message(st.session_state["answer"][i], key=answer_message_key)
except Exception as e:
    # Top-level boundary: surface any error in the UI instead of crashing the app.
    st.error(e)
\ No newline at end of file
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea
\ No newline at end of file
import streamlit as st
from streamlit_chat import message
from dotenv import load_dotenv
from PIL import Image
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
import os
def main():
    """Streamlit entry point for the multi-PDF chat app."""
    # Must be the first Streamlit call of the script run. `logo` and `css` are
    # module-level values defined below the function; they exist by the time
    # main() is invoked from the __main__ guard.
    st.set_page_config(page_title="Chat with multiple PDFs",
                       page_icon=logo)
    st.write(css, unsafe_allow_html=True)
    # Initialise per-session state: the retrieval chain and the chat history.
    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "question"not in st.session_state:
        st.session_state.question = []
    if "answer" not in st.session_state:
        st.session_state.answer = []
    st.header("Chat with multiple PDFs :books:")
    resonse_container, container = st.container(), st.container()
    with container:
        user_question = st.text_input("Ask a question about your documents:")
    with resonse_container:
        if user_question:
            handle_userinput(user_question)
    # Sidebar: API key entry plus PDF upload and processing.
    st.sidebar.image(logo, width=50)
    with st.sidebar:
        OPENAI_API_KEY = st.text_input('Enter your OpenAI API key',type='password')
        if OPENAI_API_KEY:
            # Export the key so the OpenAI client libraries can read it.
            os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
        st.subheader("Your documents")
        pdf_docs = st.file_uploader(
            "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            with st.spinner("Processing"):
                # get pdf text
                raw_text = get_pdf_text(pdf_docs)
                # get the text chunks
                text_chunks = get_text_chunks(raw_text)
                # create vector store
                vectorstore = get_vectorstore(text_chunks)
                # create conversation chain (kept in session state across reruns)
                st.session_state.conversation = get_conversation_chain(
                    vectorstore)
# Hide Streamlit's default footer.
hide_streamlit_style = """
<style>
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
# Module-level assets: evaluated on import, before main() runs, so main() can
# reference `logo` in st.set_page_config.
logo = Image.open(r'assets/dsd_icon.png')
logo_path = './assets/dsd_icon.png'
def get_pdf_text(pdf_docs):
    """Return the concatenated text of every page in every uploaded PDF."""
    page_texts = []
    for document in pdf_docs:
        for page in PdfReader(document).pages:
            page_texts.append(page.extract_text())
    return "".join(page_texts)
def get_text_chunks(text):
    """Split raw text on newlines into 1000-char chunks with 200-char overlap."""
    chunker = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return chunker.split_text(text)
def get_vectorstore(text_chunks):
    """Embed the chunks with OpenAI embeddings into an in-memory FAISS index."""
    return FAISS.from_texts(texts=text_chunks, embedding=OpenAIEmbeddings())
def get_conversation_chain(vectorstore):
    """Wire an LLM, chat-history memory, and the retriever into one chain."""
    chat_memory = ConversationBufferMemory(
        memory_key='chat_history', return_messages=True)
    return ConversationalRetrievalChain.from_llm(
        llm=ChatOpenAI(),
        retriever=vectorstore.as_retriever(),
        memory=chat_memory,
    )
def handle_userinput(user_question):
    """Send the question through the conversation chain and render the history.

    Guards against the chain not existing yet (the user asked a question
    before uploading and processing any PDFs), which previously raised a
    TypeError on `None(...)`.
    """
    if st.session_state.conversation is None:
        st.warning("Please upload and process your documents before asking questions.")
        return
    response = st.session_state.conversation({'question': user_question})
    st.session_state['question'].append(user_question)
    st.session_state['answer'].append(response['answer'])
    # Re-render the full chat history with unique widget keys per message.
    for i in range(len(st.session_state['answer'])):
        user_message_key = str(i) + '_user'
        answer_message_key = str(i) + '_answer'
        message(st.session_state['question'][i], is_user=True, key=user_message_key)
        message(st.session_state["answer"][i], key=answer_message_key)
if __name__ == '__main__':
main()
css = '''
<style>
.chat-message {
padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
}
.chat-message.user {
background-color: #2b313e
}
.chat-message.bot {
background-color: #475063
}
.chat-message .avatar {
width: 20%;
}
.chat-message .avatar img {
max-width: 78px;
max-height: 78px;
border-radius: 50%;
object-fit: cover;
}
.chat-message .message {
width: 80%;
padding: 0 1.5rem;
color: #fff;
}
'''
bot_template = '''
<div class="chat-message bot">
<div class="avatar">
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAwFBMVEX////pfGX8///odl7z1MzqdFv//v/xv7Pyuq/pemP8//37+PTnfWX++/npfGbmd1r58e7rembncVz+5uPodFfodmH9//rqjHn14N3rkn756eTwoZLpfWLqf2rphnHkfmbsmIb67Ortqpv00MXuxr701Mnsl37teWrsxLjstKTjfmDtc1vytKjpcFLnh2/stq347ePmpZH329TmmoLspJf14dTwzsvqak3wrabymY7xxLnuoo7w1tXwnYnvzL/ljXgLRejyAAAJ4ElEQVR4nO2dDXeaSBeA4Y6ggwhih6gEjVGzb42adl3Tvk266f//VwsYTaIM+HHljjk85+xpt+EYHgfm3vnWtJKSkpKSkpKSkpJXTPPd/4AGoJkW2c2cB4ioB4vROGK0CJpWbPlJSAqrWZkM9JYQRowwpgbvDke3yY+Jb+90TNBun7pMMK5/gDPm9ycBwMU/rtbdX8LXe//Td3BszkR7XKe+w9NofL0xuLNr91aUBp80QbuivtEjgfH9NMzQS/BcNqtfYq0T1SCdtvBsL8/Q9jy3N7q8mtWExmTO8+w2iEHz0hQheGC9vQUd2w/vLipwWNqotX8BJs8qb83gkhS/idA+yFAPQ+P6Yh5U0Cbz/Z/QN1j76lJKcTm9z4qBUtx+g/rW98C0YDjt5caIVDy3balfipZWm98cJxiX4uAC3sW7+dF+UdgwJtT3nwcE/vF+UdCw3RG1Qh5//APDxAe4zf3v1ArZTNgpRRjjdj/2eShGJ78tkYctnjRlG8UmtL2TDXXuq5uFw1ic7BfBhtQiUq56h6XbEjwRUJvIGE9PqUc39Njf1CbpmFrv/oRg/0aot25VfBMBfhsYfjFMyczGhF8ob2GMx5WMF00XS1DXpxVqmzTGeIbcX1LbpAADtIc0amNwBRO3BkcJFa+wDrXPLovpDaZhTVMtYED15FbFe/hfoFh1CnCN+BpGeIoJahb0cQ3dJrXSNo3Tm03vcYRyVU0TLWVbGRojxWoa6LRQDXW3Sq20BSxwy1B3Z9RKW0AFpXn/RqhaQx8qyGWooOHnL8NPb/jpaxotQI8WisVD9IjvjlUzNDFbh/FgqWpZG3bmzZlqmbcFS9T2oXqtJw2eUA3ZP9RCu3QE5ptoqJZ4x9hHzTCRoGJPFFwjPqaOrlqsiIAfiHmbr1rOloDYj2FPF9Q2aSDGCx4qOS0aOoijawq+hjF9nBFSnRu31CoS7rBGuYeg5FMapW5tlNzUEYGqM2qQGvpCyVCRYGoD/+SQ4XFH3RlDmnZrnGx4M31SWFCD6skRwx2oNq62xWB6UgJu20zx2ZfQ7J0UMWxxR62QBzwK/fi4z92Z8ktnLO1J5C/nkuFewKoS04Lq/EhFT/xSXzDh2/y4BNXtKpqsbQPa1/nh1Y2ts8EFrCdZ8zI/cGWX44RieSGPaIylPTqHNYdt7tYuaXWeCVp9IA5YYRkye6F8mNgCoqix9/IZZiwvcrFzczDfa6Wz7vYqoGqLMBtY/MldYeJw5tUuJEikcteehk7IUz3tqLrlwqlGD+gFhYkdIHh2GU/bNSIMmS+6Py65/FYANO6WumAft/7gnAm/+9RUcuePYxqoZvC0fPCYcGOEaPm969pvdff8OKZeh/ibaQaLymgUb8DTgGO+KUsrZiX0l4l1cHCOVxTC+rtJ9sAw4cAUJnqaofr7wN97HF/m/Urx4fkKtMc/P4sZsPnietN28qsK8jSTvbOCpcFZUYZ2FMb6L1FFUUxNbwHAYiBcXRfFGeqObfDhYyG/L0r/qg+Cx4PoRRom+fK09/9FUoznyUiS6gm+V7uG+xpLizbUb0LBnOeX4FzvIzQrw36LsXDdBVu4IY+bDv5U8F/fKkFjtZfepkA35RrlnBDFvuRriC9YXyRbkw7J5zQ7X4d9NmVRcvfW
XVC44VsOZhgs7P+aPFU6TXO1bWDM6n43f23cdn6MJ9UfUcTfumhzXXLRqLrs9pjrbm/xRmro6CHXQ8MQQhi83x48D4ezb7VqRG02GS6fB91+L8rWVpvuRX8w+/Wi2Sy5qFabDZ+vu+2em3wEiwqOO7vDA3SGW76c++wDfvRPuRex3YtUNTwfpWFpWBrSG7qf3ZAbxRgGP3HnOx9g+LOYFrDWmdiGwXloe7jLRqVinmOHDjP4v78L65izOpMHg7kh5uptOTd2yAz2d6XYjn/Qvr9c30yN3dwRG85E68/skaZb1Qpehn0e5chnkouSb2Z3ZwUX3nuSDpR6MK7JCpJvOPynOg+HT7+bVvS80HX5g2WtWnkSw5t2N6bdbj+kXGG3N6QP3/hx76hprtuWpEgKYTN1BILdAMMHmxv/njpVTKmFQemG/vOmjdvZnZbJu+shQwhSX+QLMIzKcP0GpRuuyzClhEvDoikNS8PSkIjNCKeVGw+1zlxqGEWTIDUeeq11tkYU8q13f5MZNoOEL83K7lp23l399Pa2uUgtw9CvwxoSxbrH/VVWacvmPoXTpBdYuIabMnGIT8VqVJ+lL2GI2oT+GkaxTKjhFNeZ4ZHsqdQ4bcp6aVgaloalYWlYGpaGpWFpWBqWhqVhafhZDDE3TVLSsO7dv/76ULa2ybvfnQGXhi5ZOsTvvdUFPkm3G9Qb9QRrKRlcY7Wr10uyaYwkQ6z3/mT9AbTLTmQ7mrDqThcZmLCLVplKHoLw39UcTepVUbakymH77rkm3b+W9+nHRiPqsn13TjfUbQVWDpmWdG9WBEOmwqlB8g0TEQwNBc4qMWF4TsMnFQ4Klp5NgmDoDukFQZMe1IVgGLYVMKxLZyoiGOrOWW9+Px6lm0BjRIsp9ShpxEhe1SOUoVHU6jE5MDvnU+q547Pe/V48S2cmYjylBv3W7NCX7pqAYcgG57z5vWgwaUsYwdDh/bPe/T4EQrq3B4ah7RMf1W1m1YMYdenNnHgrUxOq7JzvYZx707Z/LW0p3+0SxZBRnwIBf+ST9XEMl8RzoLP6FFEMeZu4lybr4A4UQ50TG2YdL4Nj6BPn3k8Zq0lwDInPEJB3YaAZUufe7axvH8UwJM69s/Z+Rqpp/iWNFk15VoplaD+c1SCPjmzEAc/QCUn7vcdGxvpDJENB2u89S5vejGtI3O89kI6O4hnu/TFnAMxe1kpgLMMlXd6W0Ruc3Fptz3o+2zDKvclGgHPOXUEy1Dlh++kl89BDLEPRpBvFnxTxlOp0xzubOcd0oRnSHfYIN5n7DWAZMrrcu5F99ihaTUO32/73Ygxtun7vilvIU6r7ZLl3NXvfDyzD0CDLvWUT2rDLUJDl3u3sGaZYhp5LlXs3cs7lxDJ0wuV5RaTcCi+zENGeUptmzomZ2RuMaqj7RLvS550AjGc4JTmJLWNCG74h0ZyTbs4uWHiGBk2/N+QdQYJnyGYkVU3WwBqyIR9QGEJmbzCyIU3unTWwhmzocZLce1agoSCZc/JP3haYeIZ6i+RUxNyVXXiGtkGRe9fdvKVdiIaMIvcOcs/+xTMMSSpT+dxgfEPdMYo/0wtmuXuoIxrq8+Jzbxj2I3ox3ivxdketd0z3NITKz9Y28Wd5G3o91iHJ2wCsD1ytaMT/RdT33FnVvEpWIcZcvePjZ5sEhrGR9XrEg5XKnjcFprn9Xb1Dez0ogkKxpKSkpKSkpKSkKP4D7G3QcTxGuaIAAAAASUVORK5CYII=" alt="Bot Image" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
</div>
<div class="message">{{MSG}}</div>
</div>
'''
user_template = '''
<div class="chat-message user">
<div class="avatar">
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAwFBMVEX////pfGX8///odl7z1MzqdFv//v/xv7Pyuq/pemP8//37+PTnfWX++/npfGbmd1r58e7rembncVz+5uPodFfodmH9//rqjHn14N3rkn756eTwoZLpfWLqf2rphnHkfmbsmIb67Ortqpv00MXuxr701Mnsl37teWrsxLjstKTjfmDtc1vytKjpcFLnh2/stq347ePmpZH329TmmoLspJf14dTwzsvqak3wrabymY7xxLnuoo7w1tXwnYnvzL/ljXgLRejyAAAJ4ElEQVR4nO2dDXeaSBeA4Y6ggwhih6gEjVGzb42adl3Tvk266f//VwsYTaIM+HHljjk85+xpt+EYHgfm3vnWtJKSkpKSkpKSkpJXTPPd/4AGoJkW2c2cB4ioB4vROGK0CJpWbPlJSAqrWZkM9JYQRowwpgbvDke3yY+Jb+90TNBun7pMMK5/gDPm9ycBwMU/rtbdX8LXe//Td3BszkR7XKe+w9NofL0xuLNr91aUBp80QbuivtEjgfH9NMzQS/BcNqtfYq0T1SCdtvBsL8/Q9jy3N7q8mtWExmTO8+w2iEHz0hQheGC9vQUd2w/vLipwWNqotX8BJs8qb83gkhS/idA+yFAPQ+P6Yh5U0Cbz/Z/QN1j76lJKcTm9z4qBUtx+g/rW98C0YDjt5caIVDy3balfipZWm98cJxiX4uAC3sW7+dF+UdgwJtT3nwcE/vF+UdCw3RG1Qh5//APDxAe4zf3v1ArZTNgpRRjjdj/2eShGJ78tkYctnjRlG8UmtL2TDXXuq5uFw1ic7BfBhtQiUq56h6XbEjwRUJvIGE9PqUc39Njf1CbpmFrv/oRg/0aot25VfBMBfhsYfjFMyczGhF8ob2GMx5WMF00XS1DXpxVqmzTGeIbcX1LbpAADtIc0amNwBRO3BkcJFa+wDrXPLovpDaZhTVMtYED15FbFe/hfoFh1CnCN+BpGeIoJahb0cQ3dJrXSNo3Tm03vcYRyVU0TLWVbGRojxWoa6LRQDXW3Sq20BSxwy1B3Z9RKW0AFpXn/RqhaQx8qyGWooOHnL8NPb/jpaxotQI8WisVD9IjvjlUzNDFbh/FgqWpZG3bmzZlqmbcFS9T2oXqtJw2eUA3ZP9RCu3QE5ptoqJZ4x9hHzTCRoGJPFFwjPqaOrlqsiIAfiHmbr1rOloDYj2FPF9Q2aSDGCx4qOS0aOoijawq+hjF9nBFSnRu31CoS7rBGuYeg5FMapW5tlNzUEYGqM2qQGvpCyVCRYGoD/+SQ4XFH3RlDmnZrnGx4M31SWFCD6skRwx2oNq62xWB6UgJu20zx2ZfQ7J0UMWxxR62QBzwK/fi4z92Z8ktnLO1J5C/nkuFewKoS04Lq/EhFT/xSXzDh2/y4BNXtKpqsbQPa1/nh1Y2ts8EFrCdZ8zI/cGWX44RieSGPaIylPTqHNYdt7tYuaXWeCVp9IA5YYRkye6F8mNgCoqix9/IZZiwvcrFzczDfa6Wz7vYqoGqLMBtY/MldYeJw5tUuJEikcteehk7IUz3tqLrlwqlGD+gFhYkdIHh2GU/bNSIMmS+6Py65/FYANO6WumAft/7gnAm/+9RUcuePYxqoZvC0fPCYcGOEaPm969pvdff8OKZeh/ibaQaLymgUb8DTgGO+KUsrZiX0l4l1cHCOVxTC+rtJ9sAw4cAUJnqaofr7wN97HF/m/Urx4fkKtMc/P4sZsPnietN28qsK8jSTvbOCpcFZUYZ2FMb6L1FFUUxNbwHAYiBcXRfFGeqObfDhYyG/L0r/qg+Cx4PoRRom+fK09/9FUoznyUiS6gm+V7uG+xpLizbUb0LBnOeX4FzvIzQrw36LsXDdBVu4IY+bDv5U8F/fKkFjtZfepkA35RrlnBDFvuRriC9YXyRbkw7J5zQ7X4d9NmVRcvfW
XVC44VsOZhgs7P+aPFU6TXO1bWDM6n43f23cdn6MJ9UfUcTfumhzXXLRqLrs9pjrbm/xRmro6CHXQ8MQQhi83x48D4ezb7VqRG02GS6fB91+L8rWVpvuRX8w+/Wi2Sy5qFabDZ+vu+2em3wEiwqOO7vDA3SGW76c++wDfvRPuRex3YtUNTwfpWFpWBrSG7qf3ZAbxRgGP3HnOx9g+LOYFrDWmdiGwXloe7jLRqVinmOHDjP4v78L65izOpMHg7kh5uptOTd2yAz2d6XYjn/Qvr9c30yN3dwRG85E68/skaZb1Qpehn0e5chnkouSb2Z3ZwUX3nuSDpR6MK7JCpJvOPynOg+HT7+bVvS80HX5g2WtWnkSw5t2N6bdbj+kXGG3N6QP3/hx76hprtuWpEgKYTN1BILdAMMHmxv/njpVTKmFQemG/vOmjdvZnZbJu+shQwhSX+QLMIzKcP0GpRuuyzClhEvDoikNS8PSkIjNCKeVGw+1zlxqGEWTIDUeeq11tkYU8q13f5MZNoOEL83K7lp23l399Pa2uUgtw9CvwxoSxbrH/VVWacvmPoXTpBdYuIabMnGIT8VqVJ+lL2GI2oT+GkaxTKjhFNeZ4ZHsqdQ4bcp6aVgaloalYWlYGpaGpWFpWBqWhqVhafhZDDE3TVLSsO7dv/76ULa2ybvfnQGXhi5ZOsTvvdUFPkm3G9Qb9QRrKRlcY7Wr10uyaYwkQ6z3/mT9AbTLTmQ7mrDqThcZmLCLVplKHoLw39UcTepVUbakymH77rkm3b+W9+nHRiPqsn13TjfUbQVWDpmWdG9WBEOmwqlB8g0TEQwNBc4qMWF4TsMnFQ4Klp5NgmDoDukFQZMe1IVgGLYVMKxLZyoiGOrOWW9+Px6lm0BjRIsp9ShpxEhe1SOUoVHU6jE5MDvnU+q547Pe/V48S2cmYjylBv3W7NCX7pqAYcgG57z5vWgwaUsYwdDh/bPe/T4EQrq3B4ah7RMf1W1m1YMYdenNnHgrUxOq7JzvYZx707Z/LW0p3+0SxZBRnwIBf+ST9XEMl8RzoLP6FFEMeZu4lybr4A4UQ50TG2YdL4Nj6BPn3k8Zq0lwDInPEJB3YaAZUufe7axvH8UwJM69s/Z+Rqpp/iWNFk15VoplaD+c1SCPjmzEAc/QCUn7vcdGxvpDJENB2u89S5vejGtI3O89kI6O4hnu/TFnAMxe1kpgLMMlXd6W0Ruc3Fptz3o+2zDKvclGgHPOXUEy1Dlh++kl89BDLEPRpBvFnxTxlOp0xzubOcd0oRnSHfYIN5n7DWAZMrrcu5F99ihaTUO32/73Ygxtun7vilvIU6r7ZLl3NXvfDyzD0CDLvWUT2rDLUJDl3u3sGaZYhp5LlXs3cs7lxDJ0wuV5RaTcCi+zENGeUptmzomZ2RuMaqj7RLvS550AjGc4JTmJLWNCG74h0ZyTbs4uWHiGBk2/N+QdQYJnyGYkVU3WwBqyIR9QGEJmbzCyIU3unTWwhmzocZLce1agoSCZc/JP3haYeIZ6i+RUxNyVXXiGtkGRe9fdvKVdiIaMIvcOcs/+xTMMSSpT+dxgfEPdMYo/0wtmuXuoIxrq8+Jzbxj2I3ox3ivxdketd0z3NITKz9Y28Wd5G3o91iHJ2wCsD1ytaMT/RdT33FnVvEpWIcZcvePjZ5sEhrGR9XrEg5XKnjcFprn9Xb1Dez0ogkKxpKSkpKSkpKSkKP4D7G3QcTxGuaIAAAAASUVORK5CYII=" alt="Bot Image" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
</div>
<div class="message">{{MSG}}</div>
</div>
'''
# Installs the pinned Python dependencies for the MultiPDF chat app, one
# package at a time with pip. NOTE(review): a failing pip install does not
# stop the loop ($LASTEXITCODE is never checked) — confirm best-effort
# behavior is intended.
#run using ~"./install.ps1" in project folder
# List of Python dependencies
$dependencies = @(
    "langchain==0.0.184",
    "PyPDF2==3.0.1",
    "python-dotenv==1.0.0",
    "streamlit==1.18.1",
    "openai==0.27.6",
    "faiss-cpu==1.7.4",
    "altair==4",
    "tiktoken==0.4.0",
    "Pillow==9.5.0",
    "streamlit-chat==0.1.1"
)
# Loop through each dependency and install it
foreach ($dependency in $dependencies) {
    Write-Host "Installing $dependency"
    pip install $dependency
}
Write-Host "All Python dependencies installed successfully."
# MultiPDF Chat App
> You can find the tutorial for this project on [YouTube](https://youtu.be/dXxQ0LR-3Hg).
## Introduction
------------
The MultiPDF Chat App is a Python application that allows you to chat with multiple PDF documents. You can ask questions about the PDFs using natural language, and the application will provide relevant responses based on the content of the documents. This app utilizes a language model to generate accurate answers to your queries. Please note that the app will only respond to questions related to the loaded PDFs.
## How It Works
------------
![MultiPDF Chat App Diagram](./docs/PDF-LangChain.jpg)
The application follows these steps to provide responses to your questions:
1. PDF Loading: The app reads multiple PDF documents and extracts their text content.
2. Text Chunking: The extracted text is divided into smaller chunks that can be processed effectively.
3. Language Model: The application utilizes a language model to generate vector representations (embeddings) of the text chunks.
4. Similarity Matching: When you ask a question, the app compares it with the text chunks and identifies the most semantically similar ones.
5. Response Generation: The selected chunks are passed to the language model, which generates a response based on the relevant content of the PDFs.
## Dependencies and Installation
----------------------------
To install the MultiPDF Chat App, please follow these steps:
1. Clone the repository to your local machine.
2. Install the required dependencies by running the following command:
```
pip install -r requirements.txt
```
3. Obtain an API key from OpenAI and add it to the `.env` file in the project directory.
```commandline
OPENAI_API_KEY=your_secret_api_key
```
## Usage
-----
To use the MultiPDF Chat App, follow these steps:
1. Ensure that you have installed the required dependencies and added the OpenAI API key to the `.env` file.
2. Run the `app.py` file using the Streamlit CLI. Execute the following command:
```
streamlit run app.py
```
3. The application will launch in your default web browser, displaying the user interface.
4. Load multiple PDF documents into the app by following the provided instructions.
5. Ask questions in natural language about the loaded PDFs using the chat interface.
## Contributing
------------
This repository is intended for educational purposes and does not accept further contributions. It serves as supporting material for a YouTube tutorial that demonstrates how to build this project. Feel free to utilize and enhance the app based on your own requirements.
## License
-------
The MultiPDF Chat App is released under the [MIT License](https://opensource.org/licenses/MIT).
\ No newline at end of file
langchain==0.0.184
PyPDF2==3.0.1
python-dotenv==1.0.0
streamlit==1.18.1
streamlit-chat==0.1.1
openai==0.27.6
faiss-cpu==1.7.4
altair==4
tiktoken==0.4.0
Pillow==9.5.0
# uncomment to use huggingface llms
# huggingface-hub==0.14.1
# uncomment to use instructor embeddings
# InstructorEmbedding==1.0.1
# sentence-transformers==2.2.2
#pip install langchain==0.0.184 PyPDF2==3.0.1 python-dotenv==1.0.0 streamlit==1.18.1 openai==0.27.6 faiss-cpu==1.7.4 altair==4 tiktoken==0.4.0 Pillow==9.5.0
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment