Commit a7753969 by Sanjay Pant

Initial repo for qna_app with and without redis

# Start with a base Python image
FROM python:3.9-slim-buster
# Copy the dependency list first so the pip install layer can be cached
COPY requirements.txt ./
# Install the Python dependencies and remove pip's download cache to keep the image small
RUN pip install -r requirements.txt \
    && rm -rf /root/.cache/pip
# Copy the application source into the image
COPY . .
EXPOSE 8080
# The command that will be executed when a container is run from the image
ENTRYPOINT ["python", "app.py"]
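The commit stops at defining the image; as a rough smoke test, the hypothetical snippet below (an assumption, not part of the repo) checks that a container built from the Dockerfile above and started with port 8080 published on the host is serving the Gradio UI.

import urllib.request

# Hypothetical smoke test: assumes the container is already running and that
# port 8080 is mapped to localhost:8080 on the host.
with urllib.request.urlopen("http://localhost:8080/", timeout=10) as resp:
    print(resp.status)      # the Gradio UI should answer with HTTP 200
    print(resp.read(100))   # first bytes of the served HTML page
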
import gradio as gr
import openai

Title = "Experience the Power of Knowledge with Smart QnA Bot"
Description = "The smart QnA bot is a tool that can help you find answers to your questions quickly and easily. Using advanced algorithms and a vast knowledge base, the bot can provide comprehensive responses to a wide range of inquiries. However, the accuracy of the answers may vary, and they should always be cross-checked with other sources to ensure their reliability. Whether you're a student, a professional, or just someone seeking information, the QnA bot is a useful resource for exploring different topics and expanding your knowledge. Give it a try today and experience the convenience and efficiency of this smart tool."

def openai_call(question, api_key):
    openai.api_key = api_key
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=question,
        temperature=0,
        max_tokens=100
    )
    return response.choices[0].text

open_ai_key = gr.inputs.Textbox(label="Enter the OpenAI API Key", type="password")
open_ai_question = gr.inputs.Textbox(lines=10, label="Enter the question")
open_ai_summary = gr.outputs.Label(label="Answer")

demo = gr.Interface(title=Title, description=Description, fn=openai_call, inputs=[open_ai_question, open_ai_key], outputs=[open_ai_summary], css="footer {visibility: hidden}")
demo.launch(server_name="0.0.0.0", server_port=8080)
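The openai_call function in app.py above receives the question first and the API key second, matching the order of the Gradio inputs. For a quick check of the OpenAI call outside the UI, the sketch below repeats the same Completion request inline (importing app.py would also start the Gradio server); it is hypothetical and assumes the pre-1.0 openai SDK used by this code and an OPENAI_API_KEY environment variable.

import os
import openai

# Hypothetical standalone check of the same Completion call used by openai_call;
# assumes the pre-1.0 openai SDK and an OPENAI_API_KEY environment variable.
openai.api_key = os.environ["OPENAI_API_KEY"]
response = openai.Completion.create(
    model="text-davinci-003",
    prompt="What is the capital of France?",
    temperature=0,
    max_tokens=100
)
print(response.choices[0].text.strip())
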
gradio
openai
redis
# Start with a base Python image
FROM python:3.9-slim-buster
# Copy the dependency list first so the pip install layer can be cached
COPY requirements.txt ./
# Install the Python dependencies and remove pip's download cache to keep the image small
RUN pip install -r requirements.txt \
    && rm -rf /root/.cache/pip
# Copy the application source into the image
COPY . .
# The command that will be executed when a container is run from the image
ENTRYPOINT ["python", "app_redis.py"]
import gradio as gr
import openai
import redis

Title = "Experience the Power of Knowledge with Smart QnA Bot with Redis"
Description = "The smart QnA bot is a tool that can help you find answers to your questions quickly and easily. Using advanced algorithms and a vast knowledge base, the bot can provide comprehensive responses to a wide range of inquiries. However, the accuracy of the answers may vary, and they should always be cross-checked with other sources to ensure their reliability. Whether you're a student, a professional, or just someone seeking information, the QnA bot is a useful resource for exploring different topics and expanding your knowledge. Give it a try today and experience the convenience and efficiency of this smart tool."

# Connect to Redis
redis_client = redis.Redis(host='redis', port=6379, db=0)

def openai_call(question, api_key):
    # Check whether the answer is already in the Redis cache
    cached_answer = redis_client.get(question)
    if cached_answer:
        return cached_answer.decode('utf-8')
    # If the answer is not in the cache, call the OpenAI API
    openai.api_key = api_key
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=question,
        temperature=0,
        max_tokens=100
    )
    answer = response.choices[0].text
    # Store the answer in the Redis cache for future requests
    redis_client.set(question, answer)
    return answer

open_ai_key = gr.inputs.Textbox(label="Enter the OpenAI API Key", type="password")
open_ai_question = gr.inputs.Textbox(lines=10, label="Enter the question")
open_ai_answer = gr.outputs.Label(label="Answer")

demo = gr.Interface(title=Title, description=Description, fn=openai_call, inputs=[open_ai_question, open_ai_key], outputs=[open_ai_answer], css="footer {visibility: hidden}")
demo.launch(server_name="0.0.0.0", server_port=8081)
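Because app_redis.py above stores each question as a plain string key with the generated answer as its value, the cache contents can be inspected directly to confirm that caching works. The sketch below is hypothetical and assumes the Redis instance is reachable from the host on localhost:6379 (for example through the port mapping in the compose file that follows).

import redis

# Hypothetical cache inspection: list every cached question and its answer.
client = redis.Redis(host="localhost", port=6379, db=0)
for key in client.keys("*"):
    question = key.decode("utf-8")
    answer = client.get(key).decode("utf-8")
    print("Q:", question)
    print("A:", answer.strip())
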
version: '3'
services:
  gradio:
    image: "datasciencedojo/app-openai:latest"
    container_name: gradiov1
    build:
      context: .
      dockerfile: Dockerfile_Redis
    ports:
      - "80:8081"
    depends_on:
      - redis
    environment:
      - REDIS_HOST=redis
      - REDIS_PORT=6379
  redis:
    image: "redis:latest"
    container_name: redisv1
    ports:
      - "6379:6379"
    volumes:
      - ./data:/data
    command: redis-server --appendonly yes --dir /data
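The compose file injects REDIS_HOST and REDIS_PORT into the gradio service, while app_redis.py currently hardcodes host='redis' and port=6379. Below is a hedged sketch of how the connection could pick up those variables instead, keeping today's hardcoded values as defaults (an adjustment the commit itself does not make).

import os
import redis

# Hypothetical: read the connection settings injected by docker-compose,
# falling back to the values app_redis.py hardcodes today.
redis_host = os.environ.get("REDIS_HOST", "redis")
redis_port = int(os.environ.get("REDIS_PORT", "6379"))
redis_client = redis.Redis(host=redis_host, port=redis_port, db=0)
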
gradio
openai
redis