import os  # read environment variables
from dotenv import load_dotenv  # loads environment variables from a .env file
from langchain_community.embeddings import OllamaEmbeddings
from langchain_chroma import Chroma
from langchain_core.prompts import ChatPromptTemplate  # template for the conversation with the LLM
from langchain_groq import ChatGroq  # client for models hosted on Groq
from langchain.chains import create_retrieval_chain  # chains retrieval with answer generation
from langchain.chains.combine_documents import create_stuff_documents_chain
# stuffs the retrieved document chunks into the prompt sent to the LLM
import streamlit as st  # web UI for the chat app
load_dotenv()
groq_api_key = os.getenv("GROQ_API_KEY")  # read the secret key loaded from .env
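# For reference, a minimal .env file for this script would contain a single line
# (the key value itself is elided; use your own Groq API key):
#   GROQ_API_KEY=<your-groq-api-key>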
model = ChatGroq(model="gemma2-9b-it", groq_api_key=groq_api_key)
embeddings = OllamaEmbeddings(model='gemma2:2b')  # local Ollama model for embeddings; must match the model used to build the index
mydb = Chroma(persist_directory='./chroma_db', embedding_function=embeddings)  # open the persisted vector store
retriever = mydb.as_retriever(search_type='similarity', search_kwargs={"k": 6})
# using similarity search, we retrieve the 6 most similar document chunks
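# This script assumes './chroma_db' already holds an ingested index. A minimal,
# hypothetical ingestion sketch (the file name 'menu.txt' and the chunk sizes
# are assumptions, not part of this app) would look like:
#
#   from langchain_community.document_loaders import TextLoader
#   from langchain_text_splitters import RecursiveCharacterTextSplitter
#
#   docs = TextLoader('menu.txt').load()  # restaurant data to index
#   chunks = RecursiveCharacterTextSplitter(
#       chunk_size=500, chunk_overlap=50).split_documents(docs)
#   Chroma.from_documents(chunks, embeddings,
#                         persist_directory='./chroma_db')  # same embeddings model as above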
st.title("Welcome to A's Caffe")  # title of the app
query = st.chat_input("Ask me anything- ")  # chat input box for the user's question
system_prompt = (
    "You are an assistant for question-answering tasks for a restaurant called A's Caffe. "
    "Use the following pieces of retrieved context to answer the question. "
    "Always be very polite to the customer and never write anything negative about the restaurant. "
    "Your tone should always be exciting and enticing to customers."
    "\n\n"
    "{context}"
)
# instructions to the LLM for the kind of response we want
prompt = ChatPromptTemplate.from_messages([('system', system_prompt), ('human', "{input}")])  # combines the system and human prompts
if query:
    question_answer_chain = create_stuff_documents_chain(model, prompt)  # stuffs the retrieved context, along with the question, into the prompt for the LLM
    rag_chain = create_retrieval_chain(retriever, question_answer_chain)  # completes the chain: retrieve documents, then answer
    response = rag_chain.invoke({'input': query})  # run the RAG chain on the user's question
    st.write(response['answer'])  # the response dict also holds 'input' and 'context'; display only the answer
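# To try the app locally (assuming this file is saved as app.py, the Groq key
# is set in .env, and a local Ollama server is serving gemma2:2b):
#   streamlit run app.py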