app_7.py (forked from michelderu/build-your-local-ragstack-chatbot)

import streamlit as st
import os
import tempfile
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.chat_models.ollama import ChatOllama
from cassandra.cluster import Cluster
from langchain_community.vectorstores import Cassandra
from langchain.schema.runnable import RunnableMap
from langchain.prompts import ChatPromptTemplate
from langchain.callbacks.base import BaseCallbackHandler
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
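
# This app reads its endpoints and table settings from Streamlit secrets.
# A minimal .streamlit/secrets.toml is sketched below; the values shown are
# placeholders for illustration, not this deployment's actual settings:
#
#   OLLAMA_ENDPOINT = "http://localhost:11434"
#   DSE_ENDPOINT = "localhost"
#   DSE_KEYSPACE = "default_keyspace"
#   DSE_TABLE = "vector_table"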

# Streaming callback handler for responses
class StreamHandler(BaseCallbackHandler):
    def __init__(self, container, initial_text=""):
        self.container = container
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs):
        self.text += token
        self.container.markdown(self.text + "▌")
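
# The handler above is passed to chain.invoke() via the callbacks config at
# the bottom of this file: each token streamed by the model is appended to
# the placeholder, with a "▌" cursor marking a response in progress.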

# Function for Vectorizing uploaded data into DataStax Enterprise
def vectorize_text(uploaded_file, vector_store):
    if uploaded_file is not None:
        # Write to temporary file
        temp_dir = tempfile.TemporaryDirectory()
        file = uploaded_file
        temp_filepath = os.path.join(temp_dir.name, file.name)
        with open(temp_filepath, 'wb') as f:
            f.write(file.getvalue())

        # Load the PDF
        docs = []
        loader = PyPDFLoader(temp_filepath)
        docs.extend(loader.load())

        # Create the text splitter
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1500,
            chunk_overlap=100
        )

        # Vectorize the PDF and load it into the DataStax Enterprise Vector Store
        pages = text_splitter.split_documents(docs)
        vector_store.add_documents(pages)
        st.info(f"{len(pages)} pages loaded.")
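
# The 100-character overlap keeps text that straddles a chunk boundary
# represented in both neighbouring chunks, so retrieval doesn't lose
# context at the cut points.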

# Cache prompt for future runs
@st.cache_data()
def load_prompt():
    template = """You're a helpful AI assistant tasked to answer the user's questions.
You're friendly and you answer extensively with multiple sentences. You prefer to use bullet points to summarize.

CONTEXT:
{context}

USER'S QUESTION:
{question}

YOUR ANSWER:"""
    return ChatPromptTemplate.from_messages([("system", template)])
prompt = load_prompt()

# Cache Mistral Chat Model for future runs
@st.cache_resource()
def load_chat_model():
    # For the available ChatOllama parameters see:
    # https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ollama.ChatOllama.html
    # num_ctx is the context window size
    return ChatOllama(
        model="mistral:latest",
        num_ctx=18192,
        base_url=st.secrets['OLLAMA_ENDPOINT']
    )
chat_model = load_chat_model()
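
# Note: "mistral:latest" must already be available on the Ollama server,
# e.g. via `ollama pull mistral`, or the chat calls below will fail.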

# Cache the DataStax Enterprise Vector Store for future runs
@st.cache_resource(show_spinner='Connecting to DataStax Enterprise v7 with Vector Support')
def load_vector_store():
    # Connect to DSE
    cluster = Cluster([st.secrets['DSE_ENDPOINT']])
    session = cluster.connect()

    # Connect to the Vector Store
    vector_store = Cassandra(
        session=session,
        embedding=HuggingFaceEmbeddings(),
        keyspace=st.secrets['DSE_KEYSPACE'],
        table_name=st.secrets['DSE_TABLE']
    )
    return vector_store
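
# Note: HuggingFaceEmbeddings defaults to the
# sentence-transformers/all-MiniLM-L6-v2 model; the vector table's dimension
# (384 for that model) must match the embedding model used.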
vector_store = load_vector_store()

# Cache the Retriever for future runs
@st.cache_resource(show_spinner='Getting retriever')
def load_retriever():
    # Get the retriever for the Chat Model
    retriever = vector_store.as_retriever(
        search_kwargs={"k": 5}
    )
    return retriever
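
# k=5 means each question is answered using the 5 most similar document
# chunks retrieved from the vector store.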
retriever = load_retriever()

# Start with empty messages, stored in session state
if 'messages' not in st.session_state:
    st.session_state.messages = []

# Draw a title and some markdown
st.markdown("""POC on RAG for LIONchat
Connected to the Data""")
st.divider()

# Include the upload form for new data to be Vectorized
with st.sidebar:
    st.image("https://1000logos.net/wp-content/uploads/2021/05/ING-logo.png", width=250)
    with st.form('upload'):
        uploaded_file = st.file_uploader('Upload a document for additional context', type=['pdf'])
        submitted = st.form_submit_button('Save to DataStax Enterprise')
        if submitted:
            vectorize_text(uploaded_file, vector_store)

# Draw all messages, both user and bot so far (every time the app reruns)
for message in st.session_state.messages:
    st.chat_message(message['role']).markdown(message['content'])

# Draw the chat input box
if question := st.chat_input("What's up?"):
    # Store the user's question in a session object for redrawing next time
    st.session_state.messages.append({"role": "human", "content": question})

    # Draw the user's question
    with st.chat_message('human'):
        st.markdown(question)

    # UI placeholder to start filling with agent response
    with st.chat_message('assistant'):
        response_placeholder = st.empty()

    # Generate the answer by calling Mistral's Chat Model
    inputs = RunnableMap({
        'context': lambda x: retriever.get_relevant_documents(x['question']),
        'question': lambda x: x['question']
    })
    chain = inputs | prompt | chat_model
    response = chain.invoke({'question': question}, config={'callbacks': [StreamHandler(response_placeholder)]})
    answer = response.content

    # Store the bot's answer in a session object for redrawing next time
    st.session_state.messages.append({"role": "ai", "content": answer})

    # Write the final answer without the cursor
    response_placeholder.markdown(answer)
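
# To run locally (assuming Ollama is serving mistral:latest and a DSE 7
# cluster with vector support is reachable at the configured endpoints):
#   streamlit run app_7.py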