cheremnm committed on
Commit
387c8ab
·
verified ·
1 Parent(s): 7e1edc7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -101
app.py CHANGED
@@ -6,7 +6,7 @@ from dotenv import load_dotenv # Loading environment variables from a .env file
6
  import json # Parsing and handling JSON data
7
 
8
  # LangChain imports
9
- #from langchain_openai import ChatOpenAI #uncomment to use OPEN AI
10
  from langchain_core.documents import Document # Document data structures
11
  from langchain_core.runnables import RunnablePassthrough # LangChain core library for running pipelines
12
  from langchain_core.output_parsers import StrOutputParser # String output parser
@@ -20,8 +20,6 @@ from langchain.retrievers import ContextualCompressionRetriever # Contextual co
20
  from langchain_community.vectorstores import Chroma # Implementations of vector stores like Chroma
21
  from langchain_community.document_loaders import PyPDFDirectoryLoader, PyPDFLoader # Document loaders for PDFs
22
  from langchain_community.cross_encoders import HuggingFaceCrossEncoder # Cross-encoders from HuggingFace
23
- from langchain_community.llms import HuggingFaceHub #comment to use OPEN AI
24
- from langchain_community.embeddings import HuggingFaceEmbeddings # comment to use OPEN AI
25
  from langchain_experimental.text_splitter import SemanticChunker # Experimental text splitting methods
26
  from langchain.text_splitter import (
27
  CharacterTextSplitter, # Splitting text by characters
@@ -32,8 +30,8 @@ from langchain.agents import create_tool_calling_agent, AgentExecutor
32
  from langchain_core.prompts import ChatPromptTemplate
33
 
34
  # LangChain OpenAI imports
35
- #from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI # OpenAI embeddings and models
36
- #from langchain.embeddings.openai import OpenAIEmbeddings # OpenAI embeddings for text vectors
37
 
38
  # LlamaParse & LlamaIndex imports
39
  from llama_parse import LlamaParse # Document parsing library
@@ -64,42 +62,20 @@ MEM0_api_key = os.environ.get("MEM0_API_KEY")
64
 
65
 
66
  # Initialize the OpenAI Embeddings
67
- #embedding_model = OpenAIEmbeddings(
68
- # openai_api_base=endpoint,
69
- # openai_api_key=api_key,
70
- # model='text-embedding-ada-002'
71
- #)
72
-
73
- # Initialize Hugging Face Embeddings
74
- embedding_model = HuggingFaceEmbeddings(
75
- model_name="sentence-transformers/all-MiniLM-L6-v2" # good balance of speed & quality
76
  )
77
 
78
- # Initialize the Chat OpenAI model
79
- #llm = ChatOpenAI(
80
- # openai_api_base=endpoint,
81
- # openai_api_key=api_key,
82
- # model="gpt-4o-mini",
83
- # streaming=False
84
- #)
85
-
86
- # Initialize Hugging Face LLM
87
- from langchain_community.chat_models import ChatHuggingFace
88
- from transformers import pipeline
89
-
90
- pipe = pipeline(
91
- "text-generation",
92
- model="meta-llama/Meta-Llama-3-8B-Instruct",
93
- token=os.environ["HF_TOKEN"],
94
- max_new_tokens=512,
95
- temperature=0.0
96
- )
97
 
98
- llm = ChatHuggingFace(
99
- pipeline=pipe,
100
- model_id="meta-llama/Meta-Llama-3-8B-Instruct"
 
 
 
101
  )
102
-
103
  # This initializes the Chat OpenAI model with the provided endpoint, API key, deployment name, and a temperature setting of 0 (to control response variability).
104
 
105
  # set the LLM and embedding model in the LlamaIndex settings.
@@ -125,10 +101,8 @@ class AgentState(TypedDict):
125
  def expand_query(state: Dict[str, Any]) -> Dict[str, Any]:
126
  """
127
  Expands the user query to improve retrieval of bible and spiritual information.
128
-
129
  Args:
130
  state: Workflow state containing at least 'query' and 'query_feedback'.
131
-
132
  Returns:
133
  Workflow state with an additional 'expanded_query' key.
134
  """
@@ -136,20 +110,15 @@ def expand_query(state: Dict[str, Any]) -> Dict[str, Any]:
136
 
137
  print("---------Expanding Query---------")
138
  system_message = '''You are an assistant that reformulates vague or short user questions into detailed, domain-specific queries related to bible and spiritual questions.
139
-
140
  Examples:
141
  - Input: "David and Goliath?"
142
  Expanded: "What is the significance of the story of David and Goliath in the context of faith, courage, and divine intervention?"
143
-
144
  - Input: "What does Jesus say about love?"
145
  Expanded: "What teachings did Jesus offer about love in the New Testament, and how do passages like John 13:34โ€“35 and Matthew 22:37โ€“39 reflect those teachings?"
146
-
147
  - Input: "Genesis creation"
148
  Expanded: "How does the Book of Genesis describe the creation of the world, and what are the main theological interpretations of the seven days of creation?"
149
-
150
  - Input: "End times?"
151
  Expanded: "What does the Bible say about the end times, and how do texts like the Book of Revelation, Daniel, and Matthew 24 contribute to Christian eschatology?"
152
-
153
  - Input: "Women in the Bible"
154
  Expanded: "What roles do women play in the Bible, and how are figures such as Mary, Ruth, Esther, and Deborah portrayed in biblical narratives?"
155
  '''
@@ -183,10 +152,8 @@ retriever = vector_store.as_retriever(
183
  def retrieve_context(state):
184
  """
185
  Retrieves context from the vector store using the expanded or original query.
186
-
187
  Args:
188
  state (Dict): The current state of the workflow, containing the query and expanded query.
189
-
190
  Returns:
191
  Dict: The updated state with the retrieved context.
192
  """
@@ -218,17 +185,14 @@ def retrieve_context(state):
218
  def craft_response(state):
219
  """
220
  Generates a response using the retrieved context, focusing on biblical teachings and spiritual guidance.
221
-
222
  Args:
223
  state (Dict): The current state of the workflow, containing the query and retrieved context.
224
-
225
  Returns:
226
  Dict: The updated state with the generated response.
227
  """
228
  print("---------craft_response---------")
229
  system_message = '''You are a helpful AI assistant trained to support users in understanding biblical teachings and spiritual guidance, using context retrieved from the Bible and the book *The Purpose Driven Life* by Rick Warren.
230
  Your responses must strictly adhere to the retrieved context, which is extracted from biblical texts such as the CSB Bible, theological commentaries, or trusted religious sources.
231
-
232
  Do not speculate, interpret creatively, or introduce knowledge not found in the provided context. Focus only on scriptural passages, interpretations, historical backgrounds, or theological themes directly supported by the retrieved content.
233
  If the context does not contain enough information to answer accurately, clearly state that. Aim for clarity, scriptural accuracy, and relevance to the user's query.
234
  '''
@@ -254,10 +218,8 @@ If the context does not contain enough information to answer accurately, clearly
254
  def score_groundedness(state):
255
  """
256
  Checks whether the response is grounded in the retrieved context.
257
-
258
  Args:
259
  state (Dict): The current state of the workflow, containing the response and context.
260
-
261
  Returns:
262
  Dict: The updated state with the groundedness score.
263
  """
@@ -266,12 +228,9 @@ def score_groundedness(state):
266
  system_message = '''You are evaluating whether an AI-generated response is grounded in the retrieved context
267
  provided from biblical texts (such as the CSB Bible) and the book *The Purpose Driven Life* by Rick Warren.
268
  The context includes scripture, commentary, and theological content.
269
-
270
  Your task is to assign a groundedness score between 0 and 1, where:
271
-
272
  - 1.0 means the response is fully supported by the context,
273
  - 0.0 means the response is entirely unsupported.
274
-
275
  Be strict: if even a part of the response is not traceable to the context, reduce the score. Provide only
276
  the numeric score.'''
277
 
@@ -297,22 +256,17 @@ the numeric score.'''
297
  def check_precision(state: Dict) -> Dict:
298
  """
299
  Checks whether the response precisely addresses the userโ€™s query.
300
-
301
  Args:
302
  state (Dict): The current state of the workflow, containing the query and response.
303
-
304
  Returns:
305
  Dict: The updated state with the precision score.
306
  """
307
  print("---------check_precision---------")
308
  system_message = '''You are assessing whether an AI-generated response precisely answers the user's query,
309
  within the domain of biblical interpretation and spiritual guidance drawn from the Bible and *The Purpose Driven Life*.
310
-
311
  Provide a precision score between 0 and 1:
312
-
313
  - 1.0: The response fully and directly answers the query with clear relevance.
314
  - 0.0: The response is vague, unrelated, or fails to address the query.
315
-
316
  Only return a numeric score.'''
317
 
318
  precision_prompt = ChatPromptTemplate.from_messages([
@@ -337,10 +291,8 @@ Only return a numeric score.'''
337
  def refine_response(state: Dict) -> Dict:
338
  """
339
  Suggests improvements for the generated response.
340
-
341
  Args:
342
  state (Dict): The current state of the workflow, containing the query and response.
343
-
344
  Returns:
345
  Dict: The updated state with response refinement suggestions.
346
  """
@@ -371,10 +323,8 @@ Focus on biblical coherence, faith-based reasoning, and alignment with the theme
371
  def refine_query(state: Dict) -> Dict:
372
  """
373
  Suggests improvements for the expanded query.
374
-
375
  Args:
376
  state (Dict): The current state of the workflow, containing the query and expanded query.
377
-
378
  Returns:
379
  Dict: The updated state with query refinement suggestions.
380
  """
@@ -539,11 +489,9 @@ llama_guard_client = Groq(api_key=llama_api_key)
539
  def filter_input_with_llama_guard(user_input, model="meta-llama/llama-guard-4-12b"):
540
  """
541
  Filters user input using Llama Guard to ensure it is safe.
542
-
543
  Parameters:
544
  - user_input: The input provided by the user.
545
  - model: The Llama Guard model to be used for filtering (default is "meta-llama/llama-guard-4-12b").
546
-
547
  Returns:
548
  - The filtered and safe input.
549
  """
@@ -571,38 +519,19 @@ class SpiritualBot:
571
  self.memory = MemoryClient(api_key=os.environ.get("MEM0_API_KEY")) # Complete the code to define the memory client API key
572
 
573
  # Initialize the OpenAI client using the provided credentials
574
- #self.client = ChatOpenAI(
575
- # model_name="gpt-4o-mini", # Specify the model to use (e.g., GPT-4 optimized version)
576
- # api_key=os.environ.get("API_KEY"), # API key for authentication
577
- # openai_api_base = os.environ.get("OPENAI_API_BASE"),
578
- # temperature=0 # Controls randomness in responses; 0 ensures deterministic results
579
- #)
580
-
581
- # Initialize Hugging Face client
582
- from langchain_community.chat_models import ChatHuggingFace
583
- from transformers import pipeline
584
-
585
- pipe = pipeline(
586
- "text-generation",
587
- model="meta-llama/Meta-Llama-3-8B-Instruct",
588
- token=os.environ["HF_TOKEN"],
589
- max_new_tokens=512,
590
- temperature=0.0
591
  )
592
 
593
- # Assign to self.client
594
- self.client = ChatHuggingFace(
595
- pipeline=pipe,
596
- model_id="meta-llama/Meta-Llama-3-8B-Instruct"
597
- )
598
-
599
  # Define tools available to the chatbot, such as web search
600
  tools = [agentic_rag]
601
 
602
  # Define the system prompt to set the behavior of the chatbot
603
  system_prompt = """You are a compassionate and knowledgeable Spiritual Assistant.
604
  Your purpose is to help users explore biblical teachings and spiritual insights, drawing only from the Bible and *The Purpose Driven Life* by Rick Warren.
605
-
606
  Guidelines for Interaction:
607
  - Maintain a respectful, thoughtful, and non-judgmental tone at all times.
608
  - Ground every response in scripture or the provided spiritual context โ€” never speculate or invent theology.
@@ -611,7 +540,6 @@ Guidelines for Interaction:
611
  - When possible, help the user reflect on how biblical principles can apply to personal growth, purpose, and everyday life.
612
  - Avoid doctrinal debates or denominational bias โ€” focus on shared themes of purpose, love, faith, and spiritual growth.
613
  - If you cannot answer based on the given sources, humbly acknowledge the limitation and suggest scripture or topics the user might explore further.
614
-
615
  Your goal is to walk alongside users on their spiritual journey, offering encouragement, insight, and biblical grounding.
616
  """
617
 
@@ -632,7 +560,6 @@ Your goal is to walk alongside users on their spiritual journey, offering encour
632
  def store_customer_interaction(self, user_id: str, message: str, response: str, metadata: Dict = None):
633
  """
634
  Store customer interaction in memory for future reference.
635
-
636
  Args:
637
  user_id (str): Unique identifier for the customer.
638
  message (str): Customer's query or message.
@@ -663,11 +590,9 @@ Your goal is to walk alongside users on their spiritual journey, offering encour
663
  def get_relevant_history(self, user_id: str, query: str) -> List[Dict]:
664
  """
665
  Retrieve past interactions relevant to the current query.
666
-
667
  Args:
668
  user_id (str): Unique identifier for the customer.
669
  query (str): The customer's current query.
670
-
671
  Returns:
672
  List[Dict]: A list of relevant past interactions.
673
  """
@@ -681,11 +606,9 @@ Your goal is to walk alongside users on their spiritual journey, offering encour
681
  def handle_customer_query(self, user_id: str, query: str) -> str:
682
  """
683
  Process a customer's query and provide a response, taking into account past interactions.
684
-
685
  Args:
686
  user_id (str): Unique identifier for the customer.
687
  query (str): Customer's query.
688
-
689
  Returns:
690
  str: Chatbot's response.
691
  """
@@ -707,9 +630,7 @@ Your goal is to walk alongside users on their spiritual journey, offering encour
707
  prompt = f"""
708
  Context:
709
  {context}
710
-
711
  Current customer query: {query}
712
-
713
  Provide a helpful response that takes into account any relevant past interactions.
714
  """
715
 
@@ -758,12 +679,9 @@ No user data is **stored**, **shared**, or used for **model training** or any ot
758
  st.session_state.chat_history.append({
759
  "role": "assistant",
760
  "content": f"""๐Ÿ‘‹ **Welcome ใ‚ˆใ†ใ“ใ, {user_id}!**
761
-
762
  How can I guide you in your spiritual path today?
763
  ไปŠๆ—ฅใ€ใ‚ใชใŸใฎ็ฒพ็ฅž็š„ใช้“ใ‚’ใฉใฎใ‚ˆใ†ใซๅฐŽใใ“ใจใŒใงใใ‚‹ใงใ—ใ‚‡ใ†ใ‹?
764
-
765
  ---
766
-
767
  ๐Ÿ”’ **Privacy Notice:**
768
  User questions remains private.
769
  All processing occurs **within the current session**.
@@ -820,4 +738,4 @@ No user data is **stored**, **shared**, or used for **model training** or any ot
820
  st.session_state.chat_history.append({"role": "assistant", "content": inappropriate_msg})
821
 
822
  if __name__ == "__main__":
823
- spritual_assistant_streamlit()
 
6
  import json # Parsing and handling JSON data
7
 
8
  # LangChain imports
9
+ from langchain_openai import ChatOpenAI
10
  from langchain_core.documents import Document # Document data structures
11
  from langchain_core.runnables import RunnablePassthrough # LangChain core library for running pipelines
12
  from langchain_core.output_parsers import StrOutputParser # String output parser
 
20
  from langchain_community.vectorstores import Chroma # Implementations of vector stores like Chroma
21
  from langchain_community.document_loaders import PyPDFDirectoryLoader, PyPDFLoader # Document loaders for PDFs
22
  from langchain_community.cross_encoders import HuggingFaceCrossEncoder # Cross-encoders from HuggingFace
 
 
23
  from langchain_experimental.text_splitter import SemanticChunker # Experimental text splitting methods
24
  from langchain.text_splitter import (
25
  CharacterTextSplitter, # Splitting text by characters
 
30
  from langchain_core.prompts import ChatPromptTemplate
31
 
32
  # LangChain OpenAI imports
33
+ from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI # OpenAI embeddings and models
34
+ from langchain.embeddings.openai import OpenAIEmbeddings # OpenAI embeddings for text vectors
35
 
36
  # LlamaParse & LlamaIndex imports
37
  from llama_parse import LlamaParse # Document parsing library
 
62
 
63
 
64
  # Initialize the OpenAI Embeddings
65
+ embedding_model = OpenAIEmbeddings(
66
+ openai_api_base=endpoint,
67
+ openai_api_key=api_key,
68
+ model='text-embedding-ada-002'
 
 
 
 
 
69
  )
70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
+ # Initialize the Chat OpenAI model
73
+ llm = ChatOpenAI(
74
+ openai_api_base=endpoint,
75
+ openai_api_key=api_key,
76
+ model="gpt-4o-mini",
77
+ streaming=False
78
  )
 
79
  # This initializes the Chat OpenAI model with the provided endpoint, API key, deployment name, and a temperature setting of 0 (to control response variability).
80
 
81
  # set the LLM and embedding model in the LlamaIndex settings.
 
101
  def expand_query(state: Dict[str, Any]) -> Dict[str, Any]:
102
  """
103
  Expands the user query to improve retrieval of bible and spiritual information.
 
104
  Args:
105
  state: Workflow state containing at least 'query' and 'query_feedback'.
 
106
  Returns:
107
  Workflow state with an additional 'expanded_query' key.
108
  """
 
110
 
111
  print("---------Expanding Query---------")
112
  system_message = '''You are an assistant that reformulates vague or short user questions into detailed, domain-specific queries related to bible and spiritual questions.
 
113
  Examples:
114
  - Input: "David and Goliath?"
115
  Expanded: "What is the significance of the story of David and Goliath in the context of faith, courage, and divine intervention?"
 
116
  - Input: "What does Jesus say about love?"
117
  Expanded: "What teachings did Jesus offer about love in the New Testament, and how do passages like John 13:34โ€“35 and Matthew 22:37โ€“39 reflect those teachings?"
 
118
  - Input: "Genesis creation"
119
  Expanded: "How does the Book of Genesis describe the creation of the world, and what are the main theological interpretations of the seven days of creation?"
 
120
  - Input: "End times?"
121
  Expanded: "What does the Bible say about the end times, and how do texts like the Book of Revelation, Daniel, and Matthew 24 contribute to Christian eschatology?"
 
122
  - Input: "Women in the Bible"
123
  Expanded: "What roles do women play in the Bible, and how are figures such as Mary, Ruth, Esther, and Deborah portrayed in biblical narratives?"
124
  '''
 
152
  def retrieve_context(state):
153
  """
154
  Retrieves context from the vector store using the expanded or original query.
 
155
  Args:
156
  state (Dict): The current state of the workflow, containing the query and expanded query.
 
157
  Returns:
158
  Dict: The updated state with the retrieved context.
159
  """
 
185
  def craft_response(state):
186
  """
187
  Generates a response using the retrieved context, focusing on biblical teachings and spiritual guidance.
 
188
  Args:
189
  state (Dict): The current state of the workflow, containing the query and retrieved context.
 
190
  Returns:
191
  Dict: The updated state with the generated response.
192
  """
193
  print("---------craft_response---------")
194
  system_message = '''You are a helpful AI assistant trained to support users in understanding biblical teachings and spiritual guidance, using context retrieved from the Bible and the book *The Purpose Driven Life* by Rick Warren.
195
  Your responses must strictly adhere to the retrieved context, which is extracted from biblical texts such as the CSB Bible, theological commentaries, or trusted religious sources.
 
196
  Do not speculate, interpret creatively, or introduce knowledge not found in the provided context. Focus only on scriptural passages, interpretations, historical backgrounds, or theological themes directly supported by the retrieved content.
197
  If the context does not contain enough information to answer accurately, clearly state that. Aim for clarity, scriptural accuracy, and relevance to the user's query.
198
  '''
 
218
  def score_groundedness(state):
219
  """
220
  Checks whether the response is grounded in the retrieved context.
 
221
  Args:
222
  state (Dict): The current state of the workflow, containing the response and context.
 
223
  Returns:
224
  Dict: The updated state with the groundedness score.
225
  """
 
228
  system_message = '''You are evaluating whether an AI-generated response is grounded in the retrieved context
229
  provided from biblical texts (such as the CSB Bible) and the book *The Purpose Driven Life* by Rick Warren.
230
  The context includes scripture, commentary, and theological content.
 
231
  Your task is to assign a groundedness score between 0 and 1, where:
 
232
  - 1.0 means the response is fully supported by the context,
233
  - 0.0 means the response is entirely unsupported.
 
234
  Be strict: if even a part of the response is not traceable to the context, reduce the score. Provide only
235
  the numeric score.'''
236
 
 
256
  def check_precision(state: Dict) -> Dict:
257
  """
258
  Checks whether the response precisely addresses the userโ€™s query.
 
259
  Args:
260
  state (Dict): The current state of the workflow, containing the query and response.
 
261
  Returns:
262
  Dict: The updated state with the precision score.
263
  """
264
  print("---------check_precision---------")
265
  system_message = '''You are assessing whether an AI-generated response precisely answers the user's query,
266
  within the domain of biblical interpretation and spiritual guidance drawn from the Bible and *The Purpose Driven Life*.
 
267
  Provide a precision score between 0 and 1:
 
268
  - 1.0: The response fully and directly answers the query with clear relevance.
269
  - 0.0: The response is vague, unrelated, or fails to address the query.
 
270
  Only return a numeric score.'''
271
 
272
  precision_prompt = ChatPromptTemplate.from_messages([
 
291
  def refine_response(state: Dict) -> Dict:
292
  """
293
  Suggests improvements for the generated response.
 
294
  Args:
295
  state (Dict): The current state of the workflow, containing the query and response.
 
296
  Returns:
297
  Dict: The updated state with response refinement suggestions.
298
  """
 
323
  def refine_query(state: Dict) -> Dict:
324
  """
325
  Suggests improvements for the expanded query.
 
326
  Args:
327
  state (Dict): The current state of the workflow, containing the query and expanded query.
 
328
  Returns:
329
  Dict: The updated state with query refinement suggestions.
330
  """
 
489
  def filter_input_with_llama_guard(user_input, model="meta-llama/llama-guard-4-12b"):
490
  """
491
  Filters user input using Llama Guard to ensure it is safe.
 
492
  Parameters:
493
  - user_input: The input provided by the user.
494
  - model: The Llama Guard model to be used for filtering (default is "meta-llama/llama-guard-4-12b").
 
495
  Returns:
496
  - The filtered and safe input.
497
  """
 
519
  self.memory = MemoryClient(api_key=os.environ.get("MEM0_API_KEY")) # Complete the code to define the memory client API key
520
 
521
  # Initialize the OpenAI client using the provided credentials
522
+ self.client = ChatOpenAI(
523
+ model_name="gpt-4o-mini", # Specify the model to use (e.g., GPT-4 optimized version)
524
+ api_key=os.environ.get("API_KEY"), # API key for authentication
525
+ openai_api_base = os.environ.get("OPENAI_API_BASE"),
526
+ temperature=0 # Controls randomness in responses; 0 ensures deterministic results
 
 
 
 
 
 
 
 
 
 
 
 
527
  )
528
 
 
 
 
 
 
 
529
  # Define tools available to the chatbot, such as web search
530
  tools = [agentic_rag]
531
 
532
  # Define the system prompt to set the behavior of the chatbot
533
  system_prompt = """You are a compassionate and knowledgeable Spiritual Assistant.
534
  Your purpose is to help users explore biblical teachings and spiritual insights, drawing only from the Bible and *The Purpose Driven Life* by Rick Warren.
 
535
  Guidelines for Interaction:
536
  - Maintain a respectful, thoughtful, and non-judgmental tone at all times.
537
  - Ground every response in scripture or the provided spiritual context โ€” never speculate or invent theology.
 
540
  - When possible, help the user reflect on how biblical principles can apply to personal growth, purpose, and everyday life.
541
  - Avoid doctrinal debates or denominational bias โ€” focus on shared themes of purpose, love, faith, and spiritual growth.
542
  - If you cannot answer based on the given sources, humbly acknowledge the limitation and suggest scripture or topics the user might explore further.
 
543
  Your goal is to walk alongside users on their spiritual journey, offering encouragement, insight, and biblical grounding.
544
  """
545
 
 
560
  def store_customer_interaction(self, user_id: str, message: str, response: str, metadata: Dict = None):
561
  """
562
  Store customer interaction in memory for future reference.
 
563
  Args:
564
  user_id (str): Unique identifier for the customer.
565
  message (str): Customer's query or message.
 
590
  def get_relevant_history(self, user_id: str, query: str) -> List[Dict]:
591
  """
592
  Retrieve past interactions relevant to the current query.
 
593
  Args:
594
  user_id (str): Unique identifier for the customer.
595
  query (str): The customer's current query.
 
596
  Returns:
597
  List[Dict]: A list of relevant past interactions.
598
  """
 
606
  def handle_customer_query(self, user_id: str, query: str) -> str:
607
  """
608
  Process a customer's query and provide a response, taking into account past interactions.
 
609
  Args:
610
  user_id (str): Unique identifier for the customer.
611
  query (str): Customer's query.
 
612
  Returns:
613
  str: Chatbot's response.
614
  """
 
630
  prompt = f"""
631
  Context:
632
  {context}
 
633
  Current customer query: {query}
 
634
  Provide a helpful response that takes into account any relevant past interactions.
635
  """
636
 
 
679
  st.session_state.chat_history.append({
680
  "role": "assistant",
681
  "content": f"""๐Ÿ‘‹ **Welcome ใ‚ˆใ†ใ“ใ, {user_id}!**
 
682
  How can I guide you in your spiritual path today?
683
  ไปŠๆ—ฅใ€ใ‚ใชใŸใฎ็ฒพ็ฅž็š„ใช้“ใ‚’ใฉใฎใ‚ˆใ†ใซๅฐŽใใ“ใจใŒใงใใ‚‹ใงใ—ใ‚‡ใ†ใ‹?
 
684
  ---
 
685
  ๐Ÿ”’ **Privacy Notice:**
686
  User questions remains private.
687
  All processing occurs **within the current session**.
 
738
  st.session_state.chat_history.append({"role": "assistant", "content": inappropriate_msg})
739
 
740
  if __name__ == "__main__":
741
+ spritual_assistant_streamlit()