# Demo script: walkthrough of the AI guardrail's intent-classification step.
# To see the new AI guardrail in action, trace how a query is processed in `agent/src/main.py`.
# This logic is implemented as a pre-processing step before the main RAG chain.
# 1. Define a query that seeks a ruling without using obvious keywords like 'fatwa'.
# Sample query phrased as a personal request for a ruling (no 'fatwa' keyword).
ruling_query = "Is it permissible for me to combine my prayers while traveling?"
# Contrast query: asks about the same topic but seeks information, not a ruling.
info_query = "What are the conditions for combining prayers while traveling?"
# 2. The agent first passes the query to the new intent classification function.
# This function uses a few-shot prompt to ask the LLM to classify the intent.
#
# PROMPT (Simplified):
# Classify the user's intent as 'ruling_seeking' or 'information_seeking'.
#
# User: "What is the fatwa on fasting on Fridays?"
# Intent: ruling_seeking
#
# User: "Can you explain the historical context of fasting on Fridays?"
# Intent: information_seeking
#
# User: "{user_query}"
# Intent:
def classify_intent(query):
    """Simulate the LLM-based intent classifier.

    Stands in for the Ollama/Qwen call driven by the few-shot prompt
    described above: returns 'ruling_seeking' for queries that read like
    a personal request for a religious ruling, and 'information_seeking'
    for everything else.
    """
    # The real implementation would send the prompt to the model; here the
    # model's expected answer is simulated with a simple phrase check.
    seeks_ruling = "permissible for me" in query
    return "ruling_seeking" if seeks_ruling else "information_seeking"
# 3. Run the classifier on the sample ruling query and report the result.
intent = classify_intent(ruling_query)
print(f"Query: '{ruling_query}'")
print(f"Detected Intent: {intent}")

# 4. Route on the detected intent. A 'ruling_seeking' query trips the
# guardrail: the RAG chain is never invoked and a canned refusal is
# returned instead; execution of the pipeline ends there.
if intent != "ruling_seeking":
    # 'information_seeking' queries fall through to normal RAG processing.
    print("Response: 'Proceeding to RAG chain to find relevant information...'")
    # rag_chain.invoke(query)
else:
    # Guardrail trip: bypass the RAG chain entirely with a predefined reply.
    print("Response: 'I cannot provide religious rulings. Please consult a qualified scholar.'")